| branch_name | text | directory_id | languages | num_files | repo_language | repo_name | revision_id | snapshot_id |
|---|---|---|---|---|---|---|---|---|
| stringclasses (149 values) | stringlengths (23-89.3M) | stringlengths (40) | listlengths (1-19) | int64 (1-11.8k) | stringclasses (38 values) | stringlengths (6-114) | stringlengths (40) | stringlengths (40) |

branch_name: refs/heads/main
repo_name: SDAR30/Pursuit-Core-Web-React-State-Lab-Tested

<file_sep>/src/App.js
import React, {useState} from "react";
import "./App.css";
const App = () => {
const [score, setScore] = useState(0);
const [power, setPower] = useState(1);
const increment = () => {
setScore(score + power);
}
const increaseValue = () => {
if (score >= 10) {
setScore(score-10);
setPower((prevCount) => prevCount + 1)
} else {
alert("You can't afford that!")
}
}
const resetGame = () => {
setScore(0);
setPower(1);
}
if (score < 100) {
return (<>
<h1>Current Score: {score}</h1>
<button onClick={increment}>+{power}</button>
<br></br>
<br></br>
<button onClick={increaseValue}>Pay 10 points to change from +{power} to +{power + 1}</button>
</>)
} else {
return (<>
<h1>Current Score: {score}</h1>
<h2>You Win!</h2>
<button onClick={resetGame}>Play again?</button>
</>)
}
}
export default App;
// import React from "react";
// import "./App.css";
// class App extends React.Component {
// state = { score: 0, incrementValue: 1 }
// increment = () => {
// const { score, incrementValue } = this.state;
// this.setState({ score: score + incrementValue })
// }
// increaseValue = () => {
// const { score, incrementValue } = this.state;
// if (score >= 10) {
// this.setState({
// score: score - 10, incrementValue: incrementValue + 1
// })
// } else {
// alert("You can't afford that!")
// }
// }
// resetGame = () => {
// const { score, incrementValue } = this.state;
// this.setState({ score: 0, incrementValue: 1 })
// }
// render() {
// const { score, incrementValue } = this.state;
// if (score < 100) {
// return (<>
// <h1>Current Score: {score}</h1>
// <button onClick={this.increment}>+{incrementValue}</button>
// <br></br>
// <br></br>
// <button onClick={this.increaseValue}>Pay 10 points to change from +{incrementValue} to +{incrementValue + 1}</button>
// </>)
// } else {
// return (<>
// <h1>Current Score: {score}</h1>
// <h2>You Win!</h2>
// <button onClick={this.resetGame}>Play again?</button>
// </>)
// }
// }
// }
// export default App;
directory_id: 03162f3b04343a224f3dfb91b1ec4fa0abfb6773
languages: ["JavaScript"] | num_files: 1 | repo_language: JavaScript
repo_name: SDAR30/Pursuit-Core-Web-React-State-Lab-Tested
revision_id: c485527f5f0bb089a8dd94747ee2f065441a347c
snapshot_id: f9d324c34ba12fac5d42b7c5c441a205b34a496d

branch_name: refs/heads/master
<file_sep>#define base_code_format "lea edi, dword ptr ds:[%s+0%X]\r\n\
mov byte ptr ds:[edi],0E9\r\n\
call @cert_replace_end\r\n\
%s\r\n\
@cert_replace_end:\r\n\
pop ebx\r\n\
sub ebx,edi\r\n\
lea ebx, dword ptr ds:[ebx-5]\r\n\
mov dword ptr ds:[edi+1],ebx\r\n\
lea edi, dword ptr ds:[%s+0%X]\r\n\
mov word ptr ds:[edi],0B890\r\n\
mov dword ptr ds:[edi+2],0%s"
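/* base_code_format and the variants below are sprintf-style templates (the %s
   and %X placeholders are filled in at runtime) that generate assembly source
   for the certificate-replacement hook: a JMP (0xE9) is written at the hooked
   address and the @cert_replace stub copies the replacement certificate bytes
   over the original with REP MOVS. */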
#define base_code_format2 "lea edi, dword ptr ds:[%s+0%X]\r\n\
mov byte ptr ds:[edi],0E9\r\n\
call @cert_replace_end\r\n\
%s\r\n\
@cert_replace_end:\r\n\
pop ebx\r\n\
sub ebx,edi\r\n\
lea ebx, dword ptr ds:[ebx-5]\r\n\
mov dword ptr ds:[edi+1],ebx\r\n"
#define repl_code_format2 "cmp dword ptr ds:[eax],0%s\r\n\
je short @do_job\r\n\
retn\r\n\
@do_job:\r\n\
pushad\r\n\
lea edi,dword ptr ds:[eax+0%s]\r\n\
call @f\r\n\
\"%s\\0\"\r\n\
@@:\r\n\
pop esi\r\n\
mov ecx,%X\r\n\
rep movs byte ptr es:[edi],byte ptr ds:[esi]\r\n\
popad\r\n\
retn"
#define repl_code_format "cmp dword ptr ds:[eax],0%s\r\n\
je short @do_job\r\n\
retn\r\n\
@do_job:\r\n\
pushad\r\n\
mov byte ptr ds:[eax+0%X],%s\r\n\
lea edi,dword ptr ds:[eax+0%s]\r\n\
call @f\r\n\
\"%s\\0\"\r\n\
@@:\r\n\
pop esi\r\n\
mov ecx,%X\r\n\
rep movs byte ptr es:[edi],byte ptr ds:[esi]\r\n\
popad\r\n\
retn"
<file_sep>#include "VersionFind_global.h"
/**********************************************************************
* Functions
*********************************************************************/
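/* Each VF_Find* helper below scans the supplied buffer for a hard-coded byte
   signature (listed in the comment next to each loop) and returns the match
   offset, or 0 if the signature is not found. Note that the comparisons may
   read slightly past 'size' when matching near the end of the buffer. */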
unsigned int VF_FindUsbPattern(BYTE* d, unsigned int size)
{
for(unsigned int i = 0; i < size; i++) //55534220646576696365
if(d[i] == 0x55 and d[i + 1] == 0x53 and d[i + 2] == 0x42 and d[i + 3] == 0x20 and d[i + 4] == 0x64 and d[i + 5] == 0x65 and d[i + 6] == 0x76 and d[i + 7] == 0x69 and d[i + 8] == 0x63 and d[i + 9] == 0x65)
{
while(d[i] != 0)
i--;
return i + 1;
}
return 0;
}
unsigned int VF_FindAnd20Pattern(BYTE* d, unsigned int size)
{
for(unsigned int i = 0; i < size; i++) //83E?20
if(d[i] == 0x83 and (d[i + 1] >> 4) == 0x0E and d[i + 2] == 0x20)
return i;
return 0;
}
unsigned int VF_Find40000Pattern(BYTE* d, unsigned int size)
{
for(unsigned int i = 0; i < size; i++) //00000400
if(d[i] == 0x00 and d[i + 1] == 0x00 and d[i + 2] == 0x04 and d[i + 3] == 0x00)
return i;
return 0;
}
unsigned int VF_FindShrPattern(BYTE* d, unsigned int size)
{
for(unsigned int i = 0; i < size; i++) //C1E?0?
if(d[i] == 0xC1 and (d[i + 1] >> 4) == 0x0E and (d[i + 2] >> 4) == 0x00)
return i;
return 0;
}
bool VF_IsMinimalProtection(char* szFileName, ULONG_PTR va, long parSectionNumber)
{
int offset = GetPE32Data(szFileName, parSectionNumber, UE_SECTIONRAWOFFSET);
BYTE firstbytes[2] = {0};
memcpy(firstbytes, (void*)(va + offset), 2);
if(firstbytes[0] == 0x60 and firstbytes[1] == 0xE8)
return false;
return true;
}
void VF_FatalError(const char* szMessage, cbErrorMessage ErrorMessageCallback)
{
ErrorMessageCallback((char*)szMessage, (char*)"Fatal Error!");
StopDebug();
}
unsigned int VF_FindarmVersion(BYTE* d, unsigned int size)
{
for(unsigned int i = 0; i < size; i++) //3C61726D56657273696F6E (<armVersion)
if(d[i] == 0x3C and d[i + 1] == 0x61 and d[i + 2] == 0x72 and d[i + 3] == 0x6D and d[i + 4] == 0x56 and d[i + 5] == 0x65 and d[i + 6] == 0x72 and d[i + 7] == 0x73 and d[i + 8] == 0x69 and d[i + 9] == 0x6F and d[i + 10] == 0x6E)
{
while(d[i] != 0)
i--;
return i + 1;
}
return 0;
}
unsigned int VF_FindPushAddr(BYTE* d, unsigned int size, unsigned int addr)
{
BYTE b[4] = {0};
memcpy(b, &addr, 4);
for(unsigned int i = 0; i < size; i++) //68XXXXXXXX
if(d[i] == 0x68 and d[i + 1] == b[0] and d[i + 2] == b[1] and d[i + 3] == b[2] and d[i + 4] == b[3])
return i;
return 0;
}
<file_sep>#define base_code_format "\0lea edi, dword ptr ds:[%s+0%X]\r\n\
mov byte ptr ds:[edi],0E9\r\n\
lea ebx, dword ptr es:[@cert_replace]\r\n\
sub ebx,edi\r\n\
lea ebx, dword ptr ds:[ebx-5]\r\n\
mov dword ptr ds:[edi+1],ebx\r\n\
lea edi, dword ptr ds:[%s+0%X]\r\n\
mov word ptr ds:[edi],0B890\r\n\
mov dword ptr ds:[edi+2],0%s"
#define base_code_format2 "\0lea edi, dword ptr ds:[%s+0%X]\r\n\
mov byte ptr ds:[edi],0E9\r\n\
lea ebx, dword ptr es:[@cert_replace]\r\n\
sub ebx,edi\r\n\
lea ebx, dword ptr ds:[ebx-5]\r\n\
mov dword ptr ds:[edi+1],ebx\r\n"
#define repl_code_format "\0@cert_replace:\r\n\
cmp dword ptr ds:[eax],0%s\r\n\
je @do_job\r\n\
retn\r\n\
@do_job:\r\n\
pushad\r\n\
lea edi,dword ptr ds:[eax+0%s]\r\n\
lea esi,dword ptr ds:[@public]\r\n\
mov ecx,%X\r\n\
rep movs byte ptr es:[edi],byte ptr ds:[esi]\r\n\
popad\r\n\
retn\r\n\
@public:\r\n\
\"%s\\0\""
#define repl_code_format2 "\0@cert_replace:\r\n\
cmp dword ptr ds:[eax],0%s\r\n\
je @do_job\r\n\
retn\r\n\
@do_job:\r\n\
pushad\r\n\
mov byte ptr ds:[eax+2],%s\r\n\
lea edi,dword ptr ds:[eax+0%s]\r\n\
lea esi,dword ptr ds:[@public]\r\n\
mov ecx,%X\r\n\
rep movs byte ptr es:[edi],byte ptr ds:[esi]\r\n\
popad\r\n\
retn\r\n\
@public:\r\n\
\"%s\\0\""
<file_sep>#ifndef __MAIN_H__
#define __MAIN_H__
#include <windows.h>
#include <commctrl.h>
#include <stdio.h>
#include "resource.h"
#include "format.h"
#ifdef BUILD_DLL
#define DLL_EXPORT __declspec(dllexport)
#else
#define DLL_EXPORT __declspec(dllimport)
#endif
#ifdef __cplusplus
extern "C"
{
#endif
const char* DLL_EXPORT PluginInfo(void);
void DLL_EXPORT PluginFunction(HINSTANCE hInst, HWND hwndDlg, const char* register_vp, const char* program_dir, unsigned int imagebase);
#ifdef __cplusplus
}
#endif
#endif
<file_sep>#ifndef _ABOUT_H
#define _ABOUT_H
#include "_global.h"
#define caption "Armadillo Key Tool v0.3a"
#define date_compile "Jan 2015"
BOOL CALLBACK DlgAbout(HWND hwndDlg, UINT uMsg, WPARAM wParam, LPARAM lParam);
#endif
<file_sep>#include "CertTool_global.h"
HWND CT_shared; //shared window handle
char CT_szFileName[256] = ""; //debugged program
char CT_szLogFile[256] = ""; //_cert.log file
char CT_szAktLogFile[256] = ""; //_cert.tpodt file
char CT_szCryptCertFile[256] = ""; //_cert.bin file
char CT_szRawCertFile[256] = ""; //_raw.cert file
char CT_szStolenKeysRaw[256] = ""; //_stolen.keys file
char CT_szStolenKeysLog[256] = ""; //_stolenkeys.log
bool CT_logtofile = true; //Create log files?
unsigned int CT_time1 = 0; //For duration calculation.
CERT_DATA* CT_cert_data;
void CT_FatalError(const char* msg)
{
MessageBoxA(CT_shared, msg, "Fatal Error!", MB_ICONERROR);
StopDebug();
}
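// Computes the next certificate seed: splits 'data' into its low/high 4-digit
// halves, mixes them with the constants 3141 and 5821, and recombines modulo 1e8.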
int CT_NextSeed(int data)
{
int a = data % 10000;
int res;
res = 10000 * ((3141 * a + (data / 10000) * 5821) % 10000u);
return (a * 5821 + res + 1) % 100000000u;
}
unsigned int CT_FindCertificateFunctionOld(BYTE* d, unsigned int size)
{
for(unsigned int i = 0; i < size; i++) //8B4424048B5424088B0883C004890AC3
if(d[i] == 0x8B and d[i + 1] == 0x44 and d[i + 2] == 0x24 and d[i + 3] == 0x04 and d[i + 4] == 0x8B and d[i + 5] == 0x54 and d[i + 6] == 0x24 and d[i + 7] == 0x08 and d[i + 8] == 0x8B and d[i + 9] == 0x08 and d[i + 10] == 0x83 and d[i + 11] == 0xC0 and d[i + 12] == 0x04 and d[i + 13] == 0x89 and d[i + 14] == 0x0A and d[i + 15] == 0xC3)
return i + 15;
return 0;
}
unsigned int CT_FindCertificateFunctionNew(BYTE* d, unsigned int size)
{
for(unsigned int i = 0; i < size; i++) //558BEC8B450C8B4D088B1189108B450883C0045DC3
if(d[i] == 0x55 and d[i + 1] == 0x8B and d[i + 2] == 0xEC and d[i + 3] == 0x8B and d[i + 4] == 0x45 and d[i + 5] == 0x0C and d[i + 6] == 0x8B and d[i + 7] == 0x4D and d[i + 8] == 0x08 and d[i + 9] == 0x8B and d[i + 10] == 0x11 and d[i + 11] == 0x89 and d[i + 12] == 0x10 and d[i + 13] == 0x8B and d[i + 14] == 0x45 and d[i + 15] == 0x08 and d[i + 16] == 0x83 and d[i + 17] == 0xC0 and d[i + 18] == 0x04 and d[i + 19] == 0x5D and d[i + 20] == 0xC3)
return i + 20;
return 0;
}
unsigned int CT_FindCertificateMarkers(BYTE* d, unsigned int size)
{
for(unsigned int i = 0; i < size; i++) //002D2A00
if(d[i] == 0x00 and d[i + 1] == 0x2D and d[i + 2] == 0x2A and d[i + 3] == 0x00)
return i;
return 0;
}
unsigned int CT_FindCertificateMarkers2(BYTE* d, unsigned int size)
{
for(unsigned int i = 0; i < size; i++) //002B2A00
if(d[i] == 0x00 and d[i + 1] == 0x2B and d[i + 2] == 0x2A and d[i + 3] == 0x00)
return i;
return 0;
}
unsigned int CT_FindCertificateEndMarkers(BYTE* mem_addr, unsigned int size)
{
for(unsigned int i = 0; i < size; i++)
{
if(mem_addr[i] == 0x00 and mem_addr[i + 1] == 0x00 and mem_addr[i + 2] == 0x00)
return i;
}
return 0;
}
unsigned int CT_FindMagicPattern(BYTE* d, unsigned int size, unsigned int* ebp_sub)
{
for(unsigned int i = 0; i < size; i++) //8813000089
if(d[i] == 0x88 and d[i + 1] == 0x13 and d[i + 2] == 0x00 and d[i + 3] == 0x00 and d[i + 4] == 0x89)
{
unsigned char ebp_sub1 = d[i + 6];
if(ebp_sub1 > 0x7F)
*ebp_sub = 0x100 - ebp_sub1;
else
*ebp_sub = 0 - ebp_sub1;
return i + 7;
}
return 0;
}
unsigned int CT_FindEndInitSymVerifyPattern(BYTE* d, unsigned int size)
{
for(unsigned int i = 0; i < size; i++) //00010000
if(d[i] == 0x00 and d[i + 1] == 0x01 and d[i + 2] == 0x00 and d[i + 3] == 0x00)
return i;
return 0;
}
unsigned int CT_FindPubMd5MovePattern(BYTE* d, unsigned int size)
{
for(unsigned int i = 0; i < size; i++) //8B????????00
if(d[i] == 0x8B and d[i + 5] == 0x00)
return i;
return 0;
}
unsigned int CT_FindDecryptKey1Pattern(BYTE* d, unsigned int size) //C++ function to search bytes
{
for(unsigned int i = 0; i < size; i++) //E9????????6800040000
if(d[i] == 0xE9 and d[i + 5] == 0x68 and d[i + 6] == 0x00 and d[i + 7] == 0x04 and d[i + 8] == 0x00 and d[i + 9] == 0x00)
return i;
return 0;
}
unsigned int CT_FindMagicJumpPattern(BYTE* d, unsigned int size, unsigned short* data)
{
for(unsigned int i = 0; i < size; i++) //3B??74??8B
if(d[i] == 0x3B and d[i + 2] == 0x74 and d[i + 4] == 0x8B)
{
memcpy(data, d + i, 2);
return i;
}
return 0;
}
unsigned int CT_FindECDSAVerify(BYTE* d, unsigned int size)
{
for(unsigned int i = 0; i < size; i++) //51E8????????83C40CF7D81BC083C0015DC3
if(d[i] == 0x51 and d[i + 1] == 0xE8 and d[i + 6] == 0x83 and d[i + 7] == 0xC4 and d[i + 8] == 0x0C and d[i + 9] == 0xF7 and d[i + 10] == 0xD8 and d[i + 11] == 0x1B and d[i + 12] == 0xC0 and d[i + 13] == 0x83 and d[i + 14] == 0xC0 and d[i + 15] == 0x01 and d[i + 16] == 0x5D and d[i + 17] == 0xC3)
return i;
return 0;
}
unsigned int CT_FindPushFFPattern(BYTE* d, unsigned int size)
{
for(unsigned int i = 0; i < size; i++) //6AFF
if(d[i] == 0x6A and d[i + 1] == 0xFF)
return i;
return 0;
}
unsigned int CT_FindTeaDecryptPattern(BYTE* d, unsigned int size)
{
for(unsigned int i = 0; i < size; i++) //E8????????83
if(d[i] == 0xE8 and d[i + 5] == 0x83)
return i;
return 0;
}
unsigned int CT_FindNextDwordPattern(BYTE* d, unsigned int size) //TODO: never used
{
for(unsigned int i = 0; i < size; i++) //558BEC??????????????????????????????045DC3
if(d[i] == 0x55 and d[i + 1] == 0x8B and d[i + 2] == 0xEC and d[i + 18] == 0x04 and d[i + 19] == 0x5D and d[i + 20] == 0xC3)
return i + 20;
return 0;
}
unsigned int CT_FindReturnPattern(BYTE* d, unsigned int size)
{
for(unsigned int i = 0; i < size; i++) //5DC[2/3]
if(d[i] == 0x5D and (d[i + 1] == 0xC2 or d[i + 1] == 0xC3))
return i + 1;
return 0;
}
unsigned int CT_FindReturnPattern2(BYTE* d, unsigned int size)
{
for(unsigned int i = 0; i < size; i++) //C3
if(d[i] == 0xC3)
return i;
return 0;
}
unsigned int CT_FindPush100Pattern(BYTE* d, unsigned int size)
{
for(unsigned int i = 0; i < size; i++) //6800010000
if(d[i] == 0x68 and d[i + 1] == 0x00 and d[i + 2] == 0x01 and d[i + 3] == 0x00 and d[i + 4] == 0x00)
return i;
return 0;
}
unsigned int CT_FindCall1Pattern(BYTE* d, unsigned int size)
{
for(unsigned int i = 0; i < size; i++) //E8????????88
if(d[i] == 0xE8 and d[i + 5] == 0x88)
return i;
return 0;
}
unsigned int CT_FindCall2Pattern(BYTE* d, unsigned int size)
{
for(unsigned int i = 0; i < size; i++) //E8
if(d[i] == 0xE8)
return i;
return 0;
}
unsigned int CT_FindAndPattern1(BYTE* d, unsigned int size)
{
for(unsigned int i = 0; i < size; i++) //83E???03
if(d[i] == 0x83 and (d[i + 1] >> 4) == 0x0E and d[i + 3] == 0x03)
return i + 3;
return 0;
}
unsigned int CT_FindAndPattern2(BYTE* d, unsigned int size)
{
for(unsigned int i = 0; i < size; i++) //81E?????????03
if(d[i] == 0x81 and (d[i + 1] >> 4) == 0x0E and d[i + 6] == 0x03)
return i + 5;
return 0;
}
unsigned int CT_FindStdcallPattern(BYTE* d, unsigned int size)
{
for(unsigned int i = 0; i < size; i++) //E8????????83
if(d[i] == 0xE8 and d[i + 5] == 0x83)
return i;
return 0;
}
unsigned int CT_FindVerifySymPattern(BYTE* d, unsigned int size)
{
for(unsigned int i = 0; i < size; i++) //F7
if(d[i] == 0xF7)
return i;
return 0;
}
unsigned int CT_FindEndLoopPattern(BYTE* d, unsigned int size)
{
for(unsigned int i = 0; i < size; i++) //E9????????8B????89
if(d[i] == 0xE9 and d[i + 5] == 0x8B and d[i + 8] == 0x89)
return i + 5;
return 0;
}
directory_id: d4f73aea866a2cd98fd01f64366eabe8ccea51e9
languages: ["C", "C++"] | num_files: 6 | repo_language: C
repo_name: ZhuHuiBeiShaDiao/akt
revision_id: b7751486e01df2d7249468a563babdb37f827999
snapshot_id: 92f23b507a7cb33c7799bcbf55a85be97b7f3eab

branch_name: refs/heads/master
repo_name: Mina-Nabil/carCatalog

<file_sep>/database/seeds/UserSeeder.php
<?php
use Illuminate\Database\Seeder;
use Illuminate\Support\Facades\DB;
class UserSeeder extends Seeder
{
/**
* Run the database seeds.
*
* @return void
*/
public function run()
{
DB::table('dash_users')->insert([
"DASH_USNM" => "mina",
"DASH_FLNM" => "<NAME>",
"DASH_PASS" => bcrypt('<PASSWORD>'),
"DASH_TYPE_ID" => 1,
]);
}
}
<file_sep>/database/migrations/2021_01_20_210728_create_loans_tables.php
<?php
use Illuminate\Database\Migrations\Migration;
use Illuminate\Database\Schema\Blueprint;
use Illuminate\Support\Facades\DB;
use Illuminate\Support\Facades\Schema;
class CreateLoansTables extends Migration
{
/**
* Run the migrations.
*
* @return void
*/
public function up()
{
Schema::create('downpayments', function (Blueprint $table) {
$table->id();
$table->decimal("DOWN_VLUE")->unique();
});
DB::table('downpayments')->insert([
['DOWN_VLUE' => "20"], ['DOWN_VLUE' => "30"], ['DOWN_VLUE' => "35"],['DOWN_VLUE' => "40"],['DOWN_VLUE' => "45"], ['DOWN_VLUE' => "50"], ['DOWN_VLUE' => "60"], ['DOWN_VLUE' => "70"],
]);
Schema::create('banks', function (Blueprint $table) {
$table->id();
$table->string("BANK_NAME")->unique();
$table->decimal("BANK_EXPN");
});
Schema::create('insurances', function (Blueprint $table) {
$table->id();
$table->string("INSR_NAME")->unique();
$table->decimal("INSR_VLUE");
});
Schema::create('plans', function (Blueprint $table) {
$table->id();
$table->foreignId("PLAN_DOWN_ID")->constrained("downpayments");
$table->foreignId("PLAN_BANK_ID")->constrained("banks");
$table->string("PLAN_YEAR");
$table->decimal("PLAN_INTR");
$table->tinyInteger('PLAN_INSR');
$table->tinyInteger('PLAN_EMPL');
});
}
/**
* Reverse the migrations.
*
* @return void
*/
public function down()
{
Schema::dropIfExists('plans');
Schema::dropIfExists('insurances');
Schema::dropIfExists('banks');
Schema::dropIfExists('downpayments');
}
}
<file_sep>/app/Models/Car.php
<?php
namespace App\Models;
use DateTime;
use Illuminate\Database\Eloquent\Model;
class Car extends Model
{
protected $table = "cars";
public $timestamps = true;
protected $appends = array('image');
protected $image;
protected $casts = [
'CAR_OFFR' => 'datetime:Y-m-d',
'CAR_TRND' => 'datetime:Y-m-d',
'created_at' => 'datetime:d-M-Y H:i',
'updated_at' => 'datetime:d-M-Y H:i',
];
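// Accessor for the appended "image" attribute: returns (and caches) the car
// image with the highest CIMG_VLUE, falling back to the parent model's
// MODL_IMGE when the car has no images of its own.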
public function getImageAttribute()
{
if (isset($this->image)) return $this->image;
$mainImage = $this->images()->orderByDesc('CIMG_VLUE')->first();
if ($mainImage) {
$this->image = $mainImage->CIMG_URL;
return $mainImage->CIMG_URL;
} else {
$this->image = $this->model->MODL_IMGE ?? null;
}
return $this->image;
}
public function model()
{
return $this->belongsTo('App\Models\CarModel', 'CAR_MODL_ID');
}
public function accessories()
{
return $this->belongsToMany('App\Models\Accessories', "accessories_cars", "ACCR_CAR_ID", "ACCR_ACSR_ID")
->withPivot('ACCR_VLUE');
}
public function getAccessories()
{
return $this->join('accessories_cars', 'cars.id', '=', 'ACCR_CAR_ID')
->join('accessories', 'ACCR_ACSR_ID', '=', 'accessories.id')
->select('ACCR_VLUE', 'ACCR_ACSR_ID', 'ACCR_CAR_ID', 'ACSR_NAME', 'ACSR_ARBC_NAME')
->where('ACCR_CAR_ID', $this->id)
->get();
}
public function images()
{
return $this->hasMany('App\Models\CarImage', 'CIMG_CAR_ID');
}
public function getFullAccessoriesArray()
{
//Accessories table
$allAccessories = Accessories::all();
$carAccessories = $this->getAccessories()->pluck('ACCR_VLUE', 'ACCR_ACSR_ID')->toArray();
$accessories = [];
foreach ($allAccessories as $accessory) {
if (key_exists($accessory->id, $carAccessories)) {
$accessories[$accessory->id] = ['ACSR_ARBC_NAME' => $accessory->ACSR_ARBC_NAME, 'ACSR_NAME' => $accessory->ACSR_NAME, 'isAvailable' => true, 'ACCR_VLUE' => $carAccessories[$accessory->id]];
} else {
$accessories[$accessory->id] = ['ACSR_ARBC_NAME' => $accessory->ACSR_ARBC_NAME, 'ACSR_NAME' => $accessory->ACSR_NAME, 'isAvailable' => false];
}
}
return $accessories;
}
public function toggleOffer()
{
if (isset($this->CAR_OFFR)) {
$this->CAR_OFFR = null;
if ($this->save()) return 0;
} else {
$this->CAR_OFFR = new DateTime();
if ($this->save()) return 1;
}
}
public function toggleTrending()
{
if (isset($this->CAR_TRND)) {
$this->CAR_TRND = null;
if ($this->save()) return 0;
} else {
$this->CAR_TRND = new DateTime();
if ($this->save()) return 1;
}
}
}
<file_sep>/database/migrations/2020_12_03_111216_create_models_table.php
<?php
use Illuminate\Database\Migrations\Migration;
use Illuminate\Database\Schema\Blueprint;
use Illuminate\Support\Facades\Schema;
class CreateModelsTable extends Migration
{
/**
* Run the migrations.
*
* @return void
*/
public function up()
{
Schema::create('models', function (Blueprint $table) {
$table->id();
$table->string('MODL_NAME');
$table->string('MODL_YEAR');
$table->foreignId("MODL_BRND_ID")->constrained("brands");
$table->foreignId("MODL_TYPE_ID")->constrained("types");
$table->string('MODL_ARBC_NAME')->nullable();
$table->text('MODL_OVRV')->nullable();
$table->string('MODL_BRCH')->nullable();
$table->tinyInteger('MODL_MAIN')->default(0);
$table->string('MODL_IMGE')->nullable(); //home page car png 346 * 224
$table->tinyInteger('MODL_ACTV')->default(1);
});
}
/**
* Reverse the migrations.
*
* @return void
*/
public function down()
{
Schema::dropIfExists('models');
}
}
<file_sep>/database/seeds/sections.php
<?php
use Illuminate\Database\Seeder;
use Illuminate\Support\Facades\DB;
class sections extends Seeder
{
/**
* Run the database seeds.
*
* @return void
*/
public function run()
{
DB::table('maindata')->delete();
DB::table('home_sections')->delete();
DB::table('home_sections')->insert([
'id' => 1,
'SECT_NAME' => 'Header',
'SECT_ACTV' => 1
]);
DB::table('home_sections')->insert([
'id' => 2,
'SECT_NAME' => 'Landing Image',
'SECT_ACTV' => 1
]);
DB::table('home_sections')->insert([
'id' => 3,
'SECT_NAME' => 'Top Models',
'SECT_ACTV' => 1
]);
DB::table('home_sections')->insert([
'id' => 4,
'SECT_NAME' => 'Top Car Types',
'SECT_ACTV' => 1
]);
DB::table('home_sections')->insert([
'id' => 5,
'SECT_NAME' => 'Logo bar - Partners',
'SECT_ACTV' => 1
]);
DB::table('home_sections')->insert([
'id' => 6,
'SECT_NAME' => 'Showroom stats',
'SECT_ACTV' => 1
]);
DB::table('home_sections')->insert([
'id' => 7,
'SECT_NAME' => 'Offers',
'SECT_ACTV' => 1
]);
DB::table('home_sections')->insert([
'id' => 8,
'SECT_NAME' => 'Trending cars',
'SECT_ACTV' => 1
]);
DB::table('home_sections')->insert([
'id' => 9,
'SECT_NAME' => 'Customers',
'SECT_ACTV' => 1
]);
}
}
<file_sep>/app/Http/Controllers/ModelsController.php
<?php
namespace App\Http\Controllers;
use App\Models\Brand;
use App\Models\Car;
use App\Models\CarModel;
use App\Models\CarType;
use App\Models\ModelImage;
use Illuminate\Http\Request;
use Illuminate\Validation\Rule;
class ModelsController extends Controller
{
protected $data;
protected $homeURL = 'admin/models/show';
protected $profileURL = 'admin/models/profile/';
public function profile($id)
{
$this->initProfileArr($id);
$this->initAddArr($id);
$this->data['formTitle'] = "Edit Model(" . $this->data['model']->MODL_NAME . ")";
$this->data['formURL'] = url("admin/models/update");
$this->data['imageFormURL'] = url("admin/models/add/image");
$this->data['updateImageInfoURL'] = url("admin/models/update/image");
$this->data['delImageUrl'] = url("admin/models/image/delete/");
$this->data['isCancel'] = false;
return view('models.profile', $this->data);
}
public function home()
{
$this->initDataArr();
return view('models.show', $this->data);
}
public function add()
{
$this->initAddArr();
$this->data['formTitle'] = "Add Car Model";
$this->data['formURL'] = "admin/models/insert";
$this->data['isCancel'] = false;
return view('models.add', $this->data);
}
public function insert(Request $request)
{
$request->validate([
"name" => "required",
"brand" => "required|exists:brands,id",
"type" => "required|exists:types,id",
"year" => "required",
"overview" => "required_if:isMain,on",
"image" => "required_if:isMain,on|file",
"background" => "required_if:isMain,on|image",
"pdf" => "required_if:isMain,on|mimes:pdf",
]);
$model = new CarModel();
$model->MODL_BRND_ID = $request->brand;
$model->MODL_TYPE_ID = $request->type;
$model->MODL_NAME = $request->name;
$model->MODL_ARBC_NAME = $request->arbcName;
$model->MODL_BRCH = $request->brochureCode;
$model->MODL_YEAR = $request->year;
$model->MODL_OVRV = $request->overview;
if ($request->hasFile('image')) {
$model->MODL_IMGE = $request->image->store('images/models/' . $model->MODL_NAME, 'public');
}
if ($request->hasFile('background')) {
$model->MODL_BGIM = $request->background->store('images/models/' . $model->MODL_NAME, 'public');
}
if ($request->hasFile('pdf')) {
$model->MODL_PDF = $request->pdf->store('images/models/' . $model->MODL_NAME, 'public');
}
$model->MODL_ACTV = $request->isActive == 'on' ? 1 : 0;
$model->MODL_MAIN = $request->isMain == 'on' ? 1 : 0;
$model->save();
return redirect($this->profileURL . $model->id);
}
public function update(Request $request)
{
$request->validate([
"id" => "required",
]);
$model = CarModel::findOrFail($request->id);
$request->validate([
"name" => "required",
"brand" => "required|exists:brands,id",
"type" => "required|exists:types,id",
"year" => "required",
"overview" => "required_if:isMain,on",
]);
if (is_null($model->MODL_IMGE) || $model->MODL_IMGE=="")
$request->validate([
"image" => "required_if:isMain,on|image",
]);
if (is_null($model->MODL_BGIM) || $model->MODL_BGIM=="")
$request->validate([
"background" => "required_if:isMain,on|image",
]);
if (is_null($model->MODL_PDF) || $model->MODL_PDF=="")
$request->validate([
"pdf" => "required_if:isMain,on|mimes:pdf",
]);
$model->MODL_BRND_ID = $request->brand;
$model->MODL_TYPE_ID = $request->type;
$model->MODL_NAME = $request->name;
$model->MODL_ARBC_NAME = $request->arbcName;
$model->MODL_BRCH = $request->brochureCode;
$model->MODL_YEAR = $request->year;
if ($request->hasFile('image')) {
$model->MODL_IMGE = $request->image->store('images/models/' . $model->MODL_NAME, 'public');
}
if ($request->hasFile('background')) {
$model->MODL_BGIM = $request->background->store('images/models/' . $model->MODL_NAME, 'public');
}
if ($request->hasFile('pdf')) {
$model->MODL_PDF = $request->pdf->store('images/models/' . $model->MODL_NAME, 'public');
}
$model->MODL_ACTV = $request->isActive == 'on' ? 1 : 0;
$model->MODL_MAIN = $request->isMain == 'on' ? 1 : 0;
$model->MODL_OVRV = $request->overview;
$model->save();
return redirect($this->profileURL . $model->id);
}
public function toggleMain($id)
{
$model = CarModel::findOrFail($id);
$model->toggleMain();
return back();
}
public function toggleActive($id)
{
$model = CarModel::findOrFail($id);
$model->toggleActive();
return back();
}
///////////images functions
public function attachImage(Request $request)
{
$request->validate([
"modelID" => "required|exists:models,id",
"photo" => "file",
'value' => 'required',
'color' => 'required',
]);
$model = CarModel::findOrFail($request->modelID);
$newImage = new ModelImage();
if ($request->hasFile('photo')) {
$newImage->MOIM_URL = $request->photo->store('images/models/' . $model->MODL_NAME, 'public');
}
$newImage->MOIM_MODL_ID = $request->modelID;
$newImage->MOIM_SORT = $request->value;
$newImage->MOIM_COLR = $request->color;
$newImage->save();
$newImage->compress();
return back();
}
public function delImage($id)
{
$image = ModelImage::findOrFail($id);
echo $image->deleteImage();
}
public function editImage(Request $request)
{
$request->validate([
"id" => "required",
'value' => 'required',
'color' => 'required',
]);
$image = ModelImage::findOrFail($request->id);
$image->MOIM_SORT = $request->value;
$image->MOIM_COLR = $request->color;
echo $image->save();
}
//////////////////// Data functions
private function initProfileArr($modelID)
{
$this->data['model'] = CarModel::with('cars', 'type', 'brand', 'colorImages')->findOrFail($modelID);
//Model Categories
$this->data['items'] = $this->data['model']->cars;
$this->data['title'] = "Available Categories";
$this->data['subTitle'] = "Check all Available Model categories";
$this->data['cols'] = ['Sort Value', 'Category', 'Price', 'Discount'];
$this->data['atts'] = [
'CAR_VLUE',
['dynamicUrl' => ['att' => 'CAR_CATG', 'val' => 'id', 'baseUrl' => 'admin/cars/profile/']],
['number' => ['att' => 'CAR_PRCE', 'decimals' => 0]],
['number' => ['att' => 'CAR_DISC', 'decimals' => 0]]
];
}
private function initDataArr()
{
$this->data['items'] = CarModel::orderBy('MODL_ACTV')->get();
$this->data['title'] = "Available Models";
$this->data['subTitle'] = "Check all Available Models";
$this->data['cols'] = ['Image', 'Name', 'Arabic', 'Year', 'Active', 'Main', 'Overview'];
$this->data['atts'] = [
['assetImg' => ['att' => 'MODL_IMGE']],
['dynamicUrl' => ['att' => 'MODL_NAME', 'val' => 'id', 'baseUrl' => 'admin/models/profile/']],
['dynamicUrl' => ['att' => 'MODL_ARBC_NAME', 'val' => 'id', 'baseUrl' => 'admin/models/profile/']],
'MODL_YEAR',
[
'toggle' => [
"att" => "MODL_ACTV",
"url" => "admin/models/toggle/active/",
"states" => [
"1" => "Active",
"0" => "Hidden",
],
"actions" => [
"1" => "hide the model",
"0" => "show the model",
],
"classes" => [
"1" => "label-success",
"0" => "label-danger",
],
]
],
[
'toggle' => [
"att" => "MODL_MAIN",
"url" => "admin/models/toggle/main/",
"states" => [
"1" => "True",
"0" => "False",
],
"actions" => [
"1" => "hide the model from home page",
"0" => "show the model on the home page, please make sure the model has an image and an overview",
],
"classes" => [
"1" => "label-info",
"0" => "label-warning",
],
]
],
['comment' => ['att' => 'MODL_OVRV', 'title' => 'Overview']]
];
$this->data['homeURL'] = $this->homeURL;
}
private function initAddArr()
{
$this->data['brands'] = Brand::all();
$this->data['types'] = CarType::all();
return view('models.add', $this->data);
}
}
<file_sep>/app/Models/Brand.php
<?php
namespace App\Models;
use Illuminate\Database\Eloquent\Model;
class Brand extends Model
{
protected $table = "brands";
public $timestamps = false;
function models()
{
return $this->hasMany('App\Models\CarModel', 'MODL_BRND_ID');
}
function cars()
{
return $this->hasManyThrough('App\Models\Car', 'App\Models\CarModel', 'MODL_BRND_ID', 'CAR_MODL_ID');
}
function toggle()
{
if ($this->BRND_ACTV == 0) {
if (isset($this->BRND_LOGO) && strlen($this->BRND_LOGO) > 0)
$this->BRND_ACTV = 1;
} else {
$this->BRND_ACTV = 0;
}
$this->save();
}
}
<file_sep>/database/seeds/maindata.php
<?php
use Illuminate\Database\Seeder;
use Illuminate\Support\Facades\DB;
class maindata extends Seeder
{
/**
* Run the database seeds.
*
* @return void
*/
public function run()
{
//MAIN_TYPE values: 1 = text, 2 = paragraph, 3 = image, 4 = "read more" button
DB::table('maindata')->insert([
'MAIN_SECT_ID' => 1 , //landing image
"MAIN_ITEM" => 'Logo',
"MAIN_TYPE" => 3 , // image
"MAIN_HINT" => "Logo size should be 153x43"
]);
DB::table('maindata')->insert([
'MAIN_SECT_ID' => 1 , //landing image
"MAIN_ITEM" => 'Default Header',
"MAIN_TYPE" => 3 , // image
"MAIN_HINT" => "Image size should be 1920x250"
]);
DB::table('maindata')->insert([
'MAIN_SECT_ID' => 2 , //landing image
"MAIN_ITEM" => 'Slide Title 1',
"MAIN_TYPE" => 1 , // string (text)
"MAIN_HINT" => "Header Title Shown in home page on the first slide"
]);
DB::table('maindata')->insert([
'MAIN_SECT_ID' => 2 , //landing image
"MAIN_ITEM" => 'Slide Subtitle 1',
"MAIN_TYPE" => 1 , // string (text)
"MAIN_HINT" => "Header Subtitle Text Shown in home page on the first slide under the title"
]);
DB::table('maindata')->insert([
'MAIN_SECT_ID' => 2 , //landing image
"MAIN_ITEM" => 'Slide Image 1',
"MAIN_TYPE" => 3 , // image
"MAIN_HINT" => "Header Background Image Shown in home page for the first slide -- 1920*830"
]);
DB::table('maindata')->insert([
'MAIN_SECT_ID' => 2 , //landing image
"MAIN_ITEM" => 'Slide Button 1',
"MAIN_TYPE" => 4 , // read-more button
"MAIN_HINT" => "First read more button -- enter the redirect url"
]);
DB::table('maindata')->insert([
'MAIN_SECT_ID' => 2 , //landing image
"MAIN_ITEM" => 'Slide Title 2',
"MAIN_TYPE" => 1 , // string (text)
"MAIN_HINT" => "Header Title Shown in home page on the second slide"
]);
DB::table('maindata')->insert([
'MAIN_SECT_ID' => 2 , //landing image
"MAIN_ITEM" => 'Slide Subtitle 2',
"MAIN_TYPE" => 1 , // string (text)
"MAIN_HINT" => "Header Subtitle Text Shown in home page on the second slide under the title"
]);
DB::table('maindata')->insert([
'MAIN_SECT_ID' => 2 , //landing image
"MAIN_ITEM" => 'Slide Image 2',
"MAIN_TYPE" => 3 , // image
"MAIN_HINT" => "Header Background Image Shown in home page for the second slide -- 1920*830"
]);
DB::table('maindata')->insert([
'MAIN_SECT_ID' => 2 , //landing image
"MAIN_ITEM" => 'Slide Button 2',
"MAIN_TYPE" => 4 , // read-more button
"MAIN_HINT" => "Second read more button -- enter the redirect url"
]);
DB::table('maindata')->insert([
'MAIN_SECT_ID' => 2 , //landing image
"MAIN_ITEM" => 'Slide Title 3',
"MAIN_TYPE" => 1 , // string (text)
"MAIN_HINT" => "Header Title Shown in home page on the third slide"
]);
DB::table('maindata')->insert([
'MAIN_SECT_ID' => 2 , //landing image
"MAIN_ITEM" => 'Slide Subtitle 3',
"MAIN_TYPE" => 1 , // string (text)
"MAIN_HINT" => "Header Subtitle Text Shown in home page on the third slide under the title"
]);
DB::table('maindata')->insert([
'MAIN_SECT_ID' => 2 , //landing image
"MAIN_ITEM" => 'Slide Image 3',
"MAIN_TYPE" => 3 , // image
"MAIN_HINT" => "Header Background Image Shown in home page for the third slide -- 1920*830"
]);
DB::table('maindata')->insert([
'MAIN_SECT_ID' => 2 , //landing image
"MAIN_ITEM" => 'Slide Button 3',
"MAIN_TYPE" => 4 , // read-more button
"MAIN_HINT" => "Third read more button -- enter the redirect url"
]);
DB::table('maindata')->insert([
'MAIN_SECT_ID' => 2 , //landing image
"MAIN_ITEM" => 'Slide Title 4',
"MAIN_TYPE" => 1 , // string (text)
"MAIN_HINT" => "Header Title Shown in home page on the fourth slide"
]);
DB::table('maindata')->insert([
'MAIN_SECT_ID' => 2 , //landing image
"MAIN_ITEM" => 'Slide Subtitle 4',
"MAIN_TYPE" => 1 , // string (text)
"MAIN_HINT" => "Header Subtitle Text Shown in home page on the fourth slide under the title"
]);
DB::table('maindata')->insert([
'MAIN_SECT_ID' => 2 , //landing image
"MAIN_ITEM" => 'Slide Image 4',
"MAIN_TYPE" => 3 , // image
"MAIN_HINT" => "Header Background Image Shown in home page for the fourth slide -- 1920*830"
]);
DB::table('maindata')->insert([
'MAIN_SECT_ID' => 2 , //landing image
"MAIN_ITEM" => 'Slide Button 4',
"MAIN_TYPE" => 4 , // read-more button
"MAIN_HINT" => "Fourth read more button -- enter the redirect url"
]);
DB::table('maindata')->insert([
'MAIN_SECT_ID' => 2 , //landing image
"MAIN_ITEM" => 'Slide Title 5',
"MAIN_TYPE" => 1 , // string (text)
"MAIN_HINT" => "Header Title Shown in home page on the fifth slide"
]);
DB::table('maindata')->insert([
'MAIN_SECT_ID' => 2 , //landing image
"MAIN_ITEM" => 'Slide Subtitle 5',
"MAIN_TYPE" => 1 , // string (text)
"MAIN_HINT" => "Header Subtitle Text Shown in home page on the fifth slide under the title"
]);
DB::table('maindata')->insert([
'MAIN_SECT_ID' => 2 , //landing image
"MAIN_ITEM" => 'Slide Image 5',
"MAIN_TYPE" => 3 , // image
"MAIN_HINT" => "Header Background Image Shown in home page for the fifth slide -- 1920*830"
]);
DB::table('maindata')->insert([
'MAIN_SECT_ID' => 2 , //landing image
"MAIN_ITEM" => 'Slide Button 5',
"MAIN_TYPE" => 4 , // read-more button
"MAIN_HINT" => "Fifth read more button -- enter the redirect url"
]);
DB::table('maindata')->insert([
'MAIN_SECT_ID' => 3 , //Top Models
"MAIN_ITEM" => 'Top Models Section Title',
"MAIN_TYPE" => 1 , // string (text)
"MAIN_HINT" => "Top Models Section Header"
]);
DB::table('maindata')->insert([
'MAIN_SECT_ID' => 3 , //Top Models
"MAIN_ITEM" => 'Top Models Section Text',
"MAIN_TYPE" => 2 , // string (text)
"MAIN_HINT" => "Top Models Section Text -- appears under the title"
]);
DB::table('maindata')->insert([
'MAIN_SECT_ID' => 4 , //Top Car types
"MAIN_ITEM" => 'Top Cars Section Title',
"MAIN_TYPE" => 1 , // string (text)
"MAIN_HINT" => "Top Car Types Section Title"
]);
DB::table('maindata')->insert([
'MAIN_SECT_ID' => 4 , //Top Car types
"MAIN_ITEM" => 'Top Cars Section Text',
"MAIN_TYPE" => 2 , // string (text)
"MAIN_HINT" => "Top Car Types Section Text -- appears under the title "
]);
DB::table('maindata')->insert([
'MAIN_SECT_ID' => 6 , //Show room stats
"MAIN_ITEM" => 'Years In Business - Stats',
"MAIN_TYPE" => 1 , // string (text)
"MAIN_HINT" => "Showroom stats section"
]);
DB::table('maindata')->insert([
'MAIN_SECT_ID' => 6 , //Show room stats
"MAIN_ITEM" => 'New Cars for Sale - Stats',
"MAIN_TYPE" => 1 , // string (text)
"MAIN_HINT" => "Showroom stats section"
]);
DB::table('maindata')->insert([
'MAIN_SECT_ID' => 6 , //Show room stats
"MAIN_ITEM" => 'Number of Sold Cars - Stats',
"MAIN_TYPE" => 1 , // string (text)
"MAIN_HINT" => "Showroom stats section"
]);
DB::table('maindata')->insert([
'MAIN_SECT_ID' => 6 , //Show room stats
"MAIN_ITEM" => 'Number of clients - Stats',
"MAIN_TYPE" => 1 , // string (text)
"MAIN_HINT" => "Showroom stats section"
]);
DB::table('maindata')->insert([
'MAIN_SECT_ID' => 7 , //Offers
"MAIN_ITEM" => 'Offers Section Title',
"MAIN_TYPE" => 1 , // string (text)
"MAIN_HINT" => "Offers section -- section title"
]);
DB::table('maindata')->insert([
'MAIN_SECT_ID' => 7 , //Offers
"MAIN_ITEM" => 'Offer Section Subtitle',
"MAIN_TYPE" => 2 , // string (text)
"MAIN_HINT" => "Offers section -- section subtitle appears under the title"
]);
DB::table('maindata')->insert([
'MAIN_SECT_ID' => 8 , //Trending
"MAIN_ITEM" => 'Trending Section Title',
"MAIN_TYPE" => 1 , // string (text)
"MAIN_HINT" => "Trending section -- section title"
]);
DB::table('maindata')->insert([
'MAIN_SECT_ID' => 8 , //Trending
"MAIN_ITEM" => 'Trending Section Subtitle',
"MAIN_TYPE" => 2 , // string (text)
"MAIN_HINT" => "Trending section -- section subtitle appears under the title"
]);
DB::table('maindata')->insert([
'MAIN_SECT_ID' => 9 , //Customers
"MAIN_ITEM" => 'Customers Section Title',
"MAIN_TYPE" => 1 , // string (text)
"MAIN_HINT" => "Customers section -- section title"
]);
DB::table('maindata')->insert([
'MAIN_SECT_ID' => 9 , //Customers
"MAIN_ITEM" => 'Customers Section Subtitle',
"MAIN_TYPE" => 2 , // string (text)
"MAIN_HINT" => "Customers section -- section subtitle appears under the header"
]);
DB::table('maindata')->insert([
'MAIN_SECT_ID' => 9 , //Customers
"MAIN_ITEM" => 'Customers Section Background Image',
"MAIN_TYPE" => 3 , // Image
"MAIN_HINT" => "Customers section -- section background image"
]);
DB::table('maindata')->insert([
'MAIN_SECT_ID' => 6 , //Show room stats
"MAIN_ITEM" => 'Background Image',
"MAIN_TYPE" => 3 , // Image
"MAIN_HINT" => "Showroom stats dark background -- Size: 1920x400"
]);
}
}
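// A minimal sketch (not part of this repository; class and constant names are
// hypothetical) of how the MAIN_TYPE magic numbers used by the seeder above
// could be gathered into named constants:
final class MainDataType
{
    public const TEXT      = 1; // single-line text field
    public const PARAGRAPH = 2; // multi-line paragraph
    public const IMAGE     = 3; // image upload
    public const BUTTON    = 4; // "read more" button storing a redirect URL
}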
<file_sep>/app/Http/Controllers/SiteController.php
<?php
namespace App\Http\Controllers;
use App\Mail\RequestInfo;
use App\Models\Bank;
use App\Models\ContactUs;
use App\Models\Brand;
use App\Models\Car;
use App\Models\CarModel;
use App\Models\CarType;
use App\Models\Customer;
use App\Models\Downpayment;
use App\Models\Insurance;
use App\Models\Partner;
use App\Models\Plan;
use App\Models\SiteInfo;
use Illuminate\Database\Eloquent\Builder;
use Illuminate\Http\Request;
use Illuminate\Support\Facades\Mail;
class SiteController extends Controller
{
function home(Request $request)
{
$data = self::getDefaultSiteInfo(true, "Home", null, null, true, $request);
$data['mainModels'] = CarModel::join('brands', 'MODL_BRND_ID', '=', 'brands.id')->where('BRND_ACTV', 1)
->select('brands.*', 'models.*')
->where('MODL_ACTV', 1)->where('MODL_MAIN', 1)->orderByDesc('models.id')->limit(2)->get();
$mainModelsCount = count($data['mainModels']);
if ($mainModelsCount == 0) {
$data['mainModels'] = CarModel::join('brands', 'MODL_BRND_ID', '=', 'brands.id')->where('BRND_ACTV', 1)->where('MODL_ACTV', 1)
->select('brands.*', 'models.*')->orderByDesc('models.id')->limit(2)->get();
} elseif ($mainModelsCount == 1) {
$extraModel = CarModel::join('brands', 'MODL_BRND_ID', '=', 'brands.id')->where('BRND_ACTV', 1)
->where('MODL_ACTV', 1)->where('MODL_MAIN', 0)->select('brands.*', 'models.*')->orderByDesc('models.id')
->get()->first();
if ($extraModel)
$data['mainModels']->push($extraModel);
}
if (isset($data['frontendData']['Offers']) && $data['frontendData']['Offers']['Active']) {
$data['offers'] = Car::join('models', 'CAR_MODL_ID', '=', 'models.id')
->join('brands', 'MODL_BRND_ID', '=', 'brands.id')
->select('brands.*', 'models.*', 'cars.*')
->whereNotNull('CAR_OFFR')
->where('BRND_ACTV', 1)->where('MODL_ACTV', 1)
->get();
}
if (isset($data['frontendData']['Trending cars']) && $data['frontendData']['Trending cars']['Active']) {
$data['trends'] = Car::join('models', 'CAR_MODL_ID', '=', 'models.id')
->join('brands', 'MODL_BRND_ID', '=', 'brands.id')
->select('brands.*', 'models.*', 'cars.*')
->whereNotNull('CAR_TRND')
->where('BRND_ACTV', 1)->where('MODL_ACTV', 1)
->get();
}
if (isset($data['frontendData']['Customers']) && $data['frontendData']['Customers']['Active']) {
$data['customers'] = Customer::all();
}
return view('frontend.home', $data);
}
function model(Request $request, $id)
{
$model = CarModel::with('cars', 'type', 'brand', 'colorImages')->findOrFail($id);
$model->id = $id;
$data = self::getDefaultSiteInfo(false, $model->MODL_NAME, $model->MODL_BGIM ? asset('storage/' . $model->MODL_BGIM) : null, $model->brand->BRND_NAME . ' ' . $model->MODL_NAME . ' ' . $model->MODL_YEAR . '\'s Categories', true, $request);
$data['carList'] = $model->cars;
$data['model'] = $model;
return view('frontend.list', $data);
}
function car(Request $request, $id)
{
$car = Car::with('model', 'model.brand', 'model.type')->findOrFail($id);
$data = self::getDefaultSiteInfo(false, $car->model->MODL_NAME . ' ' . $car->CAR_CATG, null, null, false, $request);
$data['similar'] = Car::with('model', 'model.brand', 'model.colorImages')->where("CAR_MODL_ID", $car->model->id)->where("cars.id", "!=", $id)->get();
$data['car'] = $car;
$data['carAccessories'] = $car->getFullAccessoriesArray();
//loan calculator
$data['downpayments'] = Downpayment::whereHas("plans", function (Builder $query) {
$query->where('PLAN_ACTV', '=', 1);
})->orderBy("DOWN_VLUE")->get();
$data['insurances'] = Insurance::all();
//URLs
$data['getCarsURL'] = url('get/cars');
$data['getYearsURL'] = url('get/years');
$data['getPlansURL'] = url('get/plans');
$data['printLoanURL'] = url('calculator/print');
return view('frontend.car', $data);
}
function compare(Request $request)
{
$data = self::getDefaultSiteInfo(false, "Compare Cars", null, "Compare up to three different cars", true, $request);
$formInputCount = 0;
if (isset($request->car1) && $request->car1 != 0) {
$data['cars'][$formInputCount] = Car::with('model', 'model.brand', 'model.type')->findOrFail($request->car1);
$data['cars'][$formInputCount]['accessories'] = $data['cars'][$formInputCount]->getFullAccessoriesArray();
$formInputCount++;
}
if (isset($request->car2) && $request->car2 != 0) {
$data['cars'][$formInputCount] = Car::with('model', 'model.brand', 'model.type')->findOrFail($request->car2);
$data['cars'][$formInputCount]['accessories'] = $data['cars'][$formInputCount]->getFullAccessoriesArray();
$formInputCount++;
}
if (isset($request->car3) && $request->car3 != 0) {
$data['cars'][$formInputCount] = Car::with('model', 'model.brand', 'model.type')->findOrFail($request->car3);
$data['cars'][$formInputCount]['accessories'] = $data['cars'][$formInputCount]->getFullAccessoriesArray();
$formInputCount++;
}
if (isset($request->car4) && $request->car4 != 0) {
$data['cars'][$formInputCount] = Car::with('model', 'model.brand', 'model.type')->findOrFail($request->car4);
$data['cars'][$formInputCount]['accessories'] = $data['cars'][$formInputCount]->getFullAccessoriesArray();
$formInputCount++;
}
if ($formInputCount > 1) {
$data['count'] = $formInputCount;
$data['headerWidth'] = (1 / ($formInputCount + 1)) * 100;
$request->session()->remove("compareArr");
return view('frontend.compare', $data);
}
if (count($data['compareArr']) < 2) {
return $this->prepareCompare($request);
}
$i = 0;
foreach ($data['compareArr'] as $carID) {
$data['cars'][$i] = Car::with('model', 'model.brand', 'model.type')->findOrFail($carID);
$data['cars'][$i]['accessories'] = $data['cars'][$i]->getFullAccessoriesArray();
$i++;
}
$data['count'] = $i;
$data['headerWidth'] = (1 / ($i + 1)) * 100;
return view('frontend.compare', $data);
}
function prepareCompare(Request $request)
{
$data = self::getDefaultSiteInfo(false, "Compare Cars", null, "Select up to 4 cars for comparison", true, $request);
$data['getCarsURL'] = url('get/cars');
if (count($data['compareArr']) == 1) {
$data['car1'] = Car::find(array_pop($data['compareArr']));
$data['cars1Model'] = Car::where('CAR_MODL_ID', $data['car1']->CAR_MODL_ID)->get();
}
return view('frontend.preparecompare', $data);
}
function calculator(Request $request)
{
$data = self::getDefaultSiteInfo(false, "Car Loans", null, "Select your car & Calculate Loan Plans", true, $request);
$data['downpayments'] = Downpayment::whereHas("plans", function (Builder $query) {
$query->where('PLAN_ACTV', '=', 1);
})->orderBy("DOWN_VLUE")->get();
$data['insurances'] = Insurance::all();
//URLs
$data['getCarsURL'] = url('get/cars');
$data['getYearsURL'] = url('get/years');
$data['getPlansURL'] = url('get/plans');
$data['printLoanURL'] = url('calculator/print');
return view('frontend.calculator', $data);
}
function contactus(Request $request)
{
$data = self::getDefaultSiteInfo(false, "Contact Us", null, "We are looking to hear from you :)", true, $request);
$data['sendMailURL'] = url('send/email');
return view("frontend.contactus", $data);
}
function sendMail(Request $request)
{
$request->validate([
"name" => "required",
"email" => "required|email",
"phone" => "required",
"message" => "required|min:20"
]);
Mail::to("<EMAIL>")->send(new RequestInfo($request->name, $request->email, $request->phone, $request->message));
echo "1";
}
function search(Request $request)
{
$data = self::getDefaultSiteInfo(false, "Find Your Car", null, "Find your search results below", true, $request);
$prices = explode(',', $request->priceRange);
$data['carList'] = self::getSearchResults($request->typeID, $request->brandID, $request->modelID, $request->year, $prices[0] ?? $data['carsMin'], $prices[1] ?? $data['carsMax']);
if ($data['carList']->count() > 0)
return view('frontend.list', $data);
else return view('frontend.nosearch', $data);
}
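//Builds the view data shared by every front-end page: header settings, top cars,
//contact info, active site sections, the search-form lists (models, brands,
//types, years, price range), common URLs and the session-backed compare list.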
public static function getDefaultSiteInfo(bool $carouselHeader, string $pageTitle, string $headerImage = null, string $pageSubtitle = null, $isHeader = true, Request $request = null)
{
//make sure every change here is also reflected on the 404 page
$data['carouselHeader'] = $carouselHeader;
$data['headerImage'] = $headerImage;
$data['pageSubtitle'] = $pageSubtitle;
$data['pageTitle'] = $pageTitle;
$data['isHeader'] = $isHeader;
$data['topCars'] = Car::with(["model", "model.brand"])->orderByDesc('CAR_VLUE')->limit(5)->get();
$data['contactUs'] = ContactUs::getContactUs();
$data['frontendData'] = SiteInfo::getSiteInfo();
$data['partners'] = Partner::all();
if ($data['headerImage'] == null) {
$data['headerImage'] = (isset($data['frontendData']['Header']['Default Header']) && strlen($data['frontendData']['Header']['Default Header']) > 0) ? asset('storage/' . $data['frontendData']['Header']['Default Header']) : null;
}
//Search Form
$data['models'] = CarModel::with(["brand"])->join("brands", "MODL_BRND_ID", '=', 'brands.id')
->where('MODL_ACTV', 1)->where('BRND_ACTV', 1)->select("brands.BRND_NAME", 'models.*')->get();
$data['brands'] = Brand::where('BRND_ACTV', 1)->get();
$data['types'] = CarType::with(['cars', 'cars.model'])->get();
$data['years'] = CarModel::getModelYears();
$data['carsMin'] = Car::selectRaw('MIN(CAR_PRCE) as mini')->first()->mini ?? 0;
$data['carsMax'] = Car::selectRaw('MAX(CAR_PRCE) as maxi')->first()->maxi ?? 1000000;
$data['carsShwya'] = 0; //used to adjust the price margin
//URLs
$data['searchURL'] = url('search');
$data['compareURL'] = url('compare');
$data['contactusURL'] = url('contactus');
$data['calculateURL'] = url('calculator');
$data['addToCompareURL'] = url('compare/add');
$data['removeFromCompareURL'] = url('compare/remove');
$data['compareArr'] = [];
//compare array
if ($request !== null)
$data['compareArr'] = $request->session()->get('compareArr') ?? [];
return $data;
}
public static function getSearchResults($type, $brand, $model, $year, $priceFrom, $priceTo)
{
$query = Car::join('models', 'CAR_MODL_ID', '=', 'models.id')->join('brands', 'MODL_BRND_ID', '=', 'brands.id')
->join('types', 'MODL_TYPE_ID', '=', 'types.id')
->select('cars.*', 'models.MODL_NAME', 'models.MODL_YEAR', "types.TYPE_NAME", "brands.BRND_NAME");
if ($type && is_numeric($type) && $type > 0) {
CarType::findOrFail($type);
$query = $query->where("MODL_TYPE_ID", $type);
}
if ($brand && is_numeric($brand) && $brand > 0) {
Brand::findOrFail($brand);
$query = $query->where("MODL_BRND_ID", $brand);
}
if ($model && is_numeric($model) && $model > 0) {
CarModel::findOrFail($model);
$query = $query->where("CAR_MODL_ID", $model);
}
if ($year && is_numeric($year) && $year > 2000) {
$query = $query->where("MODL_YEAR", $year);
}
if ($priceFrom && is_numeric($priceFrom)) {
$query = $query->where("CAR_PRCE", ">=", $priceFrom);
}
if ($priceTo && is_numeric($priceTo)) {
$query = $query->where("CAR_PRCE", "<=", $priceTo);
}
return $query->get();
}
public static function printLoan(Request $request)
{
$request->validate([
"carID" => "required",
"planID" => "required",
"loanGuarantee" => "required",
"downID" => "required",
"paid" => "required",
"remaining" => "required",
"years" => "required",
"rate" => "required",
"install" => "required",
"adminFees" => "required",
"insuranceComp" => "required",
"insuranceFees" => "required"
]);
$data['class'] = 'info';
$data['car'] = Car::with('model', 'model.brand')->findOrFail($request->carID);
$data['bank'] = Plan::findOrFail($request->planID)->bank;
$data['loanGuarantee'] = ($request->loanGuarantee == 1) ? "وظيـفه" : "صـاحب عمل";
$down = Downpayment::findOrFail($request->downID);
$data['downPayment'] = "(" . $down->DOWN_VLUE . "%)" . " " . number_format(round($down->DOWN_VLUE * $data['car']->CAR_PRCE / 100, 5)) . " EGP";
$data['remaining'] = $request->remaining;
$data['paid'] = $request->paid;
$data['interestRate'] = $request->rate . "%";
$data['install'] = $request->install;
$data['years'] = $request->years;
$data['adminFees'] = $request->adminFees;
$data['insuranceComp'] = $request->insuranceComp;
$data['insuranceFees'] = $request->insuranceFees;
return view('frontend.printable', $data);
}
}
<file_sep>/app/Models/CarImage.php
<?php
namespace App\Models;
use Exception;
use Illuminate\Database\Eloquent\Model;
class CarImage extends Model
{
protected $table = "cars_images";
public $timestamps = false;
public function car()
{
return $this->belongsTo('App\Models\Car', 'CIMG_CAR_ID');
}
public function deleteImage()
{
try {
unlink(public_path('storage/' . $this->CIMG_URL));
} catch (Exception $e) {
}
$this->delete();
return 1;
}
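//Re-encodes the stored image as a JPEG at quality 40 (PNG files are converted,
//JPEG files get their EXIF orientation applied first), saves it under a new
//quality-suffixed name and deletes the original file.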
public function compress()
{
$quality = 40;
$ext = last(explode('.', $this->CIMG_URL));
$fileNoExt = str_replace('.' . $ext, '', $this->CIMG_URL);
$imagePath = public_path('storage/' . $this->CIMG_URL);
$newImagePath = $fileNoExt . '_' . $quality . '.' . $ext;
echo "Extension: " . $ext . "\n";
echo "FileNoExt: " . $fileNoExt . "\n";
echo "Path: " . $imagePath . "\n";
echo "New Path: " . $newImagePath . "\n";
if ($ext == 'png') {
try {
$image = imagecreatefrompng($imagePath);
imagejpeg($image, public_path('storage/' . $newImagePath), $quality);
$this->CIMG_URL = $newImagePath;
$this->save();
unlink($imagePath);
} catch (Exception $e) {
echo "Something went wrong here \n";
echo $e->getMessage();
echo "\n";
}
} else if ($ext == 'jpg' || $ext == 'jpeg') {
$image = self::imagecreatefromjpegexif($imagePath);
try {
imagejpeg($image, public_path('storage/' . $newImagePath), $quality);
$this->CIMG_URL = $newImagePath;
$this->save();
unlink($imagePath);
} catch (Exception $e) {
echo "Something went wrong here \n";
echo $e->getMessage();
echo "\n";
}
}
}
private static function imagecreatefromjpegexif($filename)
{
$img = imagecreatefromjpeg($filename);
$exif = exif_read_data($filename);
echo "size before: ";
echo $exif['FileSize'] . "\n";
if ($img && $exif && isset($exif['Orientation'])) {
$ort = $exif['Orientation'];
if ($ort == 6 || $ort == 5)
$img = imagerotate($img, 270, null);
if ($ort == 3 || $ort == 4)
$img = imagerotate($img, 180, null);
if ($ort == 8 || $ort == 7)
$img = imagerotate($img, 90, null);
if ($ort == 5 || $ort == 4 || $ort == 7)
imageflip($img, IMG_FLIP_HORIZONTAL);
}
return $img;
}
}
<file_sep>/routes/web.php
<?php
use App\Models\Car;
use App\Models\Plan;
use Illuminate\Http\Request;
use Illuminate\Support\Facades\Route;
/*
|--------------------------------------------------------------------------
| Web Routes
|--------------------------------------------------------------------------
|
| Here is where you can register web routes for your application. These
| routes are loaded by the RouteServiceProvider within a group which
| contains the "web" middleware group. Now create something great!
|
*/
/////////Website front end routes
Route::get('/calculator', 'SiteController@calculator');
Route::post('/calculator/print', 'SiteController@printLoan');
////////compare links
Route::get('/compare', 'SiteController@compare');
Route::post('/compare', 'SiteController@compare');
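//Session-backed compare list: /compare/add stores a car ID in the session
//(keeping only the 4 most recently added), /compare/remove drops one.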
Route::post('/compare/add', function (Request $request) {
$request->validate([
"carID" => "required|exists:cars,id"
]);
$compareArr = $request->session()->get('compareArr') ?? [];
if (!in_array($request->carID, $compareArr)) {
array_push($compareArr, $request->carID);
if (count($compareArr) > 4) {
$compareArr = array_slice($compareArr, 1, 4);
}
$request->session()->put('compareArr', $compareArr);
}
});
Route::post('/compare/remove', function (Request $request) {
$request->validate([
"carID" => "required|exists:cars,id"
]);
$compareArr = $request->session()->get('compareArr') ?? [];
if (in_array($request->carID, $compareArr)) {
if (($key = array_search($request->carID, $compareArr)) !== false) {
unset($compareArr[$key]);
}
$request->session()->put('compareArr', $compareArr);
}
});
Route::post('/get/cars', function (Request $request) {
$request->validate([
"modelID" => "required|exists:models,id"
]);
return json_encode(Car::where('CAR_MODL_ID', $request->modelID)->get());
});
Route::post('get/years', function (Request $request) {
$request->validate([
"downID" => "required|exists:downpayments,id"
]);
return json_encode(Plan::getYearsByDownpayment($request->downID));
});
Route::post('get/plans', function (Request $request) {
$request->validate([
"downID" => "required|exists:downpayments,id",
"year" => "required",
"isEmployed" => "required",
]);
return json_encode(Plan::getPlansByDownpaymentAndYear($request->downID, $request->year, $request->isEmployed));
});
Route::post('/search', 'SiteController@search');
Route::post('/send/email', 'SiteController@sendMail');
//cars urls
Route::get('/car/{id}', 'SiteController@car');
Route::get('/model/{id}', 'SiteController@model');
//main pages
Route::get('/', 'SiteController@home')->name('home');
Route::get('/contactus', 'SiteController@contactus')->name('contactus');
//unauthenticated admin login pages
Route::get('admin/login', 'HomeController@login')->name('login')->middleware('web');
Route::post('admin/login', 'HomeController@authenticate')->name('login')->middleware('web');
<file_sep>/app/Http/Controllers/CarTypesController.php
<?php
namespace App\Http\Controllers;
use App\Models\CarType;
use Illuminate\Http\Request;
use Illuminate\Validation\Rule;
class CarTypesController extends Controller
{
protected $data;
protected $homeURL = 'admin/types/show';
private function initDataArr()
{
$this->data['items'] = CarType::all();
$this->data['title'] = "Available Types";
$this->data['subTitle'] = "Manage all Available Types such as: SUV - Sedan - Hatchback";
$this->data['cols'] = ['Name', 'Arabic', 'Main', 'Edit', 'Delete'];
$this->data['atts'] = [
'TYPE_NAME',
'TYPE_ARBC_NAME',
[
'toggle' => [
"att" => "TYPE_MAIN",
"url" => "admin/types/toggle/",
"states" => [
"1" => "True",
"0" => "False",
],
"actions" => [
"1" => "show the type in the home page",
"0" => "hide the type from the home page",
],
"classes" => [
"1" => "label-success",
"0" => "label-danger",
],
]
],
['edit' => ['url' => 'admin/types/edit/', 'att' => 'id']],
['del' => ['url' => 'admin/types/delete/', 'att' => 'id', 'msg' => 'delete the car type, system will not delete if there is any model linked with the type']],
];
$this->data['homeURL'] = $this->homeURL;
}
public function home()
{
$this->initDataArr();
$this->data['formTitle'] = "Add Type";
$this->data['formURL'] = "admin/types/insert";
$this->data['isCancel'] = false;
return view('settings.types', $this->data);
}
public function edit($id)
{
$this->initDataArr();
$this->data['type'] = CarType::findOrFail($id);
$this->data['formTitle'] = "Edit Type ( " . $this->data['type']->TYPE_NAME . " )";
$this->data['formURL'] = "admin/types/update";
$this->data['isCancel'] = true;
return view('settings.types', $this->data);
}
public function insert(Request $request)
{
$request->validate([
"type" => "required|unique:types,TYPE_NAME",
]);
$type = new CarType();
$type->TYPE_NAME = $request->type;
$type->TYPE_ARBC_NAME = $request->arbcName;
$type->TYPE_MAIN = $request->isActive == 'on' ? 1 : 0;
$type->save();
return redirect($this->homeURL);
}
public function update(Request $request)
{
$request->validate([
"id" => "required",
]);
$type = CarType::findOrFail($request->id);
$request->validate([
"type" => ["required", Rule::unique('types', "TYPE_NAME")->ignore($type->TYPE_NAME, "TYPE_NAME"),],
"id" => "required",
]);
$type->TYPE_NAME = $request->type;
$type->TYPE_ARBC_NAME = $request->arbcName;
$type->TYPE_MAIN = $request->isActive == 'on' ? 1 : 0;
$type->save();
return redirect($this->homeURL);
}
public function toggle($id)
{
$type = CarType::findOrFail($id);
$type->toggle();
return back();
}
    public function delete($id){
        $type = CarType::withCount('models')->findOrFail($id);
        if ($type->models_count == 0) {
            $type->delete();
        }
        return back();
    }
}
<file_sep>/app/Http/Controllers/InfoController.php
<?php
namespace App\Http\Controllers;
use App\Models\Section;
use App\Models\SiteInfo;
use Illuminate\Http\Request;
class InfoController extends Controller
{
protected $homeURL = 'admin/manage/site';
protected $updateURL = 'admin/update/site';
protected $addFieldURL = 'admin/add/field';
protected $deleteFieldURL = 'admin/delete/field/';
protected $toggleSectionURL = 'admin/toggle/section/';
protected $data;
function home()
{
$this->data['siteSections'] = Section::all();
foreach ($this->data['siteSections'] as $section) {
$this->data['maindata'][$section->id] = SiteInfo::where('MAIN_SECT_ID', $section->id)->get();
}
$this->data['formTitle'] = 'Manage Home Page & Site data';
$this->data['formURL'] = url($this->updateURL);
$this->data['addFieldURL'] = url($this->addFieldURL);
$this->data['deleteFieldURL'] = url($this->deleteFieldURL);
$this->data['toggleSectionURL'] = url($this->toggleSectionURL);
return view('meta.siteinfo', $this->data);
}
function deleteField(Request $request)
{
$request->validate([
"id" => 'required|exists:maindata,id'
]);
$siteInfoRow = SiteInfo::find($request->id);
echo $siteInfoRow->delete();
}
function addNew(Request $request)
{
$siteInfoRow = SiteInfo::firstOrNew(['MAIN_ITEM' => $request->field, "MAIN_SECT_ID" => $request->section]);
if (!$siteInfoRow->exists) {
$siteInfoRow->MAIN_TYPE = $request->type;
$siteInfoRow->MAIN_ITEM = $request->field;
$siteInfoRow->MAIN_SECT_ID = $request->section;
echo $siteInfoRow->save();
} else {
echo "-1";
}
}
function toggle($id)
{
$section = Section::findOrFail($id);
echo $section->toggle();
}
function update(Request $request)
{
$request->validate([
'id' => 'required',
]);
$siteInfoRow = SiteInfo::findOrFail($request->id);
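        // Note: the admin form presumably posts FormData via ajax (front-end not shown),
        // so a field left untouched can arrive as the literal string "undefined" -- hence the explicit check below.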
if($request->hasFile('content')){
$siteInfoRow->MAIN_CNTN = $request->content->store('images/site/' . $request->id, 'public');
} elseif($request->content == "undefined") {
$siteInfoRow->MAIN_CNTN = NULL;
} else {
$siteInfoRow->MAIN_CNTN = $request->content ?? NULL;
}
echo $siteInfoRow->save();
}
function activateSection($id)
{
$section = Section::findOrFail($id);
echo $section->activate();
}
}
<file_sep>/app/Http/Controllers/PartnersController.php
<?php
namespace App\Http\Controllers;
use App\Models\Partner;
use Exception;
use Illuminate\Http\Request;
use Illuminate\Validation\Rule;
class PartnersController extends Controller
{
protected $data;
protected $homeURL = 'admin/partners/show';
private function initDataArr()
{
$this->data['items'] = Partner::all();
$this->data['title'] = "Available Partners";
$this->data['subTitle'] = "Manage all Available Partners - should appear on the logos footer on the home page";
$this->data['cols'] = ['Image', 'Name', 'Url', 'Edit', 'Delete'];
$this->data['atts'] = [
['assetImg' => ['att' => 'PRTR_IMGE']],
'PRTR_NAME',
['remoteURL' => ['att' => 'PRTR_URL']],
['edit' => ['url' => 'admin/partners/edit/', 'att' => 'id']],
['del' => ['url' => 'admin/partners/delete/', 'att' => 'id', 'msg' => 'delete the partner from the footer bar']],
];
$this->data['homeURL'] = $this->homeURL;
}
public function home()
{
$this->initDataArr();
$this->data['formTitle'] = "Add Partner";
$this->data['formURL'] = "admin/partners/insert";
$this->data['isCancel'] = false;
return view('settings.partners', $this->data);
}
public function edit($id)
{
$this->initDataArr();
$this->data['partner'] = Partner::findOrFail($id);
$this->data['formTitle'] = "Edit Partner ( " . $this->data['partner']->PRTR_NAME . " )";
$this->data['formURL'] = "admin/partners/update";
$this->data['isCancel'] = true;
return view('settings.partners', $this->data);
}
public function insert(Request $request)
{
$request->validate([
"name" => "required|unique:partners,PRTR_NAME",
"image" => "required",
"website" => "required",
]);
$partner = new Partner();
$partner->PRTR_NAME = $request->name;
$partner->PRTR_URL = $request->website;
if ($request->hasFile('image')) {
$partner->PRTR_IMGE = $request->image->store('images/partners/' . $partner->PRTR_NAME, 'public');
}
$partner->save();
return redirect($this->homeURL);
}
public function update(Request $request)
{
$request->validate([
"id" => "required",
]);
$partner = Partner::findOrFail($request->id);
$request->validate([
"name" => ["required", Rule::unique('partners', "PRTR_NAME")->ignore($partner->PRTR_NAME, "PRTR_NAME"),],
"website" => "required",
]);
$partner->PRTR_NAME = $request->name;
$partner->PRTR_URL = $request->website;
if ($request->hasFile('image')) {
$this->deleteOldPartnerPhoto($partner->PRTR_IMGE);
$partner->PRTR_IMGE = $request->image->store('images/partners/' . $partner->PRTR_NAME, 'public');
}
$partner->save();
return redirect($this->homeURL);
}
public function delete($id)
{
$partner = Partner::findOrFail($id);
$partner->delete();
return back();
}
private function deleteOldPartnerPhoto($partnerFilePath)
{
if (isset($partnerFilePath) && $partnerFilePath != '') {
try {
unlink(public_path('storage/' . $partnerFilePath));
} catch (Exception $e) {
}
}
}
}
<file_sep>/app/Models/Downpayment.php
<?php
namespace App\Models;
use Illuminate\Database\Eloquent\Model;
class Downpayment extends Model
{
public $timestamps = false;
protected $table = "downpayments";
function bank(){
return $this->belongsToMany("App\Models\Bank", "plans", "PLAN_DOWN_ID", "PLAN_BANK_ID");
}
function plans(){
return $this->hasMany("App\Models\Plan", "PLAN_DOWN_ID");
}
	function getBanks($year){
		// Banks offering a plan for this downpayment in the given year, with each plan's interest/insurance pivot values
		return $this->bank()->withPivot("PLAN_INTR", "PLAN_INSR")->wherePivot("PLAN_YEAR", $year)->get();
	}
}
<file_sep>/database/migrations/2021_01_31_204736_update_plans_table.php
<?php
use Illuminate\Database\Migrations\Migration;
use Illuminate\Database\Schema\Blueprint;
use Illuminate\Support\Facades\DB;
use Illuminate\Support\Facades\Schema;
class UpdatePlansTable extends Migration
{
/**
* Run the migrations.
*
* @return void
*/
public function up()
{
DB::table('downpayments')->insert([
['DOWN_VLUE' => "25"]
]);
Schema::table('plans', function (Blueprint $table) {
$table->tinyInteger('PLAN_ACTV')->default(1);
});
}
/**
* Reverse the migrations.
*
* @return void
*/
public function down()
{
Schema::table('plans', function (Blueprint $table) {
$table->dropColumn('PLAN_ACTV');
});
DB::table('downpayments')->where('DOWN_VLUE' , "25")->delete();
}
}
<file_sep>/app/Http/Controllers/AccessoriesController.php
<?php
namespace App\Http\Controllers;
use App\Models\Accessories;
use Illuminate\Http\Request;
use Illuminate\Validation\Rule;
class AccessoriesController extends Controller
{
protected $data;
protected $homeURL = 'admin/accessories/show';
private function initDataArr()
{
$this->data['items'] = Accessories::all();
        $this->data['title'] = "Available Accessories";
$this->data['subTitle'] = "Manage all Available Accessories/Options such as: Power Steering - ABS - Airbags";
$this->data['cols'] = ['Name', 'Arabic', 'Edit'];
$this->data['atts'] = [
'ACSR_NAME',
'ACSR_ARBC_NAME',
['edit' => ['url' => 'admin/accessories/edit/', 'att' => 'id']],
];
$this->data['homeURL'] = $this->homeURL;
}
public function home()
{
$this->initDataArr();
        $this->data['formTitle'] = "Add Accessory";
$this->data['formURL'] = "admin/accessories/insert";
$this->data['isCancel'] = false;
return view('settings.accessories', $this->data);
}
public function edit($id)
{
$this->initDataArr();
$this->data['accessory'] = Accessories::findOrFail($id);
        $this->data['formTitle'] = "Edit Accessory ( " . $this->data['accessory']->ACSR_NAME . " )";
$this->data['formURL'] = "admin/accessories/update";
$this->data['isCancel'] = true;
return view('settings.accessories', $this->data);
}
public function insert(Request $request)
{
$request->validate([
"name" => "required|unique:accessories,ACSR_NAME",
]);
$accessory = new Accessories();
$accessory->ACSR_NAME = $request->name;
$accessory->ACSR_ARBC_NAME = $request->arbcName;
$accessory->save();
return redirect($this->homeURL);
}
public function update(Request $request)
{
$request->validate([
"id" => "required",
]);
$accessory = Accessories::findOrFail($request->id);
$request->validate([
"name" => ["required", Rule::unique('accessories', "ACSR_NAME")->ignore($accessory->ACSR_NAME, "ACSR_NAME"),],
"id" => "required",
]);
$accessory->ACSR_NAME = $request->name;
$accessory->ACSR_ARBC_NAME = $request->arbcName;
$accessory->save();
return redirect($this->homeURL);
}
}
<file_sep>/database/migrations/2020_12_03_111252_create_accessories_table.php
<?php
use Illuminate\Database\Migrations\Migration;
use Illuminate\Database\Schema\Blueprint;
use Illuminate\Support\Facades\Schema;
class CreateAccessoriesTable extends Migration
{
/**
* Run the migrations.
*
* @return void
*/
public function up()
{
Schema::create('accessories', function (Blueprint $table) {
$table->id();
$table->string('ACSR_NAME')->unique();
$table->string('ACSR_ARBC_NAME')->nullable();
});
Schema::create('accessories_cars', function (Blueprint $table) {
$table->id();
$table->foreignId('ACCR_CAR_ID')->constrained('cars');
$table->foreignId('ACCR_ACSR_ID')->constrained('accessories');
$table->string('ACCR_VLUE')->default(1);
});
}
/**
* Reverse the migrations.
*
* @return void
*/
public function down()
{
Schema::dropIfExists('accessories_cars');
Schema::dropIfExists('accessories');
}
}
<file_sep>/app/Models/CarAccessory.php
<?php
namespace App\Models;
use Illuminate\Database\Eloquent\Model;
class CarAccessory extends Model
{
protected $table = "accessories_cars";
public $timestamps = false;
function car(){
return $this->belongsTo('App\Models\Car', 'ACCR_CAR_ID');
}
function accessory(){
return $this->belongsTo('App\Models\Accessories', 'ACCR_ACSR_ID');
}
function unlink(){
return $this->delete();
}
function setValue($value){
$this->ACCR_VLUE = $value;
return $this->save();
}
}
<file_sep>/app/Http/Controllers/ContactUsController.php
<?php
namespace App\Http\Controllers;
use App\Models\ContactUs;
use Illuminate\Http\Request;
class ContactUsController extends Controller
{
protected $homeURL = 'admin/manage/contact';
protected $updateURL = 'admin/update/contact';
protected $aboutUs;
function home(){
$this->aboutUs = ContactUs::getContactUs();
$this->aboutUs['formTitle'] = 'Manage "Contact Us" Info';
$this->aboutUs['formURL'] = url($this->updateURL);
return view('meta.contactus', $this->aboutUs);
}
function update(Request $request){
$request->validate([
'item' => 'required',
]);
$aboutUsRow = ContactUs::firstOrNew(['ABUT_ITEM' => $request->item]);
$aboutUsRow->ABUT_CNTN = $request->content ?? NULL;
echo $aboutUsRow->save();
}
}
<file_sep>/app/Http/Controllers/CalculatorController.php
<?php
namespace App\Http\Controllers;
use App\Models\Bank;
use App\Models\Downpayment;
use App\Models\Insurance;
use App\Models\Plan;
use Illuminate\Http\Request;
use Illuminate\Validation\Rule;
class CalculatorController extends Controller
{
public function index()
{
$data = self::getCalculatorData();
return view('meta.calculator', $data);
}
//plans functions
function addPlan(Request $request){
$request->validate([
"downpayment" => "required|exists:downpayments,id",
"bank" => "required|exists:banks,id",
"interest" => "required|numeric",
"years" => "required|numeric"
]);
$plan = new Plan();
$plan->PLAN_BANK_ID = $request->bank;
$plan->PLAN_DOWN_ID = $request->downpayment;
$plan->PLAN_INTR = $request->interest;
$plan->PLAN_INSR = $request->isInsurance ? 1:0 ;
$plan->PLAN_EMPL = $request->isEmployed ? 1:0 ;
$plan->PLAN_YEAR = $request->years;
$plan->save();
return redirect("admin/manage/calculator");
}
function editPlan(Request $request){
$request->validate([
"downpayment" => "required|exists:downpayments,id",
"bank" => "required|exists:banks,id",
"interest" => "required|numeric",
"years" => "required|numeric",
"id" => "required|exists:plans,id",
]);
$plan = Plan::findOrFail($request->id);
$plan->PLAN_BANK_ID = $request->bank;
$plan->PLAN_DOWN_ID = $request->downpayment;
$plan->PLAN_INTR = $request->interest;
$plan->PLAN_INSR = $request->isInsurance ? 1:0 ;
$plan->PLAN_EMPL = $request->isEmployed ? 1:0 ;
$plan->PLAN_YEAR = $request->years;
$plan->save();
return redirect("admin/manage/calculator");
}
function deletePlan($id){
$plan = Plan::findOrFail($id);
$plan->delete();
return redirect('admin/manage/calculator');
}
function togglePlan($id){
$plan = Plan::findOrFail($id);
$plan->toggle();
return back();
}
///banks functions
function addBank(Request $request)
{
$request->validate([
"name" => "required|unique:banks,BANK_NAME",
"expenses" => "required"
]);
$bank = new Bank();
$bank->BANK_NAME = $request->name;
$bank->BANK_EXPN = $request->expenses;
$bank->save();
return $bank->id;
}
function editBank(Request $request)
{
$request->validate([
"id" => "required",
]);
$bank = Bank::findOrFail($request->id);
$request->validate([
"id" => "required",
"name" => ["required", Rule::unique('banks', "BANK_NAME")->ignore($bank->BANK_NAME, "BANK_NAME"),],
"expenses" => "required"
]);
$bank->BANK_NAME = $request->name;
$bank->BANK_EXPN = $request->expenses;
$bank->save();
return $bank->id;
}
function deleteBank(Request $request){
$request->validate([
"id" => "required",
]);
$bank = Bank::findOrFail($request->id);
$bank->deleteAll();
return "1";
}
////insurance functions
function addInsurance(Request $request)
{
$request->validate([
"name" => "required|unique:insurances,INSR_NAME",
"rate" => "required"
]);
$insurance = new Insurance();
$insurance->INSR_NAME = $request->name;
$insurance->INSR_VLUE = $request->rate;
$insurance->save();
return $insurance->id;
}
function editInsurance(Request $request)
{
$request->validate([
"id" => "required",
]);
$insurance = Insurance::findOrFail($request->id);
$request->validate([
"id" => "required",
"name" => ["required", Rule::unique('insurances', "INSR_NAME")->ignore($insurance->INSR_NAME, "INSR_NAME"),],
"rate" => "required"
]);
$insurance->INSR_NAME = $request->name;
$insurance->INSR_VLUE = $request->rate;
$insurance->save();
return $insurance->id;
}
function deleteInsurance(Request $request){
$request->validate([
"id" => "required",
]);
$insurance = Insurance::findOrFail($request->id);
$insurance->delete();
return "1";
}
////////data function
private static function getCalculatorData()
{
$data['banks'] = Bank::all();
$data['insurances'] = Insurance::all();
$data['downpayments'] = Downpayment::orderBy("DOWN_VLUE")->get();
$data['items'] = Plan::with("bank", "downpayment")->get();
        $data['title'] = "Available Plans";
$data['subTitle'] = "Check all Available Loan Plans";
$data['cols'] = ['%', 'Years', 'Bank', 'Interest', 'Insurance', 'Employed', 'On?', 'Edit', 'Delete'];
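        // Each 'atts' entry describes one grid column: either a raw attribute name or a single-key
        // array whose key picks a renderer ('foreign', 'state', 'toggle', 'editJS', 'del', 'hidden') --
        // presumably consumed by the shared admin table partial (view not shown here).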
$data['atts'] = [
['foreign' => ['rel' => 'downpayment', 'att' => 'DOWN_VLUE']],
'PLAN_YEAR',
['foreign' => ['rel' => 'bank', 'att' => 'BANK_NAME']],
'PLAN_INTR',
[
'state' => [
"att" => "PLAN_INSR",
"states" => [
"1" => "True",
"0" => "False",
],
"classes" => [
"1" => "label-success",
"0" => "label-danger",
],
"text" => [
"1" => "Required",
"0" => "Not required",
],
]
],
[
'state' => [
"att" => "PLAN_EMPL",
"states" => [
"1" => "True",
"0" => "False",
],
"classes" => [
"1" => "label-success",
"0" => "label-danger",
],
"text" => [
"1" => "Employed",
"0" => "Self-employed",
],
]
],
[
'toggle' => [
"att" => "PLAN_ACTV",
"url" => "admin/plan/toggle/",
"states" => [
"1" => "True",
"0" => "False",
],
"actions" => [
"1" => "disable the plan",
"0" => "activate the plan",
],
"classes" => [
"1" => "label-success",
"0" => "label-danger",
],
]
],
['editJS' => ['func' => 'editPlan', 'att' => 'id']],
['del' => ['url' => 'admin/delete/plan/', 'att' => 'id', 'msg' => 'delete the plan']],
['hidden' => ["id" => 'planYear', "valueAtt" =>"PLAN_YEAR"]],
['hidden' => ["id" => 'planInterest', "valueAtt" =>"PLAN_INTR"]],
['hidden' => ["id" => 'planInsurance', "valueAtt" =>"PLAN_INSR"]],
['hidden' => ["id" => 'planEmployed', "valueAtt" =>"PLAN_EMPL"]],
['hidden' => ["id" => 'planBank', "valueAtt" =>"PLAN_BANK_ID"]],
['hidden' => ["id" => 'planDown', "valueAtt" =>"PLAN_DOWN_ID"]],
];
$data['addBankURL'] = url('admin/add/bank');
$data['editBankURL'] = url('admin/edit/bank');
$data['delBankURL'] = url('admin/delete/bank');
$data['addInsuranceURL'] = url('admin/add/insurance');
$data['editInsuranceURL'] = url('admin/edit/insurance');
$data['delInsuranceURL'] = url('admin/delete/insurance');
$data['addPlanURL'] = url('admin/add/plan');
$data['editPlanURL'] = url('admin/edit/plan');
$data['delPlanURL'] = url('admin/delete/plan');
return $data;
}
}
<file_sep>/app/Models/Customer.php
<?php
namespace App\Models;
use Illuminate\Database\Eloquent\Model;
class Customer extends Model
{
public $timestamps = false;
function toggle()
{
if ($this->CUST_ACTV == 0) {
if (isset($this->CUST_IMGE) && strlen($this->CUST_IMGE) > 0 && isset($this->CUST_TTLE) && strlen($this->CUST_TTLE) > 0 &&
isset($this->CUST_TEXT) && strlen($this->CUST_TEXT) > 0 )
$this->CUST_ACTV = 1;
} else {
$this->CUST_ACTV = 0;
}
$this->save();
}
}
<file_sep>/app/Models/CarType.php
<?php
namespace App\Models;
use Illuminate\Database\Eloquent\Model;
class CarType extends Model
{
protected $table = "types";
public $timestamps = false;
function models(){
return $this->hasMany('App\Models\CarModel', 'MODL_TYPE_ID');
}
function cars(){
return $this->hasManyThrough('App\Models\Car', 'App\Models\CarModel', 'MODL_TYPE_ID', 'CAR_MODL_ID');
}
function active_cars(){
return $this->hasManyThrough('App\Models\Car', 'App\Models\CarModel', 'MODL_TYPE_ID', 'CAR_MODL_ID')->where('MODL_ACTV', 1);
}
function toggle(){
if($this->TYPE_MAIN == 0) {
$this->TYPE_MAIN = 1;
} else {
$this->TYPE_MAIN = 0;
}
$this->save();
}
}
<file_sep>/database/seeds/CalculatorSectionsSeeder.php
<?php
use Illuminate\Database\Seeder;
use Illuminate\Support\Facades\DB;
class CalculatorSectionsSeeder extends Seeder
{
/**
* Run the database seeds.
*
* @return void
*/
public function run()
{
DB::table('home_sections')->insert([
'id' => 10,
'SECT_NAME' => 'Calculator Page',
'SECT_ACTV' => 1
]);
DB::table('maindata')->insert([
'MAIN_SECT_ID' => 10 , //Calculator
"MAIN_ITEM" => 'Calculator Background Image',
"MAIN_TYPE" => 3 , // Image
"MAIN_HINT" => "Calculator section -- section background image"
]);
DB::table('maindata')->insert([
'MAIN_SECT_ID' => 10 , //Calculator
"MAIN_ITEM" => 'Calculator Car Image',
"MAIN_TYPE" => 3 , // Image
"MAIN_HINT" => "Cut out car image -- preferred size 1243 * 532"
]);
}
}
<file_sep>/app/Models/Section.php
<?php
namespace App\Models;
use Illuminate\Database\Eloquent\Model;
class Section extends Model
{
protected $table = "home_sections";
public $timestamps = false;
function items()
{
return $this->hasMany('App\Models\SiteInfo', 'MAIN_SECT_ID');
}
static function setSection(string $secKey, bool $isActive)
{
        return self::where('SECT_NAME', $secKey)->update([
            "SECT_ACTV" => $isActive ? 1 : 0
        ]);
}
function toggle()
{
if ($this->SECT_ACTV == 0) {
$this->SECT_ACTV = 1;
} else {
$this->SECT_ACTV = 0;
}
return $this->save();
}
}
<file_sep>/routes/admin.php
<?php
//Cars routes
use Illuminate\Http\Request;
use Illuminate\Support\Facades\Route;
Route::get('cars/show', 'CarsController@home');
Route::get('cars/add', 'CarsController@add');
Route::post('cars/images/add', 'CarsController@attachImage');
Route::get('cars/images/del/{id}', 'CarsController@deleteImage');
Route::get('cars/profile/{id}', 'CarsController@profile');
Route::post('cars/update', 'CarsController@update');
Route::post('cars/insert', 'CarsController@insert');
Route::post('cars/toggle/offer', 'CarsController@toggleOffer');
Route::post('cars/toggle/trending', 'CarsController@toggleTrending');
Route::get('cars/unlink/accessory/{carID}/{accessoryID}', 'CarsController@deleteAccessoryLink');
Route::post('cars/link/accessory', 'CarsController@linkAccessory');
Route::post('cars/load/data', 'CarsController@loadData');
Route::post('cars/load/accessories', 'CarsController@loadAccessories');
Route::post('cars/update/image', 'CarsController@editImage');
//Models routes
Route::get('models/show', 'ModelsController@home');
Route::get('models/add', 'ModelsController@add');
Route::get('models/profile/{id}', 'ModelsController@profile');
Route::post('models/update', 'ModelsController@update');
Route::post('models/insert', 'ModelsController@insert');
Route::get('models/toggle/main/{id}', 'ModelsController@toggleMain');
Route::get('models/toggle/active/{id}', 'ModelsController@toggleActive');
Route::post('models/add/image', 'ModelsController@attachImage');
Route::post('models/update/image', 'ModelsController@editImage');
Route::get('models/image/delete/{id}', 'ModelsController@delImage');
//Accessories routes
Route::get('accessories/show', 'AccessoriesController@home');
Route::get('accessories/edit/{id}', 'AccessoriesController@edit');
Route::post('accessories/update', 'AccessoriesController@update');
Route::post('accessories/insert', 'AccessoriesController@insert');
//Types routes
Route::get('types/show', 'CarTypesController@home');
Route::get('types/edit/{id}', 'CarTypesController@edit');
Route::post('types/update', 'CarTypesController@update');
Route::post('types/insert', 'CarTypesController@insert');
Route::get('types/toggle/{id}', 'CarTypesController@toggle');
Route::get('types/delete/{id}', 'CarTypesController@delete');
//Brands routes
Route::get('brands/show', 'BrandsController@home');
Route::get('brands/edit/{id}', 'BrandsController@edit');
Route::post('brands/update', 'BrandsController@update');
Route::post('brands/insert', 'BrandsController@insert');
Route::get('brands/toggle/{id}', 'BrandsController@toggle');
Route::get('brands/delete/{id}', 'BrandsController@delete');
//Partners routes
Route::get('partners/show', 'PartnersController@home');
Route::get('partners/edit/{id}', 'PartnersController@edit');
Route::post('partners/update', 'PartnersController@update');
Route::post('partners/insert', 'PartnersController@insert');
Route::get('partners/toggle/{id}', 'PartnersController@toggle');
//Customers routes
Route::get('customers/show', 'CustomersController@home');
Route::get('customers/edit/{id}', 'CustomersController@edit');
Route::post('customers/update', 'CustomersController@update');
Route::post('customers/insert', 'CustomersController@insert');
Route::get('customers/toggle/{id}', 'CustomersController@toggle');
Route::get('customers/delete/{id}', 'CustomersController@delete');
//Calculator routes
Route::get('manage/calculator', 'CalculatorController@index');
Route::post('add/bank', 'CalculatorController@addBank');
Route::post('edit/bank', 'CalculatorController@editBank');
Route::post('delete/bank', 'CalculatorController@deleteBank');
Route::post('add/insurance', 'CalculatorController@addInsurance');
Route::post('edit/insurance', 'CalculatorController@editInsurance');
Route::post('delete/insurance', 'CalculatorController@deleteInsurance');
Route::post('add/plan', 'CalculatorController@addPlan');
Route::post('edit/plan', 'CalculatorController@editPlan');
Route::get('delete/plan/{id}', 'CalculatorController@deletePlan');
Route::get('plan/toggle/{id}', 'CalculatorController@togglePlan');
//Dashboard users
Route::get("dash/users/all", 'DashUsersController@index');
Route::post("dash/users/insert", 'DashUsersController@insert');
Route::get("dash/users/edit/{id}", 'DashUsersController@edit');
Route::post("dash/users/update", 'DashUsersController@update');
//About Us routes
Route::get("manage/contact", 'ContactUsController@home');
Route::post("update/contact", 'ContactUsController@update');
//Website Section route
Route::get("manage/site", 'InfoController@home');
Route::get("toggle/section/{id}", 'InfoController@toggle');
Route::post("update/site", 'InfoController@update');
Route::post("add/field", 'InfoController@addNew');
Route::post("delete/field/", 'InfoController@deleteField');
Route::get('logout', 'HomeController@logout')->name('logout');
Route::get('/', 'HomeController@admin')->name('admin');
<file_sep>/app/Http/Controllers/CarsController.php
<?php
namespace App\Http\Controllers;
use App\Models\Accessories;
use App\Models\Car;
use App\Models\CarAccessory;
use App\Models\CarImage;
use App\Models\CarModel;
use Illuminate\Http\Request;
class CarsController extends Controller
{
protected $data;
protected $homeURL = "admin/cars/show";
public function home()
{
$this->initDataArr();
return view('cars.show', $this->data);
}
public function profile($id)
{
$this->initProfileArr($id);
$this->data['formTitle'] = "Edit " . $this->data['car']->model->MODL_NAME . ' ' . $this->data['car']->CAR_CATG;
$this->data['formURL'] = url("admin/cars/update");
$this->data['updateImageInfoURL'] = url("admin/cars/update/image");
$this->data['toggleOffer'] = url("admin/cars/toggle/offer");
$this->data['toggleTrending'] = url("admin/cars/toggle/trending");
$this->data['isCancel'] = false;
return view('cars.profile', $this->data);
}
public function insert(Request $request)
{
$request->validate([
"model" => "required|exists:models,id",
"category" => "required",
"price" => "required",
"cc" => "required_if:isActive,on",
"hpwr" => "required_if:isActive,on",
"torq" => "required_if:isActive,on",
"trns" => "required_if:isActive,on",
"speed" => "required_if:isActive,on",
"height" => "required_if:isActive,on",
"rims" => "required_if:isActive,on",
"tank" => "required_if:isActive,on",
"seat" => "required_if:isActive,on",
"dimn" => "required_if:isActive,on",
]);
$car = new Car();
//info
$car->CAR_MODL_ID = $request->model;
$car->CAR_CATG = $request->category;
$car->CAR_PRCE = $request->price;
$car->CAR_DISC = $request->discount ?? 0;
$car->CAR_VLUE = $request->sort ?? 500;
//specs
$car->CAR_ENCC = $request->cc;
$car->CAR_HPWR = $request->hpwr;
$car->CAR_TORQ = $request->torq;
$car->CAR_TRNS = $request->trns;
$car->CAR_ACC = $request->acc;
$car->CAR_TPSP = $request->speed;
$car->CAR_HEIT = $request->height;
$car->CAR_TRNK = $request->tank;
$car->CAR_RIMS = $request->rims;
$car->CAR_SEAT = $request->seat;
$car->CAR_DIMN = $request->dimn;
//overview
$car->CAR_TTL1 = $request->title1;
$car->CAR_PRG1 = $request->prgp1;
$car->CAR_TTL2 = $request->title2;
$car->CAR_PRG2 = $request->prgp2;
$car->save();
return redirect('admin/cars/profile/' . $car->id);
}
public function update(Request $request)
{
$request->validate([
"id" => "required",
"model" => "required|exists:models,id",
"category" => "required",
"price" => "required",
"cc" => "required_if:isActive,on",
"hpwr" => "required_if:isActive,on",
"torq" => "required_if:isActive,on",
"trns" => "required_if:isActive,on",
"speed" => "required_if:isActive,on",
"height" => "required_if:isActive,on",
"rims" => "required_if:isActive,on",
"tank" => "required_if:isActive,on",
"seat" => "required_if:isActive,on",
"dimn" => "required_if:isActive,on",
]);
$car = Car::findOrFail($request->id);
//info
$car->CAR_MODL_ID = $request->model;
$car->CAR_CATG = $request->category;
$car->CAR_PRCE = $request->price;
$car->CAR_DISC = $request->discount ?? 0;
$car->CAR_VLUE = $request->sort ?? 500;
//specs
$car->CAR_ENCC = $request->cc;
$car->CAR_HPWR = $request->hpwr;
$car->CAR_TORQ = $request->torq;
$car->CAR_TRNS = $request->trns;
$car->CAR_ACC = $request->acc;
$car->CAR_TPSP = $request->speed;
$car->CAR_HEIT = $request->height;
$car->CAR_TRNK = $request->tank;
$car->CAR_RIMS = $request->rims;
$car->CAR_SEAT = $request->seat;
$car->CAR_DIMN = $request->dimn;
//overview
$car->CAR_TTL1 = $request->title1;
$car->CAR_PRG1 = $request->prgp1;
$car->CAR_TTL2 = $request->title2;
$car->CAR_PRG2 = $request->prgp2;
$car->save();
return redirect('admin/cars/profile/' . $car->id);
}
public function add()
{
$this->initAddArr();
$this->data['formTitle'] = "New Car Profile";
$this->data['formURL'] = "admin/cars/insert";
$this->data['isCancel'] = false;
return view('cars.add', $this->data);
}
///////////images functions
public function attachImage(Request $request)
{
$request->validate([
"carID" => "required|exists:cars,id",
"photo" => "file",
'value' => 'required'
]);
$car = Car::findOrFail($request->carID);
$newImage = new CarImage();
if ($request->hasFile('photo')) {
$newImage->CIMG_URL = $request->photo->store('images/cars/' . $car->CAR_CATG, 'public');
}
$newImage->CIMG_CAR_ID = $request->carID;
$newImage->CIMG_VLUE = $request->value;
$newImage->save();
$newImage->compress();
return back();
}
public function deleteImage($id)
{
$image = CarImage::findOrFail($id);
echo $image->deleteImage();
}
public function editImage(Request $request){
$request->validate([
"id" => "required",
'value' => 'required',
]);
$image = CarImage::findOrFail($request->id);
$image->CIMG_VLUE = $request->value;
echo $image->save();
}
public function linkAccessory(Request $request)
{
$request->validate([
'carID' => 'required|exists:cars,id',
'accessID' => 'required|exists:accessories,id'
]);
$car = Car::findOrFail($request->carID);
$accessory = Accessories::findOrFail($request->accessID);
$res = $car->accessories()->syncWithoutDetaching([$accessory->id => ['ACCR_VLUE' => ($request->value ?? '')]]);
if (count($res["attached"]) > 0 || count($res["updated"]) > 0)
echo 1;
else
echo 0;
}
public function deleteAccessoryLink($carID, $accessoryId)
{
$car = Car::findOrFail($carID);
$accessory = Accessories::findOrFail($accessoryId);
echo $car->accessories()->detach($accessory);
}
public function loadData(Request $request){
$car = Car::with(["model", "model.brand"])->findOrFail($request->carID);
echo json_encode($car);
return;
}
public function loadAccessories(Request $request){
$request->validate([
"id" => "required",
"carID" => "required"
]);
$otherCar = Car::findOrFail($request->carID);
$car = Car::findOrFail($request->id);
$otherAccessories = $otherCar->getAccessories();
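        // Copy the other car's accessory pivot rows onto this car: re-key them by accessory id
        // so sync() fully replaces this car's accessory set with the same pivot values.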
$otherAccessories = $otherAccessories->mapWithKeys(function ($item){
return [$item->ACCR_ACSR_ID =>[ "ACCR_VLUE" => $item->ACCR_VLUE ]];
});
$car->accessories()->sync($otherAccessories->all());
return back();
}
public function toggleTrending(Request $request){
$request->validate([
"carID" => "required"
]);
$car = Car::findOrFail($request->carID);
echo $car->toggleTrending();
}
public function toggleOffer(Request $request){
$request->validate([
"carID" => "required"
]);
$car = Car::findOrFail($request->carID);
echo $car->toggleOffer();
}
//////////////////// Data functions
private function initProfileArr($carID)
{
$this->data['car'] = Car::with('model', 'model.brand', 'model.type', 'accessories', 'images')->findOrFail($carID);
$this->data['cars'] = Car::with('model', 'model.brand')->get();
$this->data['accessories'] = $this->data['car']->getFullAccessoriesArray();
$this->data['unlinkAccessoryURL'] = url('admin/cars/unlink/accessory/');
$this->data['linkAccessoryURL'] = url('admin/cars/link/accessory');
$this->data['loadAccessoriesURL'] = url('admin/cars/load/accessories');
$this->data['loadCarURL'] = url("admin/cars/load/data");
//Images table
$this->data['images'] = $this->data['car']->images;
//edit form
$this->data['models'] = CarModel::with('brand', 'type')->get();
//add photo form
$this->data['imageFormURL'] = url('admin/cars/images/add');
$this->data['delImageUrl'] = url('admin/cars/images/del/');
}
private function initDataArr()
{
$this->data['items'] = Car::with(["model.brand", "model.type"])->orderBy('CAR_VLUE', 'desc')->get();
$this->data['title'] = "Available Cars";
$this->data['subTitle'] = "Check all Available Cars";
$this->data['cols'] = ['Category', 'Model', 'Year', 'Active?'];
$this->data['atts'] = [
['dynamicUrl' => ['att' => 'CAR_CATG', 'val' => 'id', 'baseUrl' => 'admin/cars/profile/']],
['foreignUrl' => ['rel' => 'model', 'att' => 'MODL_NAME', 'baseUrl' => 'admin/models/profile', 'urlAtt' => 'id']],
['foreign' => ['rel' => 'model', 'att' => 'MODL_YEAR']],
[
'state' => [
"att" => "CAR_ACTV",
"text" => [
"1" => "Active",
"0" => "Hidden",
],
"classes" => [
"1" => "label-success",
"0" => "label-danger",
],
]
],
];
$this->data['homeURL'] = $this->homeURL;
}
private function initAddArr()
{
$this->data['models'] = CarModel::with('brand', 'type')->get();
$this->data['cars'] = Car::with('model', 'model.brand')->get();
$this->data['loadCarURL'] = url("admin/cars/load/data");
}
}
<file_sep>/database/migrations/2020_12_03_111222_create_cars_table.php
<?php
use Illuminate\Database\Migrations\Migration;
use Illuminate\Database\Schema\Blueprint;
use Illuminate\Support\Facades\Schema;
class CreateCarsTable extends Migration
{
/**
* Run the migrations.
*
* @return void
*/
public function up()
{
Schema::create('cars', function (Blueprint $table) {
$table->id();
$table->foreignId('CAR_MODL_ID')->constrained('models');
$table->string('CAR_CATG');
$table->integer('CAR_PRCE');
$table->integer('CAR_DISC')->default(0);
$table->integer('CAR_VLUE')->default(500); //the higher the better
$table->tinyInteger('CAR_ACTV')->default(0);
//Car specs -- all nullable as they can add car before publish
$table->string('CAR_HPWR')->nullable();
$table->integer('CAR_SEAT')->nullable();
$table->string('CAR_ACC')->nullable(); //0-100 acceleration
$table->string('CAR_ENCC')->nullable();
$table->string('CAR_TORQ')->nullable();
$table->string('CAR_TRNS')->nullable(); //transmission type
$table->integer('CAR_TPSP')->nullable(); //top speed
$table->integer('CAR_HEIT')->nullable();
$table->integer('CAR_RIMS')->nullable(); //wheel raduis
$table->integer('CAR_TRNK')->nullable(); //fuel trunk capacity
//Car marketing info
$table->string('CAR_TTL1')->nullable();
$table->text('CAR_PRG1')->nullable();
$table->string('CAR_TTL2')->nullable();
$table->text('CAR_PRG2')->nullable();
//Car Offer
$table->dateTime('CAR_OFFR')->nullable(); //is offer on car
$table->dateTime('CAR_TRND')->nullable(); //is trending car
$table->timestamps();
$table->softDeletes();
});
}
/**
* Reverse the migrations.
*
* @return void
*/
public function down()
{
Schema::dropIfExists('cars');
}
}
<file_sep>/app/Http/Controllers/BrandsController.php
<?php
namespace App\Http\Controllers;
use App\Models\Brand;
use Exception;
use Illuminate\Http\Request;
use Illuminate\Validation\Rule;
class BrandsController extends Controller
{
protected $data;
protected $homeURL = 'admin/brands/show';
private function initDataArr()
{
$this->data['items'] = Brand::all();
$this->data['title'] = "Available Brands";
$this->data['subTitle'] = "Manage all Available Brands that should appear on this website such as Peugeot";
$this->data['cols'] = ['Logo', 'Name', 'Arabic', 'Active', 'Edit', 'Delete'];
$this->data['atts'] = [
['assetImg' => ['att' => 'BRND_LOGO']],
'BRND_NAME',
'BRND_ARBC_NAME',
[
'toggle' => [
"att" => "BRND_ACTV",
"url" => "admin/brands/toggle/",
"states" => [
"1" => "True",
"0" => "False",
],
"actions" => [
"1" => "disable the brand",
"0" => "activate the brand, please make sure a logo is attached",
],
"classes" => [
"1" => "label-success",
"0" => "label-danger",
],
]
],
['edit' => ['url' => 'admin/brands/edit/', 'att' => 'id']],
['del' => ['url' => 'admin/brands/delete/', 'att' => 'id', 'msg' => 'delete the brand, system will not delete if there is any model linked with the brand']],
];
$this->data['homeURL'] = $this->homeURL;
}
public function home()
{
$this->initDataArr();
$this->data['formTitle'] = "Add Brand";
$this->data['formURL'] = "admin/brands/insert";
$this->data['isCancel'] = false;
return view('settings.brands', $this->data);
}
public function edit($id)
{
$this->initDataArr();
$this->data['brand'] = Brand::findOrFail($id);
$this->data['formTitle'] = "Edit Brand ( " . $this->data['brand']->BRND_NAME . " )";
$this->data['formURL'] = "admin/brands/update";
$this->data['isCancel'] = true;
return view('settings.brands', $this->data);
}
public function insert(Request $request)
{
$request->validate([
"name" => "required|unique:brands,BRND_NAME",
"logo" => "required_if:isActive,on"
]);
$brand = new Brand();
$brand->BRND_NAME = $request->name;
$brand->BRND_ARBC_NAME = $request->arbcName;
if ($request->hasFile('logo')) {
$brand->BRND_LOGO = $request->logo->store('images/brands/' . $brand->BRND_NAME, 'public');
}
$brand->BRND_ACTV = $request->isActive == 'on' ? 1 : 0;
$brand->save();
return redirect($this->homeURL);
}
public function update(Request $request)
{
$request->validate([
"id" => "required",
]);
$brand = Brand::findOrFail($request->id);
$request->validate([
"name" => ["required", Rule::unique('brands', "BRND_NAME")->ignore($brand->BRND_NAME, "BRND_NAME"),],
"id" => "required",
"logo" => "required_if:isActive,on"
]);
$brand->BRND_NAME = $request->name;
$brand->BRND_ARBC_NAME = $request->arbcName;
if ($request->hasFile('logo')) {
$this->deleteOldBrandPhoto($brand->BRND_LOGO);
$brand->BRND_LOGO = $request->logo->store('images/brands/' . $brand->BRND_NAME, 'public');
}
$brand->BRND_ACTV = $request->isActive == 'on' ? 1 : 0;
$brand->save();
return redirect($this->homeURL);
}
public function toggle($id)
{
$brand = Brand::findOrFail($id);
$brand->toggle();
return back();
}
public function delete($id){
$brand = Brand::withCount('models')->findOrFail($id);
if($brand->models_count == 0){
$brand->delete();
}
return back();
}
private function deleteOldBrandPhoto($brandFilePath)
{
if (isset($brandFilePath) && $brandFilePath != '') {
try {
unlink(public_path('storage/' . $brandFilePath));
} catch (Exception $e) {
}
}
}
}
<file_sep>/app/Models/Plan.php
<?php
namespace App\Models;
use Illuminate\Database\Eloquent\Model;
class Plan extends Model
{
public $timestamps = false;
protected $table = "plans";
function bank()
{
return $this->belongsTo("App\Models\Bank", "PLAN_BANK_ID");
}
function downpayment()
{
return $this->belongsTo("App\Models\Downpayment", "PLAN_DOWN_ID");
}
function toggle(){
$this->PLAN_ACTV = (1 + $this->PLAN_ACTV) % 2;
$this->save();
}
static function getYearsByDownpayment($downpaymentID)
{
return self::where("PLAN_DOWN_ID", $downpaymentID)->where("PLAN_ACTV", 1)->selectRaw("DISTINCT PLAN_YEAR")->get();
}
static function getPlansByDownpaymentAndYear($downpaymentID, $year, $isEmployed)
{
return self::join('banks', 'banks.id', '=', 'PLAN_BANK_ID')->where("PLAN_YEAR", $year)->where("PLAN_EMPL", $isEmployed)
->where("PLAN_DOWN_ID", $downpaymentID)->where("PLAN_ACTV", 1)->select("plans.id", "BANK_NAME", "PLAN_INTR", "PLAN_INSR", "BANK_EXPN")->get();
}
}
<file_sep>/app/Models/CarModel.php
<?php
namespace App\Models;
use Illuminate\Database\Eloquent\Model;
class CarModel extends Model
{
protected $table = "models";
public $timestamps = false;
public function brand(){
return $this->belongsTo('App\Models\Brand', 'MODL_BRND_ID');
}
public function type(){
return $this->belongsTo('App\Models\CarType', 'MODL_TYPE_ID');
}
public function cars(){
return $this->hasMany('App\Models\Car', 'CAR_MODL_ID');
}
public function colorImages(){
return $this->hasMany('App\Models\ModelImage', 'MOIM_MODL_ID');
}
function toggleMain(){
if($this->MODL_MAIN == 0) {
if(isset($this->MODL_IMGE) && strlen($this->MODL_IMGE)>0 && isset($this->MODL_OVRV) && strlen($this->MODL_OVRV)>0)
$this->MODL_MAIN = 1;
} else {
$this->MODL_MAIN = 0;
}
$this->save();
}
function toggleActive(){
if($this->MODL_ACTV == 0) {
$this->MODL_ACTV = 1;
} else {
$this->MODL_ACTV = 0;
}
$this->save();
}
static function getModelYears(){
return self::selectRaw('DISTINCT MODL_YEAR')->join('brands', 'brands.id', '=', 'MODL_BRND_ID')
->where('MODL_ACTV', 1)->where('BRND_ACTV', 1)->get()->pluck('MODL_YEAR');
}
}
<file_sep>/app/Models/SiteInfo.php
<?php
namespace App\Models;
use Illuminate\Database\Eloquent\Model;
use Illuminate\Support\Facades\DB;
class SiteInfo extends Model
{
protected $table = "maindata";
public $timestamps = false;
public $fillable = ['MAIN_ITEM', 'MAIN_CNTN'];
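    // getSiteInfo() returns the active sections' items keyed by section name then item name, e.g. (hypothetical values):
    // ['Calculator Page' => ['Calculator Car Image' => 'images/site/7/car.png', 'Active' => 1], ...]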
static public function getSiteInfo()
{
$infoArray = DB::table('maindata')->rightJoin('home_sections', 'MAIN_SECT_ID', '=', 'home_sections.id')
->select('maindata.id', 'MAIN_ITEM', 'MAIN_CNTN', 'MAIN_SECT_ID', 'SECT_NAME', 'SECT_ACTV')
->where('SECT_ACTV', 1)
->get();
$infoMap = [];
foreach ($infoArray as $row) {
$infoMap[$row->SECT_NAME][$row->MAIN_ITEM] = $row->MAIN_CNTN;
$infoMap[$row->SECT_NAME]['Active'] = $row->SECT_ACTV;
}
return $infoMap;
}
static public function setSiteInfo($item, $content)
{
return DB::table('maindata')->where('MAIN_ITEM', $item)->update([
"MAIN_CNTN" => $content
]);
}
}
<file_sep>/app/Models/Bank.php
<?php
namespace App\Models;
use Illuminate\Database\Eloquent\Model;
class Bank extends Model
{
public $timestamps = false;
protected $table = "banks";
function plans(){
return $this->hasMany('App\Models\Plan', "PLAN_BANK_ID");
}
function deleteAll(){
$this->plans()->delete();
$this->delete();
}
}
<file_sep>/app/Models/ContactUs.php
<?php
namespace App\Models;
use Illuminate\Database\Eloquent\Model;
class ContactUs extends Model
{
protected $table = "about";
public $timestamps = false;
protected $fillable = ['ABUT_ITEM', 'ABUT_CNTN'];
static public function getContactUs(){
$dbTable = self::all();
$mappedArray = $dbTable->mapWithKeys(function ($row){
return [$row['ABUT_ITEM'] => $row['ABUT_CNTN']];
});
return $mappedArray->toArray();
}
}
<file_sep>/database/migrations/2021_01_12_155437_update_models_table.php
<?php
use Illuminate\Database\Migrations\Migration;
use Illuminate\Database\Schema\Blueprint;
use Illuminate\Support\Facades\Schema;
class UpdateModelsTable extends Migration
{
/**
* Run the migrations.
*
* @return void
*/
public function up()
{
Schema::table('models', function (Blueprint $table){
$table->string("MODL_PDF")->nullable();
$table->string("MODL_BGIM")->nullable();
});
Schema::create('model_images', function(Blueprint $table){
$table->id();
$table->foreignId("MOIM_MODL_ID")->constrained('models');
$table->string("MOIM_URL");
$table->integer("MOIM_SORT");
$table->string("MOIM_COLR");
});
}
/**
* Reverse the migrations.
*
* @return void
*/
public function down()
{
Schema::table('models', function (Blueprint $table){
$table->dropColumn("MODL_PDF");
$table->dropColumn("MODL_BGIM");
});
Schema::dropIfExists("model_images");
}
}
|
e65cf3792994998b63f338b693551af4ddf3b99b
|
[
"PHP"
] | 35
|
PHP
|
Mina-Nabil/carCatalog
|
8dcd7b99c703ec2005a474f641a9a775da8b8cb5
|
0c30e8526037a84f482ed4f88711ef953c75ddc3
|
refs/heads/master
|
<repo_name>iDimov/layout_webpack<file_sep>/source/index.js
import './sass/main.sass';
import './css/ext.css';
console.log('look at me');<file_sep>/README.md
# for layout with webpack
pug + sass + css + webpack + browsersync
## How to use the files?
1. Clone this repo
2. Run `npm install` to install all the dependencies
3. Run `yarn run start` for development mode or:
```javascript
"scripts": {
"start": "webpack-dev-server --env development",
"build": "rm -rf build && webpack --env production && webpack -w",
"webpack": "webpack -w",
"serv": "static build"
}
```
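For a production build, `yarn run build` outputs the bundle to `build/` (see `build/js/index.js` below); `yarn run serv` should then serve that folder, assuming the `static` CLI referenced by the `serv` script is installed.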
<file_sep>/build/js/index.js
webpackJsonp([0],[
/* 0 */
/***/ (function(module, __webpack_exports__, __webpack_require__) {
"use strict";
Object.defineProperty(__webpack_exports__, "__esModule", { value: true });
/* harmony import */ var __WEBPACK_IMPORTED_MODULE_0__sass_main_sass__ = __webpack_require__(1);
/* harmony import */ var __WEBPACK_IMPORTED_MODULE_0__sass_main_sass___default = __webpack_require__.n(__WEBPACK_IMPORTED_MODULE_0__sass_main_sass__);
/* harmony import */ var __WEBPACK_IMPORTED_MODULE_1__css_ext_css__ = __webpack_require__(2);
/* harmony import */ var __WEBPACK_IMPORTED_MODULE_1__css_ext_css___default = __webpack_require__.n(__WEBPACK_IMPORTED_MODULE_1__css_ext_css__);
console.log('look at me');
/***/ }),
/* 1 */
/***/ (function(module, exports) {
// removed by extract-text-webpack-plugin
/***/ }),
/* 2 */
/***/ (function(module, exports) {
// removed by extract-text-webpack-plugin
/***/ })
],[0]);
|
1af48f8e8fd4dedd827a233c4cbd3b06dbc3dba6
|
[
"JavaScript",
"Markdown"
] | 3
|
JavaScript
|
iDimov/layout_webpack
|
a1f4d817333242e4f6819fd84899517c45201832
|
cb4879dc6705fae3cdb3027534f0b25c5fc413ea
|
refs/heads/master
|
<repo_name>utec-cs1103-2019-01/agregacion-y-herencia-fresh-avocado<file_sep>/Agregacion-y-Herencia/Volumen.cpp
//
// Created by <NAME> on 2019-05-03.
//
#include "Libro.h"
#include "Volumen.h"
#include "Revista.h"
Volumen::~Volumen() {
}
void Volumen::mostrar() {
    auto var = dynamic_cast<Libro*>(this);
    if (var != nullptr) {
        var->mostrar();
    } else {
        auto aux = dynamic_cast<Revista*>(this);
        if (aux != nullptr) { // guard: a plain Volumen is neither a Libro nor a Revista
            aux->mostrar();
        }
    }
}
<file_sep>/Agregacion-y-Herencia/Biblioteca.cpp
//
// Created by <NAME> on 2019-05-03.
//
#include <vector>
#include "Biblioteca.h"
using namespace std;
Biblioteca::~Biblioteca() {
for (int i = 0; i < volumenes.size(); ++i) {
delete volumenes[i];
}
}
void Biblioteca::mostrar_biblioteca() {
for (int i = 0; i < volumenes.size(); ++i) {
cout << "Volumen #" << i+1 << " | ";
volumenes[i]->mostrar();
}
}
void Biblioteca::incluir(Volumen* v) {
volumenes.push_back(v);
}
<file_sep>/Agregacion-y-Herencia/Revista.h
//
// Created by <NAME> on 2019-05-03.
//
#ifndef AGREGACION_Y_HERENCIA_REVISTA_H
#define AGREGACION_Y_HERENCIA_REVISTA_H
#include "Volumen.h"
class Revista: public Volumen {
int id_revista;
public:
Revista(string nombre, int numero_de_volumen, int id_revista): Volumen(nombre, numero_de_volumen), id_revista{id_revista} {}
void mostrar() override;
};
#endif //AGREGACION_Y_HERENCIA_REVISTA_H
<file_sep>/Agregacion-y-Herencia/Revista.cpp
//
// Created by <NAME> on 2019-05-03.
//
#include <iostream>
#include "Revista.h"
using namespace std;
void Revista::mostrar() {
cout << "Revista #" << id_revista << " | " << "Título: " << nombre << endl;
}<file_sep>/Agregacion-y-Herencia/Libro.h
//
// Created by <NAME> on 2019-05-03.
//
#ifndef AGREGACION_Y_HERENCIA_LIBRO_H
#define AGREGACION_Y_HERENCIA_LIBRO_H
#include <string>
#include "Volumen.h"
using namespace std;
class Libro: public Volumen {
int id_libro;
public:
Libro(string nombre, int numero_de_volumen, int id_libro): Volumen(nombre, numero_de_volumen), id_libro{id_libro} {}
void mostrar() override;
};
#endif //AGREGACION_Y_HERENCIA_LIBRO_H
<file_sep>/Agregacion-y-Herencia/CMakeLists.txt
cmake_minimum_required(VERSION 3.13)
project(Agregacion_y_Herencia)
set(CMAKE_CXX_STANDARD 17)
add_executable(Agregacion_y_Herencia main.cpp Biblioteca.h Biblioteca.cpp Volumen.h Volumen.cpp Libro.h Libro.cpp Revista.h Revista.cpp catch.hpp test.cpp)<file_sep>/Agregacion-y-Herencia/Libro.cpp
//
// Created by <NAME> on 2019-05-03.
//
#include <iostream>
#include "Libro.h"
void Libro::mostrar() {
cout << "Libro #" << id_libro << " | " << "Título: " << nombre << endl;
}<file_sep>/Agregacion-y-Herencia/test.cpp
//
// Created by <NAME> on 2019-05-03.
//
#include "catch.hpp"
#include "Biblioteca.h"
#include "Libro.h"
#include "Revista.h"
#include "Volumen.h"
SCENARIO("Agregar a la biblioteca.") {
GIVEN("Un libro y una revista.") {
WHEN("Agregamos un libro y una revista a la bibliteca.") {
THEN("Comprobar que se han agregado ambos elementos.") {
Biblioteca biblioteca;
Volumen* v1 = new Libro("100 años de soledad", 1, 1);
Volumen* v2 = new Revista("Time", 2, 1);
biblioteca.incluir(v1);
biblioteca.incluir(v2);
REQUIRE(biblioteca.get_numero_de_volumenes() == 2);
}
}
}
}
<file_sep>/Agregacion-y-Herencia/Biblioteca.h
//
// Created by <NAME> on 2019-05-03.
//
#ifndef AGREGACION_Y_HERENCIA_BIBLIOTECA_H
#define AGREGACION_Y_HERENCIA_BIBLIOTECA_H
#include "Volumen.h"
#include <iostream>
#include <vector>
using namespace std;
class Biblioteca {
vector<Volumen*> volumenes = {};
int max_volumenes = 0, max_revistas = 0;
public:
Biblioteca() {}
~Biblioteca();
void mostrar_biblioteca();
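    // Takes ownership of the pointer: the destructor deletes every Volumen* passed to incluir().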
void incluir(Volumen*);
int get_numero_de_volumenes() { return volumenes.size(); }
};
#endif //AGREGACION_Y_HERENCIA_BIBLIOTECA_H
<file_sep>/Agregacion-y-Herencia/Volumen.h
//
// Created by <NAME> on 2019-05-03.
//
#ifndef AGREGACION_Y_HERENCIA_VOLUMEN_H
#define AGREGACION_Y_HERENCIA_VOLUMEN_H
#include <string>
using namespace std;
class Volumen {
protected:
string nombre;
int numero_de_volumen;
public:
Volumen(string nombre, int numero_de_volumen): nombre{nombre}, numero_de_volumen{numero_de_volumen} {}
    virtual ~Volumen(); // virtual: Biblioteca deletes through Volumen* pointers
virtual void mostrar();
};
// Volumen* v = new Libro("L");
// auto var = dynamic_cast<Libro*>(v);
// if (var != nullptr) {
// v es un libro;
// }
#endif //AGREGACION_Y_HERENCIA_VOLUMEN_H
|
a2e98051545f4ceb5f2863b087854ff745c9b30e
|
[
"CMake",
"C++"
] | 10
|
C++
|
utec-cs1103-2019-01/agregacion-y-herencia-fresh-avocado
|
99c5699d572d819e3a544bd7ebd7cd34c34f6359
|
a209d1bc449f54f9e55481385ead33ff2ee29807
|
refs/heads/master
|
<repo_name>atacraft/mon_site<file_sep>/app/controllers/app_controller.rb
class AppController < ApplicationController
def login
if params[:user]
log = params[:user][:login]
      pass = crypter_pass(params[:user][:password])
user = User.where(login: log, password: pass).first
if user
session[:user_id] = user.id
session[:user_login] = user.login
redirect_to(root_path, notice: "Successfully logged in.")
else
redirect_to(login_path, notice: "Wrong login or password.")
end
else
      #incorrect redirection
#redirect_to(login_path, notice: "debug: params empty, remove the mess after!")
end
end
def logout
session[:user_id] = nil
session[:user_login] = nil
redirect_to(root_path, notice: "Successfully logged out.")
end
end
<file_sep>/app/models/comment.rb
class Comment < ActiveRecord::Base
belongs_to :post
belongs_to :user
attr_accessible :opinion, :post_id, :user_id
validates :post_id, presence: true
validates :user_id, presence: true
validates :opinion, presence: true
end
<file_sep>/app/controllers/application_controller.rb
class ApplicationController < ActionController::Base
protect_from_forgery
helper_method :connected, :crypter_pass, :current_user, :redirection
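  # `redirection` is meant to be invoked (e.g. from a before_filter) by controllers that require a signed-in user.
  # NOTE: :current_user is exposed as a helper but is not defined in this file -- presumably defined elsewhere (not shown).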
def redirection
if !connected
redirect_to(root_path, notice: "You must log in to access this area.")
end
end
def crypter_pass(password)
return Digest::MD5.hexdigest("#{password}")
end
def connected
if session[:user_id]
true
else
false
end
end
end
<file_sep>/app/models/user.rb
class User < ActiveRecord::Base
attr_accessible :email, :first_name, :lastname, :login, :password, :password_confirmation
validates :email, uniqueness: true, presence: true
validates :login, uniqueness: true, presence: true
validates :password, presence: true, confirmation: true
validates_confirmation_of :password
end
|
078a0ba0724a5511a6b05504d857b7f7e9d846d2
|
[
"Ruby"
] | 4
|
Ruby
|
atacraft/mon_site
|
d67217329a27259a6c3b1134006d089c2fc417c2
|
3ccd16e7414d1f7915f212decc803c0b9c538bb0
|
refs/heads/master
|
<repo_name>bdlindsay/cs4760assignment3<file_sep>/monitor.h
#ifndef MONITOR_H
#define MONITOR_H
#include "condition.h"
#include <time.h>
// monitor.h
// <NAME>
// cs4760 assignment3
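/*
 * enter_monitor attaches the shared cond_t identified by shm_id, serializes entry
 * through the monitor's mutex/condition semaphores, logs entry/exit times, and runs
 * (*enter_cs)(proc_num) inside the critical section before releasing the mutex.
 */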
void enter_monitor(int proc_num, int shm_id, void (*enter_cs)(int));
#endif
<file_sep>/condition.c
#include "condition.h"
// condition.c
// <NAME>
// cs4760 assignment3
void wait_cond(cond_t *cond) {
cond->num_waiting_procs++; // # processes waiting on this condition
if(cond->next_count > 0) { // is someone waiting inside monitor
signal_sem(cond->sem_id,1); // next.signal() - wake it up
} else {
signal_sem(cond->sem_id,0); // mutex.signal() - No, free mutex so others enter
}
wait_sem(cond->sem.val,0); // sem.wait() - start waiting for condition
cond->num_waiting_procs--; // wait over. decrement variable
}
void signal_cond(cond_t *cond) {
if (cond->num_waiting_procs <= 0) // do nothing if no one waiting
return;
cond->next_count++; // # of ready proccesses inside monitor
signal_sem(cond->sem.val,0); // sem.signal()
wait_sem(cond->sem_id,1); // next.wait() - you wait. let signaled process run
cond->next_count--; // one less process in monitor
}
// allocates and initializes a shared cond_t (shared memory plus its semaphores);
// call once and share cond->shm_id with the other processes
cond_t* initcondition() {
int shm_id;
cond_t *cond;
// get and attach shared memory for cond_t *cond
shm_id = shmget(IPC_PRIVATE, sizeof(cond_t*),IPC_CREAT | 0755);
cond = (cond_t*) shmat(shm_id,0,0);
cond->shm_id = shm_id;
// init int vars
cond->num_waiting_procs = 0;
cond->next_count = 0;
// allocate mutex and next
if ((cond->sem_id = semget(IPC_PRIVATE,2, IPC_CREAT | 0755)) == -1) {
perror("semget");
raise(SIGINT);
}
// init mutex to 1
if((initelement(cond->sem_id,0,1)) != 0) {
perror("semctl:initelement");
raise(SIGINT);
}
// init next to 0
if ((initelement(cond->sem_id,1,0)) != 0) {
perror("semctl:initelemnt");
raise(SIGINT);
}
// allocate sem_t sem
if ((cond->sem.val = semget(IPC_PRIVATE,1,IPC_CREAT | 0755)) == -1) {
perror("semget");
raise(SIGINT);
}
// init sem to 1
if ((initelement(cond->sem.val,0,1)) != 0) {
perror("semctl:initelement");
raise(SIGINT);
}
return cond;
}
// cleanup condition allocations
void cleanupcond(cond_t *cond) {
int i;
int id;
// removes set, ignores semnum arg
if((i = semctl(cond->sem_id,0,IPC_RMID)) != 0) {
perror("semctl:IPC_RMID");
}
// remove sem_t sem - semaphore set
if((i = semctl(cond->sem.val,0,IPC_RMID)) != 0) {
perror("semctl:IPC_RMID");
}
// save id of shared cond_t *cond
id = cond->shm_id;
// detatch cond_t *cond
if((i = shmdt(cond)) == -1) {
perror("shmdt");
}
// remove shared cond_t *cond
if ((shmctl(id,IPC_RMID,NULL)) == -1) {
perror("shmctl:IPC_RMID");
}
}
<file_sep>/condition.h
#ifndef CONDITION_H
#define CONDITION_H
#include "semaphore.h"
#include <sys/shm.h>
#include <signal.h>
// condition.h
// <NAME>
// cs4760 assignment3
typedef struct condition {
int num_waiting_procs;
sem_t sem; // sem.val holds the sem_id of the semaphore sem
int next_count;
int sem_id; // sem_id for next and mutex
	//sem_t mutex; // shared semaphore index 0
	//sem_t next; // shared semaphore index 1
int shm_id;
} cond_t;
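/*
 * Hoare-style condition built on System V semaphores: sem_id element 0 is the monitor
 * mutex and element 1 is "next" (signalers waiting to resume); sem is a separate
 * one-element set that blocked waiters sleep on. wait_cond releases the mutex (or wakes
 * a signaler) before sleeping; signal_cond wakes one waiter and suspends the signaler on "next".
 */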
cond_t* initcondition();
void cleanupcond(cond_t *cond);
void wait_cond(cond_t *cond);
void signal_cond(cond_t *cond);
#endif
<file_sep>/monitor.c
#include "monitor.h"
// monitor.c
// <NAME>
// cs4760 assignment3
// monitor takes a function pointer (function w/ int as parameter)
// and an int to run with the function pointer
void enter_monitor(int proc_num, int shm_id, void (*enter_cs)(int)) {
time_t tcurrent;
struct tm *timeinfo;
cond_t *cond;
cond = shmat(shm_id,0,0);
wait_sem(cond->sem_id,0); // mutex.wait() - wait for free critical section
wait_cond(cond);
time(&tcurrent);
timeinfo = localtime(&tcurrent);
fprintf(stderr, "Process %d entering critical section at %d:%02d:%02d\n",
proc_num,timeinfo->tm_hour, timeinfo->tm_min, timeinfo->tm_sec);
// execute the function sent to the monitor
(*enter_cs)(proc_num);
time(&tcurrent);
timeinfo = localtime(&tcurrent);
fprintf(stderr, "Process %d exiting critical section at %d:%02d:%02d\n",
proc_num,timeinfo->tm_hour, timeinfo->tm_min, timeinfo->tm_sec);
if (cond->next_count > 0) {
signal_sem(cond->sem_id,1); // next.signal();
signal_cond(cond);
} else {
signal_cond(cond);
}
signal_sem(cond->sem_id,0); // done with critical section
shmdt(cond);
}
<file_sep>/master.h
#ifndef MASTER_H
#define MASTER_H
#define _GNU_SOURCE /* for asprintf() used in slave.c */
#include <stdio.h>
#include <string.h>
#include <sys/types.h>
#include <sys/ipc.h>
#include <sys/shm.h>
#include <sys/sem.h>
#include <time.h>
#include <stdlib.h>
#include <unistd.h>
#include <signal.h>
#include <sys/wait.h>
#include "monitor.h"
// master.h
// <NAME>
// cs4760 assignment3
typedef enum {false, true} bool;
void critical_section(int i);
const int p_n = 19; // process number to send each process
const int n = 18; // its respective place in the flag array (1 less)
#endif
<file_sep>/Makefile
CC = gcc
CFLAGS = -g
RM = rm
EXEM = master
EXES = slave
SRCSM = master.c monitor.c condition.c semaphore.c
SRCSS = slave.c monitor.c condition.c semaphore.c
OBJSM = ${SRCSM:.c=.o}
OBJSS = ${SRCSS:.c=.o}
.c:.o
$(CC) $(CFLAGS) -c $<
all : $(EXEM) $(EXES)
$(EXEM) : $(OBJSM)
$(CC) -o $@ $(OBJSM)
$(OBJSM) : master.h monitor.h
$(EXES) : $(OBJSS)
$(CC) -o $@ $(OBJSS)
$(OBJSS) : master.h monitor.h
clean :
$(RM) -f $(EXES) $(EXEM) $(OBJSS) $(OBJSM)
cleanexe :
$(RM) -f slave master
<file_sep>/slave.c
#include "master.h"
// slave.c
// <NAME>
// Project 2 CS4760
int process_num; // global to inform user on SIGINT
char *msg; // global to release on SIGINT if necessary
FILE *fp; // global to close on SIGINT if necessary
cond_t *cond = NULL; // condition for this process
void intr_handler();
void process(int i);
int main (int argc, char *argv[]) {
process_num = atoi(argv[1]); // process num sent from parent process
int shm_id = atoi(argv[2]); // shm for shared cond_t
int p_index = process_num - 1;
time_t tcurrent; // for time reporting
struct tm *timeinfo; // for time reporting
int write_count = 0;
signal(SIGINT,intr_handler);
// try to enter monitor 3 times to write to file
while (write_count < 3) {
time(&tcurrent);
timeinfo = localtime(&tcurrent);
fprintf(stderr, "Process %d attempt to enter monitor at %d:%02d:%02d\n",
process_num, timeinfo->tm_hour, timeinfo->tm_min, timeinfo->tm_sec);
enter_monitor(process_num, shm_id, &critical_section);
write_count++;
}
}
void critical_section(int id) {
time_t tcurrent;
struct tm *timeinfo;
int r;
srandom(time(NULL));
// open file
fp = fopen("cstest", "a");
if (!fp) { // error checking on file open
perror("fopen error");
return;
}
// sleep for 0-2 seconds
r = random() % 3;
sleep(r);
// create/alloc char* for file write
time(&tcurrent);
timeinfo = localtime(&tcurrent);
asprintf(&msg, "File modified by process number %d at time %d:%02d:%02d\n",
id, timeinfo->tm_hour, timeinfo->tm_min, timeinfo->tm_sec);
// write to file
fprintf(fp, "%s", msg);
// sleep for 0-2 seconds
r = random() % 3;
sleep(r);
// clean up
free(msg);
msg = NULL;
fclose(fp);
fp = NULL;
}
// interupt handler
void intr_handler() {
signal(SIGINT,SIG_DFL); // change SIGINT back to default handling
if (msg != NULL) { // if allocated memory, free it
free(msg);
}
if (fp != NULL) { // if file open, close it
fclose(fp);
}
fprintf(stderr,"Recieved SIGINT: Process %d cleaned up and dying.\n",
process_num);
// let it do default actions for SIGINT by resending now
raise(SIGINT);
}
<file_sep>/semaphore.h
#ifndef SEMAPHORE_H
#define SEMAPHORE_H
#include <sys/types.h>
#include <sys/ipc.h>
#include <sys/sem.h>
#include <stdio.h>
#include <stdlib.h>
#include <errno.h>
// semaphore.h
// <NAME>
// cs4760 assignment3
typedef struct semaphore {
int val;
} sem_t;
void wait_sem(int sem_id, int sem_num);
void signal_sem(int sem_id, int sem_num);
int initelement(int id, int num, int val);
#endif
<file_sep>/master.c
#include "master.h"
// master.c
// <NAME>
// Project 3 CS4760
char *arg2; // to send execl process args
char *arg3; // to send shm_id to child process
int pids[19] = { 0 }; // one slot per slave process (p_n = 19)
bool timed_out = false;
const int SIZE = 3;
cond_t *cond = NULL; // shared condition for monitor access
// signal handler prototypes
void timeout();
void free_mem();
int main(int argc, char *argv[]) {
char *arg1 = "slave"; // to send execl process argv[0]
arg2 = malloc(16); // to send execl process args (room for an int printed as text)
arg3 = malloc(16); // to send execl process args (room for an int printed as text)
int pid;
int act_procs = 0; // active process counter
int i = 0; // index var
// signal handling: timeout - 60s, on ctrl-c free memory allocated and quit
signal(SIGALRM,timeout);
signal(SIGINT,free_mem);
if (argc > 1) { // user specified timeout period
i = atoi(argv[1]);
fprintf(stderr,"Setting timeout for %d seconds\n",i);
alarm(i);
} else { // default
fprintf(stderr,"Setting timeout for 5 minutes (default)\n");
alarm(300);
}
// allocates all vars and shared memory for cond_t
cond = (cond_t*)initcondition();
sprintf(arg3,"%d",cond->shm_id);
// fork for each child process to create
for(i = 1; i <= p_n; i++) { // 1 through 19
sprintf(arg2,"%d", i); // var for process number for each process
act_procs++; // increment each time a new process is created
if (act_procs > 20) {
fprintf(stderr,"Too many processes created. Fatal error.");
raise(SIGINT);
}
pid = fork();
if (pid < 0) { // error checking
perror("fork:");
}
if (pid == 0) { // don't let children spawn more children
break; // for clarity, could just use execl at this point
} else {
pids[(i-1)] = pid; // save each process pid
}
}
if (pid == 0) { // children process actions
execl("slave", arg1, arg2, arg3, 0); // start a slave process
}
else if (pid > 0) { // parent process actions
for(i = 0; i < p_n; i++) { // wait for all 19 children to finish
wait(NULL);
act_procs--;
}
printf("In master-finished tasks. Cleaning up and quiting.\n");
// clean up semaphore sets and dealloc cond_t *cond
cleanupcond(cond);
// free argument memory process num transfer
free(arg2);
free(arg3);
} // end else for pid > 0 -> parent process actions
} // end main
void free_mem() {
int i; // counter
fprintf(stderr,"Received SIGINT. Cleaning up and quiting.\n");
// kill each process if program timed out
if (timed_out == true) {
for(i = 0; i < 19; i++) { // 0-18
kill(pids[i],SIGINT); // kill child process
waitpid(pids[i],NULL,0);
}
// to be safe
system("killall slave");
}
// clean up semaphore sets and dealloc cond_t *cond
cleanupcond(cond);
// free allocated mem for arg sending
free(arg2);
free(arg3);
signal(SIGINT,SIG_DFL); // restore default action to SIGINT
raise(SIGINT); // take normal action for SIGINT after cleanup
}
void timeout() {
// timeout duration passed send SIGINT
timed_out = true;
fprintf(stderr,"Timeout duration reached.\n");
raise(SIGINT);
}
|
8351055b6038d8584f927cd3f1435c043e282504
|
[
"C",
"Makefile"
] | 9
|
C
|
bdlindsay/cs4760assignment3
|
684fe45265d711530d57586e52c7684505870372
|
32e8ea0911440c55562838c1384acb8138c11b08
|
refs/heads/master
|
<repo_name>huyhoangbrent/BrentsMusicPlayerApp<file_sep>/app/src/main/java/com/example/huyho/serviceapp/Test.java
package com.example.huyho.serviceapp;
public class Test {
}
|
6fbe4452b0accdedd76e3930a22d3ff01ab81c34
|
[
"Java"
] | 1
|
Java
|
huyhoangbrent/BrentsMusicPlayerApp
|
74f372e4c7da1f3d4e2cee36fc08cdf0955ffe7f
|
670bda62f61eb7e486a2bc142fde316f55295cd3
|
refs/heads/master
|
<repo_name>Cracklings/LearnGL<file_sep>/LICENSE.md
Do whatever you want with this if it's useful for you in any way.
<file_sep>/main.cpp
#define GLEW_STATIC
#include <iostream>
#include <gl/glew.h>
#include <SDL/SDL.h>
#include <string>
#include <ShaderReader.h>
const int screenWidth {1920};
const int screenHeight{1080};
int main(int argc, char *argv[])
{
SDL_Init (SDL_INIT_EVERYTHING);
SDL_Window* window{nullptr};
SDL_GL_SetAttribute(SDL_GL_CONTEXT_PROFILE_MASK, SDL_GL_CONTEXT_PROFILE_CORE);
SDL_GL_SetAttribute(SDL_GL_CONTEXT_MAJOR_VERSION, 4);
SDL_GL_SetAttribute(SDL_GL_CONTEXT_MINOR_VERSION, 5);
window = SDL_CreateWindow("SDL2 is back!", 0,0, screenWidth, screenHeight, SDL_WINDOW_OPENGL);
SDL_GLContext context{SDL_GL_CreateContext(window)};
glewExperimental = GL_TRUE;
if(glewInit()){
std::cout << "GLEW initialization failed." << std::endl;
return 1;
}
float vertices[] {
.0f, .5f,
.5f,-.5f,
-.5f,-.5f
};
// Buffers
GLuint vbo {0};
glGenBuffers(1, &vbo);
glBindBuffer(GL_ARRAY_BUFFER, vbo);
glBufferData(GL_ARRAY_BUFFER, sizeof(vertices), vertices, GL_STATIC_DRAW);
std::cout << "VBO " << vbo << " generated!" << std::endl;
// Shaders
GLuint hVertShader = glCreateShader(GL_VERTEX_SHADER);
const char* vert_c_str = ReadShaderTo("base.vert").c_str();
glShaderSource(hVertShader, 1, &vert_c_str, NULL);
GLuint hFragShader = glCreateShader(GL_FRAGMENT_SHADER);
const char* frag_c_str = ReadShaderTo("base.frag").c_str();
glShaderSource(hFragShader, 1, &frag_c_str, NULL);
glCompileShader(hVertShader);
glCompileShader(hFragShader);
SDL_Event e;
while(true){
if(SDL_PollEvent(&e)){
if(e.type == SDL_QUIT) break;
if(e.type == SDL_KEYUP && e.key.keysym.sym == SDLK_ESCAPE) break;
}
SDL_GL_SwapWindow(window);
}
SDL_GL_DeleteContext(context);
SDL_Quit ();
return 0;
}
<file_sep>/ShaderReader.cpp
#include <ShaderReader.h>
#include <iostream>
#include <fstream>
using namespace std;
string ReadShaderTo(const char* path){
ifstream file;
string line {""};
string returnValue {""};
file.open(path);
if(file.is_open()){
while(getline(file, line)){
returnValue += line + "\n";
}
file.close();
// cout << path << ":" << endl << returnValue << endl;
return returnValue;
}
else {
std::cout << "No shader file named " << path << " was found!" << std::endl;
return "";
}
}
void compileShader(){
}
<file_sep>/ShaderReader.h
#ifndef SHADERREADER_H
#define SHADERREADER_H
#include <string>
enum class ShaderType {
VertexShader,
FragmentShader,
GeometryShader
};
std::string ReadShaderTo(const char*);
void compileShader();
#endif // SHADERREADER_H
<file_sep>/README.md
# LearnGL
<p>Learning how to use OpenGL and Git.</p>
<p>Absolute beginner. </p>
<p>Nothing to look for here.</p>
|
018330ba5e9ee46b45282d11a7532e9ecab208f1
|
[
"Markdown",
"C++"
] | 5
|
Markdown
|
Cracklings/LearnGL
|
1ffb5b175290254b3dedc55625c33daa65ef3a2e
|
a7369bafe2cbb115f60741cba30ed002f23fe914
|
refs/heads/master
|
<repo_name>kvermun/IEEE_Conference<file_sep>/IEEEApp/TestApp/MainWindow.xaml.cs
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using System.Windows;
using System.Windows.Controls;
using System.Windows.Data;
using System.Windows.Documents;
using System.Windows.Input;
using System.Windows.Media;
using System.Windows.Media.Imaging;
using System.Windows.Navigation;
using System.Windows.Shapes;
using Microsoft.Kinect;
namespace TestApp
{
/// <summary>
/// Interaction logic for MainWindow.xaml
/// </summary>
public partial class MainWindow : Window
{
public MainWindow()
{
InitializeComponent();
}
private void kinectSensorChooser1_KinectSensorChanged(object sender, DependencyPropertyChangedEventArgs e)
{
KinectSensor old = (KinectSensor)e.OldValue;
StopKinect(old);
KinectSensor sensor = (KinectSensor)e.NewValue;
sensor.DepthStream.Enable();
sensor.ColorStream.Enable();
sensor.SkeletonStream.Enable();
sensor.AllFramesReady += new EventHandler<AllFramesReadyEventArgs>(sensor_AllFramesReady);
try
{
sensor.Start();
}
catch
{
kinectSensorChooser1.AppConflictOccurred();
}
}
void sensor_AllFramesReady(object sender, AllFramesReadyEventArgs e)
{
}
private void Window_Loaded(object sender, RoutedEventArgs e)
{
kinectSensorChooser1.KinectSensorChanged += new DependencyPropertyChangedEventHandler(kinectSensorChooser1_KinectSensorChanged);
}
private void StopKinect(KinectSensor sensor)
{
if (sensor != null)
{
if (sensor.IsRunning)
{
//stop sensor
sensor.Stop();
//stop audio if not null
if (sensor.AudioSource != null)
{
sensor.AudioSource.Stop();
}
}
}
}
private void Window_Closing(object sender, System.ComponentModel.CancelEventArgs e)
{
kinectSensorChooser1.Kinect.DepthStream.Disable();
kinectSensorChooser1.Kinect.ColorStream.Disable();
kinectSensorChooser1.Kinect.SkeletonStream.Disable();
StopKinect(kinectSensorChooser1.Kinect);
}
}
}
|
a5fe9de4f40dcb860de3e79cd8a4eeb951e3ae05
|
[
"C#"
] | 1
|
C#
|
kvermun/IEEE_Conference
|
dc07de5f0f83e2b1056fe9ede4a7c24529e2639c
|
adff5d22f2bd69bb7215ccef931d508dcb750b88
|
refs/heads/main
|
<file_sep>
/*
// Run the event-listener function after the window has loaded
// window.onload = function(){}
// Lets the code run after the DOM tree structure has been read in = lets you put the JavaScript anywhere and still have it run
/*
window.onload = function(){
//fnFirst('window.onload run','dummy'); // argument = the value handed to a parameter; if there is no parameter, passing an argument is pointless
var divBtn = document.querySelector('#scriptWrap');
divBtn.onclick = function(){
fnFirst('Value from the button click event listener after onload.');
}
}
*/
// 1. Declared function (named function) -> runs only when called by name
/*
// fnFirst('basic function call','박소혜'); // calling above the declaration (works)
function fnFirst(z,k,x,y,){ // here z is a parameter
3.👆 the coupon is stored
alert( z + k + x + y ); // z is a parameter; if the parameter and argument counts don't match you get undefined
console.log( z, k, x, y );
4.👆 the coupon is used (one or two, doesn't matter)
}
//fnFirst('basic function call','박소혜','aaaa'); // argument = the value handed to a parameter; if there is no parameter, passing an argument is pointless
//1.👆 call the customer 2.👆 receive the coupon // calling below the declaration
*/
// Named (declared) function trait 1) can be called above or below its declaration;
/*
// button (Element) click event
var divBtn = document.querySelector('#scriptWrap');
divBtn.onclick = function(){
fnFirst();
}
*/
/*
////// 2. Literal (anonymous) function //////
// traits 1) a function assigned to a variable
// 2) can only be called below where it is declared
3) if it has parameters, you must pass arguments
//unNameFn('http://www.naver.com'); // this is a literal function, so this call is not reachable = Uncaught TypeError: unNameFn is not a function (error)
/*
var unNameFn = function(z){ // parameter = receives the argument value
alert(z);
location.href = z; // location (window object: navigates within the window | BOM structure)
window/location.href = z; the full form (the line above is the common shorthand)
}
unNameFn('http://www.tjoeun.co.kr'); // argument
// when unNameFn is called, the Tjoeun homepage URL is passed to z
*/
////// 3. Immediately invoked function //////
/*
(function(){
alert('Immediately invoked function');
})(); // these parentheses make it run immediately
(function(babo,jQuery,win/*parameter*///){
//console.log(babo,jQuery,win) // parameters received by a function can be used inside it; if they don't pair up you get undefined
//})('argument value 1','argument value 2'/*argument*/);
// jQuery entry function
// the open-source library must be loaded before it can be used
/*jQuery(function(){ // run the following with jQuery
alert('jQuery alert box!!!!!!!!!!!!!!!!!!!');
/*⭐*//*jQuery('#scriptWrap').html('<h1>jQuery output</h1>'); // CSS rules still apply as usual.
/*$('#scriptWrap').html('<h2>jQuery output</h2>'); // variable names can collide inside jQuery, so be careful
});
$(function(){
alert('jQuery statement using $ (character used instead of jQuery)')
$('#scriptWrap').text('<h3><i>jQuery has two output methods: 1) html() accepts tags 2) text() is text Only</i></h3>')// text: does not parse tags / html: parses tags
/*⭐*//*document.write('<h1>JavaScript output~</h1>'); // writes straight into the page, so tags can be used directly
console.log('console output, for error detection / debugging / validation')
alert('Alert box!!!')
}); // a function that receives a value and hands it back = callback function = event handler
*/
/*
// Ways to write it
1) jQuery()
2) jQuery(function(){});
3) jQuery((){
});
1) $();
2) $(function(){});
3) $(function(){
});
4) $(function(){
})();
*/
// An immediately invoked function blocks conflicts, jQuery's weak point
// blocks shared/duplicate use of variable names
/*
// 1)
();
// 2)
(function(){});
// 3)
(function(){
});
*/
// 4)
/*
(function($,window,document,undefined/*parameter*///){
//console.log( $ ); //-> ƒ (a,b){return new n.fn.init(a,b)} = jQuery (from here on jQuery can be used)
//$('#scriptWrap').html('<h1>jQuery IIFE output</h1>')
//$('#scriptWrap').css({color:'#c00',fontSize:50,textAlign:'center'})
//$('#scriptWrap').css({color:'#c00','font-size':50,'text-align':'center'}) this form works too
//})(jQuery,window,document/* argument */);
/* ㅜㅜ */
(function(jQuery,window,document,undefined){
})(jQuery,window,document)
/*
jQuery basic form
(function($){
})(jQuery);
*/<file_sep># tjoeun_10_05-10_08_education\
[Tjoeun Computer Academy front-end course, week 4 classes]
(Blog about building the to-do list first -> then blog about the extras)
📆 20_10_05_Mon_Javascript_Day1
🖱 Learning the basic concepts of JavaScript
🔍 DOM vs BOM
- Both can be manipulated with JavaScript
1. DOM (Document Object Model)
- Prerequisite: why we learn the DOM structure first
1) DOM (https://developer.mozilla.org/ko/docs/Web/API/Document_Object_Model/%EC%86%8C%EA%B0%9C)
- What JavaScript works with = the DOM structure.
- Ways JavaScript is used
(1) Mouse effects, click effects, keyboard effects, etc.
(2) Efficient document manipulation
(3) Building more visual and polished pages, etc.
2) DOM Tree: how a web document is represented as a DOM
- Every [element] = an [object] -> can be accessed using JavaScript
// 💬 element = parent element + child elements
// 💬 object = a composite data type used to hold many kinds of information in one variable
- Represented as nodes (web document elements and attributes) + branches (their relationships)
- Not only HTML elements but also text, images, and comments -- everything entered in the HTML -- counts as a child
ex] top-level (parent) element ~ lower (child) elements in HTML:
<!DOCTYPE html>
<html>
<head>
<title>My title</title>
</head>
<body>
<h1>This is heading</h1>
<a href ="This is Link"></a>
</body>
</html>
-> DOCTYPE - html - head,body - title,h1,a - "My title", "This is heading", "This is Link"
(document - root element - element - element - text, text, attribute)
- root element: the starting point of the other element nodes
- relationships (1) parent node of the head node: the html node
(2) child node of the head node: the title node
3) DOM definition
- A format that represents a structured document as an object-oriented model.
- JavaScript can manipulate web document elements through the DOM tree structure
2. BOM (Browser Object Model)
- Turns browser features into objects so JavaScript can manage them
1) Built-in browser objects
Window - Document - History - Location - Navigator - Screen
browser window - HTML document object - object storing browsing history - object holding the current page's URL info - object holding web browser info - object handling screen info
2) Window object functions
- Read/change information about the browser window
- object name + .
ex] window.innerWidth
🖱 Making a JavaScript alert box
1. Inside <body>, type the following under the relevant tag.
1) <script></script>
2) <script>
alert(" ");
</script>
1) 👇 alert() is the function that makes the alert box appear
here, alert(" "); 👈 3) be sure to add the semicolon
2) ☝ write the text that should appear in the alert box
2.
<file_sep>// function (){} // the anonymous functions used so far
////// 1. Declared (named) function //////
//fnFirst('basic function call','박소혜'); // calling above the declaration
function fnFirst(z){// here z is a parameter // declared (named) function -> runs only when called by name
//3.👆 the coupon is stored
alert(z);
//4.👆 the coupon is used (one or two, doesn't matter)
}
//fnFirst('basic function call','박소혜'); // argument = the value handed to a parameter; if there is no parameter, passing an argument is pointless
//1.👆 call the customer 2.👆 receive the coupon // calling below the declaration
// Named (declared) function trait 1) can be called above or below its declaration;
////// 2. Literal (anonymous) function //////
// traits 1) a function assigned to a variable
// 2) can only be called below where it is declared
//unNameFn('http://www.naver.com'); // this is a literal function, so this call fails = Uncaught TypeError: unNameFn is not a function (error)
/*
var unNameFn = function(z){ // parameter = receives the argument value
//alert(z);
location.href = z; // location (window object: navigates within the window | BOM structure)
//window/location.href = z; the full form (the line above is the common shorthand)
}
unNameFn('http://www.t<EMAIL>'); // argument
// when unNameFn is called, the Tjoeun homepage URL is passed to z
*/
////// 3. Immediately invoked function //////
(function(){
alert('Immediately invoked function');
}) (); // these parentheses make it run immediately
/*
// button (Element) click event
var divBtn = document.querySelector('#scriptWrap');
divBtn.onclick = function(){
fnFirst();
}
*/
// Run the event-listener function after the window has loaded
// window.onload = function(){}
// Lets the code run after the DOM tree structure has been read in.
/*
window.onload = function(){
//fnFirst('window.onload run','dummy'); // argument = the value handed to a parameter; if there is no parameter, passing an argument is pointless
var divBtn = document.querySelector('#scriptWrap');
divBtn.onclick = function(){
fnFirst('Value from the button click event listener after onload.');
}
}
*/
|
ac518d67cb23566debf97666384f942cf2f19fca
|
[
"JavaScript",
"Markdown"
] | 3
|
JavaScript
|
ps9610/tjoeun_10_05-10_08_education
|
9320f2a86ab0b2cd134aeb8de210a725b42b0a13
|
f5edce0c4ce48a631eb4b847b098af0200fca1e7
|
refs/heads/master
|
<file_sep>using System;
using Motion.Core.WSHandler;
using Motion.Mobile.Core.BLE;
using Motion.Mobile.Utilities;
namespace Motion.Core.SyncHandler
{
public interface ISyncDeviceHandler
{
event EventHandler IncrementProgressBar;
event EventHandler<SyncDoneEventArgs> SyncDone;
void SetAdapter(IAdapter adapter);
void SetDevice(IDevice device);
void SetWebService(IWebServicesWrapper webservice);
void StartSync(Constants.ScanType scanType);
ICharacteristic GetServicesCharacteristic(Constants.CharacteristicsUUID uuid);
void NotifyStateUpdateDone(object sender, CharacteristicReadEventArgs e);
void ReceiveResponse(object sender, CommandResponseEventArgs e);
void StartWriteSettings();
void ProcessCommands();
void CleanUp();
}
}
<file_sep>using System;
namespace Motion.Core.SyncHandler
{
public class Utils
{
public Utils()
{
}
public static bool isValidDevice(String advertiseName)
{
bool result = false;
advertiseName = advertiseName.Replace("PE", "").Replace("FT", "");
if (/*advertiseName.StartsWith("932") ||
advertiseName.StartsWith("936") ||
advertiseName.StartsWith("939") ||*/
advertiseName.StartsWith("961"))
{
result = true;
}
//if (advertiseName.StartsWith("H25FE2"))
//{
// result = true;
//}
return result;
}
public static bool TerminatorFound(byte terminatorChar, int terminatorLength, byte[] data)
{
bool found = false;
int count = 0;
foreach(byte b in data) {
if (b == terminatorChar)
{
count++;
}
else {
count = 0;
}
if (count >= terminatorLength)
{
found = true;
break;
}
}
return found;
}
public static bool LastPacketReceived(int packetIndex,byte[] data)
{
bool lastPacket = false;
if (data[packetIndex] == 00)
{
lastPacket = true;
}
return lastPacket;
}
}
}
|
77368f2639f4804a939f6fc2d6e16111ecf0cd0e
|
[
"C#"
] | 2
|
C#
|
cnapenas/SyncHandler
|
644d72fc73bd0746f57db63b91c97251be668ddf
|
bf975bc3f230a1201a8af120d340f07c67b2dd99
|
refs/heads/master
|
<repo_name>mindfreakthemon-uawc/ua-web-challenge-vi<file_sep>/src/js/tests/filters/gaussian-test.js
define(['filters/gaussian', 'tests/imageloader'],
function (gaussian, imageloader) {
module('Gaussian Definition Test');
test('filter should be an object', function () {
equal(typeof gaussian, 'object', 'it is');
equal(gaussian.name, 'gaussian', 'has name = gaussian');
});
test('filter should contain options', function () {
equal(typeof gaussian.options, 'object', 'has options');
equal(typeof gaussian.options.radius, 'object', 'has radius option');
});
asyncTest('filter should work', function () {
expect(1);
imageloader('gaussian-10', function (modifiedImageData, originalImageData) {
gaussian.runner(originalImageData, {
radius: 10
});
equal(originalImageData.data.length, modifiedImageData.data.length, 'works');
start();
});
});
});
<file_sep>/src/js/filters.js
define([
'filters/gaussian',
'filters/median',
'filters/noise'
],
function () {
var map = {};
Array.prototype.forEach.call(arguments, function (filter) {
if (!filter.hasOwnProperty('label')) {
filter.label = filter.name;
}
if (!filter.hasOwnProperty('options')) {
filter.options = {};
}
if (!filter.hasOwnProperty('runner')) {
filter.runner = function () {};
}
map[filter.name] = filter;
});
return map;
}
);
<file_sep>/src/js/loader.js
define(function () {
var loader = document.getElementById('loader');
return {
hide: function () {
// hide loader when necessary
var i = +loader.dataset.waiting;
if (i === 1) {
loader.classList.add('hidden');
loader.dataset.waiting = 0;
} else {
loader.dataset.waiting = i - 1;
}
},
show: function () {
// increment loader count
loader.dataset.waiting = (loader.dataset.waiting | 0) + 1;
loader.classList.remove('hidden');
}
}
});
<file_sep>/src/js/tests/tests.js
require({
baseUrl: 'js'
}, [
'tests/filters/gaussian-test',
'tests/filters/median-test',
'tests/filters/noise-test'
],
function () {
QUnit.start();
});
<file_sep>/src/js/tests/filters/noise-test.js
define(['filters/noise', 'tests/imageloader'],
function (noise, imageloader) {
module('Noise Definition Test');
test('filter should be an object', function () {
equal(typeof noise, 'object', 'it is');
equal(noise.name, 'noise', 'has name = noise');
});
test('filter should contain options', function () {
equal(typeof noise.options, 'object', 'has options');
equal(typeof noise.options.strength, 'object', 'has strength option');
equal(typeof noise.options.amount, 'object', 'has amount option');
equal(typeof noise.options.mono, 'object', 'has mono option');
});
asyncTest('filter should work', function () {
expect(1);
imageloader('noise-0.5-0.5', function (modifiedImageData, originalImageData) {
noise.runner(originalImageData, {
strength: 0.5,
amount: 0.5,
mono: false
});
equal(originalImageData.data.length, modifiedImageData.data.length, 'works');
start();
});
});
});
<file_sep>/src/js/filters/noise.js
define(function () {
/**
* @param imageData
* @param options {{amount: number, strength: number, mono: boolean}}
*/
function noise(imageData, options) {
var pixels = imageData.data;
var offset, offsetY, x, y, r, g, b, pixelNoise;
var mono = options.mono,
noise = 128 * options.strength,
noise2 = noise / 2,
random = Math.random;
for (y = imageData.height; y >= 0; y--) {
offsetY = (y - 1) * imageData.width * 4;
for(x = imageData.width; x >= 0; x--) {
offset = offsetY + (x - 1) * 4;
if (random() < options.amount) {
if (mono) {
pixelNoise = random() * noise - noise2;
r = pixels[offset + 0] + pixelNoise;
g = pixels[offset + 1] + pixelNoise;
b = pixels[offset + 2] + pixelNoise;
} else {
r = pixels[offset + 0] - noise2 + (random() * noise);
g = pixels[offset + 1] - noise2 + (random() * noise);
b = pixels[offset + 2] - noise2 + (random() * noise);
}
if (r < 0) {
r = 0;
}
if (g < 0) {
g = 0;
}
if (b < 0) {
b = 0;
}
if (r > 255) {
r = 255;
}
if (g > 255) {
g = 255;
}
if (b > 255) {
b = 255;
}
pixels[offset + 0] = r;
pixels[offset + 1] = g;
pixels[offset + 2] = b;
}
}
}
}
return {
label: 'Noise',
name: 'noise',
runner: noise,
options: {
amount: {
label: 'Amount',
min: 0,
max: 1,
value: 0.5,
step: 0.01,
type: 'range'
},
strength: {
label: 'Strength',
min: 0,
max: 1,
value: 0.5,
step: 0.01,
type: 'range'
},
mono: {
label: 'Mono noise',
type: 'checkbox'
}
}
};
});
<file_sep>/src/js/filters/median.js
define(function () {
/**
* @param imageData
* @param options {{window: number}}
*/
function median(imageData, options) {
var pixels = imageData.data;
var x, y, c0, c1, c2, u, v, windowIndex, imageIndex;
var mid = Math.floor(
Math.pow(options.window, 2) / 2);
for (x = 0; x < imageData.width; x++) {
for (y = 0; y < imageData.height; y++) {
c0 = [];
c1 = [];
c2 = [];
imageIndex = (x + y * imageData.width) * 4;
for (u = 0; u < options.window; u++) {
for (v = 0; v < options.window; v++) {
windowIndex = (imageIndex + (u + v * imageData.width) * 4) % pixels.length;
c0.push(pixels[windowIndex + 0]);
c1.push(pixels[windowIndex + 1]);
c2.push(pixels[windowIndex + 2]);
}
}
c0.sort(function (a, b) { return a - b; }); // numeric sort; default sort() compares values as strings
c1.sort(function (a, b) { return a - b; });
c2.sort(function (a, b) { return a - b; });
pixels[imageIndex + 0] = c0[mid];
pixels[imageIndex + 1] = c1[mid];
pixels[imageIndex + 2] = c2[mid];
}
}
}
return {
label: 'Median',
name: 'median',
runner: median,
options: {
window: {
label: 'Window size',
min: 1,
max: 10,
value: 3,
type: 'range'
}
}
};
});
<file_sep>/src/js/main.js
require({
baseUrl: 'js',
paths: {
jquery: '../lib/jquery-2.1.1.min',
cropper: '../lib/cropper/cropper.min',
bootstrap: '../lib/bootstrap/js/bootstrap',
jade: '../lib/jade/runtime'
},
shim: {
'bootstrap' : ['jquery']
}
},
['filters', 'templates', 'area', 'loader', 'options'],
function (filters, templates, area, loader, options) {
var fileEntry,
originalImageData,
safeTimeout,
worker;
var file = document.getElementById('file'),
scene = document.getElementById('scene'),
image = document.getElementById('image'),
list = document.getElementById('filter-list'),
canvas = document.getElementById('canvas'),
context = canvas.getContext('2d');
var unload = document.getElementById('unload'),
upload = document.getElementById('upload'),
apply = document.getElementById('apply'),
cancel = document.getElementById('cancel'),
save = document.getElementById('save');
var form = document.getElementById('form'),
x1 = document.getElementById('x1'),
y1 = document.getElementById('y1'),
x2 = document.getElementById('x2'),
y2 = document.getElementById('y2');
/**
* When user selects an image
*/
file.addEventListener('change', function (e) {
fileEntry = e.target.files[0];
image.src = URL.createObjectURL(fileEntry);
});
image.addEventListener('load', function () {
var w = image.naturalWidth,
h = image.naturalHeight;
x1.value = 0;
y1.value = 0;
x2.value = x2.max = x1.max = w;
y2.value = y2.max = y1.max = h;
canvas.width = w;
canvas.height = h;
context.clearRect(0, 0, w, h);
context.drawImage(image, 0, 0);
// save initial imageData
originalImageData = context.getImageData(0, 0, w, h);
// create new worker
worker = new Worker('js/thread.js');
worker.addEventListener('message', function (e) {
context.putImageData(e.data.imageData, e.data.x1, e.data.y1);
loader.hide();
});
// initial render
options.update();
// hide upload form
// and show controls
form.classList.remove('hidden');
canvas.classList.remove('hidden');
upload.classList.add('hidden');
area.setup(function (data) {
x1.value = data.x1;
y1.value = data.y1;
x2.value = data.x2;
y2.value = data.y2;
// safe timeout for serial updates
clearTimeout(safeTimeout);
safeTimeout = setTimeout(updateFilterContext, 100);
});
URL.revokeObjectURL(image.src); // release the object URL created for the selected file
});
/**
* When user clicks on unload button
*/
unload.addEventListener('click', function () {
file.value = null;
// hide controls
// and show upload form
form.classList.add('hidden');
canvas.classList.add('hidden');
upload.classList.remove('hidden');
area.clear();
// terminating worker
worker.terminate();
// clear timeout just to be sure
clearTimeout(safeTimeout);
});
/**
* When user decides to save current filter
*/
apply.addEventListener('click', function () {
originalImageData = context.getImageData(0, 0, canvas.width, canvas.height);
options.update();
});
/**
* When user decides to roll back current filter
*/
cancel.addEventListener('click', function () {
context.putImageData(originalImageData, 0, 0);
options.update();
});
/**
* Download the image
*/
save.addEventListener('click', function () {
var a = document.createElement('a');
a.download = 'image.png';
a.href = canvas.toDataURL('image/png');
a.dispatchEvent(new MouseEvent('click'));
});
/**
* When user selects filters, display filter's options
*/
list.addEventListener('change', function (e) {
options.update(e.target.value);
});
/**
* When user changes some parameters in the control form
*/
form.addEventListener('change', function () {
// safe timeout for serial updates
clearTimeout(safeTimeout);
safeTimeout = setTimeout(updateFilterContext, 500);
});
form.addEventListener('submit', function (e) {
e.preventDefault();
});
/**
*
*/
function updateFilterContext() {
var filterName = form.elements.filterName.value,
filter = filters[filterName],
options = {};
if (!filter) {
return;
}
// reset context to saved state
context.putImageData(originalImageData, 0, 0);
// gathering options for filter
Object.keys(filter.options)
.forEach(function (optionName) {
var option = filter.options[optionName];
switch (option.type) {
case 'checkbox':
options[optionName] = form.elements[optionName].checked;
break;
case 'range':
options[optionName] = parseFloat(form.elements[optionName].value);
break;
}
});
var x1val = +x1.value,
y1val = +y1.value,
x2val = +x2.value,
y2val = +y2.value;
worker.postMessage({
filterName: filterName,
imageData: context.getImageData(x1val, y1val, x2val - x1val, y2val - y1val),
options: options,
x1: x1val,
y1: y1val,
x2: x2val,
y2: y2val
});
loader.show();
}
// preparing radio-boxes for filters
list.innerHTML = templates['filters']({
filters: filters
});
});
<file_sep>/src/js/thread.js
importScripts('../lib/require.js');
require({
baseUrl: '.'
},
['filters'],
function (filters) {
self.addEventListener('message', function (e) {
var data = e.data,
filter = filters[data.filterName];
filter.runner(data.imageData, data.options, data);
self.postMessage(data);
});
}
);
<file_sep>/README.md
ua-web-challenge-vi
===================
## How to start the application
* install grunt-cli with `npm install -g grunt-cli`
* `git clone` this repository
* cd into the ua-web-challenge-vi folder
* run `npm install` to install app's dependencies
* run `grunt` to build the app and start local web server
* open http://localhost:9000/ for app's main window
* open http://localhost:9000/tests.html for qunit tests
## How to add new filter
You must define AMD module which returns filter descriptor.
```javascript
define(function () {
return {
label: '<filter-label>',
name: '<filter-name>',
runner: function noise(imageData, options) {
// function receives imageData from the canvas
// element and must perform process over it
},
options: {
optionName: {
label: '<option-label>',
min: 0,
max: 1,
value: 0.5, // initial value
step: 0.01, // for type=range
checked: true, // for type=checkbox
type: 'range' // or 'checkbox'
},
// ...
}
};
});
```
Then you simply place created file in src/js/filters/ and include it as a dependency in src/js/filters.js.
All the rest will happen automagically.
<file_sep>/src/js/filters/gaussian.js
define(function () {
var mul_table = [512, 512, 456, 512, 328, 456, 335, 512, 405, 328, 271, 456, 388, 335, 292, 512, 454, 405, 364,
328, 298, 271, 496, 456, 420, 388, 360, 335, 312, 292, 273, 512],
shg_table = [ 9, 11, 12, 13, 13, 14, 14, 15, 15, 15, 15, 16, 16, 16, 16, 17, 17, 17, 17, 17, 17, 17,
18, 18, 18, 18, 18, 18, 18, 18, 18, 19 ];
/**
* @constructor
*/
function Stack() {
this.r = 0;
this.g = 0;
this.b = 0;
this.a = 0;
this.next = null;
}
/**
* @param imageData
* @param options {{radius: number}}
*/
function gaussian(imageData, options) {
var pixels = imageData.data;
var x, y, i, p, yp, yi, yw,
r_sum, g_sum, b_sum, a_sum,
r_out_sum, g_out_sum, b_out_sum, a_out_sum,
r_in_sum, g_in_sum, b_in_sum, a_in_sum,
pr, pg, pb, pa, rbs;
var div = options.radius + options.radius + 1,
widthMinus1 = imageData.width - 1,
heightMinus1 = imageData.height - 1,
radiusPlus1 = options.radius + 1,
sumFactor = radiusPlus1 * (radiusPlus1 + 1) / 2;
var stackStart = new Stack(),
stack = stackStart,
stackIn = null,
stackOut = null,
stackEnd;
for (i = 1; i < div; i++) {
stack = stack.next = new Stack();
if (i == radiusPlus1) {
stackEnd = stack;
}
}
stack.next = stackStart;
yw = yi = 0;
var mul_sum = mul_table[options.radius],
shg_sum = shg_table[options.radius];
for (y = 0; y < imageData.height; y++) {
r_in_sum = g_in_sum = b_in_sum = a_in_sum = r_sum = g_sum = b_sum = a_sum = 0;
r_out_sum = radiusPlus1 * (pr = pixels[yi]);
g_out_sum = radiusPlus1 * (pg = pixels[yi + 1]);
b_out_sum = radiusPlus1 * (pb = pixels[yi + 2]);
a_out_sum = radiusPlus1 * (pa = pixels[yi + 3]);
r_sum += sumFactor * pr;
g_sum += sumFactor * pg;
b_sum += sumFactor * pb;
a_sum += sumFactor * pa;
stack = stackStart;
for (i = 0; i < radiusPlus1; i++) {
stack.r = pr;
stack.g = pg;
stack.b = pb;
stack.a = pa;
stack = stack.next;
}
for (i = 1; i < radiusPlus1; i++) {
p = yi + ((widthMinus1 < i ? widthMinus1 : i) << 2);
r_sum += (stack.r = (pr = pixels[p])) * (rbs = radiusPlus1 - i);
g_sum += (stack.g = (pg = pixels[p + 1])) * rbs;
b_sum += (stack.b = (pb = pixels[p + 2])) * rbs;
a_sum += (stack.a = (pa = pixels[p + 3])) * rbs;
r_in_sum += pr;
g_in_sum += pg;
b_in_sum += pb;
a_in_sum += pa;
stack = stack.next;
}
stackIn = stackStart;
stackOut = stackEnd;
for (x = 0; x < imageData.width; x++) {
pixels[yi + 3] = pa = (a_sum * mul_sum) >> shg_sum;
if (pa != 0) {
pa = 255 / pa;
pixels[yi] = ((r_sum * mul_sum) >> shg_sum) * pa;
pixels[yi + 1] = ((g_sum * mul_sum) >> shg_sum) * pa;
pixels[yi + 2] = ((b_sum * mul_sum) >> shg_sum) * pa;
} else {
pixels[yi] = pixels[yi + 1] = pixels[yi + 2] = 0;
}
r_sum -= r_out_sum;
g_sum -= g_out_sum;
b_sum -= b_out_sum;
a_sum -= a_out_sum;
r_out_sum -= stackIn.r;
g_out_sum -= stackIn.g;
b_out_sum -= stackIn.b;
a_out_sum -= stackIn.a;
p = (yw + ((p = x + options.radius + 1) < widthMinus1 ? p : widthMinus1)) << 2;
r_in_sum += (stackIn.r = pixels[p]);
g_in_sum += (stackIn.g = pixels[p + 1]);
b_in_sum += (stackIn.b = pixels[p + 2]);
a_in_sum += (stackIn.a = pixels[p + 3]);
r_sum += r_in_sum;
g_sum += g_in_sum;
b_sum += b_in_sum;
a_sum += a_in_sum;
stackIn = stackIn.next;
r_out_sum += (pr = stackOut.r);
g_out_sum += (pg = stackOut.g);
b_out_sum += (pb = stackOut.b);
a_out_sum += (pa = stackOut.a);
r_in_sum -= pr;
g_in_sum -= pg;
b_in_sum -= pb;
a_in_sum -= pa;
stackOut = stackOut.next;
yi += 4;
}
yw += imageData.width;
}
for (x = 0; x < imageData.width; x++) {
g_in_sum = b_in_sum = a_in_sum = r_in_sum = g_sum = b_sum = a_sum = r_sum = 0;
yi = x << 2;
r_out_sum = radiusPlus1 * (pr = pixels[yi]);
g_out_sum = radiusPlus1 * (pg = pixels[yi + 1]);
b_out_sum = radiusPlus1 * (pb = pixels[yi + 2]);
a_out_sum = radiusPlus1 * (pa = pixels[yi + 3]);
r_sum += sumFactor * pr;
g_sum += sumFactor * pg;
b_sum += sumFactor * pb;
a_sum += sumFactor * pa;
stack = stackStart;
for (i = 0; i < radiusPlus1; i++) {
stack.r = pr;
stack.g = pg;
stack.b = pb;
stack.a = pa;
stack = stack.next;
}
yp = imageData.width;
for (i = 1; i <= options.radius; i++) {
yi = (yp + x) << 2;
r_sum += (stack.r = (pr = pixels[yi])) * (rbs = radiusPlus1 - i);
g_sum += (stack.g = (pg = pixels[yi + 1])) * rbs;
b_sum += (stack.b = (pb = pixels[yi + 2])) * rbs;
a_sum += (stack.a = (pa = pixels[yi + 3])) * rbs;
r_in_sum += pr;
g_in_sum += pg;
b_in_sum += pb;
a_in_sum += pa;
stack = stack.next;
if (i < heightMinus1) {
yp += imageData.width;
}
}
yi = x;
stackIn = stackStart;
stackOut = stackEnd;
for (y = 0; y < imageData.height; y++) {
p = yi << 2;
pixels[p + 3] = pa = (a_sum * mul_sum) >> shg_sum;
if (pa > 0) {
pa = 255 / pa;
pixels[p] = ((r_sum * mul_sum) >> shg_sum) * pa;
pixels[p + 1] = ((g_sum * mul_sum) >> shg_sum) * pa;
pixels[p + 2] = ((b_sum * mul_sum) >> shg_sum) * pa;
} else {
pixels[p] = pixels[p + 1] = pixels[p + 2] = 0;
}
r_sum -= r_out_sum;
g_sum -= g_out_sum;
b_sum -= b_out_sum;
a_sum -= a_out_sum;
r_out_sum -= stackIn.r;
g_out_sum -= stackIn.g;
b_out_sum -= stackIn.b;
a_out_sum -= stackIn.a;
p = (x + (((p = y + radiusPlus1) < heightMinus1 ? p : heightMinus1) * imageData.width)) << 2;
r_sum += (r_in_sum += (stackIn.r = pixels[p]));
g_sum += (g_in_sum += (stackIn.g = pixels[p + 1]));
b_sum += (b_in_sum += (stackIn.b = pixels[p + 2]));
a_sum += (a_in_sum += (stackIn.a = pixels[p + 3]));
stackIn = stackIn.next;
r_out_sum += (pr = stackOut.r);
g_out_sum += (pg = stackOut.g);
b_out_sum += (pb = stackOut.b);
a_out_sum += (pa = stackOut.a);
r_in_sum -= pr;
g_in_sum -= pg;
b_in_sum -= pb;
a_in_sum -= pa;
stackOut = stackOut.next;
yi += imageData.width;
}
}
}
return {
label: 'Gaussian blur',
name: 'gaussian',
runner: gaussian,
options: {
radius: {
label: 'Radius',
min: 1,
max: 25,
value: 10,
type: 'range'
}
}
};
});
<file_sep>/src/js/tests/filters/median-test.js
define(['filters/median', 'tests/imageloader'],
function (median, imageloader) {
module('Median Definition Test');
test('filter should be an object', function () {
equal(typeof median, 'object', 'it is');
equal(median.name, 'median', 'has name = median');
});
test('filter should contain options', function () {
equal(typeof median.options, 'object', 'has options');
equal(typeof median.options.window, 'object', 'has window option');
});
asyncTest('filter should work', function () {
expect(1);
imageloader('median-3', function (modifiedImageData, originalImageData) {
median.runner(originalImageData, {
window: 3
});
equal(originalImageData.data.length, modifiedImageData.data.length, 'works');
start();
});
});
});
|
f785c348d8fe1f401fa28e86de0f84718250b9b5
|
[
"JavaScript",
"Markdown"
] | 12
|
JavaScript
|
mindfreakthemon-uawc/ua-web-challenge-vi
|
bb969229d43d060578696939d9bf7fa0298ce492
|
65cdd1dc66a85457f4ce43aec43f48b5ef14da89
|
refs/heads/master
|
<file_sep>device:
driver assist chips in autonomous, or semi-autonomous cars
working:
provide feedback to the driver by constantly monitoring the signals
from the environment. The signals gathered from the environment could be
through cameras, radars, brake temperature, tire pressure, etc. The
feedback to the driver could range from taking complete control of the car (in
case of emergency) to a gentle reminder to the driver.
challenges:
[1] Reliability: a faulty operation could endanger lives. Designing these systems
for extreme reliability is important. Redundancy would be one of the ways of
accomplishing the reliability goals. And, with redundancy comes the challenges
of synchronizing internal states of the two computers in case of a failure.
The synchronization and handing off control from the primary computer to the
secondary computer in case of failure will be challenging
[2] Prioritization: the device should be smart enough to treat different
signals from the environment with different priorities.
[3] Response time: the device can't be sluggish in responding to signals from
the environment
[4] Test and Debug: Test and debug with limited debug registers will make this challenging
[5] Verification/Validation: Simulating all possible real-world scenarios will make the
testing and validation effort challenging
[6] Response time test: The device needs to respond within a fixed time. This means
the device needs to be tested under worst case conditions -- where there are multiple
high priority signals from the environment all happening at the same time. Designing
unit tests that simulate these conditions will be challenging. <file_sep>#ifndef STACK_H
#define STACK_H
//function to initialize the stack internals
void StackInit(void);
//function to push an element onto the stack
int StackPush(int data);
//function to pop an element off the stack
int StackPop(int* data);
//function that returns 1 if stack is empty
int StackIsEmpty(void);
//function that returns 1 if stack is full
int StackIsFull(void);
#endif<file_sep>//header file
#include "defines.h"
#include "stack.h"
#include <assert.h>
int main()
{
int result;
//testcase 1: fill up stack, pop stack without overflow
//check Last In First Out data flow
//############################################################
//Arrange
StackInit();
//Act
for(int i = 0; i<STACK_SIZE;i++) {
StackPush(i+1);
}
//Assert
for(int i = 0; i<STACK_SIZE;i++) {
StackPop(&result);
assert((STACK_SIZE-i) == result);
}
//testcase 2: is empty test under initialization
//############################################################
//Arrange
StackInit();
//Act
//Assert
assert(1 == StackIsEmpty());
//testcase 3: is empty test after poping all elements
//############################################################
//Arrange
StackInit();
//Act
for(int i = 0; i<STACK_SIZE;i++) {
StackPush(i+1);
}
for(int i = 0; i<STACK_SIZE;i++) {
StackPop(&result);
}
//Assert
assert(1 == StackIsEmpty());
//testcase 4: is full test under initialization
//############################################################
//Arrange
StackInit();
//Act
//Assert
assert(0 == StackIsFull());
//testcase 5: is full test after filling up the stack
//############################################################
//Arrange
StackInit();
//Act
for(int i = 0; i<STACK_SIZE;i++) {
StackPush(i+1);
}
//Assert
assert(1 == StackIsFull());
//testcase 6: full test
//didn't feel like spending much time on this test
//############################################################
//Arrange
StackInit();
//Act and Assert for StackPush
//replace 1 with constrained random number
for(int i = 0; i<(STACK_SIZE+1);i++) {
if(!StackIsFull()) {
assert(0 == StackIsFull());
StackPush(i+1);
}
else {
assert(1 == StackIsFull());
}
}
//didn't feel like spending much time on the StackPop test
return 0;
}
<file_sep>//scratch pad <br />
//############################################################################################## <br />
//0x7FFF_FFFF -> 0111 1111 1111 1111 1111 1111 1111 1111 -> n= , v=0, c=0 <br />
//0x8000_0000 -> 1000 0000 0000 0000 0000 0000 0000 0000 -> n=1, v=1, c=0 <br />
//0x8000_0001 -> 1000 0000 0000 0000 0000 0000 0000 0001 -> n=1, v=0, c=0 <br />
//0xFFFF_FFFF -> 1111 1111 1111 1111 1111 1111 1111 1111 -> n=1, v=0, c=0 (-1, big positive number) <br />
//0x0000_0000 -> 0000 0000 0000 0000 0000 0000 0000 0000 -> n=0, v=0, c=1, z=1 <br />
//0x0000_0001 -> 0000 0000 0000 0000 0000 0000 0000 0001 -> n=?, v=0, c=0 <br />
//question 1<br />
//int counter<br />
//init : 0x7FFF_FFFF<br />
//++ : 0x8000_0000<br />
//##############################################################################################<br />
1.a: -2147483648<br />
1.b: 0x80000000<br />
1.c: N=1, V=1<br />
N flag answer<br />
Negative condition flag is set to bit 31 of the result of the <br />
last flag-setting instruction.<br />
Result value (counter variable) after 1 increment: 0x80000000. And,<br />
0x8000_0000 in binary is : 1000_0000_0000_0000_0000_0000_0000_0000<br />
The MSB (bit 32 of counter variable) causes the N bit to be set to 1.<br />
side-note: the general purpose registers are not signed or unsigned. To the hardware<br />
they are just sequnce of bits.<br />
V flag answer<br />
V bit represents the overflow condition flag. The V bit is set to one because the<br />
general purpose register transitions from 0x7FFF_FFFF to 0x8000_0000.<br />
If the application is using the counter variable as a signed value, <br />
this would represent a overflow condition. The hardware need to a way to keep track of this<br />
transition (from 0x7FFF_FFFF to 0x80000_0000) and the V bit is way to do that.<br />
side note: The hardware has no concept of Positive or negative numbers. It's just a sequence<br />
of bits. The:<br />
[1] N flag is exposed to the software so that it can infer that the value that it's dealing with<br />
is a negative number (in case the variable is treated as a signed number)<br />
[2] V flag is exposed to the software to infer a rollover from the most positive value to the most<br />
negative value.<br />
(in case the application is using this variable as a signed variable)<br /><br />
//question 2<br />
//int counter<br />
//init : 0xFFFF_FFFF<br />
//++ : 0x0000_0000; ; carry 1<br />
//##############################################################################################<br />
2.a: 0<br />
2.b: N=0, V=0<br />
N flag answer<br />
Incremeniting 0xFFFF_FFFF by one causes the MSB to be set to zero<br />
Therefore, N takes on a value of zero. This operation results in a 33rd bit (not seen).<br />
The Carry flag (C) is set to one to indicate the generation of the 33rd bit.<br /><br />
V flag answer<br />
The V flag is only set if the counter transitions over from 0x7FFF_FFFF to 0x8000_0000.<br /><br />
side note: The hardware has no concept of Positive or negative numbers. It's just a sequence<br />
of bits. The:<br />
[1] V flag is exposed to the software to be used to infer a rollover of the most positive value.<br />
(in case the application is using this variable as a signed variable)<br />
[2] the C flag is exposed to the software to infer a rollover from the maximum unsigned value to zero.<br />
(in case the application is using this variable as an unsigned variable)<br />
//question 3<br />
//unsigned int counter<br />
//init : 0x7FFF_FFFF<br />
//++ : 0x8000_0000<br />
//##############################################################################################<br />
3.a: 2147483648<br />
3.b: N=1, V=1; same explanation as 1.c above<br />
//question 4<br />
//unsigned int counter<br />
//init : 0xFFFF_FFFF<br />
//++ : 0x0000_0000; carry 1<br />
//##############################################################################################<br />
4.a: 0<br />
4.b: N=0,V=0; same explanation as 2<br />
//question 5<br />
//##############################################################################################<br />
5.a: global scope<br />
5.b: variable is not visible in locals view<br />
5.c: we can track counter value in three windows:[1] auto window, [2] watch window, [3] memory window<br />
5.d: 0x20000000<br />
//question 6<br />
//##############################################################################################<br />
6.a: the value of the counter is 6<br />
6.b: the counter value changes because the statements: [a] ++(\*p_int), and [b] counter++, both<br />
operate (increment by one operation) on 0x20000000 memory location which is the location of the<br />
counter variable.<br />
//question 7<br />
//##############################################################################################<br />
7.a: 0x20000000<br />
7.b: RAM<br />
7.c: 4<br />
<file_sep>#ifndef MYDELAY
#define MYDELAY
#include <stdint.h>
void delay(uint32_t mscount);
#endif<file_sep>#include "stm32l475xx.h"
#include "delay.h"
//void delay (uint32_t mscount);
void SysTick_Initialize();
int counter_g;
int main()
{
//warmup code to see if LED works
/*
RCC->AHB2ENR |= RCC_AHB2ENR_GPIOBEN;
GPIOB->MODER &= ~GPIO_MODER_MODE14_1;
GPIOB->MODER |= GPIO_MODER_MODE14_0;
GPIOB->ODR ^= GPIO_ODR_OD14;
*/
//setup LED registers
RCC->AHB2ENR |= RCC_AHB2ENR_GPIOAEN;
GPIOA->MODER &= ~GPIO_MODER_MODE5_1;
GPIOA->MODER |= GPIO_MODER_MODE5_0;
//SysTick_Initialize
SysTick_Initialize();
while(1){
delay(5000);
GPIOA->ODR ^= GPIO_ODR_OD5;
}
}
void SysTick_Initialize(void){
SysTick->LOAD = 4000;// 0xE000E014 - Counts down to 0.
SysTick->VAL = 0x0;// 0xE000E018 - Clears initial value
SysTick->CTRL = 0x7;// 0xE000E010 - Enable interrupts
}
void SysTick_Handler (void) {
__disable_irq();
counter_g--;
__enable_irq();
}
<file_sep>1.a : prior to calling the swap function, the main function stores the input arguments to the swap_pointer into R0 and R1 (general purpose registers)
1.b : R0, and R1 contains the memory location of the pointers to the x and y variable(pointer to a pointer)<file_sep><file_sep>//vector : blocks of 4 bytes one after the other
//use non-standard C-syntax to place at intvec section
//not portable
//Global variable
//make it read only by making it const. RW can't be in ROM
//even though you do a @ ".intvec
//define prototypes, and put them in table
void __iar_program_start(void);
void Unused_Handler(void);
extern int CSTACK$$Limit;
extern void SysTick_Handler();
int const __vector_table[] @ ".intvec" = {
(int)&CSTACK$$Limit,// Pointer to Top of Stack
(int)&__iar_program_start,// Pointer to Reset Handler
(int)&Unused_Handler,
(int)&Unused_Handler,
(int)&Unused_Handler,
(int)&Unused_Handler,
(int)&Unused_Handler,
0, // Reserved
0, // Reserved
0, // Reserved
0, // Reserved
(int)&Unused_Handler,
(int)&Unused_Handler,
0, // Reserved
(int)&Unused_Handler,
(int)&SysTick_Handler
};
void Unused_Handler (void)
{
while(1)
{
}
}
<file_sep>/*
KIT : B-L475E-IOT01A1
PART : STM32L475VGT6
1. Enable clock
RCC_BASE = 0x40021000
RCC_AHB2ENR_offset = 0x4C
set bit[1] to 1
2. Set GPIOB to Output mode
GPIO_BASE = 0x48000400
GPIOx_MODER = 0x00 //enable port mode as in,out
set bit[29:28] to 0x01 to enable PB14 as output
3. Write to GPIO Data Register to toggle LED
GPIO_BASE = 0x48000400
GPIOx_ODR = 0x14
set bit[14] to 1 -> 0x4000 //turn led on
set bit[14] to 0 -> 0x0 //turn led off
*/
//base address and offset address
//define <replace this> <with this> //pre processing part
#define RCC_BASE (0x40021000)
#define GPIOB_BASE (0x48000400)
#define GPIOA_BASE (0x48000000)
#define RCC_AHB2ENR_OFFSET (0x4C)
#define GPIO_MODER_OFFSET (0x00)
#define GPIO_ODR_OFFSET (0x14)
//register access macros
#define REG_RCC_AHB2ENR (*(unsigned int*)(RCC_BASE + RCC_AHB2ENR_OFFSET))
#define REG_GPIOB_MODE (*(unsigned int*)(GPIOB_BASE + GPIO_MODER_OFFSET))
#define REG_GPIOB_ODR (*(unsigned int*)(GPIOB_BASE + GPIO_ODR_OFFSET))
#define REG_GPIOA_MODE (*(unsigned int*)(GPIOA_BASE + GPIO_MODER_OFFSET))
#define REG_GPIOA_ODR (*(unsigned int*)(GPIOA_BASE + GPIO_ODR_OFFSET))
//application specific macros
#define ORD14 (1 << 14)
#define ORD5 (1 << 5)
#define GPIOB_EN (1 << 1)
#define GPIOA_EN (1 << 0)
/*
//morse code notes
- symbol1 = dot (one unit)
- symbol2 = dash (3 x the duration of dot)
- inter symbol space in a character(iss) = 1 x the duration of the dot
- inter letter space in a word(ils) = 3 x the duration of a dot
- inter word space in a sentence = 7 x the duration of a dot
aditya
a:dot,iss,dash
ils
d:dash,iss,dot,iss,dot
ils
i:dot,iss,dot
ils
t:dash
ils
y:dash,iss,dot,iss,dash,iss,dash
ils
*/
#if 1
void symbol_delay(int time_unit,int multiplier)
{
int counter=multiplier*time_unit;
while(counter > 0)
{
counter--;
}
}
void dot(int time_unit) {
REG_GPIOA_ODR |= ORD5;//on
symbol_delay(time_unit,1);
REG_GPIOA_ODR &= ~ORD5;//off
}
void dash(int time_unit) {
REG_GPIOA_ODR |= ORD5;//on
symbol_delay(time_unit,3);
REG_GPIOA_ODR &= ~ORD5;//off
}
void inter_symbol_interval(int time_unit){
symbol_delay(time_unit,1);
}
void inter_letter_interval(int time_unit){
symbol_delay(time_unit,3);
}
void character_a(int time_unit)
{
//a:dot,iss,dash
dot(time_unit);
inter_symbol_interval(time_unit);
dash(time_unit);
}
void character_d(int time_unit)
{
//d:dash,iss,dot,iss,dot
dash(time_unit);
inter_symbol_interval(time_unit);
dot(time_unit);
inter_symbol_interval(time_unit);
dot(time_unit);
}
void character_i(int time_unit)
{
//i:dot,iss,dot
dot(time_unit);
inter_symbol_interval(time_unit);
dot(time_unit);
}
void character_t(int time_unit)
{
//t:dash
dash(time_unit);
}
void character_y(int time_unit)
{
//y:dash,iss,dot,iss,dash,iss,dash
dash(time_unit);
inter_symbol_interval(time_unit);
dot(time_unit);
inter_symbol_interval(time_unit);
dash(time_unit);
inter_symbol_interval(time_unit);
dash(time_unit);
}
int main()
{
int time_unit=500000;
REG_RCC_AHB2ENR = GPIOA_EN;
REG_GPIOA_MODE &= 0xFFFFF7FF;
char first_name[] = {'a','d','i','t','y','a','\0'};
//enhancement use generic terminal count (while and null character)
for (int i = 0; i < 6; i++){
if(first_name[i] == 'a') {
character_a(time_unit);
}
else if(first_name[i] == 'd') {
character_d(time_unit);
}
else if(first_name[i] == 'i') {
character_i(time_unit);
}
else if(first_name[i] == 't') {
character_t(time_unit);
}
else if(first_name[i] == 'y') {
character_y(time_unit);
}
inter_letter_interval(time_unit);
}
}
#endif
<file_sep>2.a: 396 bytes
2.b: 8196 bytes
2.c: program in main function is using the most ROM
2.d: not counting stack, it's again main function
3:
ROM usage optimization:
(1) avoid un-necessary initialization
(2) Keep Vector Interrupt Table simple -- don't define un-necessary handlers if your application doesn't need it
RAM usage optimization:
(1) reduce size of Stack -- since embedded SW is tailor made, why use more stack than what is needed? First, understand the input into the function
and later optimize it for stack usage.
(2) use const keyword for true constants<file_sep>#include "delay.h"
void delay(unsigned int endcount) {
while(endcount > 0) {
endcount--;
}
}
<file_sep>Two's complement is used to represent negative numbers because from the
hardware perspective an Adder can be used for both addition and subtraction
with little changes to the hardware (an extra NOT gate). This saves power by not having special hardware dedicated for subtraction.
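As a concrete illustration of the idea (a 4-bit worked example added here, not part of the original answer): to compute 5 - 3 the same adder evaluates 5 + (-3), where -3 is formed by inverting 3 and adding 1:
  0011 (3)  -> invert -> 1100  -> +1 -> 1101 (-3)
  0101 (5) + 1101 (-3) = 1 0010  -> drop the carry-out -> 0010 (2)
The inversion is the extra NOT gate mentioned above, and the "+1" comes for free as the adder's carry-in.
<file_sep>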
//funciton prototypes
int func1_three(int a,int b,int c);
int func1_four(int a,int b,int c,int d);
int func1_five(int a,int b,int c,int d,int e);
int func1_six(int a,int b,int c,int d,int e,int f);
//function definition
int func1_three(int a,int b,int c) {
volatile int total;
total = a + b + c;
return (total);
}
int func1_four(int a,int b,int c,int d) {
volatile int total;
total = a + b + c + d;
return (total);
}
int func1_five(int a,int b,int c, int d, int e) {
volatile int total;
total = a + b + c + d + e;
return (total);
}
int func1_six(int a,int b,int c, int d, int e, int f) {
volatile int total;
total = a + b + c + d + e + f;
return (total);
}
void delay (void)
{
volatile int counter=0;
}
int main()
{
int counter=0;
int my_output;
counter++;
counter++;
counter++;
counter++;
my_output = func1_six(1,2,3,4,5,6);
counter++;
counter++;
return 0;
}
<file_sep>/*
KIT : B-L475E-IOT01A1
PART : STM32L475VGT6
1. Enable clock
RCC_BASE = 0x40021000
RCC_AHB2ENR_offset = 0x4C
set bit[1] to 1
2. Set GPIOB to Output mode
GPIO_BASE = 0x48000400
GPIOx_MODER = 0x00 //enable port mode as in,out
set bit[29:28] to 0x01 to enable PB14 as output
3. Write to GPIO Data Register to toggle LED
GPIO_BASE = 0x48000400
GPIOx_ODR = 0x14
set bit[14] to 1 -> 0x4000 //turn led on
set bit[14] to 0 -> 0x0 //turn led off
*/
//base address and offset address
//define <replace this> <with this> //pre processing part
#define RCC_BASE (0x40021000)
#define GPIOB_BASE (0x48000400)
#define GPIOA_BASE (0x48000000)
#define RCC_AHB2ENR_OFFSET (0x4C)
#define GPIO_MODER_OFFSET (0x00)
#define GPIO_ODR_OFFSET (0x14)
//register access macros
#define REG_RCC_AHB2ENR (*(unsigned int*)(RCC_BASE + RCC_AHB2ENR_OFFSET))
#define REG_GPIOB_MODE (*(unsigned int*)(GPIOB_BASE + GPIO_MODER_OFFSET))
#define REG_GPIOB_ODR (*(unsigned int*)(GPIOB_BASE + GPIO_ODR_OFFSET))
#define REG_GPIOA_MODE (*(unsigned int*)(GPIOA_BASE + GPIO_MODER_OFFSET))
#define REG_GPIOA_ODR (*(unsigned int*)(GPIOA_BASE + GPIO_ODR_OFFSET))
//application specific macros
#define ORD14 (1 << 14)
#define ORD5 (1 << 5)
#define GPIOB_EN (1 << 1)
#define GPIOA_EN (1 << 0)
#if 1
int main()
{
int counter=1;
REG_RCC_AHB2ENR = GPIOA_EN;
REG_GPIOA_MODE &= 0xFFFFF7FF;
while(1) {
counter=1;
while(counter < 500000) {
counter++;
}
//0000_0000_0001_0000 : ORD5
//1111_1111_1110_1111 : ~ORD5
REG_GPIOA_ODR ^= ORD5;
}
}
#endif
<file_sep>#include "stm32l475xx.h"
#include "delay.h"
#include "delay.c"
//objective: blink LED1 (PA5; GPIO BANKA) using CIPS
int main()
{
//enable clock for GPIO bank A
RCC->AHB2ENR |= RCC_AHB2ENR_GPIOAEN;
//mode:01
GPIOA->MODER |= GPIO_MODER_MODE5_0;
GPIOA->MODER &= ~GPIO_MODER_MODE5_1;
while(1) {
//drive GPIO to blink(on) the LED
GPIOA->ODR |= GPIO_ODR_OD5;
delay(1000000);
//drive GPIO to blink(off) the LED
GPIOA->ODR &= ~GPIO_ODR_OD5;
delay(1000000);
}
return 0;
}
<file_sep>2.a:
case 1: 5 arguments
the calling function (main), passes argument to the called function (func1_five) using:
[1] R0-R3, and
[2] Stack: one of the variable is stored in the stack. The processor, once in the called function,
retrieves the variable from stack using [SP #offset] instruction when it is needed
case 1: 6 arguments
the calling function (main), passes argument to the called function (func1_six) using:
[1] R0-R3, and
[2] Stack: two variables are stored to stack. The processor, once in the called function,
retrieves the variable from stack -- using [SP #offset] instruction -- into one of the general purpose
registers before using it
2.b: The compiler before calling the called function (with more than 4 arguments), generated assembly code
to store the extra variables (variables count that exceeds 4) into the stack.
2.c: The compiler, once inside the called function, generated extra code to retrieve the extra variables (the ones beyond the first 4)
from the stack into the general purpose registers
2.d: The compiler pushed a bunch of general purpose registers into the stack. It happened:
[1] once in the calling(main) function before calling the called function (func1_five),
[2] once in the called function (func1_five) right at the start
The processor probably did this to preserve the value of the variables before doing a jump to called function.
I couldn't understand why certain registers were pushed onto the stack -- on the surface it looked like it wouldn't have
made any difference had the processor not pushed those general purpose registers.<file_sep>3.a :
It's a method to encode text. It has the following elements:
- symbol1 = dot (one unit)
- symbol2 = dash (3 x the duration of dot)
- inter symbol space in a character = 1 x the duration of the dot
- inter letter space in a word = 3 x the duration of a dot
- inter word space in a sentence = 7 x the duration of a dot
3.b :
<NAME> and later expanded by <NAME>
<file_sep>
//base address
#define BASE_GPIOA (0x48000000)
#define BASE_RCC (0x40021000)
//register offsets
#define OFFSET_RCC_AHB2ENR (0x4C)
#define OFFSET_GPIOx_MODER (0x00)
#define OFFSET_GPIOx_ODR (0x14)
//registers
#define REG_RCC_AHB2ENR (*(unsigned int*)(BASE_RCC+OFFSET_RCC_AHB2ENR))
#define REG_GPIOA_MODER (*(unsigned int*)(BASE_GPIOA+OFFSET_GPIOx_MODER))
#define REG_GPIOA_ODR (*(unsigned int*)(BASE_GPIOA+OFFSET_GPIOx_ODR))
//setbit
#define SET5 (1 << 5)
int main()
{
int counter=0;
//without bitband
REG_RCC_AHB2ENR |= 0x1;
//with bitband
//*((unsigned int*)((0x42000000)+(0x2104C * 32)+(0*4))) = 0x1;
REG_GPIOA_MODER &= 0xFFFFF7FF;
//ODR value: 0000_0000_0010_0000 = 0x0020 (bit 5)
//LED1 @ PA5
while(1) {
counter=0;
while(counter < 100000) {
counter++;
}
REG_GPIOA_ODR ^= SET5;
counter=0;
while(counter < 100000) {
counter++;
}
REG_GPIOA_ODR ^= SET5;
}
}<file_sep>void swap_pointer(int** x_p, int** y_p);
void swap_pointer(int** x_p, int** y_p) {
int* temp;
temp = *x_p;
*x_p = *y_p;
*y_p = temp;
}
int main()
{
int x = 15;
int y = 10;
int* x_p;
int* y_p;
x_p = &x;
y_p = &y;
swap_pointer(&x_p,&y_p);
return 0;
}
<file_sep>#include <stdio.h>
int IsBigEndian(void);
int IsLittleEndian(void);
int IsBigEndian(void) {
char mychar;
int myint;
myint = 0x41424244;
mychar = *((char*)(&(myint)));
if(mychar == 0x41) {
return 1;
}
return 0;
}
int IsLittleEndian(void) {
char mychar;
int myint;
myint = 0x41424244;
mychar = *((char*)(&(myint)));
if(mychar == 0x44) {
return 1;
}
return 0;
}
int main()
{
if(IsBigEndian()){
printf("Big Endian");
}
if(IsLittleEndian()){
printf("Little Endian");
}
return 0;
}
<file_sep>1.a : The compiler produces a store instruction (STR) to the bit band alias region to set a specific bit in the RCC register
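A hedged C sketch of what 1.a describes: writing through the Cortex-M bit-band alias region so that a single STR sets one bit of the RCC register. The peripheral and alias base addresses are the standard Cortex-M ones, and the RCC_AHB2ENR address (0x40021000 + 0x4C) matches the macros used elsewhere in these exercises; the macro and function names are assumptions:

/* alias address = 0x42000000 + (offset from 0x40000000) * 32 + bit_number * 4 */
#define PERIPH_BB(addr, bit) \
    (*(volatile unsigned int *)(0x42000000u + (((addr) - 0x40000000u) * 32u) + ((bit) * 4u)))

#define RCC_AHB2ENR_ADDR (0x40021000u + 0x4Cu)

static void enable_gpioa_clock_via_bitband(void)
{
    /* one word write to the alias -> compiled to a single STR that sets only bit 0 (GPIOAEN) */
    PERIPH_BB(RCC_AHB2ENR_ADDR, 0u) = 1u;
}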
1.b : The compiler produced the following instructions:
- LDR to load the contents of the RCC register into a temporary register (say R1)
- ORRS.W (the .W suffix selects the 32-bit wide Thumb-2 encoding) to perform the bitwise OR and store the result in the temporary register (say R1)
- STR operation to write the results in R1 to the RCC register (address stored in another temp register; say R0)<file_sep>#include "delay.h"
#include <stdint.h>
extern int counter_g; //expected to be decremented elsewhere (e.g. by the SysTick handler) until it reaches zero
void delay (uint32_t mscount) {
counter_g=mscount;
while(counter_g > 0){
}
}<file_sep>#include "stack.h"
#include "defines.h"
int my_array[STACK_SIZE];
int* current_p;
int* overflow_p;
int* bottom_p;
void StackInit(void) {
//set pointers
//<q> this feels like a bad idea: what happens
//if the pointer points to some dangerous section in memory (post
//decrement)? Is it a good idea to even have the pointer point to some
//other part of the memory?
overflow_p = (&my_array[0]-1);
bottom_p = &my_array[STACK_SIZE-1];
current_p = &my_array[STACK_SIZE-1];
//erase array
for(int i=0;i<STACK_SIZE;i++){
my_array[i]=0;
}
}
int StackPush(int data) {
if(current_p == overflow_p) {
return 1;
}
else {
*current_p = data;
current_p--;
return 0;
//<q> should return 0 be placed inside the else
//part, or outside the else part?
}
}
int StackPop(int* result) {
//nothing to pop
if(current_p == bottom_p){
return 1;
}
else {
*result = *++current_p;
return 0;
}
}
int StackIsEmpty(void) {
if(current_p == bottom_p) {
return 1;
}
else {
return 0;
}
}
int StackIsFull(void) {
if(current_p == overflow_p) {
return 1;
}
else {
return 0;
}
}<file_sep>#ifndef DELAY_H
#define DELAY_H
void delay(unsigned int endcount);
#endif
|
2ab0bbf435d07b5104b20646ef082f91ab4659e3
|
[
"Markdown",
"C"
] | 25
|
Markdown
|
adashoka/embsys310
|
cfc34b75e78cdc96813318d8d6a7b0a550760beb
|
4d8c02207ed6013722ab5d7df319708398bfffc0
|
refs/heads/master
|
<file_sep># pyPaillier
Paillier cryptosystem written in pure Python.
## About the Paillier cryptosystem
The Paillier cryptosystem is an additively homomorphic encryption scheme, see [Paillier cryptosystem - Wikipedia](https://en.wikipedia.org/wiki/Paillier_cryptosystem)
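The additive homomorphism used by `plaintextAdd` in `paillier.py` is, roughly: for plaintexts `m1`, `m2` and public key `(n, g)`,

`D( E(m1) * E(m2) mod n^2 ) = (m1 + m2) mod n`

so multiplying two ciphertexts yields an encryption of the sum of their plaintexts.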
## Requirement
python 3.x
## Get the code and start demo
`git clone https://github.com/L1v1T/pyPaillier.git`
`cd pyPaillier`
`python ./demo.py`
<file_sep>import random
# modulo #
def mod(a, n):
return int(a%n)
# Fast modular exponentiation #
'''
input: base g , exponent a and modulo p
output: (g**a)mod p
'''
def fast_pow(g, a, p):
    e = int(a)  # do not reduce the exponent mod (p - 1): that shortcut is only valid for prime p, and this function is also called with composite moduli (e.g. n^2 in paillier.py)
if e == 0:
return 1
import math
r = int(math.log2(e))# + 1 - 1
x = g
for i in range(0, r):
x = mod(x**2, p)
if (e & (1 << (r - 1 - i))) == (1 << (r - 1 - i)):
x = mod(g * x, p)
    return mod(x, p)  # final reduction also covers e == 1, where the loop body never runs
### test fast_pow ###
#print(fast_pow(5, 12, 23))
# Miller-Rabin primality test #
'''
input: large integer u and large positive integer T
output: if u pass the primality test, return True, else return False
'''
def isPrime_MR(u, T):
# calculate v and w , let u - 1 = w * 2^v
v = 0
w = u - 1
while mod(w, 2) == 0:
v += 1
w = w // 2
for _ in range(1, T + 1):
nextj = False
a = random.randint(2, u - 1)
b = fast_pow(a, w, u)
if b == 1 or b == u - 1:
nextj = True
continue
for _ in range(1, v):
b = mod(b**2, u)
if b == u - 1:
nextj = True
break
if b == 1:
return False
if not nextj:
return False
return True
### test isPrime_MR ###
#print(isPrime_MR(0xBDB6F4FE3E8B1D9E0DA8C0D46F4C318CEFE4AFE3B6B8551F, 10))
#print(isPrime_MR(23, 10))
#print(isPrime_MR(17, 10))
# output a 'bitlen'-bit prime
def randprime(bitlen):
lowbound = (1 << bitlen) + 1
upbound = (1 << (bitlen + 1)) - 1
while(True):
rint = random.randint(lowbound, upbound)
if mod(rint, 2) == 1 and isPrime_MR(rint, 15):
return rint
### test randprime ###
#print(randprime(1000))
# swap
def swap(a, b):
return b, a
# even
def is_even(a):
if (mod(a, 2)) == 0:
return True
else:
return False
### test is_even ###
#print(is_even(335))
# greatest common divisor
# using Stein algorithm
def gcd(a, b):
if a < 0:
a = -a
if b < 0:
b = -b
if b == 0:
if a != 0:
a, b = swap(a, b)
else:
return 0
k = 0
while is_even(a) and is_even(b):
a = a >> 1
b = b >> 1
k += 1
if is_even(b):
a, b = swap(a, b)
while True:
while is_even(a):
a = a >> 1
if a < b:
a, b = swap(a, b)
a = (a - b) >> 1
if a == 0:
d = int(b * (2**k))
return d
### test gcd ###
#print(gcd(1543535,276465))
# least common multiple
def lcm(a, b):
return (a * b) // gcd(a, b)
### test lcm ###
#print(lcm(142353,65134))
# inversion
'''
input: group element a, modulo n (a must be invertible mod n)
output: a^(-1) mod n
'''
def inverse(a, n):
s , old_s = 0, 1
t , old_t = 1, 0
r , old_r = n, a
while r != 0:
q = old_r // r
old_r, r = r, old_r - q * r
old_s, s = s, old_s - q * s
old_t, t = t, old_t - q * t
return mod(old_s, n)
### test inverse ###
# -7*47 + 11*30 = 1
# print(inverse(30,47))<file_sep>from paillier import *
import time
print("Generating key pair...")
n, g, l, m = keyGen(2048)
print("Generated key pair.")
plaintext1 = 1234123412341234123412341234123412341234123412341234123412341234123412341234123412341234123412341234123412341234123412341234123412341234123412341234123412341234123412341234123412341234
plaintext2 = 4321432143214321432143214321432143214321432143214321432143214321432143214321432143214321432143214321432143214321432143214321432143214321432143214321432143214321432143214321432143214321
tstart = time.time()
c1 = encrypt(plaintext1, n, g)
c2 = encrypt(plaintext2, n, g)
tend = time.time()
print("c1: " + str(c1))
print("c2: " + str(c2))
print("average time: " + str((tend - tstart) / 2))
c_ = plaintextAdd(c1, c2, n, g)
tstart = time.time()
m1 = decrypt(c1, n, g, l, m)
m2 = decrypt(c2, n, g, l, m)
m_bar = decrypt(c_, n, g, l, m)
tend = time.time()
print("c_: " + str(c_))
print("m1: " + str(m1))
print("m2: " + str(m2))
print("m_bar: " + str(m_bar))
print("average time: " + str((tend - tstart) / 3))<file_sep>import integer
# L function: L = (x - 1) / n #
def funcL(x, n):
return (x - 1) // n
# generate a random element belongs to multiplicative group of integers modulo n #
def sampleGen(n):
g = integer.random.randint(1, n - 1)
while integer.gcd(g, n) != 1:
g = integer.random.randint(1, n - 1)
return g
# key generation #
'''
input: bit length of integer n
output: public key (n, g) and private key (lambda, mu)
'''
def keyGen(sbit):
p = integer.randprime(int(sbit/2))
q = integer.randprime(int(sbit/2))
while integer.gcd(p*q, (p-1)*(q-1)) != 1:
p = integer.randprime(int(sbit/2))
q = integer.randprime(int(sbit/2))
n = p * q
n2 = n * n
lamb = integer.lcm(p - 1, q - 1)
g = sampleGen(n2)
while integer.gcd(funcL(integer.fast_pow(g, lamb, n2), n), n) != 1:
g = sampleGen(n2)
miu = integer.inverse(funcL(integer.fast_pow(g, lamb, n2), n), n)
'''
lamb = (p - 1) * (q - 1)
g = n + 1
miu = inverse(lamb, n, lamb)
'''
return n, g, lamb, miu
### test keyGen ###
'''
n, g, l, m = keyGen(2048)
print("public key:")
print("n = " + str(n))
print("g = " + str(g))
print("private key:")
print("labmda = " + str(l))
print("miu = " + str(m))
'''
# Encryption #
'''
input: plain text message m, public key n, g
output: cipher text c
'''
def encrypt(m, n, g):
if m < 0 or m >= n:
raise Exception("message m must be not less than 0 and less than n")
r = integer.random.randint(1, n - 1)
n2 = int(n**2)
while integer.gcd(r, n2) != 1:
r = integer.random.randint(1, n - 1)
'''
r = randprime(int(math.log2(n)))
while r > n - 1:
r = randprime(int(math.log2(n)))
n2 = n * n
'''
c = integer.mod(integer.fast_pow(g, m, n2) * integer.fast_pow(r, n, n2), n2)
return c
# Decryption #
'''
input: cipher text c, public key n, g, private key labmda, miu
output: recovered plain text m_bar
'''
def decrypt(c, n, g, lamb, miu):
n2 = n * n
if integer.gcd(c, n2) != 1:
print("error")
if c < 1 or c >= n2 or integer.gcd(c, n2) != 1:
raise Exception("cipher c must be in Group Z_*_n^2")
m_bar = integer.mod(funcL(integer.fast_pow(c, lamb, n2), n) * miu, n)
return m_bar
# homomorphic addition #
'''
input: cipher text c1, c2, public key n
output: cipher text of addition result c1 + c2
'''
def plaintextAdd(c1, c2, n, g):
n2 = n * n
c_ = integer.mod(c1 * c2, n2)
return c_
### test paillier cryptsystem ###
'''
n, g, l, m = keyGen(2048)
plaintext1 = 1234
plaintext2 = 4321
c1 = encrypt(plaintext1, n, g)
c2 = encrypt(plaintext2, n, g)
print("c1: " + str(c1))
print("c2: " + str(c2))
c_ = plaintextAdd(c1, c2, n, g)
print("c_: " + str(c_))
m1 = decrypt(c1, n, g, l, m)
print("m1: " + str(m1))
m2 = decrypt(c2, n, g, l, m)
print("m2: " + str(m2))
m_bar = decrypt(c_, n, g, l, m)
print("m_bar: " + str(m_bar))
'''
|
de29831589d8dd2c8af14f0c966cfa6fb747c5cb
|
[
"Markdown",
"Python"
] | 4
|
Markdown
|
organizations-loops/pyPaillier
|
91595a93e73b9958e26add53da6146dc0d4fcf9c
|
5a975b672b13cee0c8e082170b538fa1a5f0ad4f
|
refs/heads/master
|
<file_sep>using System;
using System.Collections.Generic;
using System.Globalization;
using System.Linq;
using System.Net;
using System.Net.Sockets;
using System.Text;
using System.Threading;
using System.Threading.Tasks;
using System.Windows.Forms;
using Timer= System.Threading.Timer;
namespace Logger
{
class UdpWorker
{
private Timer _timer;
private UdpClient udpClient;
private IPEndPoint ipDestEndPoint;
public OvenData Oven
{
get{return _oven;}
}
private OvenData _oven;
// This method will be called when the thread is started.
public UdpWorker(OvenData oven)
{
_oven = oven;
var port = oven.UdpPort;
var ip = IPAddress.Parse(oven.UdpIp);
ipDestEndPoint = new IPEndPoint(ip,port);
udpClient = new UdpClient
{
Client =
{
ReceiveTimeout = 300,
Blocking = false
}
};
Random rnd = new Random();
int r = rnd.Next(100, 500); // creates a number between 100 and 499
_timer = new Timer(TimerCallback, null, 0, 2000+r);
}
public void DoWork()
{
while (!_shouldStop)
{
Thread.Sleep(1000);
}
_timer.Dispose();
}
private void Work()
{
byte[] data = {(byte) '/', (byte) ' '};
udpClient.Send(data, 2, ipDestEndPoint);
_oven.FailedCount++;
try
{
//data = udpClient.Receive(ref ipDestEndPoint);
udpClient.BeginReceive(new AsyncCallback(RecieveMessage), null);
}
catch (SocketException)
{
    // receive timeouts / would-block conditions are expected here and are intentionally ignored
}
}
public void RequestStop()
{
_shouldStop = true;
}
// Volatile is used as hint to the compiler that this data
// member will be accessed by multiple threads.
private volatile bool _shouldStop;
private void RecieveMessage(IAsyncResult ar)
{
// IPEndPoint remote = new IPEndPoint(IPAddress.Any, 0);
byte[] data = udpClient.EndReceive(ar, ref ipDestEndPoint);
var dataS = Encoding.ASCII.GetString(data);
_oven.LastReadTemperature =Convert.ToDecimal(LoggerGetTemperature(dataS));
_oven.LastReadTime = DateTime.Now;
_oven.FailedCount = 0;
_oven.Status = LoggerGetOvenStatus(dataS);
}
private string LoggerGetTemperature(string data)
{
data = data.Trim();
var s = data.Split(':');
if (s.Length != 3)
{
return "";
}
var t = Convert.ToInt32(s[2]);
decimal y = t;
y = y / 4;
return y.ToString("F");
}
private string LoggerGetOvenStatus(string data)
{
var s = data.Split(':');
string hasil = "";
if (s.Length != 3)
{
return "";
}
var t = Convert.ToInt32(s[0]);
if ((t & 1) == 1)
{
hasil = "RUN";
}
if ((t & 1) == 0)
{
hasil = "FINISH";
}
return hasil;
}
private void TimerCallback(Object o)
{
Work();
}
}
}
<file_sep>using System;
using System.Collections.Generic;
using System.ComponentModel;
using System.Data;
using System.Drawing;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
using System.Windows.Forms;
using System.Data.SQLite;
using System.IO;
using Setting.Data;
namespace Setting
{
public partial class Form1 : Form
{
public Form1()
{
InitializeComponent();
}
private Data.Data SettingData;
private void Form1_Load(object sender, EventArgs e)
{
if (!IsDataBaseExist(DataBaseName))
{
CreateDataBase(DataBaseName);
}
SettingData = GetSetting(DataBaseName);
DataShowToTb(SettingData);
}
private const string DataBaseName = "Logger.sqlite";
private Data.Data GetSetting(string dbname)
{
Data.Data data=new Data.Data();
var connectionString = "Data Source=" + dbname + ";Version=3;";
const string sql = "select * from Setting";
using (SQLiteConnection c = new SQLiteConnection(connectionString))
{
c.Open();
using (SQLiteCommand cmd = new SQLiteCommand(sql, c))
{
using (SQLiteDataReader reader = cmd.ExecuteReader())
{
if (reader.Read())
{
data.DataBaseName = reader["DbDatabaseName"].ToString();
data.Server = reader["DbServer"].ToString();
data.UserName = reader["DbDatabaseUserName"].ToString();
data.Password = reader["<PASSWORD>"].ToString();
data.Interval = reader["UploadInterval"].ToString();
data.Mode = reader["Mode"].ToString();
}
}
}
}
return data;
}
private bool IsDataBaseExist(string dbname)
{
return File.Exists(dbname);
}
private bool CreateDataBase(string dbname)
{
SQLiteConnection.CreateFile(dbname);
var connectionString = "Data Source=" + dbname + ";Version=3;";
string sql =
@"CREATE TABLE ""Setting"" (""DbServer"" varchar,""DbDatabaseName"" varchar,""DbDatabasePassword"" varchar,""UploadInterval"" INTEGER DEFAULT (null) , ""DbDatabaseUsername"" VARCHAR, ""Mode"" INTEGER DEFAULT 0)";
using (SQLiteConnection c = new SQLiteConnection(connectionString))
{
c.Open();
using (SQLiteCommand command = new SQLiteCommand(sql, c))
{
command.ExecuteNonQuery();
}
}
sql =
@"INSERT INTO Setting (DbServer,DbDatabaseName,DbDatabasePassword,UploadInterval,DbDatabaseUsername,Mode) VALUES (@server,@db,@passw,@interval,@user,@mode)";
using (SQLiteConnection c = new SQLiteConnection(connectionString))
{
c.Open();
using (SQLiteCommand command = new SQLiteCommand(sql, c))
{
command.Parameters.AddWithValue("@server", "127.0.0.1");
command.Parameters.AddWithValue("@db", "MYOVEN");
command.Parameters.AddWithValue("@passw", "<PASSWORD>");
command.Parameters.AddWithValue("@interval", 20);
command.Parameters.AddWithValue("@user", "sa");
command.Parameters.AddWithValue("@mode", "0");
command.ExecuteNonQuery();
}
}
return true;
}
private bool UpdateDatabase(Data.Data data,string dbname)
{
var connectionString = "Data Source=" + dbname + ";Version=3;";
var sql = @"UPDATE Setting SET DbServer= @server, DbDatabaseName=@db, DbDatabasePassword=<PASSWORD>, UploadInterval=@interval,
DbDatabaseUsername=@user, Mode=@mode";
using (SQLiteConnection c = new SQLiteConnection(connectionString))
{
c.Open();
using (SQLiteCommand command = new SQLiteCommand(sql, c))
{
command.Parameters.AddWithValue("@server", data.Server);
command.Parameters.AddWithValue("@db", data.DataBaseName);
command.Parameters.AddWithValue("@passw", data.Password);
command.Parameters.AddWithValue("@interval", data.Interval);
command.Parameters.AddWithValue("@user", data.UserName);
command.Parameters.AddWithValue("@mode", data.Mode);
command.ExecuteNonQuery();
}
}
return true;
}
private bool DataShowToTb(Data.Data data)
{
textDBServer.Text = data.Server;
textDBPassword.Text = data.Password;
textDBName.Text = data.DataBaseName;
textUserName.Text = data.UserName;
textUploadInterval.Text = data.Interval;
textMode.Text = data.Mode;
return true;
}
private bool LoadFromTb(Data.Data data)
{
data.Server = textDBServer.Text;
data.Password = <PASSWORD>;
data.DataBaseName = textDBName.Text;
data.UserName = textUserName.Text;
data.Interval = textUploadInterval.Text;
data.Mode = textMode.Text;
return true;
}
private void button2_Click(object sender, EventArgs e)
{
LoadFromTb(SettingData);
UpdateDatabase(SettingData,DataBaseName);
}
}
}
<file_sep>using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
namespace Setting.Data
{
class Data
{
public string Server { get; set; }
public string DataBaseName { get; set; }
public string UserName { get; set; }
public string Password{ get; set; }
public string Interval { get; set; }
public string Mode { get; set; }
public Data(string server, string dbname, string username, string password, string interval,string mode )
{
this.Server = server;
this.DataBaseName = dbname;
this.UserName = username;
this.Password = <PASSWORD>;
this.Interval = interval;
this.Mode = mode;
}
public Data()
{
// TODO: Complete member initialization
}
}
}
<file_sep>using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
namespace Logger
{
public delegate void ChangedEventHandler(object sender, EventArgs e);
class OvenData
{
private int _failedCount;
private string _status;
public string OvenId;
public string UdpIp;
public int UdpPort;
public decimal LastReadTemperature = 0;
public DateTime LastReadTime= DateTime.Now;
public int FailedLimit = 2;
public string Status {
get { return _status; }
set
{
if (Mode != 1)
{
_status = value;
OnStatusChanged(EventArgs.Empty);
}
}
}
public int Mode;
public int FailedCount
{
get { return _failedCount;}
set
{
_failedCount = value;
if ((_failedCount >= FailedLimit) && (Mode == 1))
{
if (_status != "FINISH")
{
_status = "FINISH";
OnStatusChanged(EventArgs.Empty);
}
}
if ((value==0) && (Mode == 1))
{
if (_status != "RUN")
{
_status = "RUN";
OnStatusChanged(EventArgs.Empty);
}
}
}
}
//
public event ChangedEventHandler StatusChanged;
protected virtual void OnStatusChanged(EventArgs e)
{
if (StatusChanged != null)
StatusChanged(this, e);
}
}
}
<file_sep>using System;
using System.Collections.Generic;
using System.IO;
using System.Linq;
using System.Threading;
using System.Threading.Tasks;
using System.Windows.Forms;
namespace Logger
{
internal static class Program
{
/// <summary>
/// The main entry point for the application.
/// </summary>
[STAThread]
private static void Main()
{
const string appGuid = "23D08852-57F0-432F-9459-A24866BD5BC0";
using (var mutex = new Mutex(false, "Global\\" + appGuid))
{
if (!mutex.WaitOne(0, false))
{
MessageBox.Show("An Application's Instance already running");
return;
}
if (!File.Exists("Logger.sqlite"))
{
MessageBox.Show("Cannot Find file : Logger.sqlite");
return;
}
Application.EnableVisualStyles();
Application.SetCompatibleTextRenderingDefault(false);
Application.Run(new Form1());
}
}
}
}<file_sep>using System;
using System.Collections.Generic;
using System.ComponentModel;
using System.Data;
using System.Data.SqlClient;
using System.Drawing;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
using System.Windows.Forms;
using System.Net.Sockets;
using System.Data.SQLite;
using System.Drawing.Text;
using System.Globalization;
using System.IO;
using System.Linq.Expressions;
using System.Net;
using System.Threading;
using Microsoft.ApplicationBlocks.Data;
namespace Logger
{
public partial class Form1 : Form
{
public Form1()
{
InitializeComponent();
}
private List<UdpWorker> _udpWorkers;
private List<Thread> _workerThreads;
private string _dbConnection;
private int _interval;
private int _listWalker=-1;
private bool _readyToSend = true;
private SQLiteConnection SqlLiteConnection;
private List<OvenData> _ovenList = new List<OvenData>();
private UdpClient udpClient;
private IPEndPoint ipDestEndPoint;
public int Mode;
private void InitializeComponent()
{
this.components = new System.ComponentModel.Container();
this.button1 = new System.Windows.Forms.Button();
this.panel1 = new System.Windows.Forms.Panel();
this.richTextBox1 = new System.Windows.Forms.RichTextBox();
this.dataGridView1 = new System.Windows.Forms.DataGridView();
this.OvenId = new System.Windows.Forms.DataGridViewTextBoxColumn();
this.Status = new System.Windows.Forms.DataGridViewTextBoxColumn();
this.LastTemp = new System.Windows.Forms.DataGridViewTextBoxColumn();
this.LastReadTime = new System.Windows.Forms.DataGridViewTextBoxColumn();
this.UploaderTimer = new System.Windows.Forms.Timer(this.components);
this.LoggerTimer = new System.Windows.Forms.Timer(this.components);
this.panel1.SuspendLayout();
((System.ComponentModel.ISupportInitialize)(this.dataGridView1)).BeginInit();
this.SuspendLayout();
//
// button1
//
this.button1.Location = new System.Drawing.Point(592, 162);
this.button1.Name = "button1";
this.button1.Size = new System.Drawing.Size(75, 23);
this.button1.TabIndex = 1;
this.button1.Text = "button1";
this.button1.UseVisualStyleBackColor = true;
this.button1.Click += new System.EventHandler(this.button1_Click);
//
// panel1
//
this.panel1.Controls.Add(this.richTextBox1);
this.panel1.Location = new System.Drawing.Point(0, 427);
this.panel1.Name = "panel1";
this.panel1.Size = new System.Drawing.Size(775, 58);
this.panel1.TabIndex = 2;
//
// richTextBox1
//
this.richTextBox1.BackColor = System.Drawing.SystemColors.ButtonFace;
this.richTextBox1.Dock = System.Windows.Forms.DockStyle.Bottom;
this.richTextBox1.Location = new System.Drawing.Point(0, 0);
this.richTextBox1.Name = "richTextBox1";
this.richTextBox1.Size = new System.Drawing.Size(775, 58);
this.richTextBox1.TabIndex = 1;
this.richTextBox1.Text = "";
//
// dataGridView1
//
this.dataGridView1.AutoSizeColumnsMode = System.Windows.Forms.DataGridViewAutoSizeColumnsMode.Fill;
this.dataGridView1.ColumnHeadersHeightSizeMode = System.Windows.Forms.DataGridViewColumnHeadersHeightSizeMode.AutoSize;
this.dataGridView1.Columns.AddRange(new System.Windows.Forms.DataGridViewColumn[] {
this.OvenId,
this.Status,
this.LastTemp,
this.LastReadTime});
this.dataGridView1.Dock = System.Windows.Forms.DockStyle.Top;
this.dataGridView1.Location = new System.Drawing.Point(0, 0);
this.dataGridView1.Name = "dataGridView1";
this.dataGridView1.Size = new System.Drawing.Size(778, 421);
this.dataGridView1.TabIndex = 3;
//
// OvenId
//
this.OvenId.HeaderText = "Oven Name";
this.OvenId.Name = "OvenId";
//
// Status
//
this.Status.HeaderText = "Status";
this.Status.Name = "Status";
//
// LastTemp
//
this.LastTemp.HeaderText = "Last Read Temperature (C)";
this.LastTemp.Name = "LastTemp";
//
// LastReadTime
//
this.LastReadTime.HeaderText = "Last Read Time";
this.LastReadTime.Name = "LastReadTime";
//
// UploaderTimer
//
this.UploaderTimer.Interval = 10000;
this.UploaderTimer.Tick += new System.EventHandler(this.Uploader_Tick);
//
// LoggerTimer
//
this.LoggerTimer.Interval = 1000;
this.LoggerTimer.Tick += new System.EventHandler(this.LoggerTimer_Tick);
//
// Form1
//
this.ClientSize = new System.Drawing.Size(778, 490);
this.Controls.Add(this.dataGridView1);
this.Controls.Add(this.panel1);
this.Controls.Add(this.button1);
this.Name = "Form1";
this.ShowInTaskbar = false;
this.FormClosing += new System.Windows.Forms.FormClosingEventHandler(this.Form1_FormClosing);
this.Load += new System.EventHandler(this.Form1_Load);
this.panel1.ResumeLayout(false);
((System.ComponentModel.ISupportInitialize)(this.dataGridView1)).EndInit();
this.ResumeLayout(false);
}
private void Form1_Load(object sender, EventArgs e)
{
//
//
_dbConnection = GetDatabaseConnection();
Mode = GetMode();
_ovenList = GetOvenDatas(_dbConnection);
OvenListToGridView(_ovenList);
_interval = GetInterval();
UploaderTimer.Interval = _interval;
_listWalker = 0;
StartWorkers();
LoggerTimer.Enabled = true;
UploaderTimer.Enabled = true;
}
private void StartWorkers()
{
_udpWorkers = new List<UdpWorker>();
foreach (OvenData od in _ovenList)
{
_udpWorkers.Add(new UdpWorker(od));
}
_workerThreads = new List<Thread>();
foreach (UdpWorker udp in _udpWorkers)
{
_workerThreads.Add(new Thread(udp.DoWork));
}
foreach (Thread th in _workerThreads)
{
th.Start();
}
}
private void ShowMessage(string s)
{
richTextBox1.Text = s;
}
private void InsertMessage(string s)
{
richTextBox1.AppendText(s+"\n");
}
private int GetOvenIndexByIp(string ip)
{
for (int i = 0; i < _ovenList.Count; i++)
{
if (_ovenList[i].UdpIp == ip)
{
return i;
}
}
return -1;
}
private string GetDatabaseConnection()
{
SqlLiteConnection = new SQLiteConnection("Data Source=Logger.sqlite;Version=3;");
SqlLiteConnection.Open();
string sql = "select * from Setting";
SQLiteCommand command = new SQLiteCommand(sql, SqlLiteConnection);
SQLiteDataReader reader = command.ExecuteReader();
reader.Read();
var temp = "Data Source=" + reader["DbServer"] + ";Initial Catalog=" + reader["DbDatabaseName"] +
";Persist Security Info=True;" +
"User ID=" + reader["DbDatabaseUsername"] + ";Password=" + reader["DbDatabasePassword"] +
";MultipleActiveResultSets=True;";
_interval = Convert.ToInt32(reader["UploadInterval"].ToString()) * 1000; //numpang
SqlLiteConnection.Close();
return temp;
}
private int GetInterval()
{
SqlLiteConnection = new SQLiteConnection("Data Source=Logger.sqlite;Version=3;");
SqlLiteConnection.Open();
const string sql = "select * from Setting";
SQLiteCommand command = new SQLiteCommand(sql, SqlLiteConnection);
SQLiteDataReader reader = command.ExecuteReader();
reader.Read();
var temp = Convert.ToInt32(reader["UploadInterval"].ToString()) * 1000; //numpang
SqlLiteConnection.Close();
return temp;
}
private int GetMode()
{
SqlLiteConnection = new SQLiteConnection("Data Source=Logger.sqlite;Version=3;");
SqlLiteConnection.Open();
const string sql = "select * from Setting";
SQLiteCommand command = new SQLiteCommand(sql, SqlLiteConnection);
SQLiteDataReader reader = command.ExecuteReader();
reader.Read();
var temp = Convert.ToInt32(reader["Mode"].ToString()); //numpang
SqlLiteConnection.Close();
return temp;
}
private List<OvenData> GetOvenDatas(string dbConnection)
{
List<OvenData> __list = new List<OvenData>();
var ds = SqlHelper.ExecuteDataset(_dbConnection, CommandType.StoredProcedure, "usp_gpro_SelectOven");
foreach (DataRow dr in ds.Tables[0].Rows)
{
var j = new OvenData
{ UdpIp = dr["AlarmIPAddr"].ToString(),
OvenId = dr["OvenID"].ToString(),
UdpPort = Convert.ToInt32(dr["AlarmUDPPort"].ToString()),
Mode = this.Mode
};
j.StatusChanged += new ChangedEventHandler(OnStatusChanged);
__list.Add(j);
}
return __list;
}
private void OnStatusChanged(object sender, EventArgs e)
{
OvenData j = (OvenData) sender;
var i = GetOvenIndexByIp(j.UdpIp);
UpdateGvStatus(i, _ovenList[i].Status);
}
private void OvenDataToTableRow(OvenData data)
{
var row = new DataGridViewRow();
row.Cells.Add(new DataGridViewTextBoxCell{Value=data.OvenId});
dataGridView1.Rows.Add(row);
}
private void OvenListToGridView(List<OvenData> list)
{
foreach (OvenData listRow in list)
{
OvenDataToTableRow(listRow);
}
}
private void UpdateGv(int row, int column, string value)
{
dataGridView1.Rows[row].Cells[column].Value = value;
}
private void UpdateGvStatus(int row,string value)
{
UpdateGv(row,1,value);
}
private void UpdateGvLastReadTemp(int row,string value)
{
UpdateGv(row, 2, value);
}
private void UpdateGvlastReadTime(int row,string value)
{
UpdateGv(row, 3, value);
}
private void button1_Click(object sender, EventArgs e)
{
ShowMessage(_dbConnection);
}
private void Uploader_Tick(object sender, EventArgs e)
{
UploaderTimer.Enabled = false;
UploadData(OvenListDataToString(_ovenList));
UploaderTimer.Enabled = true;
}
private void WriteDataToGridView()
{
foreach (OvenData od in _ovenList)
{
var j = GetOvenIndexByIp(od.UdpIp);
UpdateGvlastReadTime(j, od.LastReadTime.ToString("R"));
UpdateGvLastReadTemp(j, od.LastReadTemperature.ToString("F"));
}
}
private void LoggerTimer_Tick(object sender, EventArgs e)
{
LoggerTimer.Enabled = false;
WriteDataToGridView();
LoggerTimer.Enabled = true;
}
private void Form1_FormClosing(object sender, FormClosingEventArgs e)
{
try
{
foreach (UdpWorker uw in _udpWorkers)
{
uw.RequestStop();
}
foreach (Thread th in _workerThreads)
{
th.Join();
}
}
catch (Exception)
{
    // ignore errors raised while stopping worker threads during shutdown
}
}
private String OvenListDataToString(List<OvenData> list)
{
StringBuilder sb = new StringBuilder();
foreach (OvenData od in list)
{
sb.Append(od.OvenId+"|"+od.Status+"|"+od.LastReadTemperature+"|"+od.LastReadTime.ToString("s")+"|]");
}
var sbs = sb.ToString();
return sbs;
}
private void UploadData(String data)
{
var par = new SqlParameter("@data", data);
var it = SqlHelper.ExecuteNonQuery(_dbConnection, CommandType.StoredProcedure, "usp_gpro_OvenLoggerUploader", par);
}
}
}
|
212522de45b6561a21204c098cadab7f5e4977bb
|
[
"C#"
] | 6
|
C#
|
patoeng/OvenLogger1
|
3169cde2f1ca24362b44f5ac75abc06696337478
|
aa35114a7094ebd427f424b0eedb3613ce67338b
|
refs/heads/master
|
<repo_name>xadrnd/carbon<file_sep>/lib/carbon/aggregator/receiver.py
from carbon.instrumentation import increment
from carbon.aggregator.rules import RuleManager
from carbon.aggregator.buffers import BufferManager
from carbon.rewrite import RewriteRuleManager
from carbon import events, log
from carbon.conf import settings
def process(metric, datapoint):
increment('datapointsReceived')
for rule in RewriteRuleManager.preRules:
metric = rule.apply(metric)
aggregate_metrics = []
if settings.AGGREGATOR_RULE_METHOD == "rules":
for rule in RuleManager.rules:
aggregate_metric = rule.get_aggregate_metric(metric)
if aggregate_metric is None:
continue
else:
aggregate_metrics.append(aggregate_metric)
buffer = BufferManager.get_buffer(aggregate_metric)
if not buffer.configured:
buffer.configure_aggregation(rule.frequency, rule.aggregation_func)
buffer.input(datapoint)
# Custom rule to sum metrics
elif settings.AGGREGATOR_RULE_METHOD == "sumall":
sum_index = metric.find(".sum.")
if sum_index != -1:
aggregate_metric = metric[:sum_index] + ".sum_all.hosts"
aggregate_metrics.append(aggregate_metric)
buffer = BufferManager.get_buffer(aggregate_metric)
if not buffer.configured:
buffer.configure_aggregation(60, sum)
buffer.input(datapoint)
for rule in RewriteRuleManager.postRules:
metric = rule.apply(metric)
if metric not in aggregate_metrics:
#log.msg("Couldn't match metric %s with any aggregation rule. Passing on un-aggregated." % metric)
events.metricGenerated(metric, datapoint)
|
1c1b4d4968c02eb5585d507985dcc5ef93ef178e
|
[
"Python"
] | 1
|
Python
|
xadrnd/carbon
|
3b19f40bfe80d88b0309ca4909488ce751262d97
|
8e0715b322b6abe6945580bc5fd7d41b0ba55ac2
|
refs/heads/master
|
<file_sep>list.of.packages <- c("data.table","RMySQL")
new.packages <- list.of.packages[!(list.of.packages %in% installed.packages()[,"Package"])]
if(length(new.packages)) install.packages(new.packages, repos = "http://cran.rstudio.com/")
library(data.table)
library(RMySQL)
#Margot
setwd("D:/data-warehouse/")
#Thu
#setwd("C:/Users/<NAME>/Dropbox/DM/BD/Projet")
####
data <- fread("clean_data.csv", sep=";", stringsAsFactors=T)
#### Connection to Database ####
# install.packages("RMySQL")
con <- dbConnect(MySQL(), user="root", password="",dbname="gas-emissions", host="localhost")
#rs <- dbSendQuery(con, "select * from h_year")
#### Insert values in Database ####
# Insert countries
codes <- levels(data$Country_code)
names <- rep("",length(codes))
levels <- levels(data$Country)
for(i in 1:length(codes)){
temp <- subset(data, Country_code == codes[i])
names[i] <- levels[temp$Country[1]]
}
for(j in 1:length(codes)){
sql <-sprintf( "insert into `h_country` (`id_country`, `name`) values ('%s', '%s');", codes[j], names[j])
rs <- dbSendQuery(con, sql)
}
# Insert sectors
codes <- levels(data$Sector_code)
names <- rep("",length(codes))
parents <- rep("", length(codes))
levelsNames <- levels(data$Sector_name)
levelsParents <- levels(data$Parent_sector_code)
for(i in 1:length(codes)){
temp <- subset(data, Sector_code == codes[i])
names[i] <- levelsNames[temp$Sector_name[1]]
parents[i] <- levelsParents[temp$Parent_sector_code[1]]
}
for(j in 1:length(codes)){
sql <- sprintf( "insert into `h_sector` (`id_sector`, `name`) values ('%s', '%s');", codes[j], names[j])
rs <- dbSendQuery(con, sql)
}
for(j in 1:length(codes)){
if(parents[j] != ""){
sql <- sprintf( "update `gas-emissions`.`h_sector` SET `id_ancestor`='%s' where `id_sector`='%s';",parents[j], codes[j])
rs <- dbSendQuery(con, sql)
}
}
# Insert relation between 2 sector
sectors <- levels(data$Sector_code)
relation_sectors <- data.frame(id_sector1=character(), id_sector2=character(), distance = integer(), stringsAsFactors=F)
line = 1
for (i in 1:length(sectors)){
s1 = sectors[i]
print(s1)
split_s1 <- unlist(strsplit(s1, "[.]"))
for (j in i:length(sectors)){
s2 = sectors[j]
split_s2 <- unlist(strsplit(s2, "[.]"))
if (length(split_s1) <= length(split_s2)){
same_branch = T
for (k in 1: length(split_s1)){
same_branch = same_branch & (split_s1[k] == split_s2[k])
}
if (same_branch){
relation_sectors[line,] <- c(s1, s2, length(split_s2) - length(split_s1))
line = line + 1
}
}
}
}
for(i in 1:nrow(relation_sectors)){
sql <- sprintf( "insert into `sector_relation` (`id_ancestor`, `id_sector`, `distance`) values ('%s', '%s', '%s');", relation_sectors$id_sector1[i], relation_sectors$id_sector2[i], relation_sectors$distance[i])
rs <- dbSendQuery(con, sql)
}
library(parallel)
cl <- makeCluster(4)
objectData <- data
# export "data" as well, since the worker function below references it directly;
# note: a live MySQL connection generally cannot be shared with parallel workers
clusterExport(cl, list("dbSendQuery","sprintf","relation_sectors","objectData","data","con"))
res <- parLapply(cl,1:nrow(objectData), fun = function(k) {
idxes <- which(relation_sectors$id_sector1 == data$Sector_code[k])
is_ancestor = FALSE
for(i in 1:length(idxes)){
if(relation_sectors$distance[i]!=0){
is_ancestor = TRUE
}
}
if(is_ancestor){
sql <- sprintf( "insert into `fact_emission` (`quantity`, `id_sector`, `id_country`, `id_gas`, `id_year` ) values ('%f', '%s', '%s', '%s','%d');", 0, data$Sector_code[k], data$Country_code[k], substr(data$Pollutant_name[k],1,3), data$Year[k])
rs <- dbSendQuery(con, sql)
}
else{
sql <- sprintf( "insert into `fact_emission` (`quantity`, `id_sector`, `id_country`, `id_gas`, `id_year` ) values ('%f', '%s', '%s', '%s','%d');", data$emissions[k], data$Sector_code[k], data$Country_code[k], substr(data$Pollutant_name[k],1,3), data$Year[k])
rs <- dbSendQuery(con, sql)
}
})
stopCluster(cl)
# Insert facts
for(k in 1:nrow(data)){
idxes <- which(relation_sectors$id_sector1 == data$Sector_code[k])
is_ancestor = FALSE
for(i in 1:length(idxes)){
if(relation_sectors$distance[i]!=0){
is_ancestor = TRUE
}
}
if(is_ancestor){
sql <- sprintf( "insert into `fact_emission` (`quantity`, `id_sector`, `id_country`, `id_gas`, `id_year` ) values ('%f', '%s', '%s', '%s','%d');", 0, data$Sector_code[k], data$Country_code[k], substr(data$Pollutant_name[k],1,3), data$Year[k])
rs <- dbSendQuery(con, sql)
}
else{
sql <- sprintf( "insert into `fact_emission` (`quantity`, `id_sector`, `id_country`, `id_gas`, `id_year` ) values ('%f', '%s', '%s', '%s','%d');", data$emissions[k], data$Sector_code[k], data$Country_code[k], substr(data$Pollutant_name[k],1,3), data$Year[k])
rs <- dbSendQuery(con, sql)
}
}<file_sep>CREATE SCHEMA `gas-emissions` ;
CREATE TABLE `gas-emissions`.`h_year` (
`id_year` INT NOT NULL,
PRIMARY KEY (`id_year`))
COMMENT = 'years';
INSERT INTO `gas-emissions`.`h_year` (`id_year`) VALUES ('1990');
INSERT INTO `gas-emissions`.`h_year` (`id_year`) VALUES ('1991');
INSERT INTO `gas-emissions`.`h_year` (`id_year`) VALUES ('1992');
INSERT INTO `gas-emissions`.`h_year` (`id_year`) VALUES ('1993');
INSERT INTO `gas-emissions`.`h_year` (`id_year`) VALUES ('1994');
INSERT INTO `gas-emissions`.`h_year` (`id_year`) VALUES ('1995');
INSERT INTO `gas-emissions`.`h_year` (`id_year`) VALUES ('1996');
INSERT INTO `gas-emissions`.`h_year` (`id_year`) VALUES ('1997');
INSERT INTO `gas-emissions`.`h_year` (`id_year`) VALUES ('1998');
INSERT INTO `gas-emissions`.`h_year` (`id_year`) VALUES ('1999');
INSERT INTO `gas-emissions`.`h_year` (`id_year`) VALUES ('2000');
INSERT INTO `gas-emissions`.`h_year` (`id_year`) VALUES ('2001');
INSERT INTO `gas-emissions`.`h_year` (`id_year`) VALUES ('2002');
INSERT INTO `gas-emissions`.`h_year` (`id_year`) VALUES ('2003');
INSERT INTO `gas-emissions`.`h_year` (`id_year`) VALUES ('2004');
INSERT INTO `gas-emissions`.`h_year` (`id_year`) VALUES ('2005');
INSERT INTO `gas-emissions`.`h_year` (`id_year`) VALUES ('2006');
INSERT INTO `gas-emissions`.`h_year` (`id_year`) VALUES ('2007');
INSERT INTO `gas-emissions`.`h_year` (`id_year`) VALUES ('2008');
INSERT INTO `gas-emissions`.`h_year` (`id_year`) VALUES ('2009');
INSERT INTO `gas-emissions`.`h_year` (`id_year`) VALUES ('2010');
INSERT INTO `gas-emissions`.`h_year` (`id_year`) VALUES ('2011');
INSERT INTO `gas-emissions`.`h_year` (`id_year`) VALUES ('2012');
INSERT INTO `gas-emissions`.`h_year` (`id_year`) VALUES ('2013');
INSERT INTO `gas-emissions`.`h_year` (`id_year`) VALUES ('2014');
CREATE TABLE `gas-emissions`.`h_gas` (
`id_gas` VARCHAR(3) NOT NULL,
`name` VARCHAR(100) NOT NULL,
PRIMARY KEY (`id_gas`));
INSERT INTO `gas-emissions`.`h_gas` (`id_gas`, `name`) VALUES ('CH4', 'CH4');
INSERT INTO `gas-emissions`.`h_gas` (`id_gas`, `name`) VALUES ('CO2', 'CO2');
INSERT INTO `gas-emissions`.`h_gas` (`id_gas`, `name`) VALUES ('HFC', 'HFCs - (CO2 equivalent)');
INSERT INTO `gas-emissions`.`h_gas` (`id_gas`, `name`) VALUES ('N2O', 'N2O');
INSERT INTO `gas-emissions`.`h_gas` (`id_gas`, `name`) VALUES ('NF3', 'NF3 - (CO2 equivalent)');
INSERT INTO `gas-emissions`.`h_gas` (`id_gas`, `name`) VALUES ('PFC', 'PFCs - (CO2 equivalent)');
INSERT INTO `gas-emissions`.`h_gas` (`id_gas`, `name`) VALUES ('SF6', 'SF6 - (CO2 equivalent)');
INSERT INTO `gas-emissions`.`h_gas` (`id_gas`, `name`) VALUES ('Uns', 'Unspecified mix of HFCs and PFCs - (CO2 equivalent)');
CREATE TABLE `gas-emissions`.`h_country` (
`id_country` VARCHAR(4) NOT NULL,
`name` VARCHAR(50) NOT NULL,
PRIMARY KEY (`id_country`));
CREATE TABLE `gas-emissions`.`h_sector` (
`id_sector` VARCHAR(20) NOT NULL,
`name` VARCHAR(350) NOT NULL,
PRIMARY KEY (`id_sector`));
ALTER TABLE `gas-emissions`.`h_sector`
ADD COLUMN `id_ancestor` VARCHAR(20) NULL AFTER `name`;
/*Before continuing, execute the R script which inserts all h_sector rows*/
CREATE TABLE `gas-emissions`.`fact_emission` (
`id_emission` INT NOT NULL AUTO_INCREMENT,
`quantity` DOUBLE NOT NULL,
`id_sector` VARCHAR(20) NOT NULL,
`id_country` VARCHAR(4) NOT NULL,
`id_gas` VARCHAR(3) NOT NULL,
`id_year` INT NOT NULL,
PRIMARY KEY (`id_emission`),
INDEX `id_sector_idx` (`id_sector` ASC),
INDEX `id_country_idx` (`id_country` ASC),
INDEX `id_gas_idx` (`id_gas` ASC),
INDEX `id_year_idx` (`id_year` ASC),
CONSTRAINT `id_sector`
FOREIGN KEY (`id_sector`)
REFERENCES `gas-emissions`.`h_sector` (`id_sector`)
ON DELETE CASCADE
ON UPDATE NO ACTION,
CONSTRAINT `id_country`
FOREIGN KEY (`id_country`)
REFERENCES `gas-emissions`.`h_country` (`id_country`)
ON DELETE NO ACTION
ON UPDATE NO ACTION,
CONSTRAINT `id_gas`
FOREIGN KEY (`id_gas`)
REFERENCES `gas-emissions`.`h_gas` (`id_gas`)
ON DELETE NO ACTION
ON UPDATE NO ACTION,
CONSTRAINT `id_year`
FOREIGN KEY (`id_year`)
REFERENCES `gas-emissions`.`h_year` (`id_year`)
ON DELETE NO ACTION
ON UPDATE NO ACTION);
CREATE TABLE `gas-emissions`.`sector_relation` (
`id_sector` VARCHAR(20) NULL,
`id_ancestor` VARCHAR(20) NULL,
`distance` INT NULL);
CREATE UNIQUE INDEX sector_closure_pk ON sector_relation (
id_sector,
id_ancestor);
CREATE INDEX sector_closure_emp ON sector_relation (
id_sector);
UPDATE `gas-emissions`.`h_sector` SET `name`='Household sector' WHERE `id_sector`='1.A.4';
UPDATE `gas-emissions`.`h_sector` SET `name`='Other Sectors' WHERE `id_sector`='1.A.5';
DELETE FROM `gas-emissions`.`h_country` WHERE `id_country`='EUA';
DELETE FROM `gas-emissions`.`h_country` WHERE `id_country`='EUC';
UPDATE `gas-emissions`.`h_country` SET `name`='United Kingdom' WHERE `id_country`='UK';
ALTER TABLE `gas-emissions`.`h_country`
ADD COLUMN `nb_inhabitants` INT NULL AFTER `name`;
/*insert nb_inhabitants in counries*/
UPDATE `gas-emissions`.`h_country` SET `nb_inhabitants`='8700471' WHERE `id_country`='AT';
UPDATE `gas-emissions`.`h_country` SET `nb_inhabitants`='11289853' WHERE `id_country`='BE';
UPDATE `gas-emissions`.`h_country` SET `nb_inhabitants`='7153784' WHERE `id_country`='BG';
UPDATE `gas-emissions`.`h_country` SET `nb_inhabitants`='8325194' WHERE `id_country`='CH';
UPDATE `gas-emissions`.`h_country` SET `nb_inhabitants`='848319' WHERE `id_country`='CY';
UPDATE `gas-emissions`.`h_country` SET `nb_inhabitants`='10553843' WHERE `id_country`='CZ';
UPDATE `gas-emissions`.`h_country` SET `nb_inhabitants`='82162000' WHERE `id_country`='DE';
UPDATE `gas-emissions`.`h_country` SET `nb_inhabitants`='5707251' WHERE `id_country`='DK';
UPDATE `gas-emissions`.`h_country` SET `nb_inhabitants`='1315944' WHERE `id_country`='EE';
UPDATE `gas-emissions`.`h_country` SET `nb_inhabitants`='46438422' WHERE `id_country`='ES';
UPDATE `gas-emissions`.`h_country` SET `nb_inhabitants`='5487308' WHERE `id_country`='FI';
UPDATE `gas-emissions`.`h_country` SET `nb_inhabitants`='66661621' WHERE `id_country`='FR';
UPDATE `gas-emissions`.`h_country` SET `nb_inhabitants`='10793526' WHERE `id_country`='GR';
UPDATE `gas-emissions`.`h_country` SET `nb_inhabitants`=' 4190669' WHERE `id_country`='HR';
UPDATE `gas-emissions`.`h_country` SET `nb_inhabitants`='9830485' WHERE `id_country`='HU';
UPDATE `gas-emissions`.`h_country` SET `nb_inhabitants`='4658530' WHERE `id_country`='IE';
UPDATE `gas-emissions`.`h_country` SET `nb_inhabitants`='332529' WHERE `id_country`='IS';
UPDATE `gas-emissions`.`h_country` SET `nb_inhabitants`='60665551' WHERE `id_country`='IT';
UPDATE `gas-emissions`.`h_country` SET `nb_inhabitants`='37622' WHERE `id_country`='LI';
UPDATE `gas-emissions`.`h_country` SET `nb_inhabitants`='2888558' WHERE `id_country`='LT';
UPDATE `gas-emissions`.`h_country` SET `nb_inhabitants`='576249' WHERE `id_country`='LU';
UPDATE `gas-emissions`.`h_country` SET `nb_inhabitants`='1968957' WHERE `id_country`='LV';
UPDATE `gas-emissions`.`h_country` SET `nb_inhabitants`='434403' WHERE `id_country`='MT';
UPDATE `gas-emissions`.`h_country` SET `nb_inhabitants`='16979120' WHERE `id_country`='NL';
UPDATE `gas-emissions`.`h_country` SET `nb_inhabitants`='5213985' WHERE `id_country`='NO';
UPDATE `gas-emissions`.`h_country` SET `nb_inhabitants`='37967209' WHERE `id_country`='PL';
UPDATE `gas-emissions`.`h_country` SET `nb_inhabitants`='10341330' WHERE `id_country`='PT';
UPDATE `gas-emissions`.`h_country` SET `nb_inhabitants`='19759968' WHERE `id_country`='RO';
UPDATE `gas-emissions`.`h_country` SET `nb_inhabitants`='9851017' WHERE `id_country`='SE';
UPDATE `gas-emissions`.`h_country` SET `nb_inhabitants`='2064188' WHERE `id_country`='SI';
UPDATE `gas-emissions`.`h_country` SET `nb_inhabitants`='5426252' WHERE `id_country`='SK';
UPDATE `gas-emissions`.`h_country` SET `nb_inhabitants`='78741053' WHERE `id_country`='TR';
UPDATE `gas-emissions`.`h_country` SET `nb_inhabitants`='65341183' WHERE `id_country`='UK';
<file_sep># Data warehouse #
# <NAME> and <NAME> #
list.of.packages <- c("data.table")
new.packages <- list.of.packages[!(list.of.packages %in% installed.packages()[,"Package"])]
if(length(new.packages)) install.packages(new.packages, repos = "http://cran.rstudio.com/")
library(data.table)
# http://data.europa.eu/euodp/fr/data/dataset/data_national-emissions-reported-to-the-unfccc-and-to-the-eu-greenhouse-gas-monitoring-mechanism-12
#Margot
#setwd("D:/data-warehouse/")
#Thu
setwd("C:/Users/<NAME>/Dropbox/DM/BD/Projet")
#### Loading data ####
data <- fread("UNFCCC_V19.csv", sep="\t", stringsAsFactors=T)
#### Deleting coloumns we are not interested in : Unit and Format_name ####
data <- data[,c(4, 10, 11):=NULL]
colnames(data)
#### Delete years before 1990 ####
data <- subset(data, Year != '1985-1987')
data$Year <- as.numeric(levels(data$Year))[data$Year]
data <- subset(data, Year >= 1990)
#### Remove total + all green house gases + EU####
data <- subset(data, Pollutant_name != "All greenhouse gases - (CO2 equivalent)")
data$Pollutant_name <- factor(data$Pollutant_name)
levels(data$Parent_sector_code)[levels(data$Parent_sector_code)=="Sectors/Totals_incl_incl"] <- ""
data <- subset(data, !grepl("Sector", Sector_code))
data$Sector_code <- factor(data$Sector_code)
data$Sector_name <- factor(data$Sector_name)
data <- subset(data, !grepl("EU", Country))
levels(data$Country)[levels(data$Country)=="United Kingdom (Convention)"] <- "United Kingdom"
data$Country <- factor(data$Country)
data$Country_code <- factor(data$Country_code)
#### Reorganize Sector_code ####
## Remove 3.1
levels(data$Parent_sector_code)[levels(data$Parent_sector_code)=="3.1"] <- "3"
data <- subset(data, Sector_code != "3.1")
data$Sector_code <- factor(data$Sector_code)
## Replace 1.AA by 1.A
levels(data$Sector_code)[levels(data$Sector_code)=="1.AA"] <- "1.A"
levels(data$Parent_sector_code)[levels(data$Parent_sector_code)=="1.AA"] <- "1.A"
## Replace the bad Sector_code of sector 4
levels(data$Sector_code)[levels(data$Sector_code)=="4.A Emissions/Removal"] <- "4.A.3"
levels(data$Sector_code)[levels(data$Sector_code)=="4.B Emissions/Removal"] <- "4.B.3"
levels(data$Sector_code)[levels(data$Sector_code)=="4.C Emissions/Removal"] <- "4.C.3"
levels(data$Sector_code)[levels(data$Sector_code)=="4.D Emissions/Removal"] <- "4.D.3"
levels(data$Sector_code)[levels(data$Sector_code)=="4.E Biomass Burning"] <- "4.E.3"
levels(data$Sector_code)[levels(data$Sector_code)=="-"] <- "4.F.1"
data$Parent_sector_code[which(data$Sector_code=="4.F.1")] <- "4.F"
#### Remove sector code from Sector_name ####
levels(data$Sector_name)[levels(data$Sector_name)=="- 4(IV) Indirect N2O Emissions from Managed Soils"] <- "Indirect N2O Emissions from N Mineralization/Immobilization"
sector_names <- levels(data$Sector_name)
for (i in 1:length(sector_names)){
sector <- sector_names[i]
name = unlist(strsplit(sector, split="-"))
if (length(name) == 2){
levels(data$Sector_name)[levels(data$Sector_name)==sector] <- trimws(name[2])
}
if (length(name) < 2){
if (grepl("4\\(", sector))
levels(data$Sector_name)[levels(data$Sector_name)==sector] <- trimws(unlist(strsplit(sector, split="\\)"))[2])
}
if (length(name) > 2){
new_name <- paste(trimws(name[2]), '-', trimws(name[3]), sep='')
levels(data$Sector_name)[levels(data$Sector_name)==sector] <- new_name
}
}
#### Create new top-level sectors for international activities, biomass and indirect CO2 ####
levels(data$Sector_code)[levels(data$Sector_code)=="ind_CO2"] <- "7"
levels(data$Parent_sector_code)[levels(data$Parent_sector_code)=="1.D"] <- ""
levels(data$Sector_code)[levels(data$Sector_code)=="1.D.1"] <- "8"
levels(data$Sector_code)[levels(data$Sector_code)=="1.D.1.a"] <- "8.A"
levels(data$Sector_code)[levels(data$Sector_code)=="1.D.1.b"] <- "8.B"
levels(data$Parent_sector_code)[levels(data$Parent_sector_code)=="1.D.1"] <- "8"
levels(data$Sector_code)[levels(data$Sector_code)=="1.D.2"] <- "9"
levels(data$Sector_code)[levels(data$Sector_code)=="1.D.3"] <- "10"
#### Add sector 5.F ####
for(i in which(data$Sector_code == "5.F.1")){
temp <- subset(data, Country == data$Country[i] & Pollutant_name == data$Pollutant_name[i] & Year == data$Year[i] & Parent_sector_code == "5.F")
new_row <- data[i,]
new_row$Parent_sector_code <- "5"
new_row$Sector_code <- "5.F"
new_row$Sector_name <- "Long-term C Storage"
new_row$emissions <- sum(temp$emissions)
data <- rbind(data, new_row)
}
#### Recompute the sum sector 4, 5 and 1 ####
for(i in which(data$Sector_code == "4.F")){
temp <- subset(data, Country == data$Country[i] & Pollutant_name == data$Pollutant_name[i] & Year == data$Year[i] & Parent_sector_code == "4.F")
data$emissions[i] <- sum(temp$emissions)
}
for(i in which(data$Sector_code == "4")){
temp <- subset(data, Country == data$Country[i] & Pollutant_name == data$Pollutant_name[i] & Year == data$Year[i] & Parent_sector_code == "4")
data$emissions[i] <- sum(temp$emissions)
}
for(i in which(data$Sector_code == "1")){
temp <- subset(data, Country == data$Country[i] & Pollutant_name == data$Pollutant_name[i] & Year == data$Year[i] & Parent_sector_code == "1")
data$emissions[i] <- sum(temp$emissions)
}
for(i in which(data$Sector_code == "5")){
temp <- subset(data, Country == data$Country[i] & Pollutant_name == data$Pollutant_name[i] & Year == data$Year[i] & Parent_sector_code == "5")
data$emissions[i] <- sum(temp$emissions)
}
#### Save cleaned data ####
write.table(data, "clean_data.csv", row.names = F, append = F, sep = ";")
<file_sep># Data warehouse #
# <NAME> and <NAME> #
# Just inspecting the data, changing nothing #
list.of.packages <- c("data.table")
new.packages <- list.of.packages[!(list.of.packages %in% installed.packages()[,"Package"])]
if(length(new.packages)) install.packages(new.packages, repos = "http://cran.rstudio.com/")
library(data.table)
# http://data.europa.eu/euodp/fr/data/dataset/data_national-emissions-reported-to-the-unfccc-and-to-the-eu-greenhouse-gas-monitoring-mechanism-12
#Margot
#setwd("D:/data-warehouse/")
#Thu
setwd("C:/Users/<NAME>/Dropbox/DM/BD/Projet")
#### Loading data ####
data <- fread("UNFCCC_V19.csv", sep="\t", stringsAsFactors=T)
#### Format_name ####
levels(data$Format_name)
# Only one levels -> can remove this factor
#### Unit ####
levels(data$Unit)
#[1] "Gg" "Gg CO2 equivalent"
# -> can remove this factor
#### Sector_name ####
weird_sectors <- data[grepl("4\\(", data$Sector_name),] #extract the lines with weird sector name
sum(weird_sectors$emissions) # not 0 -> cannot remove
not_zero <- weird_sectors[which(weird_sectors$emissions!=0),]
not_zero$Sector_name <- factor(not_zero$Sector_name) # drop levels that don't exist anymore
not_zero$Pollutant_name <- factor(not_zero$Pollutant_name)
levels(not_zero$Sector_name) # All weird sectors have non-zero values
levels(not_zero$Pollutant_name)
# number of lines != 0 with weird sector name is ~5000 ~ 1% of the data -> can be considered negligible
# We can remove all the weird sectors but will have to recalculate the sums in the parent sectors !!!!<file_sep>File for multidimensional schema :
https://docs.google.com/drawings/d/1uvjTJThoZ_8RC91hSBfMpBi7LciW2G3-VdcedJ9pQM4/edit?usp=sharing
File for Relational Database schema :
https://docs.google.com/drawings/d/1V_FbutdW2vdMZQEGEJpqVN4kU0SzYC3bkdzFeEOeZys/edit?usp=sharing
Report:
https://www.overleaf.com/6954144kncswfdnjhfz#/23795848/
Technical Architecture :
https://docs.google.com/drawings/d/1HLDSqUifJeRfWEgomuAoGrcSP8UZ5D_mMWj1AWFhfec/edit?usp=sharing
|
e5e69e76df677b892e9a61746f5e8697041f2cff
|
[
"Markdown",
"SQL",
"R"
] | 5
|
R
|
mversionpenny/gas-emissions-data-warehouse
|
bfe89b394328b74aaf3980345af95aee961815ab
|
66350969c5080c13ebee66177111745959ad3b7f
|
refs/heads/master
|
<repo_name>Yiroha/Leetcode_History<file_sep>/src/Tree/PathSum_437.java
package Tree;
public class PathSum_437 {
public class TreeNode {
int val;
TreeNode left;
TreeNode right;
TreeNode(int x) { val = x; }
}
    /*437. Path Sum III*/
    /*
    Solved recursively;
    Two cases: restart the running sum from the current node, or continue with the remaining target inherited from the previous node;
     */
public int pathSum(TreeNode root, int sum) {
if(root == null){
return 0;
}
return fun(root,sum) + pathSum(root.left,sum) + pathSum(root.right,sum);
}
public int fun(TreeNode root, int sum){
if(root == null){
return 0;
}
int res = 0;
if(sum == root.val){
res++;
}
return res + fun(root.left,sum - root.val) + fun(root.right,sum - root.val);
}
}
<file_sep>/src/BinarySearch/FindDuplicate_287.java
package BinarySearch;
public class FindDuplicate_287 {
    /*287. Find the Duplicate Number*/
    /*
    Binary search over the range of values, not over the array indices;
    Time complexity O(N log N)
     */
public int findDuplicate(int[] nums) {
int len = nums.length;
int left = 1, right = len - 1, mid, count;
while(left < right){
mid = (right - left) / 2 + left;
count = 0;
for(int n : nums){
if(n <= mid){
count++;
}
}
if(count <= mid){
left = mid + 1;
}else{
right = mid;
}
}
return left;
}
}
<file_sep>/src/Math/ComputeArea_223.java
package Math;
public class ComputeArea_223 {
    /*223. Rectangle Area*/
    /*
    Case analysis: compute the overlapping width and the overlapping height separately, covering the 4 possible overlap configurations for each;
     */
public int computeArea(int A, int B, int C, int D, int E, int F, int G, int H) {
int s1 = (C - A) * (D - B);
int s2 = (G - E) * (H - F);
int l = 0, w = 0;
if(C >= G && A >= E && G >= A){
w = G - A;
}
if(C >= E && E >= A && G >= C){
w = C - E;
}
if(G > C && A > E){
w = C - A;
}
if(C > G && E > A){
w = G - E;
}
if(H >= D && D >= F && F >= B){
l = D - F;
}
if(D >= H && H >= B && B >= F){
l = H - B;
}
if(H > D && B > F){
l = D - B;
}
if(D > H && F > B){
l = H -F;
}
return s1 + s2 - l * w;
}
}
<file_sep>/src/Tree/TrieTree_208.java
package Tree;
public class TrieTree_208 {
    /*208. Implement Trie (Prefix Tree)*/
    /*
    Each trie node is backed by an array of child nodes holding all of the current node's children,
    plus a boolean flag marking whether the node is the end of a word;
    */
public class TrieNode{
public TrieNode[] childNodes;
public static final int R = 26;
public boolean flag = false;
public TrieNode(){
childNodes = new TrieNode[R];
}
public boolean contains(char c){
return childNodes[c - 'a'] != null;
}
public void put(char c){
childNodes[c - 'a'] = new TrieNode();
}
public TrieNode get(char c){
return childNodes[c - 'a'];
}
public void setF(){
flag = true;
}
public boolean getF(){
return flag;
}
}
public TrieNode root;
/** Initialize your data structure here. */
public TrieTree_208() {
root = new TrieNode();
}
/** Inserts a word into the trie. */
public void insert(String word) {
TrieNode cur = root;
for(int i = 0; i < word.length(); i++){
char c = word.charAt(i);
if(!cur.contains(c)){
cur.put(c);
}
cur = cur.get(c);
}
cur.setF();
}
/** Returns if the word is in the trie. */
public boolean search(String word) {
TrieNode cur = root;
for(int i = 0; i < word.length(); i++){
char c = word.charAt(i);
if(!cur.contains(c)){
return false;
}
cur = cur.get(c);
}
return cur.getF();
}
/** Returns if there is any word in the trie that starts with the given prefix. */
public boolean startsWith(String prefix) {
TrieNode cur = root;
for(int i = 0; i < prefix.length(); i++){
char c = prefix.charAt(i);
if(!cur.contains(c)){
return false;
}
cur = cur.get(c);
}
return true;
}
}
<file_sep>/src/Tree/InvertTree_226.java
package Tree;
public class InvertTree_226 {
public class TreeNode {
int val;
TreeNode left;
TreeNode right;
TreeNode(int x) { val = x; }
}
    /*226. Invert Binary Tree*/
    /*
    Solved recursively;
    Swap the node's left and right children, then recurse on each child;
    If the root is null return immediately, otherwise return the root;
    */
public TreeNode invertTree(TreeNode root) {
if(root == null){
return null;
}
TreeNode temp = root.right;
root.right = root.left;
root.left = temp;
invertTree(root.left);
invertTree(root.right);
return root;
}
}
<file_sep>/DangDangIMSystem_WEB/src/test/java/com/per/iroha/imsystem/Test.java
package com.per.iroha.imsystem;
import com.per.iroha.mapper.UserMapperImpl;
import com.per.iroha.model.Advice;
import java.sql.SQLException;
import java.util.*;
public class Test {
public class TreeNode {
int val = 0;
TreeNode left = null;
TreeNode right = null;
public TreeNode(int val) {
this.val = val;
}
}
@org.junit.Test
public void test() throws SQLException {
UserMapperImpl userMapper = new UserMapperImpl();
HashMap map = new HashMap();
Advice advice = new Advice();
advice.setDate("123123");
advice.setFromUsername("123123");
advice.setAdvice("123123");
userMapper.saveAdvice(advice);
}
public int StrToInt(String str) {
if(str.equals("") || str.length() == 0){
return 0;
}
int flag = 0;
int sum = 0;
char[] a = str.toCharArray();
if(a[0] == '-'){
flag = 1;
}
for(int i = flag; i < a.length; i++){
if(a[i] == '+'){
continue;
}
if(a[i] > '9' || a[i] < '0'){
return 0;
}
sum = sum * 10 + a[i] - '0';
}
return flag == 0 ? sum : sum * (-1);
}
public boolean duplicate(int numbers[],int length,int [] duplication) {
HashSet<Integer> hashSet = new HashSet<>();
for (int i = 0; i < length; i++){
if(hashSet.contains(numbers[i])){
duplication[0] = numbers[i];
return false;
}else{
hashSet.add(numbers[i]);
}
}
return true;
}
public int[] multiply(int[] A) {
int length = A.length;
int[] B = new int[length];
if(length != 0){
B[0] = 1;
for(int i = 1; i < length; i++){
B[i] = B[i - 1] * A[i - 1];
}
int temp = 1;
for(int j = length - 2; j >= 0; j--){
temp *= A[j + 1];
B[j] *= temp;
}
}
return B;
}
public boolean match(char[] str, char[] pattern)
{
if(str.length == 0 || pattern.length == 0){
return false;
}
int strIndex = 0, parIndex = 0;
return matchString(str,pattern,strIndex,parIndex);
}
public boolean matchString(char[] str, char[] pattern,int strIndex,int parIndex){
if(strIndex == str.length && parIndex == pattern.length){
return true;
}
if(strIndex != str.length && parIndex == pattern.length){
return false;
}
if(parIndex + 1 < pattern.length && pattern[parIndex + 1] == '*'){
if((strIndex != str.length && pattern[parIndex] == str[strIndex]) || (strIndex != str.length && pattern[parIndex] == '.')){
return matchString(str,pattern,strIndex + 1,parIndex) || matchString(str,pattern,strIndex,parIndex + 2) || matchString(str,pattern,strIndex + 1,parIndex + 2);
}else{
return matchString(str,pattern,strIndex,parIndex + 2);
}
}
if ((strIndex != str.length && pattern[parIndex] == str[strIndex]) || (strIndex != str.length && pattern[parIndex] == '.')){
return matchString(str,pattern,strIndex + 1,parIndex + 1);
}
return false;
}
private int index = 0;
public boolean isNumeric(char[] str) {
if(str.length == 0){
return false;
}
boolean flag = scanInteger(str);
if(index < str.length && str[index] == '.'){
index++;
flag = scanInteger(str) || flag;
}
if(index < str.length && (str[index] == 'E' || str[index] == 'e')){
index++;
flag = scanUnsignInteger(str) && flag;
}
return flag && index == str.length;
}
private boolean scanInteger(char[] str){
if(index < str.length && (str[index] == '-' || str[index] == '+')){
index++;
}
return scanUnsignInteger(str);
}
private boolean scanUnsignInteger(char[] str){
int start = index;
while(index < str.length && (str[index] >= '0' && str[index] <= '9')){
index++;
}
return index - start > 0;
}
private LinkedHashMap<Character,Integer> linkedHashMap = new LinkedHashMap<>();
public void Insert(char ch)
{
if(linkedHashMap.containsKey(ch)){
linkedHashMap.put(ch,linkedHashMap.get(ch) + 1);
}else{
linkedHashMap.put(ch,1);
}
}
    //return the first character that appears only once in the current string stream
public char FirstAppearingOnce()
{
for(Map.Entry<Character,Integer> entry : linkedHashMap.entrySet()){
if(entry.getValue() == 1){
return entry.getKey();
}
}
return '#';
}
public class ListNode {
int val;
ListNode next = null;
ListNode(int val) {
this.val = val;
}
}
public ListNode EntryNodeOfLoop(ListNode pHead)
{
if(pHead == null || pHead.next == null){
return null;
}
ListNode fast = pHead.next;
ListNode slow = pHead;
while(fast != slow){
if(fast.next != null && fast.next.next != null){
fast = fast.next;
slow = slow.next;
}else{
return null;
}
}
slow = pHead;
while(fast != slow){
fast = fast.next;
slow = slow.next;
}
return slow;
}
public ListNode deleteDuplication(ListNode pHead)
{
ListNode p = new ListNode(-1);
p.next = pHead;
ListNode slow = p;
ListNode fast = pHead;
while(fast != null && fast.next != null){
if(fast.val == fast.next.val){
int val = fast.val;
while(fast != null && fast.val == val){
fast = fast.next;
}
slow.next = fast;
}else{
slow = fast;
fast = fast.next;
}
}
return p.next;
}
public class TreeLinkNode {
int val;
TreeLinkNode left = null;
TreeLinkNode right = null;
TreeLinkNode next = null;
TreeLinkNode(int val) {
this.val = val;
}
}
public TreeLinkNode GetNext(TreeLinkNode pNode)
{
if(pNode == null){
return null;
}
if(pNode.right != null){
pNode = pNode.right;
while(pNode.left != null){
pNode = pNode.left;
}
return pNode;
}
while(pNode.next != null){
if(pNode.next.left == pNode){
return pNode.next;
}
pNode = pNode.next;
}
return null;
}
boolean isSymmetrical(TreeNode pRoot)
{
if(pRoot == null){
return true;
}
return compare(pRoot.left, pRoot.right);
}
private boolean compare(TreeNode left,TreeNode right){
if(left == null){
return right == null;
}
if(right == null){
return false;
}
if(left.val != right.val){
return false;
}
        return compare(left.left, right.right) && compare(left.right, right.left);
}
public ArrayList<ArrayList<Integer> > Print(TreeNode pRoot) {
ArrayList<ArrayList<Integer>> result = new ArrayList<>();
if(pRoot == null){
return result;
}
boolean flag = true;
Stack<TreeNode> stack1 = new Stack<>();
Stack<TreeNode> stack2 = new Stack<>();
stack1.push(pRoot);
while(!stack1.isEmpty() || !stack2.isEmpty()){
if(flag){
ArrayList<Integer> arrayList = new ArrayList<>();
while(!stack1.isEmpty()){
TreeNode node = stack1.pop();
arrayList.add(node.val);
if(node.left != null){
stack2.push(node.left);
}
if(node.right != null){
stack2.push(node.right);
}
}
result.add(arrayList);
flag = !flag;
}else{
ArrayList<Integer> arrayList = new ArrayList<>();
while(!stack2.isEmpty()){
TreeNode node = stack2.pop();
arrayList.add(node.val);
if(node.right != null){
stack1.push(node.right);
}
if(node.left != null){
stack1.push(node.left);
}
}
result.add(arrayList);
flag = !flag;
}
}
return result;
}
ArrayList<ArrayList<Integer> > Printl(TreeNode pRoot) {
ArrayList<ArrayList<Integer>> res = new ArrayList<>();
if(pRoot == null){
return res;
}
boolean flag = true;
LinkedList<TreeNode> linkedList1 = new LinkedList<>();
LinkedList<TreeNode> linkedList2 = new LinkedList<>();
linkedList1.add(pRoot);
while(!linkedList1.isEmpty() || !linkedList2.isEmpty()){
if(flag){
ArrayList<Integer> arrayList = new ArrayList<>();
while(!linkedList1.isEmpty()){
TreeNode node = linkedList1.poll();
arrayList.add(node.val);
if(node.left != null){
linkedList2.add(node.left);
}
if(node.right != null){
linkedList2.add(node.right);
}
}
res.add(arrayList);
flag = !flag;
}else{
ArrayList<Integer> arrayList = new ArrayList<>();
while(!linkedList2.isEmpty()){
TreeNode node = linkedList2.poll();
arrayList.add(node.val);
if(node.left != null){
linkedList1.add(node.left);
}
if(node.right != null){
linkedList1.add(node.right);
}
}
res.add(arrayList);
flag = !flag;
}
}
return res;
}
TreeNode KthNode(TreeNode pRoot, int k)
{
int i = 0;
Stack<TreeNode> stack = new Stack<>();
while(!stack.empty() || pRoot != null){
while(pRoot != null){
stack.push(pRoot);
pRoot = pRoot.left;
}
pRoot = stack.pop();
i++;
            if(i == k){
return pRoot;
}else{
pRoot = pRoot.right;
}
}
return null;
}
private PriorityQueue<Integer> maxHeap = new PriorityQueue<>(15, new Comparator<Integer>() {
@Override
public int compare(Integer o1, Integer o2) {
return o2 - o1;
}
});
private PriorityQueue<Integer> minHeap = new PriorityQueue<>();
private int count = 0;
    //read the incoming value and place it into the appropriate heap
public void Insert(Integer num) {
if (count %2 == 0) {
maxHeap.offer(num);
int filteredMaxNum = maxHeap.poll();
minHeap.offer(filteredMaxNum);
} else {
minHeap.offer(num);
int filteredMinNum = minHeap.poll();
maxHeap.offer(filteredMinNum);
}
count++;
}
    //compute the median
public Double GetMedian() {
if (count %2 == 0) {
return new Double((minHeap.peek() + maxHeap.peek())) / 2;
} else {
return new Double(minHeap.peek());
}
}
public ArrayList<Integer> maxInWindows(int [] num, int size)
{
ArrayList<Integer> arrayList = new ArrayList<>();
LinkedList<Integer> linkedList = new LinkedList<>();
if(num.length == 0 || size <= 0 || num.length < size){
return arrayList;
}
for(int i = 0; i < num.length; i++){
while(!linkedList.isEmpty() && num[linkedList.peekLast()] < num[i]){
linkedList.pollLast();
}
linkedList.addLast(i);
if(linkedList.peekFirst() == i - size){
linkedList.pollFirst();
}
if(i - size + 1 >= 0){
arrayList.add(num[linkedList.peekFirst()]);
}
}
return arrayList;
}
public boolean hasPath(char[] matrix, int rows, int cols, char[] str) {
boolean[] check = new boolean[matrix.length];
for(int i = 0; i < rows; i++){
for(int j = 0; j < cols; j++){
if(searchFromHere(matrix,rows,cols,i,j,str,0,check)){
return true;
}
}
}
return false;
}
private boolean searchFromHere(char[] matrix, int rows, int cols, int r, int c, char[] str,int index, boolean[] check){
if(r < 0 || r >= rows || c < 0 || c >= cols || matrix[r * cols + c] != str[index] || check[r * cols + c]){
return false;
}
if(index == str.length -1){
return true;
}
check[r * cols + c] = true;
if(searchFromHere(matrix,rows,cols,r - 1,c,str,index + 1,check) ||
searchFromHere(matrix,rows,cols,r + 1,c,str,index + 1,check)||
searchFromHere(matrix,rows,cols,r,c - 1,str,index + 1,check)||
searchFromHere(matrix,rows,cols,r,c + 1,str,index + 1,check)){
return true;
}
check[r * cols + c] = false;
return false;
}
public int movingCount(int threshold, int rows, int cols)
{
boolean[][] check = new boolean[rows][cols];
return search(threshold,rows,cols,0,0,check);
}
private int search(int threshold, int rows, int cols, int r, int c, boolean[][] check){
if(r < 0 || r >= rows || c < 0 || c >= cols || bitCount(r) + bitCount(c) > threshold || check[r][c]){
return 0;
}
check[r][c] = true;
return search(threshold,rows,cols,r - 1,c,check) +
search(threshold,rows,cols,r + 1,c,check) +
search(threshold,rows,cols,r,c - 1,check) +
search(threshold,rows,cols,r,c + 1,check) + 1;
}
private int bitCount(int num){
int count = 0;
while(num != 0){
count += num % 10;
num /= 10;
}
return count;
}
}
<file_sep>/src/Backtracking/LetterCombinations_17.java
package Backtracking;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.LinkedList;
import java.util.List;
public class LetterCombinations_17 {
    /*17. Letter Combinations of a Phone Number*/
    /*
    Solved with backtracking;
    a HashMap stores the mapping from each digit to its letter string;
    the termination condition is index reaching the length of the phone number;
    at each step the choice is to pick one letter from the current digit's letter string;
    when backtracking, remove the last chosen letter from the list;
    */
public List<String> res = new ArrayList<>();
public HashMap<Character,String> map = new HashMap<>();
public List<String> letterCombinations(String digits) {
if(digits.length() == 0){
return res;
}
map.put('2',"abc");
map.put('3',"def");
map.put('4',"ghi");
map.put('5',"jkl");
map.put('6',"mno");
map.put('7',"pqrs");
map.put('8',"tuv");
map.put('9',"wxyz");
bb(digits,0,new LinkedList());
return res;
}
public void bb(String str, int index,LinkedList<Character> list){
if(index == str.length()){
StringBuilder sb = new StringBuilder();
for(Character s : list){
sb.append(s);
}
res.add(sb.toString());
return;
}
String temp = map.get(str.charAt(index));
for(int i = 0; i < temp.length(); i++){
list.add(temp.charAt(i));
bb(str,index + 1,list);
list.removeLast();
}
}
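    /*
     Added usage sketch (not part of the original solution; input is the LeetCode example "23"):
     a quick manual check of the backtracking described above.
    */
    public static void main(String[] args) {
        // should print [ad, ae, af, bd, be, bf, cd, ce, cf]
        System.out.println(new LetterCombinations_17().letterCombinations("23"));
    }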
}
<file_sep>/DangDangIMSystem_WEB/src/main/java/com/per/iroha/model/WebSocketMessage.java
package com.per.iroha.model;
import lombok.AllArgsConstructor;
import lombok.Data;
import lombok.NoArgsConstructor;
import java.util.Date;
@Data
@AllArgsConstructor
@NoArgsConstructor
public class WebSocketMessage {
    private int type; // message type
    private int fromUserId; // sender user id
    private String fromUsername; // sender username
    private int toUserId; // recipient user id
    private String toUsername; // recipient username
    private String group; // target group name
    private String date; // send date
    private String time; // send time
    private String message; // message body
    private int err; // error code
@Override
public String toString() {
return "WebSocketMessage{" +
"type=" + type +
", fromUserId=" + fromUserId +
", fromUsername='" + fromUsername + '\'' +
", toUserId=" + toUserId +
", toUsername='" + toUsername + '\'' +
", group='" + group + '\'' +
", date='" + date + '\'' +
", time='" + time + '\'' +
", message='" + message + '\'' +
", err=" + err +
'}';
}
}
<file_sep>/src/Sort/TopKFrequent_347.java
package Sort;
import java.util.*;
public class TopKFrequent_347 {
    /*347. Top K Frequent Elements*/
    /*
    A top-K problem: count frequencies with a HashMap, then keep the K most frequent elements in a min-heap;
    */
public List<Integer> topKFrequent(int[] nums, int k) {
List<Integer> res = new ArrayList<>();
HashMap<Integer,Integer> map = new HashMap<>();
for(int i = 0; i < nums.length; i++){
if(!map.containsKey(nums[i])){
map.put(nums[i],1);
}else{
map.put(nums[i],map.get(nums[i]) + 1);
}
}
PriorityQueue<Integer> heap = new PriorityQueue<>((n1, n2) -> map.get(n1) - map.get(n2));
for(Integer n : map.keySet()){
heap.add(n);
if(heap.size() > k){
heap.poll();
}
}
for(Integer n : heap){
res.add(n);
}
Collections.reverse(res);
return res;
}
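    /*
     Added usage sketch (not part of the original solution; input is the problem example):
     1 appears three times and 2 twice, so the top-2 list should be [1, 2].
    */
    public static void main(String[] args) {
        int[] nums = {1, 1, 1, 2, 2, 3};
        System.out.println(new TopKFrequent_347().topKFrequent(nums, 2)); // should print [1, 2]
    }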
}
<file_sep>/src/Stack/ScoreOfParentheses_856.java
package Stack;
import java.util.Stack;
public class ScoreOfParentheses_856 {
    /*856. Score of Parentheses*/
    /*
    Parenthesis problems are handled with a stack;
    push 0 at the bottom of the stack to hold the final result;
    on '(', look at the next character: push 1 if it is ')', otherwise push 0;
    on ')', look at the previous character: if it is '(' add one to the value below, otherwise double the popped value before adding it to the new top;
    */
public int scoreOfParentheses(String S) {
Stack<Integer> stack = new Stack<>();
stack.push(0);
for(int i = 0; i < S.length(); i++){
if(S.charAt(i) == '('){
if(S.charAt(i + 1) == ')'){
stack.push(1);
}else{
stack.push(0);
}
}else{
int num = stack.pop();
if(num == 1 && S.charAt(i - 1) == '('){
stack.push(stack.pop() + 1);
}else{
num *= 2;
stack.push(stack.pop() + num);
}
}
}
return stack.pop();
}
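    /*
     Added usage sketch (not part of the original solution): "(()(()))" scores
     (1 + 2*1) * 2 = 6 under the rules encoded by the stack above.
    */
    public static void main(String[] args) {
        System.out.println(new ScoreOfParentheses_856().scoreOfParentheses("(()(()))")); // should print 6
    }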
}
<file_sep>/src/DP/CountBits_338.java
package DP;
public class CountBits_338 {
    /*338. Counting Bits*/
    /*
    Dynamic programming:
    an even number ends with 0 in binary and an odd number ends with 1;
    the bit count of the current number equals the count of the number shifted right by one plus its last bit;
    */
public int[] countBits(int num) {
int[] dp = new int[num + 1];
for(int i = 1; i <= num; i++){
dp[i] = dp[i >> 1] + i % 2;
}
return dp;
}
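    /*
     Added usage sketch (not part of the original solution): for num = 5 the recurrence
     dp[i] = dp[i >> 1] + (i % 2) should give the bit counts of 0..5.
    */
    public static void main(String[] args) {
        // should print [0, 1, 1, 2, 1, 2]
        System.out.println(java.util.Arrays.toString(new CountBits_338().countBits(5)));
    }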
}
<file_sep>/src/Math/Reverse_7.java
package Math;
public class Reverse_7 {
    /*7. Reverse Integer*/
    /*
    Watch out for results that overflow beyond Integer.MAX_VALUE / Integer.MIN_VALUE;
    */
public int reverse(int x) {
boolean flag = false;
int res = 0;
if(x < 0){
flag = true;
}
while(x != 0){
if(flag){
if( res < Integer.MIN_VALUE / 10 || (res == Integer.MIN_VALUE / 10 && x % 10 < -8)){
return 0;
}
}else{
if(res > Integer.MAX_VALUE / 10 || (res == Integer.MAX_VALUE / 10 && x % 10 > 7)){
return 0;
}
}
res = res * 10 + x % 10;
x /= 10;
}
return res;
}
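    /*
     Added usage sketch (not part of the original solution): a negative input and an
     input whose reversal overflows int, which should return 0 per the checks above.
    */
    public static void main(String[] args) {
        Reverse_7 r = new Reverse_7();
        System.out.println(r.reverse(-123));       // should print -321
        System.out.println(r.reverse(1534236469)); // should print 0 (overflow)
    }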
}
<file_sep>/src/DFS/IsMatch_10.java
package DFS;
public class IsMatch_10 {
    /*10. Regular Expression Matching*/
    /*
    Solved with DFS;
    the accepting state is both the string and the pattern being fully consumed (return true); if the pattern is consumed but the string is not, return false;
    matching must consider whether the character after the current pattern character is '*';
    when the next character is '*', the current pattern character can match zero or more characters;
    */
public boolean isMatch(String s, String p) {
if(s == null && p == null){
return true;
}
if(s == "" || p == ""){
return false;
}
return bb(s,p,0,0);
}
public boolean bb(String s, String p, int i, int j){
if(i == s.length() && j == p.length()){
return true;
}
if(i != s.length() && j == p.length()){
return false;
}
if(j + 1 < p.length() && p.charAt(j + 1) == '*'){
if(i != s.length() && (s.charAt(i) == p.charAt(j) || p.charAt(j) == '.')){
return bb(s,p,i + 1,j) || bb(s,p,i,j + 2);
}else{
return bb(s,p,i,j + 2);
}
}else{
if(i != s.length() && (s.charAt(i) == p.charAt(j) || p.charAt(j) == '.')){
return bb(s,p,i + 1,j + 1);
}
}
return false;
}
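    /*
     Added usage sketch (not part of the original solution): two classic inputs for the
     DFS matcher above; the expected values follow the problem statement examples.
    */
    public static void main(String[] args) {
        IsMatch_10 m = new IsMatch_10();
        System.out.println(m.isMatch("aab", "c*a*b"));              // should print true
        System.out.println(m.isMatch("mississippi", "mis*is*p*.")); // should print false
    }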
}
<file_sep>/src/Stack/DailyTemperatures_739.java
package Stack;
import java.util.Stack;
public class DailyTemperatures_739 {
    /*739. Daily Temperatures*/
    /*
    Maintain a monotonic stack while traversing the array: when the current temperature is lower than the one at the stack top, push the current day's index;
    when the current temperature is higher, pop the index on top and write the difference of the indices into the result;
    */
public int[] dailyTemperatures(int[] T) {
Stack<Integer> stack = new Stack<>();
int[] res = new int[T.length];
for(int i = 0; i < T.length; i++){
while(!stack.isEmpty() && T[stack.peek()] < T[i]){
int d = stack.pop();
res[d] = i - d;
}
stack.push(i);
}
while(!stack.isEmpty()){
res[stack.pop()] = 0;
}
return res;
}
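    /*
     Added usage sketch (not part of the original solution): the classic example input;
     the monotonic stack above should yield the number of waiting days per position.
    */
    public static void main(String[] args) {
        int[] T = {73, 74, 75, 71, 69, 72, 76, 73};
        // should print [1, 1, 4, 2, 1, 1, 0, 0]
        System.out.println(java.util.Arrays.toString(new DailyTemperatures_739().dailyTemperatures(T)));
    }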
}
<file_sep>/src/Tree/MergeTrees_617.java
package Tree;
public class MergeTrees_617 {
public class TreeNode {
int val;
TreeNode left;
TreeNode right;
TreeNode(int x) { val = x; }
}
    /*617. Merge Two Binary Trees*/
    /*
    For each pair of nodes there are three cases: both null, exactly one null, neither null;
    when neither is null, create a new node and recursively merge the left and right subtrees;
    */
public TreeNode mergeTrees(TreeNode t1, TreeNode t2) {
if(t1 == null && t2 == null){
return null;
}
if(t1 == null){
return t2;
}
if(t2 == null){
return t1;
}
TreeNode res = new TreeNode(t1.val + t2.val);
res.left = mergeTrees(t1.left,t2.left);
res.right = mergeTrees(t1.right,t2.right);
return res;
}
}
<file_sep>/src/LinkedList/RotateRight_61.java
package LinkedList;
public class RotateRight_61 {
public class ListNode {
int val;
ListNode next;
ListNode(int x) { val = x; }
}
    /*61. Rotate List*/
    /*
    head == null and head.next == null are special cases;
    compute the list length, connect the tail back to the head, take k modulo the length, locate the node at position k % count, and break the ring there;
    */
public ListNode rotateRight(ListNode head, int k) {
if(head == null){
return null;
}
if(head.next == null){
return head;
}
int count = 1;
ListNode cur = head;
while(cur.next != null){
count++;
cur = cur.next;
}
cur.next = head;
count = k % count;
cur = head;
while(count > 0){
cur = cur.next;
count--;
}
ListNode p = head;
while(cur.next != head){
cur = cur.next;
p = p.next;
}
ListNode res = p.next;
p.next = null;
return res;
}
}
<file_sep>/src/Math/LeastInterval_621.java
package Math;
import java.util.Arrays;
public class LeastInterval_621 {
    /*621. Task Scheduler*/
    /*
    Two cases:
    when the cooldown n is large enough, the shortest time is (max task count - 1) * round length (n + 1) + the number of tasks that share the max count;
    when n is small, i.e. the formula yields less than the array length, return the array length;
    */
public int leastInterval(char[] tasks, int n) {
int res = 0;
int[] count = new int[26];
for(char c : tasks){
count[c - 'A']++;
}
Arrays.sort(count);
int maxCount = count[count.length - 1];
int len = 0, index = count.length - 1;
while(index >= 0 && count[index] == maxCount){
len++;
index--;
}
return Math.max(tasks.length,(maxCount - 1) * (n + 1) + len);
}
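    /*
     Added usage sketch (not part of the original solution): tasks AAABBB with n = 2;
     the formula above gives (3 - 1) * (2 + 1) + 2 = 8, which exceeds the array length 6.
    */
    public static void main(String[] args) {
        char[] tasks = {'A', 'A', 'A', 'B', 'B', 'B'};
        System.out.println(new LeastInterval_621().leastInterval(tasks, 2)); // should print 8
    }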
}
<file_sep>/Leetcode_Histroy/src/HashMap/MaxPoints_149.java
package HashMap;
import java.util.HashMap;
public class MaxPoints_149 {
    /*149. Max Points on a Line*/
    /*
    Use a HashMap to store each slope and the number of points sharing it;
    note that a slope of 0 can appear as both +0.0 and -0.0;
    handle the case where the divisor is 0;
    duplicate points are counted separately;
    */
public int maxPoints(int[][] points) {
if(points.length == 0 || points.length == 1){
return points.length;
}
HashMap<Double,Integer> map = new HashMap<>();
int res = 0, cur = 1, same = 0;
for(int i = 0; i < points.length - 1; i++){
for(int j = i + 1; j < points.length; j++){
double d;
if(points[i][0] - points[j][0] == 0 && points[i][1] - points[j][1] == 0){
same++;
}else{
if(points[i][0] == points[j][0]){
d = 0;
}else if(points[i][1] - points[j][1] != 0){
d = (double)(points[i][0] - points[j][0]) / (points[i][1] - points[j][1]);
}else{
d = points[i][1];
}
if(!map.containsKey(d)){
map.put(d,2);
}else{
map.put(d,map.get(d) + 1);
}
}
}
for(Double d : map.keySet()){
if(map.get(d) > cur){
cur = map.get(d);
}
}
res = Math.max(res,cur + same);
cur = 1;
same = 0;
map.clear();
}
return res;
}
}
<file_sep>/src/DP/NumTrees_96.java
package DP;
public class NumTrees_96 {
    /*96. Unique Binary Search Trees*/
    /*
    Plain recursion times out, so use dynamic programming;
    the base states 0 and 1 each have exactly one arrangement;
    fill the table iteratively: each state is the sum over all roots of the product of the left-segment and right-segment counts;
    */
public int numTrees(int n) {
if(n == 0){
return 0;
}
int[] res = new int[n + 1];
res[0] = 1;
res[1] = 1;
for(int i = 2; i < res.length; i++){
for(int k = 1; k <= i; k++){
res[i] += res[k - 1] * res[i - k];
}
}
return res[n];
}
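    /*
     Added usage sketch (not part of the original solution): n = 3 has the Catalan
     number C(3) = 5 distinct BSTs, which the DP table above should reproduce.
    */
    public static void main(String[] args) {
        System.out.println(new NumTrees_96().numTrees(3)); // should print 5
    }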
}
<file_sep>/Leetcode_Histroy/src/List/CopyRandomList_138.java
package LinkedList;
public class CopyRandomList_138 {
class Node {
public int val;
public Node next;
public Node random;
public Node() {}
public Node(int _val,Node _next,Node _random) {
val = _val;
next = _next;
random = _random;
}
}
    /*138. Copy List with Random Pointer*/
    /*
    Three passes: the first pass copies every node and inserts the copy right after the original;
    the second pass copies the random pointers;
    the third pass splits the old and new lists apart;
    */
public Node copyRandomList(Node head) {
if(head == null){
return null;
}
Node cur = head;
while(cur != null){
Node copyNode = new Node();
copyNode.val = cur.val;
copyNode.next = cur.next;
cur.next = copyNode;
cur = cur.next.next;
}
cur = head;
while(cur != null){
if(cur.random != null){
cur.next.random = cur.random.next;
}
cur = cur.next.next;
}
cur = head;
Node res = new Node();
Node p = res;
while(cur != null){
p.next = cur.next;
cur.next = p.next.next;
cur = cur.next;
p = p.next;
}
return res.next;
}
}
<file_sep>/src/AndCheck/FindCircleNum_547.java
package AndCheck;
public class FindCircleNum_547 {
    /*547. Friend Circles*/
    /*
    A classic union-find problem;
    start with the circle count equal to the number of people, traverse the matrix, and whenever M[i][j] == 1 and the two people belong to different circles, decrement the count;
    */
public int findCircleNum(int[][] M) {
int len = M.length;
if(len == 0){
return 0;
}
int[] nums = new int[len];
for(int i = 0; i < len; i++){
nums[i] = i;
}
int count = len;
for(int i = 0; i < len; i++){
for(int j = i + 1; j < len; j++){
if(M[i][j] == 1){
int fr = find(nums,i);
int sr = find(nums,j);
if(fr != sr){
nums[sr] = fr;
count--;
}
}
}
}
return count;
}
public int find(int[] nums, int x){
while(x != nums[x]){
x = nums[x];
}
return x;
}
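    /*
     Added usage sketch (not part of the original solution): the friendship matrix from the
     problem example; persons 0 and 1 are friends, person 2 is alone, so there are 2 circles.
    */
    public static void main(String[] args) {
        int[][] M = {{1, 1, 0}, {1, 1, 0}, {0, 0, 1}};
        System.out.println(new FindCircleNum_547().findCircleNum(M)); // should print 2
    }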
}
<file_sep>/Leetcode_Histroy/src/HashMap/groupAnagrams_49.java
package HashMap;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
public class groupAnagrams_49 {
    /*49. Group Anagrams*/
    /*
    A HashMap keyed by the sorted form of each string, mapping to the result list for that group;
    */
public List<List<String>> groupAnagrams(String[] strs) {
List<List<String>> res = new ArrayList<>();
if(strs.length == 0){
return res;
}
HashMap<String,ArrayList<String>> map = new HashMap<>();
for(String s : strs){
char[] chars = s.toCharArray();
Arrays.sort(chars);
String temp = new String(chars);
if(!map.containsKey(temp)){
map.put(temp,new ArrayList());
}
map.get(temp).add(s);
}
for(String s : map.keySet()){
res.add(map.get(s));
}
return res;
}
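    /*
     Added usage sketch (not part of the original solution): the example input from the
     problem; note that group order depends on HashMap iteration and may vary.
    */
    public static void main(String[] args) {
        String[] strs = {"eat", "tea", "tan", "ate", "nat", "bat"};
        System.out.println(new groupAnagrams_49().groupAnagrams(strs));
    }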
}
<file_sep>/DangDangIMSystem_WEB/src/main/java/com/per/iroha/netty/WebSocketHandler.java
package com.per.iroha.netty;
import com.alibaba.fastjson.JSON;
import com.per.iroha.model.User;
import com.per.iroha.model.WebSocketMessage;
import com.per.iroha.redis.RedisMq;
import com.per.iroha.service.MessageService;
import com.per.iroha.service.impl.MessageServiceImpl;
import io.netty.buffer.ByteBuf;
import io.netty.buffer.Unpooled;
import io.netty.channel.*;
import io.netty.handler.codec.http.*;
import io.netty.handler.codec.http.cookie.Cookie;
import io.netty.handler.codec.http.cookie.ServerCookieDecoder;
import io.netty.handler.codec.http.websocketx.*;
import io.netty.handler.timeout.IdleState;
import io.netty.handler.timeout.IdleStateEvent;
import io.netty.util.CharsetUtil;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.stereotype.Controller;
import java.util.Set;
@Controller
public class WebSocketHandler extends SimpleChannelInboundHandler<FullHttpRequest> {
private MessageService messageService = new MessageServiceImpl();
private RedisMq redisMq = new RedisMq();
private WebSocketServerHandshaker handshaker;
private User user;
private static final Logger logger = LoggerFactory.getLogger(WebSocketHandler.class);
@Override
protected void channelRead0(ChannelHandlerContext channelHandlerContext, FullHttpRequest request) throws Exception {
        //WebSocket is established on top of a single TCP/HTTP handshake
handHttpRequest(channelHandlerContext,request);
        //asynchronously write the user info into the cache
channelHandlerContext.channel().eventLoop().execute(new Runnable() {
@Override
public void run() {
String userJ = redisMq.pop();
user = JSON.parseObject(userJ,User.class);
if(user != null){
messageService.bindSession(user,channelHandlerContext.channel());
}else{
Set<Cookie> cookies = ServerCookieDecoder.LAX.decode(request.headers().toString());
for(Cookie cookie : cookies){
if(cookie.name().equals("userId")){
user = messageService.getCookie(cookie.value());
break;
}
}
messageService.bindSession(user,channelHandlerContext.channel());
}
WebSocketMessage CountMessage = new WebSocketMessage();
CountMessage.setErr(1);
CountMessage.setMessage("本月签到次数:" + messageService.getCheckCount(messageService.getSession(channelHandlerContext.channel()).getUserId()));
channelHandlerContext.channel().writeAndFlush(new TextWebSocketFrame(JSON.toJSONString(CountMessage)));
}
});
}
@Override
public void channelActive(ChannelHandlerContext ctx) throws Exception {
        //when the channel connection opens, add it to the global ChannelGroup
NettyConfig.globalChannels.add(ctx.channel());
super.channelActive(ctx);
}
@Override
public void channelInactive(ChannelHandlerContext ctx) throws Exception {
        //clear the user session when the connection is closed
messageService.unbindSession(ctx.channel());
super.channelInactive(ctx);
}
@Override
public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) throws Exception {
cause.printStackTrace();
ctx.close();
}
@Override
public void userEventTriggered(ChannelHandlerContext ctx, Object evt) throws Exception {
if(evt instanceof IdleStateEvent){
IdleState state = ((IdleStateEvent) evt).state();
if (state == IdleState.READER_IDLE) {
logger.info("在规定时间内没有收到客户端的上行数据, 主动断开连接" );
WebSocketMessage CountMessage = new WebSocketMessage();
CountMessage.setErr(1);
CountMessage.setMessage("长时间未进行操作已与服务器断开连接,请刷新~");
ctx.channel().writeAndFlush(new TextWebSocketFrame(JSON.toJSONString(CountMessage)));
ctx.channel().close();
}
}else{
super.userEventTriggered(ctx, evt);
}
}
    // handle the HTTP request; a WebSocket opening handshake always starts with an HTTP request
private void handHttpRequest(ChannelHandlerContext ctx, FullHttpRequest request){
if(!request.decoderResult().isSuccess() || !("websocket".equals(request.headers().get("Upgrade")))){
sendHttpResponse(ctx, new DefaultFullHttpResponse(HttpVersion.HTTP_1_1, HttpResponseStatus.BAD_REQUEST));
return;
}
WebSocketServerHandshakerFactory factory = new WebSocketServerHandshakerFactory("ws://" + NettyConfig.NETTY_HOST + NettyConfig.NETTY_PORT, null, false);
handshaker = factory.newHandshaker(request);
if(handshaker == null){
WebSocketServerHandshakerFactory.sendUnsupportedVersionResponse(ctx.channel());
} else {
handshaker.handshake(ctx.channel(), request);
}
}
    // respond to requests that are not a WebSocket opening handshake
private void sendHttpResponse(ChannelHandlerContext ctx, DefaultFullHttpResponse res) {
if(res.status().code() != 200){
ByteBuf buf = Unpooled.copiedBuffer(res.status().toString(), CharsetUtil.UTF_8);
res.content().writeBytes(buf);
buf.release();
}
ChannelFuture f = ctx.channel().writeAndFlush(res);
if(res.status().code() != 200){
f.addListener(ChannelFutureListener.CLOSE);
}
}
}
<file_sep>/src/String/LongestCommonPrefix_14.java
package String;
public class LongestCommonPrefix_14 {
    /*14. Longest Common Prefix*/
    /*
    Horizontal scan: compare the prefix column by column across every string in the array;
    other approaches: binary search on the prefix length;
    divide and conquer, computing the longest prefix of subarrays and merging the results;
    */
public String longestCommonPrefix(String[] strs) {
if(strs.length == 0 || strs[0].length() == 0){
return "";
}
int index = -1;
boolean flag = true;
while(flag){
index++;
if(index >= strs[0].length()){
break;
}
char c = strs[0].charAt(index);
for(int i = 1; i < strs.length; i++){
if(index >= strs[i].length() || c != strs[i].charAt(index)){
flag = false;
break;
}
}
}
return strs[0].substring(0,index);
}
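    /*
     Added usage sketch (not part of the original solution): the example input from the
     problem; the horizontal scan above should stop at the first mismatching column.
    */
    public static void main(String[] args) {
        String[] strs = {"flower", "flow", "flight"};
        System.out.println(new LongestCommonPrefix_14().longestCommonPrefix(strs)); // should print fl
    }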
}
<file_sep>/src/Sort/FindKthLargest_215.java
package Sort;
public class FindKthLargest_215 {
    /*215. Kth Largest Element in an Array*/
    /*
    Find the K-th largest element in an unsorted array;
    "K-th largest" usually means heap sort or quickselect;
    quickselect runs in O(N) time on average;
    */
public int findKthLargest(int[] nums, int k) {
int left = 0, right = nums.length - 1;
while(true){
int temp = nums[left], i = left, j = right;
while(i < j){
while(i < j && nums[j] >= temp){
j--;
}
while(i < j && nums[i] <= temp){
i++;
}
int t = nums[i];
nums[i] = nums[j];
nums[j] = t;
}
nums[left] = nums[i];
nums[i] = temp;
if(nums.length - k > i){
left = i + 1;
}else if(nums.length - k == i){
return nums[i];
}else{
right = i - 1;
}
}
}
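    /*
     Added usage sketch (not part of the original solution): the example array from the
     problem; quickselect above should place the target index without a full sort.
    */
    public static void main(String[] args) {
        int[] nums = {3, 2, 1, 5, 6, 4};
        System.out.println(new FindKthLargest_215().findKthLargest(nums, 2)); // should print 5
    }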
}
<file_sep>/DangDangIMSystem_WEB/src/main/java/com/per/iroha/model/Group.java
package com.per.iroha.model;
import lombok.AllArgsConstructor;
import lombok.Data;
import lombok.NoArgsConstructor;
import java.util.Set;
@Data
@AllArgsConstructor
@NoArgsConstructor
public class Group {
private String groupName;
@Override
public String toString() {
return "Group{" +
"groupName='" + groupName + '\'' +
", userList=" + userList +
'}';
}
private Set<Integer> userList;
}
<file_sep>/DangDangIMSystem_WEB/src/main/java/com/per/iroha/service/impl/UserServiceImpl.java
package com.per.iroha.service.impl;
import com.per.iroha.mapper.UserMapper;
import com.per.iroha.model.User;
import com.per.iroha.service.UserService;
import com.per.iroha.util.StringUtils;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;
import redis.clients.jedis.Jedis;
import java.util.Calendar;
@Service
public class UserServiceImpl implements UserService {
@Autowired
private UserMapper userMapper;
Jedis jedis = new Jedis();
@Override
public void register(User user) {
userMapper.userRegister(user);
}
@Override
public boolean md5Password(User user, int salt) {
String password = userMapper.findByUsername(user.getUsername()).getPassword();
String md5 = StringUtils.getMD5Str(password + salt,null);
return md5.equals(user.getPassword());
}
@Override
public User findByName(String username) {
return userMapper.findByUsername(username);
}
@Override
public User findById(int userId) {
return userMapper.findByUserId(userId);
}
@Override
public boolean hasUser(String username) {
User user = userMapper.findByUsername(username);
return user != null;
}
    //daily check-in
@Override
public void checkIn(int userId) {
        Calendar cal = Calendar.getInstance();//get the current time
int month = cal.get(Calendar.MONTH) + 1;
        int day = cal.get(Calendar.DATE);//day of month
// jedis.set(userId + "checkInTableOf" + month,"0","nx","ex",60 * 60 * 24 * 31);
jedis.setbit(userId + "checkInTableOf" + month,day,"1");
// System.out.println(jedis.get(userId + "checkInTableOf" + month));
}
@Override
public long getTable(int userId, int month) {
return jedis.bitcount(userId + "checkInTableOf" + month);
}
}
<file_sep>/src/Tree/RecoverTree_99.java
package Tree;
import java.util.Stack;
public class RecoverTree_99 {
public class TreeNode {
int val;
TreeNode left;
TreeNode right;
TreeNode(int x) { val = x; }
}
    /*99. Recover Binary Search Tree*/
    /*
    An extension of problem 98: recovering the tree means finding the two misplaced nodes in the in-order sequence;
    the first node is the one just before a descending pair, and the second node is the later node of the second descending pair found after the first;
    if no second descending pair is found after the full in-order traversal, swap the first node with its successor; otherwise swap the two descending nodes;
    the test cases again contain Integer.MIN_VALUE, which needs to be filtered out;
    */
public void recoverTree(TreeNode root) {
if(root == null){
return;
}
TreeNode cur = new TreeNode(Integer.MIN_VALUE);
TreeNode p1 = null;
TreeNode next = null;
boolean flag = true;
boolean first = true;
Stack<TreeNode> stack = new Stack<>();
while(!stack.isEmpty() || root != null){
while(root != null){
stack.push(root);
root = root.left;
}
root = stack.pop();
if(root.val <= cur.val && !first){
if(p1 == null){
p1 = cur;
next = root;
}else{
flag = false;
break;
}
}
cur = root;
root = root.right;
if(first){
first = false;
}
}
if(!flag){
int temp = p1.val;
p1.val = root.val;
root.val = temp;
}else{
int temp = p1.val;
p1.val = next.val;
next.val = temp;
}
}
}
<file_sep>/src/Tree/DiameterOfBinaryTree_543.java
package Tree;
public class DiameterOfBinaryTree_543 {
public class TreeNode {
int val;
TreeNode left;
TreeNode right;
TreeNode(int x) { val = x; }
}
    /*543. Diameter of Binary Tree*/
    /*
    At every recursion check whether the sum of the left and right subtree depths is the largest so far, and return the current node's depth;
    finally check the sum of the root's left and right depths as well and return the result;
    */
public int res = 0;
public int diameterOfBinaryTree(TreeNode root) {
if(root == null){
return 0;
}
int left = fun(root.left);
int right = fun(root.right);
res = Math.max(res,left + right);
return res;
}
public int fun(TreeNode root){
if(root == null){
return 0;
}
int left = fun(root.left);
int right = fun(root.right);
int deep = Math.max(left,right);
res = Math.max(res,left + right);
return 1 + deep;
}
}
<file_sep>/src/String/CountAndSay_38.java
package String;
public class CountAndSay_38 {
    /*38. Count and Say*/
    /*
    First understand the problem: each round reads off the previous string as counts of consecutive repeated characters;
    optimization: tail recursion
    placing the recursive call in the final return greatly reduces the time and overhead of the recursion;
    */
public String res = "";
public String countAndSay(int n) {
if(n == 0){
return res;
}
return fun("1",n);
}
public String fun(String s, int k){
if(k == 1){
return s;
}else{
int i = 0;
StringBuilder sb = new StringBuilder();
while(i < s.length()){
int count = 1;
while(i < s.length() - 1 && s.charAt(i) == s.charAt(i + 1)){
count++;
i++;
}
sb.append(count);
sb.append(s.charAt(i));
i++;
}
s = sb.toString();
}
return fun(s,k - 1);
}
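    /*
     Added usage sketch (not part of the original solution): the fourth term of the
     count-and-say sequence, built by the tail-recursive fun above ("1" -> "11" -> "21" -> "1211").
    */
    public static void main(String[] args) {
        System.out.println(new CountAndSay_38().countAndSay(4)); // should print 1211
    }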
}
<file_sep>/src/String/RomanToInt_13.java
package String;
import java.util.HashMap;
public class RomanToInt_13 {
    /*13. Roman to Integer*/
    /*
    A HashMap stores the mapping from Roman numerals to integers;
    scan the string once, distinguishing two cases:
    1. the special case where two Roman characters together represent one value
    2. a single Roman character representing one value
    */
public int romanToInt(String s) {
HashMap<String,Integer> map = new HashMap<>();
map.put("M",1000);
map.put("CM",900);
map.put("D",500);
map.put("CD",400);
map.put("C",100);
map.put("XC",90);
map.put("L",50);
map.put("XL",40);
map.put("X",10);
map.put("IX",9);
map.put("V",5);
map.put("IV",4);
map.put("I",1);
int res = 0, index = 0;
while(index < s.length()){
if(index + 1 < s.length() && map.containsKey(s.substring(index,index + 2))){
res += map.get(s.substring(index,index + 2));
index += 2;
}else{
res += map.get(s.substring(index,index + 1));
index += 1;
}
}
return res;
}
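    /*
     Added usage sketch (not part of the original solution): "MCMXCIV" exercises all three
     two-character special cases (CM, XC, IV) handled by the map lookup above.
    */
    public static void main(String[] args) {
        System.out.println(new RomanToInt_13().romanToInt("MCMXCIV")); // should print 1994
    }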
}
<file_sep>/DangDangIMSystem_WEB/src/main/java/com/per/iroha/mapper/UserMapperImpl.java
package com.per.iroha.mapper;
import com.per.iroha.model.Advice;
import com.per.iroha.model.Group;
import java.sql.*;
public class UserMapperImpl {
private static final String url = "jdbc:mysql://127.0.0.1:3306/dangdangim?useUnicode=true&characterEncoding=utf-8&serverTimezone=UTC";
private static final String username = "root";
private static final String password = "<PASSWORD>";
private static final String driver = "com.mysql.cj.jdbc.Driver";
private Connection conn = null;
public int getUserId(String name) throws SQLException {
PreparedStatement pst;
ResultSet rs;
conn = conn();
String sql1 = "select userId from `user` where realName=?";
String sql2 = "select userId from `user` where username=?";
pst = conn.prepareStatement(sql1);
pst.setString(1, name);
rs = pst.executeQuery();
if(rs.next()){
return rs.getInt("userId");
}else{
pst = conn.prepareStatement(sql2);
pst.setString(1, name);
rs = pst.executeQuery();
if(rs.next()){
return rs.getInt("userId");
}
}
close();
return 0;
}
public void saveAdvice(Advice advice) throws SQLException {
PreparedStatement pst;
conn = conn();
String sql = "insert into advice (`date`,fromUsername,advice) values(?,?,?)";
pst = conn.prepareStatement(sql);
pst.setString(1,advice.getDate());
pst.setString(2,advice.getFromUsername());
pst.setString(3,advice.getAdvice());
pst.executeUpdate();
close();
}
/**
     * Open a database connection
* @return
*/
private Connection conn() {
Connection conn = null;
try {
            Class.forName(driver); //load the database driver
try {
                conn = DriverManager.getConnection(url, username, password); //connect to the database
} catch (SQLException e) {
e.printStackTrace();
}
} catch (ClassNotFoundException e) {
e.printStackTrace();
}
return conn;
}
/**
     * Close the database connection
* @return
*/
private void close() {
if(conn != null) {
try {
                conn.close(); //close the database connection
} catch (SQLException e) {
e.printStackTrace();
}
}
}
}
<file_sep>/DangDangIMSystem_WEB/src/main/java/com/per/iroha/service/UserService.java
package com.per.iroha.service;
import com.per.iroha.model.User;
public interface UserService {
void register(User user);
boolean md5Password(User user,int salt);
User findByName(String username);
User findById(int userId);
boolean hasUser(String username);
void checkIn(int userId);
long getTable(int userId,int month);
}
| 754ed65a563ccdbabfbe37de7f63325ce2b293af | ["Java"] | 33 | Java | Yiroha/Leetcode_History | 8c967cb25b2e101b1ba25888d40345d5bbce2ee1 | 15fb8c3819dba8df5b8c5ba58d640d277adcb1f1 | refs/heads/master |
<repo_name>arcterex/frontend<file_sep>/src/libraries/adapters/DatabaseProvider.php
<?php
class DatabaseProvider
{
public static function init($type, $opts = null)
{
switch($type)
{
case 'simpleDb':
return new DatabaseProviderSimpleDb($opts);
break;
}
    throw new Exception('DataProvider does not exist', 404);
//throw new DataProviderDoesNotExistException();
}
}
function getDb($type, $opts)
{
static $database;
if($database)
return $database;
$database = DatabaseProvider::init($type, $opts);
return $database;
}
<file_sep>/src/libraries/routes.php
<?php
getRoute()->get('/', array('GeneralController', 'home'));
getRoute()->get('/photos', array('PhotosController', 'home'));
<file_sep>/src/libraries/adapters/DatabaseProviderSimpleDb.php
<?php
class DatabaseProviderSimpleDb implements DatabaseInterface
{
public function __construct($opts)
{
$this->db = new AmazonSDB($opts->awsKey, $opts->awsSecret);
}
public function getPhotos()
{
$res = $this->db->select('select * from photos');
$photos = array();
foreach($res->body->SelectResult->Item as $photo)
{
$photos[] = $this->normalizePhoto($photo);
}
return $photos;
}
private function normalizePhoto($raw)
{
$photo = array('id' => (string)$raw->Name);
foreach($raw->Attribute as $item)
{
$name = (string)$item->Name;
$value = (string)$item->Value;
$photo[$name] = $value;
}
return $photo;
}
}
<file_sep>/src/libraries/controllers/ApiController.php
<?php
class ApiController extends BaseController
{
public static function photos()
{
$db = getDb(getConfig()->get('systems')->database, getConfig()->get('credentials'));
$photos = $db->getPhotos();
return self::success('yay', $photos);
}
}
<file_sep>/src/configs/defaults.ini
[paths]
adapters = /opme_source/src/libraries/adapters
controllers = /opme_source/src/libraries/controllers
external = /opme_source/src/libraries/external
libraries = /opme_source/src/libraries
models = /opme_source/src/models
photos = /opme_source/src/html/photos
[systems]
database=simpleDb
; DO NOT STORE SECRETS HERE
; PLACE THEM IN /opme_source/src/configs/override/defaults.ini
;[credentials]
;awsKey=your_aws_key
;awsSecret=your_aws_secret
<file_sep>/src/libraries/adapters/Database.php
<?php
interface DatabaseInterface
{
public function getPhotos();
}
<file_sep>/Readme.markdown
OpenPhoto
=======================
#### OpenPhoto, a photo service for the masses
----------------------------------------
### What is OpenPhoto?
#### Think of OpenPhoto as a WordPress for photo sharing and management ####
OpenPhoto is a photo sharing service focused on letting its users retain 100% ownership of their photos, tags and comments.
Unlike current photo services, OpenPhoto works on a model where you (the user) grant OpenPhoto (the service) access to your photos.
This means you can revoke access at any time and still retain all of the photos, tags and comments you've collected.
Imagine never having to migrate your photos from one service to another.
This is the way "the cloud" was always meant to be.
----------------------------------------
### Why should I use OpenPhoto?
While OpenPhoto functions like many existing services it's drastically different for several reasons.
1. **Ownership**
Users can specify where their photos are stored. By default they are seamlessly stored in your [Amazon S3][s3] bucket.
1. **Built in backups**
Since you upload photos to your own [Amazon S3][s3] bucket it's like uploading and archiving your photos in one step.
1. **Portability**
Easily start off by signing up for a hosted OpenPhoto account and later switch to hosting the software yourself. There's no need to transfer your photos somewhere else since it's federated. It's plug and play.
1. **Durability**
Your photos are not tied to any particular photo service. Because everything is open you can write your own web interface for your photos, choose between OpenPhoto hosting providers or install the OpenPhoto software on your own server.
1. **Community**
New features will be entirely community driven. Features with the most votes will get top priority in getting implemented. If you want a feature that doesn't have many votes you can implement it yourself and issue a pull request.
----------------------------------------
### What if I use Flickr or Smugmug?
If you're using Flickr or Smugmug you should consider switching to OpenPhoto.
The more photos and time you invest in a proprietary photo sharing service, the more devastated you're going to be once it shuts down or no longer appeals to you.
There are importing tools available to make the switch easy.
----------------------------------------
[aws]: http://aws.amazon.com/
[s3]: http://aws.amazon.com/s3/
[simpledb]: http://aws.amazon.com/simpledb/
<file_sep>/src/libraries/controllers/PhotosController.php
<?php
class PhotosController extends BaseController
{
public static function home()
{
$photos = getApi()->invoke('/photos.json');
echo '<ul>';
foreach($photos['result'] as $photo)
{
echo "<li>Photo {$photo['id']} has url {$photo['urlOriginal']}</li>";
}
echo '</ul>';
}
}
<file_sep>/src/libraries/models/Photo.php
<?php
class Photo
{
private $id;
public function __construct($id)
{
$this->id = $id;
}
}
<file_sep>/src/libraries/controllers/GeneralController.php
<?php
class GeneralController extends BaseController
{
public static function home()
{
echo 'Welcome to OpenPhoto';
}
}
<file_sep>/src/html/index.php
<?php
/*
* Author: <NAME> <<EMAIL>>
* Front controller for OpenPhoto.
* This file takes all requests and dispatches them to the appropriate controller.
*/
$basePath = dirname(dirname(__FILE__));
$epiPath = "{$basePath}/libraries/external/epi";
require "{$epiPath}/Epi.php";
Epi::setPath('base', $epiPath);
Epi::setPath('config', "{$basePath}/configs");
//Epi::setPath('view', "{$basePath}/views");
//Epi::setSetting('exceptions', true);
Epi::init('api','config','route');
getConfig()->load('defaults.ini');
getConfig()->load('override/defaults.ini');
// load all dependencies
require getConfig()->get('paths')->libraries . '/dependencies.php';
getRoute()->run();
| a998e43569853f30a0e3fe3f88655c19c4dbc52e | ["Markdown", "PHP", "INI"] | 11 | PHP | arcterex/frontend | cb8dd1047e5f63925142e0bd24203d3db182dcb9 | 9ca3a02f0537fbcd2f091e4d71c2087c32eadd05 | refs/heads/master |
<file_sep>package sdatcrm
/*
R Get("/api/extraItemsInInvoice/") -> Get all extra items in this invoice for which an adHoc order should be created
*/
import (
"fmt"
"net/http"
"appengine"
)
const EXTRA_ITEMS_IN_INVOICE_API = "/api/extraItemsInInvoice/"
func init() {
http.Handle(EXTRA_ITEMS_IN_INVOICE_API, gaeHandler(extraItemsInInvoiceHandler))
return
}
func extraItemsInInvoiceHandler(c appengine.Context, w http.ResponseWriter, r *http.Request) (interface{}, error) {
pid := r.URL.Path[len(EXTRA_ITEMS_IN_INVOICE_API):]
if len(pid) == 0 {
switch r.Method {
case "POST":
invoice, err := decodeInvoice(r.Body)
if err != nil {
return nil, err
}
return FindExtraItemsInInvoice(c, invoice)
default:
return nil, fmt.Errorf(r.Method + " on " + r.URL.Path + " not implemented")
}
}
return nil, nil
}
func FindExtraItemsInInvoice(c appengine.Context, invoice *Invoice) ([]Item, error) {
pendingOrders, err := getPendingOrdersForPurchaser(c, invoice.PurchaserId)
if err != nil {
return nil, err
}
var clubbedPendingItems []Item
for _, o := range pendingOrders {
for _, i := range o.PendingItems {
foundSameItem := false
			for k := range clubbedPendingItems {
				if clubbedPendingItems[k].equals(i) {
					foundSameItem = true
					clubbedPendingItems[k].Qty += i.Qty
					break
				}
			}
if !foundSameItem {
clubbedPendingItems = append(clubbedPendingItems, i)
}
}
}
var invoicedExtraItems []Item
for _, extraItem := range invoice.Items {
var newItem = extraItem
for _, i := range clubbedPendingItems {
if extraItem.equals(i) {
newItem.Qty -= i.Qty
break
}
}
invoicedExtraItems = append(invoicedExtraItems, newItem)
}
var prunedExtraItems []Item
for _, e := range invoicedExtraItems {
if e.Qty != 0 {
prunedExtraItems = append(prunedExtraItems, e)
}
if e.Qty < 0 {
return nil, fmt.Errorf("We should not have reached here. How can an extra item be negative. Its a bug.") // Defensive Programming.
}
}
return prunedExtraItems, nil
}
<file_sep>package sdatcrm
import (
"time"
)
type TCDieItem struct {
PelletSize string `json:"PelletSize"`
BoreSize string `json:"BoreSize"`
CaseType string `json:"CaseType"`
CaseSize string `json:"CaseSize"`
}
type MiscItem struct {
Name string `json:"Name"`
Unit string `json:"Unit"`
}
type SKU struct {
TCDieItem
MiscItem
Rate float64 `json:"Rate"`
Type string `json:"Type"` //TCD or MSC
CRemarks string `json:"CRemarks"`
}
func (a Item) equals(b Item) bool {
if a.PelletSize != b.PelletSize {
return false
}
if a.BoreSize != b.BoreSize {
return false
}
if a.CaseType != b.CaseType {
return false
}
if a.CaseSize != b.CaseSize {
return false
}
if a.Name != b.Name {
return false
}
if a.Unit != b.Unit {
return false
}
if a.Rate != b.Rate {
return false
}
if a.Type != b.Type {
return false
}
if a.CRemarks != b.CRemarks {
return false
}
return true
}
type Item struct {
SKU
Qty int64 `json:"Qty"`
}
type Address struct {
DeliveryAddress string `json:"DeliveryAddress"`
City string `json:"City"`
State string `json:"State"`
Pincode string `json:"Pincode"`
EnvelopePhoneNumbers CSL `json:"EnvelopePhoneNumbers"`
}
type Purchaser struct {
Address
Id PurchaserId `json:"Id" datastore:"-"`
SKUs []SKU `json:"SKUs"`
Created time.Time `json:"Created"`
Name string `json:"Name"`
DispatchEmails CSL `json:"DispatchEmails"`
DefaultTaxPercentage float64 `json:"DefaultTaxPercentage"`
FORMCEmails CSL `json:"FORMCEmails"`
TinNumber string `json:"TinNumber"`
BillingAddress string `json:"BillingAddress"`
SMSPhoneNumbers CSL `json:"SMSPhoneNumbers"`
MDPhoneNumbers CSL `json:"MDPhoneNumbers"`
CreditDays int64 `json:"CreditDays"`
CRemarks string `json:"CRemarks"`
}
type Order struct {
Id OrderId `json:"Id" datastore:"-"`
Created time.Time `json:"Created"`
Date time.Time `json:"Date"`
TotalQty int64 `json:"TotalQty"`
PurchaserId PurchaserId `json:"PurchaserId"`
SupplierName string `json:"SupplierName"`
Number string `json:"Number"`
IsComplete bool `json:"IsComplete"`
InvoicesId []InvoiceId `json:"InvoicesId"`
OrderedItems []Item `json:"OrderedItems"`
PendingItems []Item `json:"PendingItems"`
DispatchedItems []Item `json:"DispatchedItems"`
PuntedItems []Item `json:"PuntedItems"`
}
type Invoice struct {
Id InvoiceId `json:"Id" datastore:"-"`
Items []Item `json:"Items"`
Created time.Time `json:"Created"`
Date time.Time `json:"Date"`
TotalQty int64 `json:"TotalQty"`
PurchaserId PurchaserId `json:"PurchaserId"`
SupplierName string `json:"SupplierName"`
Number string `json:"Number"`
PRemarks string `json:"PRemarks"`
OrdersId []OrderId `json:"OrdersId"`
DoNotMoveStock bool `json:"DoNotMoveStock"`
GoodsValue int64 `json:"GoodsValue"`
DiscountAmount int64 `json:"DiscountAmount"`
TaxPercentage float64 `json:"TaxPercentage"`
TaxAmount int64 `json:"TaxAmount"`
CourierCharges int64 `json:"CourierCharges"`
InvoiceAmount int64 `json:"InvoiceAmount"`
}
<file_sep>package sdatcrm
/*
C Post("/api/invoices/") -> Create a new invoice
R Get("/api/invoices/id/") -> Get the invoice with this id
U Put("/api/invoices/id/") -> Resave this invoice with id
D Delete("/api/invoices/id/") -> Delete invoice having id
Q Get("/api/invoices/") -> Get all invoices
*/
import (
"encoding/json"
"fmt"
"html/template"
"io"
"net/http"
"strconv"
"time"
"appengine"
"appengine/datastore"
)
const INVOICES_API = "/api/invoices/"
type InvoiceId int64
func init() {
http.Handle(INVOICES_API, gaeHandler(invoiceHandler))
http.HandleFunc("/invoice/new/", newInvoicePageHandler)
http.HandleFunc("/invoice/", editInvoicePageHandler)
http.HandleFunc("/invoices/", allInvoicePageHandler)
}
func invoiceHandler(c appengine.Context, w http.ResponseWriter, r *http.Request) (interface{}, error) {
id := r.URL.Path[len(INVOICES_API):]
c.Infof("Received invoice id %v", id)
if len(id) > 0 {
switch r.Method {
case "GET":
id64, err := strconv.ParseInt(id, 10, 64)
if err != nil {
return nil, err
}
invoice := new(Invoice)
invoice.Id = InvoiceId(id64)
return invoice.get(c)
default:
return nil, fmt.Errorf(r.Method + " on " + r.URL.Path + " not implemented")
}
} else {
switch r.Method {
case "POST":
return invoiceSaveEntryPoint(c, r)
case "GET":
return getAllInvoices(c)
default:
return nil, fmt.Errorf(r.Method + " on " + r.URL.Path + " not implemented")
}
}
return nil, nil
}
func decodeInvoice(r io.ReadCloser) (*Invoice, error) {
defer r.Close()
var invoice Invoice
err := json.NewDecoder(r).Decode(&invoice)
return &invoice, err
}
func (o *Invoice) get(c appengine.Context) (*Invoice, error) {
err := datastore.Get(c, o.key(c), o)
if err != nil {
return nil, err
}
return o, nil
}
func (o *Invoice) save(c appengine.Context) (*Invoice, error) {
k, err := datastore.Put(c, o.key(c), o)
if err != nil {
return nil, err
}
o.Id = InvoiceId(k.IntID())
return o, nil
}
func defaultInvoiceList(c appengine.Context) *datastore.Key {
ancestorKey := datastore.NewKey(c, "ANCESTOR_KEY", BranchName(c), 0, nil)
return datastore.NewKey(c, "InvoiceList", "default", 0, ancestorKey)
}
func (o *Invoice) key(c appengine.Context) *datastore.Key {
if o.Id == 0 {
o.Created = time.Now()
return datastore.NewIncompleteKey(c, "Invoice", defaultInvoiceList(c))
}
return datastore.NewKey(c, "Invoice", "", int64(o.Id), defaultInvoiceList(c))
}
func getAllInvoices(c appengine.Context) ([]Invoice, error) {
invoices := []Invoice{}
ks, err := datastore.NewQuery("Invoice").Ancestor(defaultInvoiceList(c)).Order("Created").GetAll(c, &invoices)
if err != nil {
return nil, err
}
for i := 0; i < len(invoices); i++ {
invoices[i].Id = InvoiceId(ks[i].IntID())
}
return invoices, nil
}
func newInvoicePageHandler(w http.ResponseWriter, r *http.Request) {
t := template.Must(template.ParseFiles("templates/invoice.html"))
var data interface{}
data = struct{ Nature string }{"NEW"}
if t == nil {
t = PAGE_NOT_FOUND_TEMPLATE
data = nil
}
if err := t.Execute(w, data); err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
return
}
func editInvoicePageHandler(w http.ResponseWriter, r *http.Request) {
t := template.Must(template.ParseFiles("templates/invoice.html"))
var data interface{}
data = struct{ Nature string }{"EDIT"}
if t == nil {
t = PAGE_NOT_FOUND_TEMPLATE
data = nil
}
if err := t.Execute(w, data); err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
return
}
func allInvoicePageHandler(w http.ResponseWriter, r *http.Request) {
t := template.Must(template.ParseFiles("templates/invoices.html"))
if err := t.Execute(w, nil); err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
return
}
func UpdateRelatedOrdersFieldAndEditTheirPendingItemsListAndMarkOrderCompletion(c appengine.Context, invoice *Invoice) error {
return nil
}
func invoiceSaveEntryPoint(c appengine.Context, r *http.Request) (*Invoice, error) {
invoice, err := decodeInvoice(r.Body)
if err != nil {
return nil, err
}
if invoice.Id == 0 {
return ProcessBrandNewInvoice(c, invoice)
} else {
return ProcessBrandNewInvoice(c, invoice)
//return ProcessExistingInvoice(c, invoice)
}
}
func ProcessBrandNewInvoice(c appengine.Context, invoice *Invoice) (*Invoice, error) {
var newInvoice *Invoice
var err error
err = datastore.RunInTransaction(c, func(c appengine.Context) error {
if err = CreateAdHocOrderIfRequired(c, invoice); err != nil {
return err
}
if err = UpdateRelatedOrdersFieldAndEditTheirPendingItemsListAndMarkOrderCompletion(c, invoice); err != nil {
return err
}
newInvoice, err = invoice.save(c)
return err
}, nil)
if err != nil {
return nil, err
} else {
return newInvoice, nil
}
}
func ProcessExistingInvoice(c appengine.Context, invoice *Invoice) (*Invoice, error) {
return invoice.save(c)
}
func CreateAdHocOrderWithTheseItems(c appengine.Context, items []Item, invoice *Invoice) (*Order, error) {
order := new(Order)
order.PurchaserId = invoice.PurchaserId
order.SupplierName = invoice.SupplierName
order.Date = time.Now()
order.Number = "Telephonic"
for _, i := range items {
order.OrderedItems = append(order.OrderedItems, i)
order.PendingItems = append(order.PendingItems, i)
}
c.Infof("About to save order:%#v", order)
return order.save(c)
}
func CreateAdHocOrderIfRequired(c appengine.Context, invoice *Invoice) error {
// 1. Check if the invoice is being created for some extra items.
// 2. Create an adHoc order for extra items.
// 3. Recheck if invoice is being created for extra items. This time it should not be. Defensive Programming.
extraItems, err := FindExtraItemsInInvoice(c, invoice)
if err != nil {
return err
}
c.Infof("Extra items in invoice of %v: %#v", invoice.PurchaserId, extraItems)
if len(extraItems) > 0 {
o, err := CreateAdHocOrderWithTheseItems(c, extraItems, invoice)
if err != nil {
return err
}
c.Infof("created teh extra order: %#v", o)
}
return nil
}
| c54468143bcc42771b8fb5119730055a9501de4e | ["Go"] | 3 | Go | ashishthedev/sdcrm | f3f83fbd9c9ceb66cac01ae742f8fc0ec63d1493 | f9decc48e05b00f666f9e56cb2902cf0d5f94ebb | refs/heads/master |
<file_sep># Exo_Angular
Training through a set of exercises on Angular
<file_sep>import { Component, OnInit } from '@angular/core';
@Component({
selector: 'app-exo4',
template: `
<article>
<h2>Exercice N°4</h2>
<p>Conception d'un event clic afin que quand j'ai cliquer dessus, j'affiche un message dans la console.</p>
<button (click)="messageConsole()">Clic ici pour voir un message s'afficher dans la console.</button>
</article>
`,
styles: []
})
export class Exo4Component implements OnInit {
messageConsole() : void {
console.log("Ce message est inscrit dans la console pour me permettre de valider mon évènement");
}
constructor() { }
ngOnInit() {
}
}
<file_sep>import { Component, OnInit } from '@angular/core';
import { DatePipe } from '@angular/common';
@Component({
selector: 'app-exo7',
template: `
<article>
<h2>Exercice N°7 - <small>{{ maDate | date: 'dd-MMM-yyyy à HH:mm:ss:SS' }}</small></h2>
<p>Affichange de la date dans le titre de l'exercice sur le clic du bouton</p>
<button type="button" (click)="showDate()">Afficher la date</button>
</article>
`,
styles: []
})
export class Exo7Component implements OnInit {
public maDate = new Date();
showDate() {
this.maDate = new Date();
}
constructor() { }
ngOnInit() {
}
}
<file_sep>import { BrowserModule } from '@angular/platform-browser';
import { NgModule } from '@angular/core';
import { FormsModule } from '@angular/forms';
import { AppComponent } from './app.component';
import { Exo1Component } from './exercices/exo1/exo1.component';
import { Exo2Component } from './exercices/exo2/exo2.component';
import { Exo3Component } from './exercices/exo3/exo3.component';
import { Exo4Component } from './exercices/exo4/exo4.component';
import { Exo5Component } from './exercices/exo5/exo5.component';
import { Exo6Component } from './exercices/exo6/exo6.component';
import { Exo7Component } from './exercices/exo7/exo7.component';
import { Exo8Component } from './exercices/exo8/exo8.component';
import { Exo9Component } from './exercices/exo9/exo9.component';
import { Exo10Component } from './exercices/exo10/exo10.component';
@NgModule({
declarations: [
AppComponent,
Exo1Component,
Exo2Component,
Exo3Component,
Exo4Component,
Exo5Component,
Exo6Component,
Exo7Component,
Exo8Component,
Exo9Component,
Exo10Component
],
imports: [
BrowserModule,
FormsModule,
],
providers: [],
bootstrap: [AppComponent]
})
export class AppModule { }
<file_sep>import { Component, OnInit } from '@angular/core';
@Component({
selector: 'app-exo8',
template: `
<article>
<h2>Exercice N°8</h2>
<p>Conception de 2 boutons permettant d'incrémenter ou de décrémenter un nombre.</p>
<ul>
<li>Au départ 10</li>
<li>1 Mini</li>
<li>25 Maxi</li>
</ul>
<button type="button" (click)="incrementer()" >+</button>
<span><strong>{{ nombre }}</strong></span>
<button type="button" (click)="decrementer()" >-</button>
</article>
`,
styles: []
})
export class Exo8Component implements OnInit {
public nombre: number = 10;
incrementer() {
if ( this.nombre >= 1 && this.nombre < 25 ) {
this.nombre++;
} else if ( this.nombre == 25 ){
this.nombre = 25;
}
return this.nombre;
}
decrementer() {
if ( this.nombre <= 25 && this.nombre >= 2 ) {
this.nombre--;
} else if ( this.nombre == 1 ){
this.nombre = 1;
}
return this.nombre;
}
constructor() { }
ngOnInit() {
}
}
<file_sep>import { Component, OnInit } from '@angular/core';
@Component({
selector: 'app-exo2',
template: `
<article>
<h2>Exercice N°2</h2>
<p>Création d'une interpolation avec les moustaches</p>
<p>{{ description }}</p>
</article>
`,
styles: []
})
export class Exo2Component implements OnInit {
public description: string = "Je suis une interpolation";
constructor() { }
ngOnInit() {
}
}
<file_sep>import { Component, OnInit } from '@angular/core';
@Component({
selector: 'app-exo10',
template: `
<article>
<h2>Exercice N°10</h2>
<p>
Utilisation à nouveau de la boucle ngFor pour afficher une liste actuellement vide.<br />
Je la remplirais avec un petit formulaire.
</p>
<input type="text" [(ngModel)]="newItem" /><button type="button" (click)="addListe(newItem)">Ajouter</button>
<ul *ngFor="let tache of taches let iteration = index">
<li>{{ iteration + 1 }} - {{ tache }}</li>
</ul>
</article>
`,
styles: []
})
export class Exo10Component implements OnInit {
newItem: string = '';
taches: string[] = [];
addListe(item: string) {
this.taches.push(item);
}
constructor() { }
ngOnInit() {
}
}
<file_sep>import { Component, OnInit } from '@angular/core';
@Component({
selector: 'app-exo5',
template: `
<article>
<h2>Exercice N°5</h2>
<p>Dans cet exercice je vais changer l'image au clic sur un bouton</p>
<figure>
<img [src]="imgSrc" [alt]="imgAlt" [width]="imgWidth" [height]="imgHeight" />
<figcaption>{{ imgAlt }}</figcaption>
</figure>
<button (click)="changerImage()">Changé l'image</button>
</article>
`,
styles: []
})
export class Exo5Component implements OnInit {
public images: any[] = [
{image: "./assets/images/image1.jpg", desc: "Je suis l'image 1"},
{image: "./assets/images/image2.jpg", desc: "Je suis l'image 2"},
{image: "./assets/images/image3.jpg", desc: "Je suis l'image 3"}
];
public imgSrc: string = this.images[0].image;
public imgAlt: string = this.images[0].desc;
public imgWidth: string = "400";
public imgHeight: string = "266";
  index: number = 0;
changerImage() {
    // IF - when I don't know the number of elements in my array
if (this.index < this.images.length - 1) {
this.index++;
this.imgSrc = this.images[this.index].image;
this.imgAlt = this.images[this.index].desc;
} else {
this.index = 0;
this.imgSrc = this.images[this.index].image;
this.imgAlt = this.images[this.index].desc;
}
// --------------------------------------------------------------
    // SWITCH - when I know the number of elements in my array.
// switch ( this.index ) {
// case 0 :
// this.imgSrc = this.images[this.index].image;
// this.imgAlt = this.images[this.index].desc;
// this.index++;
// break
// case 1 :
// this.imgSrc = this.images[this.index].image;
// this.imgAlt = this.images[this.index].desc;
// this.index++;
// break
// case 2 :
// this.imgSrc = this.images[this.index].image;
// this.imgAlt = this.images[this.index].desc;
// this.index = 0;
// break
// }
}
constructor() { }
ngOnInit() {
}
}
<file_sep>import { Component, OnInit } from '@angular/core';
@Component({
selector: 'app-exo3',
template: `
<article>
<h2>Exercice N°3</h2>
<p>Création d'un data binding sur une image.</p>
<figure>
<img [src]="imgSrc" alt="{{ imgAlt }}" title="{{ imgAlt }}" [width]="imgWidth" height="{{ imgHeight }}" />
<figcaption>{{ imgLegend }}</figcaption>
</figure>
</article>
`,
styles: []
})
export class Exo3Component implements OnInit {
public imgSrc: string = "./assets/images/image4.jpg";
public imgAlt: string = "Je suis la description de l'image au cas où si elle ne s'affiche pas.";
public imgWidth: string = "400";
public imgHeight: string = "266";
public imgLegend: string = "Je suis la legende de l'image interpolé";
constructor() { }
ngOnInit() {
}
}
<file_sep>import { Component, OnInit } from '@angular/core';
@Component({
selector: 'app-exo1',
template: `
<article>
<h2>Exercice N°1</h2>
<p>Création d'un component permettant de me voir affiché.</p>
</article>
`,
styles: []
})
export class Exo1Component implements OnInit {
constructor() { }
ngOnInit() {
}
}
<file_sep>import { Component, OnInit } from '@angular/core';
@Component({
selector: 'app-exo6',
template: `
<article>
<h2>Exercice N°6</h2>
<p>Conception d'un input qui permettra d'éditer un titre.</p>
<input type="text" placeholder="saisir un texte ici..." [(ngModel)]="titre" /><button (click)="reset()">Reset</button>
<h3>{{ titre }}</h3>
</article>
`,
styles: []
})
export class Exo6Component implements OnInit {
public titre: string = 'Titre démonstratif';
reset() {
this.titre = '';
}
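  // [(ngModel)]="titre" is shorthand for [ngModel]="titre" (ngModelChange)="titre = $event",
  // which is why typing in the input updates the <h3> heading immediately.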
constructor() { }
ngOnInit() {
}
}
<file_sep>import { Component, OnInit } from '@angular/core';
@Component({
selector: 'app-exo9',
template: `
<article>
<h2>Exercice N°9</h2>
<p>Utilisation de la boucle ngFor pour afficher une liste issu d'un tableau.</p>
<ul *ngFor="let resultat of tableau; let iteration = index">
<li>{{ iteration + 1 }} - {{ resultat }}</li>
</ul>
</article>
`,
styles: []
})
export class Exo9Component implements OnInit {
public tableau: string[] = [
'Playstation 4',
'Nintendo Switch',
'Xbox One S',
'Super Nintendo',
'Playstation 5'
]
constructor() { }
ngOnInit() {
}
}
|
cd6c0fd0ae9e98f9f969c3c98aae6c686f920bf5
|
[
"Markdown",
"TypeScript"
] | 12
|
Markdown
|
Zyrass/Exo_Angular
|
c5addc3fe1f303620d9abcf98706d1e62b3ac547
|
610188beac7eb858e833cf2af1aa1e941c43eda6
|
refs/heads/master
|
<repo_name>swaiing/recordings-handler<file_sep>/app.rb
# app.rb
require 'sinatra'
require 'json'
require 'net/http'
require 'uri'
require 'twilio-ruby'
class TwilioRecordingsHandler < Sinatra::Base
helpers do
def handlePost(notification_number, add_ons)
transcripts = ""
vb_results = add_ons['results']['voicebase_transcription']
if !vb_results.nil?
recording_link = retrieveRecordingLink vb_results
response_body = retrieveAnalysisResults vb_results
transcripts += "VoiceBase: "
transcripts += response_body['media']['transcripts']['text']
end
ibm_results = add_ons['results']['ibm_watson_speechtotext']
if !ibm_results.nil?
recording_link = retrieveRecordingLink ibm_results
response_body = retrieveAnalysisResults ibm_results
transcripts += "IBM: "
results = response_body['results'][0]['results']
results.each do |result|
transcripts += result['alternatives'][0]['transcript'] + " "
end
end
p transcripts
sendSms notification_number, transcripts
end
def retrieveAnalysisResults(results)
url = results['payload'][0]['url']
response = ""
# for API resource fix
if url.include? "api.twilio.com"
uri = URI.parse url
Net::HTTP.start(uri.host, uri.port, :use_ssl => uri.scheme == 'https') do |http|
req = Net::HTTP::Get.new uri.request_uri
req.basic_auth ENV['TWILIO_ACCOUNT_SID'], ENV['TWILIO_AUTH_TOKEN']
response = http.request req
p "API resource: #{response.body}"
end
# for invalid S3
else
trans_url = url.gsub(/^https/, "http")
uri = URI.parse trans_url
response = Net::HTTP.get uri
p "Invalid S3 resource: #{response}"
end
JSON.parse response
end
def retrieveRecordingLink(results)
results['links']['Recording']
end
def sendSms(notification_number, body)
if notification_number.nil?
p "Error sending SMS: No notification number given"
return
end
# TODO: move to class instantiation
account_sid = ENV['TWILIO_ACCOUNT_SID']
auth_token = ENV['TWILIO_AUTH_TOKEN']
from_number = ENV['TWILIO_NUMBER']
@client = Twilio::REST::Client.new account_sid, auth_token
# send SMS
@client.account.messages.create({
:from => from_number,
:to => notification_number,
:body => body
})
end
end
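  # Rough local smoke test (illustrative only: the AddOns JSON below is a minimal
  # stand-in rather than the exact payload Twilio sends, and the port depends on how
  # the app is racked up):
  #   curl -X POST "http://localhost:9292/?number=%2B15551234567" \
  #        --data-urlencode 'AddOns={"status":"successful","results":{}}'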
post '/' do
# get AddOns, form encoded in POST body
add_ons = JSON.parse params[:AddOns]
status = add_ons['status']
if status != "successful"
message = add_ons['message']
code = add_ons['code']
p "Error #{code} : #{message}"
return
end
# get number param in GET
notification_number = params[:number]
p "Notification Number: #{notification_number}"
# process transcription analysis
handlePost notification_number, add_ons
end
end
|
3436a348b0feb4bd0a9443a7e6a4c0cd2e6f4d6c
|
[
"Ruby"
] | 1
|
Ruby
|
swaiing/recordings-handler
|
18dee60e752dd442cb9a8383c40677753e084d65
|
4dc944abc4e41029f8711586e6fa7619d6c2ec00
|
refs/heads/master
|
<repo_name>RickRieger/express-games-hw<file_sep>/routes/gameRouter.js
const express = require("express");
const router = express.Router();
const uuidv4 = require("uuid").v4;
let games = [
{
id: "adowb1b3bb",
game: "League of Legends",
description: "League of Legends is a team-based game with over 140 champions to make epic plays with."
},
{
id: "kd7b9ks2nda",
game: "PlayerUnknown's Battlegrounds",
description: "PLAYERUNKNOWN'S BATTLEGROUNDS is a last-man-standing shooter being developed with community feedback."
}
];
router.get("/get-all-games", function (req, res) {
res.json({games});
});
router.get("/get-game-by-id/:id", function (req, res) {
const id = req.params.id;
let foundGame;
games.forEach(function (game) {
if (game.id === id) {
foundGame = game;
}
});
if (!foundGame) {
res.status(404).json({ message: "The game with the id does not exist, please check id" });
} else {
res.json({ payload: foundGame });
}
});
// NOTE: leftover from a separate todos exercise; it references a `todos` array that is
// never defined in this file, so it is commented out here to keep the router from
// throwing a ReferenceError when the route is hit.
// router.get("/get-todos-by-done/:boolean", function (req, res) {
//   const boolean = req.params.boolean;
//   let newDoneArray = [];
//   todos.forEach(function (element) {
//     if (element.done === boolean) {
//       newDoneArray.push(element);
//     }
//   });
//   res.json(newDoneArray);
// });
// extra credit!!!!
router.get("/get-game-by-name/:name", function (req, res) {
console.log(req.params.name)
let foundGame;
let name = req.params.name;
games.forEach(function (item) {
if (item.game === name) {
foundGame = item;
console.log('working')
}
});
if (!foundGame) {
res.json({ message: "The game does not exist, please check name" });
} else {
res.json({ foundGame });
}
});
router.post("/create-new-game", function (req, res) {
  let newGame = {
    id: uuidv4(),
    game: req.body.game,
    description: req.body.description
  };
  // return early so we never send a second response for the same request
  if (!newGame.game || !newGame.description) {
    return res.json({ message: "cannot leave text area blank" });
  }
  const alreadyExists = games.some(function (game) {
    return game.game === newGame.game;
  });
  if (alreadyExists) {
    return res.json({ message: "Game already exists, cannot add game" });
  }
  games.push(newGame);
  res.json({ games });
});
router.put("/update-game/:id", function (req, res) {
  const foundGame = games.find(function (game) {
    return game.id === req.params.id;
  });
  // return early so we never touch an undefined game or send two responses
  if (!foundGame) {
    return res.json({ message: "game not found, cannot update" });
  }
  if (req.body.game !== undefined && req.body.game !== "") {
    foundGame.game = req.body.game;
  }
  if (req.body.description !== undefined && req.body.description !== "") {
    foundGame.description = req.body.description;
  }
  res.json({ games });
});
router.delete("/delete-game/:id", function (req, res) {
let isFound = games.findIndex((game) => game.id === req.params.id);
if (isFound === -1) {
res.json({ message: "game not found, cannot delete" });
} else {
games.splice(isFound, 1);
res.json({ games });
}
});
module.exports = router;
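// Illustrative wiring (the app file and mount path below are assumptions, not part of this file):
//   const express = require("express");
//   const app = express();
//   app.use(express.json());
//   app.use("/games", require("./routes/gameRouter"));
//   app.listen(3000);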
|
b30df288686ea695586c4956cb97132bdc9bf547
|
[
"JavaScript"
] | 1
|
JavaScript
|
RickRieger/express-games-hw
|
6e2fad2ac5625316f8d53640d56ef8aa2d843abb
|
ea2d0fe3454a046b295ade70dcc1eeb774d14ac2
|
refs/heads/master
|
<file_sep><?php
namespace Tests\Feature;
use Tests\TestCase;
use Illuminate\Foundation\Testing\WithFaker;
use Illuminate\Foundation\Testing\DatabaseTransactions;
use Illuminate\Support\Facades\Hash;
use Illuminate\Support\Facades\Password;
use Illuminate\Auth\Notifications\ResetPassword;
use App\User;
class AuthTest extends TestCase
{
    /**
     * A basic test example.
     *
     * @return void
     */
    use DatabaseTransactions;
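    // Note: PHPUnit only runs methods whose names start with "test" or that carry a
    // /** @test */ annotation, so the methods below need one of those markers to execute.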
public function user_can_login_with_valid_credentials(){
$user = factory(User::class)->create();
$response = $this->post('/login', [
'email' => $user->email,
'password' => '<PASSWORD>'
]);
$response->assertStatus(302);
}
public function user_cannot_login_with_invalid_credentials(){
$user = factory(User::class)->create();
$response = $this->post('/login', [
'email' => $user->email,
'password' => '<PASSWORD>'
]);
$response->assertSessionHasErrors();
}
public function user_can_register_with_valid_credentials(){
$user = factory(User::class)->make();
$response = $this->post('register', [
'name' => $user->name,
'email' => $user->email,
'password' => '<PASSWORD>',
'password_confirmation' => '<PASSWORD>'
]);
$response->assertStatus(302);
}
public function user_cannot_register_with_existing_credentials(){
$user = factory(User::class)->make();
$response = $this->post('register', [
'name' => $user->name,
'email' => $user->email,
'password' => '<PASSWORD>',
'password_confirmation' => '<PASSWORD>'
]);
$response->assertSessionHasErrors();
}
public function user_can_request_for_reset_password_code(){
$user = factory(User::class)->create();
$this->expectsNotification($user, ResetPassword::class);
$response = $this->post('password/email', ['email' => $user->email]);
$response->assertStatus(302);
}
public function user_can_reset_password_with_valid_code(){
$user = factory(User::class)->create();
$token = Password::createToken($user);
$response = $this->post('/password/reset', [
'token' => $token,
'email' => $user->email,
'password' => '<PASSWORD>',
'password_confirmation' => '<PASSWORD>'
]);
$this->assertTrue(Hash::check('password', $user->fresh()->password));
}
}
|
d3db8388bac756784ef03e7d6d036069c3edb8ff
|
[
"PHP"
] | 1
|
PHP
|
fredrickjomo/ralphowino-consulting-test1
|
98fe3b9d54d5b8266e6e4cc09d541daa29884abe
|
f5290278c9a2106e393572f6babecda876665614
|
refs/heads/master
|
<repo_name>alifa20/random-particles-ios<file_sep>/RandomlySpawnEnemyProject/FinishScene.swift
//
// FinishScene.swift
// RandomlySpawnEnemyProject
//
// Created by <NAME> on 6/5/18.
// Copyright © 2018 SkyVan Labs. All rights reserved.
//
import SpriteKit
struct ScoreRecord: Codable {
let playerName: String
let score: Double
init(playerName: String, score: Double) {
self.playerName = playerName
self.score = score
}
}
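// ScoreRecord round-trips through Codable; an illustrative sketch (not called anywhere in this file):
//   let data = try JSONEncoder().encode([ScoreRecord(playerName: "A", score: 1)])
//   let back = try JSONDecoder().decode([ScoreRecord].self, from: data)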
class GameRoomTableView: UITableView,UITableViewDelegate,UITableViewDataSource {
// var items: [String] = ["Player1", "Player2", "Player3"]
// var items: [ScoreRecord] = [ScoreRecord(playerName: "jafar",score: 10.0)]
var items: [ScoreRecord] = []
override init(frame: CGRect, style: UITableViewStyle) {
super.init(frame: frame, style: style)
self.delegate = self
self.dataSource = self
}
required init?(coder aDecoder: NSCoder) {
fatalError("init(coder:) has not been implemented")
}
// MARK: - Table view data source
func numberOfSections(in tableView: UITableView) -> Int {
return 1
}
func tableView(_ tableView: UITableView, numberOfRowsInSection section: Int) -> Int {
return items.count
}
func tableView(_ tableView: UITableView, cellForRowAt indexPath: IndexPath) -> UITableViewCell {
let cell:UITableViewCell = tableView.dequeueReusableCell(withIdentifier: "cell")! as UITableViewCell
cell.textLabel?.text = "\(self.items[indexPath.row].score) - \(self.items[indexPath.row].playerName)"
return cell
}
func tableView(_ tableView: UITableView, titleForHeaderInSection section: Int) -> String? {
return "Top Scores"
}
func tableView(_ tableView: UITableView, didSelectRowAt indexPath: IndexPath) {
print("You selected cell #\(indexPath.row)!")
}
}
class FinishScene: SKScene {
var records: [ScoreRecord] = []
var gameTableView = GameRoomTableView()
var settings: Settings?
var playerName: String? = "Player 1"
var playAgainButton: SKLabelNode!
var score: Double = 20
override func didMove(to view: SKView) {
let documentsDir = FileManager.default.urls(for: .documentDirectory, in: .userDomainMask).last!
print(documentsDir)
let r = ScoreRecord(playerName: self.playerName!, score: self.score)
// saveData(records: records)
gameTableView.register(UITableViewCell.self, forCellReuseIdentifier: "cell")
gameTableView.frame = CGRect(x:20,y:50,width:280,height:200)
// gameTableView.items = self.records
self.scene?.view?.addSubview(gameTableView)
// gameTableView.reloadData()
loadData()
self.records.append(r)
// let r2 = ScoreRecord(playerName: "test", score: 12.0)
// self.records.append(r2)
self.gameTableView.items = records.sorted(by: {(n1:ScoreRecord, n2:ScoreRecord) -> Bool in return n2.score < n1.score})
self.gameTableView.reloadData()
saveData(records: self.records)
}
override func touchesBegan(_ touches: Set<UITouch>, with event: UIEvent?) {
for t in touches {
let positionInScene = t.location(in: self)
let touchedNode = self.atPoint(positionInScene)
if let name = touchedNode.name
{
if name == "playAgainButton"
{
let transition:SKTransition = SKTransition.fade(withDuration: 1)
let sceneTemp = MenuScene(fileNamed: "MenuScene") as MenuScene?
sceneTemp?.scaleMode = .aspectFill
sceneTemp?.settings = Settings(maxBubbles:15, playTime: 60 )
gameTableView.removeFromSuperview()
self.scene?.view?.presentScene(sceneTemp!, transition: transition)
}
}
}
}
func saveData(records: [ScoreRecord]) {
let documentsDirectory = FileManager.default.urls(for: .documentDirectory, in: .userDomainMask).last!
let archiveURL = documentsDirectory.appendingPathComponent("high_scores").appendingPathExtension("json")
do {
let data = try JSONEncoder().encode(records)
try data.write(to: archiveURL, options: .noFileProtection)
}
catch {
print("Error saving data")
}
}
func loadData() {
let documentsDirectory = FileManager.default.urls(for: .documentDirectory, in: .userDomainMask).last!
let archiveURL = documentsDirectory.appendingPathComponent("high_scores").appendingPathExtension("json")
let jsonDecoder = JSONDecoder()
if let highScoresData = try? Data(contentsOf: archiveURL),
let decodedHighScores = try? jsonDecoder.decode([ScoreRecord].self, from: highScoresData) {
self.records = decodedHighScores
print(records)
self.gameTableView.items = self.records
self.gameTableView.reloadData()
// self.tableView.reloadData()
}
}
}
<file_sep>/RandomlySpawnEnemyProject/SVLSpriteNodeButton.swift
//
// SVLSpriteNodeButton.swift
// AdvanceSpriteKitButtonProject
//
// Created by <NAME> on 9/2/17.
// Copyright © 2017 SkyVan Labs. All rights reserved.
//
import SpriteKit
protocol SVLSpriteNodeButtonDelegate: class {
func spriteButtonDown(_ button: SVLSpriteNodeButton)
func spriteButtonUp(_ button: SVLSpriteNodeButton)
func spriteButtonMoved(_ button: SVLSpriteNodeButton)
func spriteButtonTapped(_ button: SVLSpriteNodeButton)
}
class SVLSpriteNodeButton: SKSpriteNode {
enum SpriteButtonState {
case up
case down
}
weak var delegate: SVLSpriteNodeButtonDelegate?
var label: SKLabelNode?
var state = SpriteButtonState.up
//MARK: - Init and Setup
override init(texture: SKTexture?, color: UIColor, size: CGSize) {
super.init(texture: texture, color: color, size: size)
setup()
}
required init?(coder aDecoder: NSCoder) {
super.init(coder: aDecoder)
setup()
}
func setup(){
isUserInteractionEnabled = true
for child in children{
if let label = child as? SKLabelNode{
self.label = label
}
}
}
//MARK: - Touch Logic
func touchDown(atPoint pos : CGPoint) {
alpha = 0.5
state = .down
delegate?.spriteButtonDown(self)
}
func touchMoved(toPoint pos : CGPoint) {
delegate?.spriteButtonMoved(self)
}
func touchUp(atPoint pos : CGPoint) {
alpha = 1.0
state = .up
delegate?.spriteButtonUp(self)
if contains(pos){
delegate?.spriteButtonTapped(self)
}
}
override func touchesBegan(_ touches: Set<UITouch>, with event: UIEvent?) {
for t in touches { self.touchDown(atPoint: t.location(in: self)) }
}
override func touchesMoved(_ touches: Set<UITouch>, with event: UIEvent?) {
for t in touches { self.touchMoved(toPoint: t.location(in: self)) }
}
override func touchesEnded(_ touches: Set<UITouch>, with event: UIEvent?) {
for t in touches {
if parent != nil {
self.touchUp(atPoint: t.location(in: parent!))
}
}
}
override func touchesCancelled(_ touches: Set<UITouch>, with event: UIEvent?) {
for t in touches {
if parent != nil {
self.touchUp(atPoint: t.location(in: parent!))
}
}
}
}
<file_sep>/RandomlySpawnEnemyProject/TouchableSKSpriteNode.swift
//
// TouchableSKSpriteNode.swift
// RandomlySpawnEnemyProject
//
// Created by <NAME> on 5/5/18.
// Copyright © 2018 SkyVan Labs. All rights reserved.
//
import SpriteKit
//protocol GameSceneDelegate {
// func calledFromBubble(_ button: TouchableSKSpriteNode)
//}
protocol GameSceneDelegate: class {
func calledFromBubble(_ button: TouchableSKSpriteNode)
}
class TouchableSKSpriteNode: SKSpriteNode {
var scoreLabel: SKLabelNode!
var bubbleType = ""
// var gameDelegate: GameSceneDelegate?
weak var delegate: GameSceneDelegate?
override func touchesBegan(_ touches: Set<UITouch>, with event: UIEvent?) {
// scoreLabel = self.parent?.childNode(withName: "scoreLabel") as! SKLabelNode
// scoreLabel.text = String(Int(scoreLabel.text!)!+1)
// gameDelegate?.calledFromBubble(self)
delegate?.calledFromBubble(self)
self.removeFromParent()
}
}
<file_sep>/RandomlySpawnEnemyProject/SVLSpriteNodeButton2.swift
//
// SVLSpriteNodeButton2.swift
// RandomlySpawnEnemyProject
//
// Created by Ali on 5/5/18.
// Copyright © 2018 SkyVan Labs. All rights reserved.
//
import SpriteKit
protocol SVLSpriteNodeButton2Delegate: class {
func calledFromSVLSpriteNodeButton2Delegate(_ button: SVLSpriteNodeButton2)
// func spriteButtonUp(_ button: SVLSpriteNodeButton)
// func spriteButtonMoved(_ button: SVLSpriteNodeButton)
// func spriteButtonTapped(_ button: SVLSpriteNodeButton)
}
class SVLSpriteNodeButton2: SKSpriteNode {
weak var delegate: SVLSpriteNodeButton2Delegate?
func touchDown(atPoint pos : CGPoint){
print ("Touch Down")
}
func touchMoved(atPoint pos : CGPoint){
print ("Touch Moved")
}
func touchUp(atPoint pos : CGPoint){
print ("Touch Up")
}
override func touchesBegan(_ touches: Set<UITouch>, with event: UIEvent?) {
print("touched")
// delegate?.calledSVLSpriteNodeButton2Delegate(self)
for t in touches {self.touchDown(atPoint: t.location(in: self))}
}
override func touchesMoved(_ touches: Set<UITouch>, with event: UIEvent?) {
for t in touches {self.touchMoved(atPoint: t.location(in: self))}
}
override func touchesEnded(_ touches: Set<UITouch>, with event: UIEvent?) {
for t in touches {self.touchUp(atPoint: t.location(in: self))}
}
override func touchesCancelled(_ touches: Set<UITouch>, with event: UIEvent?) {
for t in touches {self.touchUp(atPoint: t.location(in: self))}
}
}
<file_sep>/RandomlySpawnEnemyProject/GameViewController.swift
//
// GameViewController.swift
// RandomlySpawnEnemyProject
//
// Created by <NAME> on 9/30/17.
// Copyright © 2017 SkyVan Labs. All rights reserved.
//
import UIKit
import SpriteKit
import GameplayKit
//struct Settings {
//// let playTime: TimeInterval
// let maxBubbles: Int
//}
struct Settings {
var maxBubbles: Int
var playTime: Double
init(maxBubbles: Int, playTime: Double) {
self.maxBubbles = maxBubbles
self.playTime = playTime
}
// init(maxBubbles: Int) {
// self.maxBubbles = maxBubbles
// }
}
class GameViewController: UIViewController, MenuSceneDelegate {
func calledFromMenuScene(_ scene: MenuScene) {
print("calledFromMenuScene")
}
var playerName: String?
var settings: Settings?
var startGame: SVLSpriteNodeButton2!
// func calledFromMenuScene(_ button: SVLSpriteNodeButton2) {
// print("calledFromMenuScene")
//// if let scene = SKScene(fileNamed: "GameScene") {
//// // Set the scale mode to scale to fit the window
//// scene.scaleMode = .aspectFill
//// }
// }
@IBOutlet weak var timerLabel: UILabel!
var countdownTimer: Timer!
var totalTime = 60
var TimeInterval = 1
override func viewDidLoad() {
super.viewDidLoad()
// let menuScene = MenuScene()
// menuScene.scaleMode = .aspectFill
// menuScene.menuDelegate = self
if let view = self.view as! SKView? {
// Load the SKScene from 'GameScene.sks'
// if let scene = SKScene(fileNamed: "FinishScene") {
// // Set the scale mode to scale to fit the window
// scene.scaleMode = .aspectFill
// // Present the scene
// view.presentScene(scene)
// return
// }
if let scene = SKScene(fileNamed: "MenuScene") as? MenuScene {
// Set the scale mode to scale to fit the window
scene.scaleMode = .aspectFill
scene.settings = Settings(maxBubbles:15, playTime: 60 )
scene.playerName = self.playerName
// Present the scene
view.presentScene(scene)
}
view.ignoresSiblingOrder = true
view.showsFPS = true
view.showsNodeCount = true
// startTimer()
}
}
@objc func updateTime() {
timerLabel.text = "\(timeFormatted(totalTime))"
if totalTime != 0 {
totalTime -= 1
} else {
endTimer()
}
}
func startTimer() {
countdownTimer = Timer.scheduledTimer(timeInterval: 1, target: self, selector: #selector(updateTime), userInfo: nil, repeats: true)
// let timer = Timer.scheduledTimer(timeInterval: 1.0, target: self, selector: #selector(updateTime), userInfo: nil, repeats: true)
// timer.fire()
}
func endTimer() {
countdownTimer.invalidate()
if let view = self.view as! SKView? {
// Load the SKScene from 'GameScene.sks'
if let scene = SKScene(fileNamed: "FinishScene") {
// Set the scale mode to scale to fit the window
scene.scaleMode = .aspectFill
// Present the scene
view.presentScene(scene)
}
}
timerLabel.removeFromSuperview()
}
func timeFormatted(_ totalSeconds: Int) -> String {
let seconds: Int = totalSeconds % 60
let minutes: Int = (totalSeconds / 60) % 60
// let hours: Int = totalSeconds / 3600
return String(format: "%02d:%02d", minutes, seconds)
}
override var shouldAutorotate: Bool {
return true
}
// func willMoveFromView(view: SKView) {
// timerLabel.removeFromSuperview()
// }
override var supportedInterfaceOrientations: UIInterfaceOrientationMask {
if UIDevice.current.userInterfaceIdiom == .phone {
return .allButUpsideDown
} else {
return .all
}
}
override func prepare(for segue: UIStoryboardSegue, sender: Any?) {
if let scene = segue.destination as? GameViewController {
scene.playerName = "..."
// let s = Settings(maxBubbles:10 )
// scene.settings = s
// scene.playerName = self.playerName
// scene.settings = self.settings
}
}
override func didReceiveMemoryWarning() {
super.didReceiveMemoryWarning()
// Release any cached data, images, etc that aren't in use.
}
override var prefersStatusBarHidden: Bool {
return true
}
}
|
84a666f255e81fa0a19a02a2cb446ebd051ac975
|
[
"Swift"
] | 5
|
Swift
|
alifa20/random-particles-ios
|
35ee1ae4022f3632a60b1d5362a1840391d4792b
|
511562aac5f9b031fb11850b5385aa88b6eb9422
|
refs/heads/master
|
<repo_name>metabench/nextleveldb-model<file_sep>/query.js
// Will have different types of queries
// Queries will be serialised to binary.
class Query {
constructor(id, page_size = 0) {
this.id = id;
this.page_size = page_size;
}
}
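// Illustrative construction (the id and page size are example values; in practice the
// subclasses attached below, e.g. Query.Keys_In_Range, are what get serialised):
//   const q = new Query(10, 256); // 10 = Keys_In_Range_Query id per the notes below, page size 256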
// Need to be able to encode a record fully into a buffer.
// Would mean storing the length of the key?
// Put Record
// 0
// ----------
// Put records
// 1
// -----------
// Keys_In_Range_Query - using full keys
// 10
// ---------------------
// don't give table IDs
// This can be got with other parts of the Model.
// Records_In_Range_Query
// 11
// ---------------------
// Need to have queries on the server that make use of the indexes
// This would require the server-side to have knowledge of the model.
// This is also why having the model encoded into the database would help.
// Or have a query that gets records according to indexes. Relies on the query generator knowing how the indexes are.
// DB will need to know how to get records according to an index. That should be possible. Could read full pages of indexes and then get the results.
// Want some more basic keys for the moment.
// Don't want to make the DB server side of it all that complex.
// Want to be able to leave the server running for a long time without changing it.
Query.Keys_In_Range = require('./queries/keys_in_range');
Query.Records_In_Range = require('./queries/records_in_range');
module.exports = Query;<file_sep>/buffer-backed/buffer-backed.js
// Initialises the rest of them out of buffers.
// Each type has its own prefix for buffers where it gets encoded into Buffers.
// Binary_Encoding does not handle buffer-backed db types.
const Key = require('./key');
const Key_List = require('./key-list');
const Record = require('./record');
const Record_List = require('./record-list');
const Value = require('./value');
const xas2 = require('xas2');
const Binary_Encoding = require('binary-encoding');
// Trouble is, we don't have a prefix for no prefix.
// Treat them as encoding overwrites.
/*
const NONE = 0;
const RECORD = 1;
const KEY = 2;
const VALUE = 3;
*/
//const NONE = 0;
const RECORD = 200;
const KEY = 201;
const VALUE = 202;
const NONE = 210; // D2
const _from = (buf) => {
let pos = 0, prefix;
[prefix, pos] = xas2.read(buf, pos);
// prefix could be one of the numbers in binary encoding.
let buf_the_rest = Buffer.alloc(buf.length - pos);
buf.copy(buf_the_rest, 0, pos);
//
console.log('buf', buf);
console.log('prefix', prefix);
// Be sure to indicate no prefix in the Command_Message then.
if (prefix === RECORD) {
//console.log('RECORD prefix');
// then create the record out of that buf_the_rest
return new Record(buf_the_rest);
} else if (prefix === NONE) {
//console.log('RECORD prefix');
// then create the record out of that buf_the_rest
return buf_the_rest;
//return Binary_Encoding.decode_buffer(buf_the_rest);
} else {
//return Binary_Encoding.decode_buffer(buf);
//return buf_the_rest;
console.log('pre decode');
console.trace();
console.log('buf', buf);
return Binary_Encoding.decode_buffer(buf);
//console.trace();
//throw 'NYI';
}
}
const decode_args_buffers = (arr) => {
return arr.map(item => {
if (item instanceof Buffer) {
//return from(item);
return item;
} else {
return item;
}
});
}
module.exports = {
//from: from,
decode_args_buffers: decode_args_buffers
}
<file_sep>/command-message.js
// Will be based around the buffer.
// Lazy reading of the buffer values where possible.
// Maybe the Command_Message could itself have events.
const lang = require('lang-mini');
const def = lang.is_defined;
const each = lang.each;
const get_a_sig = lang.get_a_sig;
const clone = lang.clone;
const tof = lang.tof;
const Evented_Class = lang.Evented_Class;
const get_truth_map_from_arr = lang.get_truth_map_from_arr;
const Binary_Encoding = require('binary-encoding');
const xas2 = require('xas2');
const Paging = require('./paging');
// Not so sure that this should process events.
const buffer_backed = require('./buffer-backed/buffer-backed');
const LL_COUNT_RECORDS = 0;
const LL_PUT_RECORDS = 1;
// USING PAGING OPTION?
const LL_GET_ALL_KEYS = 2;
const LL_GET_ALL_RECORDS = 3;
const LL_GET_KEYS_IN_RANGE = 4;
const LL_GET_RECORDS_IN_RANGE = 5;
const LL_GET_RECORDS_IN_RANGES = 50;
const LL_COUNT_KEYS_IN_RANGE = 6;
const LL_GET_FIRST_LAST_KEYS_IN_RANGE = 7;
const LL_GET_RECORD = 8;
const LL_COUNT_KEYS_IN_RANGE_UP_TO = 9;
const LL_GET_RECORDS_IN_RANGE_UP_TO = 10;
const LL_FIND_COUNT_TABLE_RECORDS_INDEX_MATCH = 11;
const INSERT_TABLE_RECORD = 12;
const INSERT_RECORDS = 13;
const ENSURE_TABLE = 20;
const ENSURE_TABLES = 21;
const TABLE_EXISTS = 22;
const TABLE_ID_BY_NAME = 23;
const GET_TABLE_FIELDS_INFO = 24;
const GET_TABLE_KEY_SUBDIVISIONS = 25;
const SELECT_FROM_RECORDS_IN_RANGE = 40;
const SELECT_FROM_TABLE = 41;
const LL_SUBSCRIBE_ALL = 60;
const LL_SUBSCRIBE_KEY_PREFIX_PUTS = 61;
const LL_UNSUBSCRIBE_SUBSCRIPTION = 62;
const LL_WIPE = 100;
const LL_WIPE_REPLACE = 101;
const LL_SEND_MESSAGE_RECEIPT = 120;
// Not to be confused with Command_Response_Message
// May be best to treat all of these as optional. If no paging option is given, would use a server default.
let command_ids_with_paging_option = [LL_COUNT_RECORDS, LL_GET_ALL_KEYS, LL_GET_ALL_RECORDS, LL_GET_KEYS_IN_RANGE, LL_GET_RECORDS_IN_RANGE, LL_GET_RECORDS_IN_RANGES];
// Optional paging option will maybe be phased out or not used
// Could assume that if the message ends before paging option, then none is to be used.
let command_ids_with_optional_paging_option = [];
let map_paging_commands = get_truth_map_from_arr(command_ids_with_paging_option);
const RECORD = 200;
const KEY = 201;
const VALUE = 202;
const NONE = 210;
class Command_Message {
constructor(spec) {
let a = arguments;
let l = a.length;
//console.log('Command_Message l', l);
if (l === 1) {
let t_spec = tof(spec);
if (t_spec === 'buffer') {
this._buffer = spec;
}
} else if (l === 2) {
// number and number?
// id and paging
if (typeof a[0] === 'number' && Array.isArray(a[1])) {
let [command_id, arr_args] = arguments;
//
// put a prefix on each of them...
//let buf_encoded_args = Binary_Encoding.encode_to_buffer(arr_args, NONE);
// want a prefix at the beginning of each item?
// encode to buffer, but have a prefix before each of them
// put something saying no specific encoding type in here.
let buf_encoded_args = Binary_Encoding.encode_to_buffer(arr_args);
// But we have not given it a command id (yet)
// Best to do that just before sending.
//this._id = undefined;
// Hard to include undefined buffer.
// Special characters extension in xas2? Allowing undefined.
// saying that it's missing the id?
this.missing_id = true;
//
let buf_paging = new Paging.No_Paging().buffer;
this._buffer = Buffer.concat([xas2(command_id).buffer, buf_paging, buf_encoded_args]);
//this._buffer = Buffer.concat([xas2(command_id).buffer, xas2(0).buffer,]);
// Then depending on the command itself it can have different construction.
// Simplest construction is command, then param
} else if (typeof a[0] === 'number' && typeof a[1] === 'number') {
//console.trace();
//throw 'NYI';
let [command_id, page_size] = arguments;
this.missing_id = true;
let buf_paging = new Paging.Record_Paging(page_size).buffer;
this._buffer = Buffer.concat([xas2(command_id).buffer, buf_paging]);
} else {
console.trace();
throw 'NYI';
}
// Have some assistance in building the command.
// building the command message out of arrays / other things.
// has its paging and communication options, then its method call args.
} else if (l === 3) {
if (typeof a[0] === 'number' && Array.isArray(a[1]) && typeof a[2] === 'number') {
let [command_id, arr_args] = arguments;
//console.log('1) arr_args', arr_args);
arr_args = arr_args.map(x => {
//console.log('arr_args x', x);
if (x instanceof Buffer) {
// saying its 0 for a buffer in the command message...
// concat a different number here.
// read it as a different number to see its a buffer.
return x;
//return Buffer.concat([xas2(NONE).buffer, x]);
} else {
return x;
}
});
//console.log('2) arr_args', arr_args);
// Encoding buffer to buffer and decoding should be fine.
let buf_encoded_args = Binary_Encoding.encode_to_buffer(arr_args);
//console.log('* buf_encoded_args', buf_encoded_args);
this.missing_id = true;
let buf_paging = new Paging.Record_Paging(a[2]).buffer;
//console.log('buf_paging', buf_paging);
this._buffer = Buffer.concat([xas2(command_id).buffer, buf_paging, buf_encoded_args]);
}
}
}
set id(value) {
//this._id = value;
//console.log('this.missing_id', this.missing_id);
//console.log('set id value', value);
if (this.missing_id) {
this.missing_id = false;
this._buffer = Buffer.concat([xas2(value).buffer, this._buffer]);
} else {
throw 'Command_Message ID has already been set';
}
//
}
// message id, message type code(or command id), then the rest of the buffer is the message encoded according to the expected type, depending on the type code.
// in some cases it will be best to decode that message as a binary buffer.
// in other cases we save a few bytes over transmission because we know the types of the items (such as XAS2) and read them directly.
// Quite a lot of message type codes at present, correspond with the command names. Could have individual parser functions. Moving to standards would help make cleaner code.
// Also want an efficient protocol, but at this stage deem it best not to add further complexity, especially lower level encoding types.
// Right now, want to use Command_Message for server-side parsing.
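    // Rough serialised shape (each numeric field is xas2-encoded):
    //   [message id][command id][paging option(s)][Binary_Encoding-encoded args]
    // For example, new Command_Message(LL_GET_ALL_KEYS, []) followed by `msg.id = 1`
    // yields xas2(1) + xas2(LL_GET_ALL_KEYS) + No_Paging + encoded([]).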
get id() {
if (this.missing_id) {
return undefined;
} else {
let [res, pos] = xas2.read(this._buffer, 0);
return res;
}
}
get command_id() {
let res, pos;
if (this.missing_id) {
} else {
[res, pos] = xas2.skip(this._buffer, 0);
}
[res, pos] = xas2.read(this._buffer, pos);
return res;
}
get paging() {
let command_id;
let res, pos;
if (this.missing_id) {
} else {
[res, pos] = xas2.skip(this._buffer, 0);
}
[command_id, pos] = xas2.read(this._buffer, pos);
let paging;
// There is always a byte for paging / comm options.
//if (map_paging_commands[command_id]) {
[paging, pos] = Paging.read(this._buffer, pos);
//}
// Otherwise, we use paging.no_paging
return paging;
}
get buffer() {
/*
if (this.missing_id) {
return Buffer.concat([this._buffer]);
} else {
return this._buffer;
}
*/
return this._buffer;
}
// The inner message, but as separare arr rows
// Need to handle index rows, db rows, incrementor rows
// inner message buffer
// then it gets parsed according to that particlar command id.
get inner_message_buffer() {
let command_id;
let res, pos;
if (this.missing_id) {
} else {
[res, pos] = xas2.skip(this._buffer, 0);
}
[command_id, pos] = xas2.read(this._buffer, pos);
let paging;
[paging, pos] = Paging.read(this._buffer, pos);
let buf_res = Buffer.alloc(this._buffer.length - pos);
this._buffer.copy(buf_res, 0, pos);
return buf_res;
}
decode_inner() {
let imb = this.inner_message_buffer;
//console.log('imb', imb);
let arr_decoded_stage = Binary_Encoding.decode_buffer(this.inner_message_buffer);
// not sure it needs to be multi-stage
//console.log('arr_decoded_stage', arr_decoded_stage);
//console.trace();
// Can't have further decoding of these buffers.
// A prefix saying they are rows or records would need to be part of Binary_Encoding.
// Could incorporate extended types into Binary_Encoding.
// Binary_Encoding.extend(class, i_prefix);
// So it gets extended to handle B_Key etc
// just use Binary_Encoding here.
let arr_decoded = buffer_backed.decode_args_buffers(arr_decoded_stage);
//console.log('arr_decoded', arr_decoded);
return arr_decoded;
}
// Decodes it...
get inner() {
return this.decode_inner();
}
}
module.exports = Command_Message;<file_sep>/foreign-key.js
// Possibly will not use this.
// Will have something within Field that can indicate it is a foreign key reference.
// A foreign key seems less like a thing than many other things. It is essentially a note that a reference is in place.
class Foreign_Key {
// Maybe foreign key could do some of the lookup.
// Referring to the field itself would make more sense.
// Then what about referring to a default name field?
// A table could possibly have a field, called 'name' or not, that is the default field when looking up a name.
// May have currencies with their names, and want to add them, and have this automatic lookup done.
// Would probably be worth getting the data added to the system soon.
// Maybe some validation of input field types?
// Basically want to add a lot of data to the database soon, keeping it streaming.
// Not so sure that a foreign key table within the database is so important.
// Could have record encoding and decoding procedures which make use of foreign key information.
// Storing field types would definitely help with this encoding / decoding.
'constructor'(field_name, foreign_table) {
this.field_name = field_name;
this.foreign_table = foreign_table;
// Probably best to have a reference to the table, the field.
// refers one field in this table to a field in another table.
// if we do set_fk, it finds the field, and sets its foreign_key_to_table value.
// Want to do this so we can add records more easily.
// It can know to translate some values that it receives, most likely strings into numeric ids, and vice versa.
}
}
module.exports = Foreign_Key;<file_sep>/buffer-backed/value.js
const Binary_Encoding = require('binary-encoding');
const xas2 = require('xas2');
/*
const NONE = 0;
const RECORD = 1;
const KEY = 2;
const VALUE = 3;
*/
//const NONE = 0;
const RECORD = 200;
const KEY = 201;
const VALUE = 202;
class Value {
constructor() {
let a = arguments,
l = a.length;
if (l === 1) {
if (a[0] instanceof Buffer) {
this._buffer = a[0];
}
}
}
get buffer() {
return this._buffer;
}
get buffer_xas2_prefix() {
return new xas2(VALUE).buffer;
}
get decoded() {
if (this._buffer.length > 0) {
return Binary_Encoding.decode_buffer(this._buffer);
} else {
//return null;
return [];
}
}
get_value_at(idx) {
return Binary_Encoding.get_value_at(this._buffer, idx);
}
// then need to be able to get decoded value
}
module.exports = Value;<file_sep>/encoding.js
var lang = require('lang-mini');
var each = lang.each;
var get_a_sig = lang.get_a_sig;
var clone = lang.clone;
var tof = lang.tof;
const is_defined = lang.is_defined;
const Evented_Class = lang.Evented_Class;
const Binary_Encoding = require('binary-encoding');
const xas2 = require('xas2');
// Not working here.
//let B_Record = require('./buffer-backed/record');
// Possibly need a version of Binary_Encoding that handles NextLevelDB types
// but it makes it less general.
// Binary_Encoding plugin functions look like the right way to add encoding for the moment.
// Defining a custom range would help. There are not that many types in Binary_Encoding anyway.
// Then there can be NextLevelDB_Binary_Encoding, which will have the plugins loaded.
// May make a new module dor the database encoding, focusing on encode and decode.
// This code will generally cover binary <=> js obj conversion, specifically for the structures used by NextLevelDB.
// xas2, bin enc
/*
var buffer_to_buffer_pairs = (buf_encoded) => {
// read xas2, see the length of the row
var pos = 0,
length, l = buf_encoded.length;
var old_pos;
var done = false;
var res = [];
var arr_row;
//console.log('buf_encoded', buf_encoded);
while (!done) {
[length, pos] = xas2.read(buf_encoded, pos);
var buf_key = Buffer.alloc(length);
buf_encoded.copy(buf_key, 0, pos, pos + length);
pos = pos + length;
[length, pos] = xas2.read(buf_encoded, pos);
var buf_value = Buffer.alloc(length);
buf_encoded.copy(buf_value, 0, pos, pos + length);
pos = pos + length;
arr_row = [buf_key, buf_value];
//console.log('arr_row', arr_row);
//cb_row(arr_row);
res.push(arr_row);
if (pos >= l) {
done = true;
}
}
//var res = [buf_key, buf_value];
return res;
}
*/
// Decode
// -~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~\\
// Option to drop the kp
// Seems worth making more functionality to test live database tables and records.
// Have had at least one record added wrong in bittrex markets.
// Could test decoding of all records, if we find a problem, raise it in the observable.
// Could also be careful when syncing all / core / structural records that they get added correctly.
// I doubt there will be many more problems after this, but it seems as though checking and data reclamation is the next order of business.
// Could take a little while as well.
// error_scan
// fix errors
// Modelling the errors seems like it would be useful as well.
// Could compose an OO list of errors, then display them, propose a remedy, and carry it out.
// Checking for orphaned rows
// Could model mistakes made (record added with wrong id, should be x), and then redo it?
// For the moment, best to do this upon db starting up.
// Or could do the error scan (or some of it) from the client.
// Could get low level rows and check them
//
// Check table
//
// check for whether keys can or can't be decoded.
// have had problem when new coin has launched on bittrex.
// If a record has been added wrong, may need to delete it, may be able to fix it.
// Be able to replace a record's key.
// client side safety check - get all keys, then check they have been formed properly.
// Inconveniently, it seems like the new market for a new coin has been added wrong.
// scan_table_keys
// will raise an event for any malformed keys.
// To fix it - find any records that reference this key. Change them to reference something different.
// Seems like this will be multi-part checking and changing data. A somewhat large undertaking here.
// Changing many rows from one thing to another would take a while, for sure.
// May need to be about how to recover the good data from the DB.
var decode_key = (buf_key, remove_kp = false) => {
//console.log('4) remove_kp', remove_kp);
var key_1st_value = Binary_Encoding.decode_first_value_xas2_from_buffer(buf_key);
var decoded_key;
//if (key_1st_value % 2 === 0) {
if (key_1st_value % 2 === 0 && key_1st_value > 0) {
// even, so it's a table, so 1 prefix
// Have incrementor work differently - just xas2s in keys and values.
//console.log('buf_key', buf_key);
//console.log('key_1st_value', key_1st_value);
// Seems like a key has been put into the DB incorrectly in some cases.
// Checking for and correction various data errors / corruption makes sense.
if (remove_kp) {
decoded_key = Binary_Encoding.decode_buffer(buf_key, 1).slice(1);
} else {
decoded_key = Binary_Encoding.decode_buffer(buf_key, 1);
}
} else {
// odd, meaning indexes, so 2 prefixes. Includes the incrementors.
decoded_key = Binary_Encoding.decode_buffer(buf_key, 2);
}
return decoded_key;
}
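// Usage sketch (values are illustrative and assume Binary_Encoding round-trips strings):
//   decode_key(encode_key(2, ['BTC']))        // -> [2, 'BTC']   table kp 2 kept
//   decode_key(encode_key(2, ['BTC']), true)  // -> ['BTC']      table kp removed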
var key_length = (buf_key) => {
let pos = 0,
key_1st_value;
[key_1st_value, pos] = xas2.read(buf_key, pos);
// 0th value really here.
if (key_1st_value % 2 === 0 && key_1st_value > 0) {
return Binary_Encoding.count_encoded_items(buf_key, pos);
//
} else {
throw 'NYI';
}
}
// Still encoded
let key_value_at = (buf_key, idx) => {
let pos = 0,
key_1st_value;
[key_1st_value, pos] = xas2.read(buf_key, pos);
if (key_1st_value % 2 === 0 && key_1st_value > 0) {
// even, so it's a table, so 1 prefix
// Have incrementor work differently - just xas2s in keys and values.
//console.log('buf_key', buf_key);
//console.log('pos', pos);
// Then skip through, or better count the items.
// Including skipping KPs.
// count_encoded_items
let ith_value = Binary_Encoding.get_value_at(buf_key, idx, pos);
//let count = Binary_Encoding.count_encoded_items(buf_key, pos);
//console.log('count', count);
//return count;
return ith_value;
//
} else {
// Will need to work to get this to handle index keys.
throw 'NYI';
}
}
// encode key
// encode index key
// index_kp, index_id, arr_index_values
//var encode_index_key = (index_kp, index_id, arr_index_values) => {
var encode_index_key = function (index_kp, index_id, arr_index_values) {
let a = arguments,
l = a.length,
res;
//console.log('l', l);
//console.log('arguments', arguments);
if (l === 1) {
arr_index_values = a[0];
//console.log('arr_index_values', arr_index_values);
//index_kp = arr_index_values.shift();
//index_id = arr_index_values.shift();
//console.log('index_kp, index_id', index_kp, index_id);
//res = Binary_Encoding.encode_to_buffer(arr_index_values, index_kp, index_id);
res = Binary_Encoding.encode_to_buffer_use_kps(arr_index_values, 2);
} else {
res = Binary_Encoding.encode_to_buffer(arr_index_values, index_kp, index_id);
}
return res;
}
//
var encode_key = function (kp, arr_values) {
// May have just given it an array...
let a = arguments,
res;
//let sig = get_a_sig(a);
if (a.length === 1) {
let kp = a[0].shift();
// then if it's an index...
//console.log('kp', kp);
if (kp % 2 !== 0) {
// its odd, so an index
let idx_id = a[0].shift();
res = Binary_Encoding.encode_to_buffer(a[0], kp, idx_id);
} else {
res = Binary_Encoding.encode_to_buffer(a[0], kp);
}
} else {
// Needs to be able to handle encoding an index key.
res = Binary_Encoding.encode_to_buffer(arr_values, kp);
}
return res;
}
var decode_keys = lang.arrayify(decode_key);
// Maybe test this with the newest version of the DB server running.s
//
// select_indexes_buffer_from_kv_pair_buffer
// would do the index selection on both the key and the value, but not decoding the indexed values, only retrieving them.
// Will have some more ways to process encoded data with minimal decoding.
// That will be faster.
let select_indexes_buffer_from_kv_pair_buffer = (buf_kvp, remove_kp, arr_indexes) => {
// Want to build up a single buffer of the indexed data. Don't decode the data along the way. Just select the data from it.
// prob best to split it?
//console.log('buf_kvp', buf_kvp);
let arr_kvp = Binary_Encoding.split_length_item_encoded_buffer_to_kv(buf_kvp)[0];
//console.log('arr_kvp', arr_kvp);
// Then do the selection on each
var buf_key = arr_kvp[0];
var buf_value = arr_kvp[1];
//console.log('buf_key', buf_key);
var key_1st_value = Binary_Encoding.decode_first_value_xas2_from_buffer(buf_key);
let res, buf_selected_key_fields, total_key_fields_count, buf_selected_value_fields, total_value_fields_count;
if (key_1st_value % 2 === 0 && key_1st_value > 0) {
// even, so it's a table, so 1 prefix
// Have incrementor work differently - just xas2s in keys and values.
//console.log('buf_key', buf_key);
// Seems like data was encoded wrong in the table buffer.
[buf_selected_key_fields, total_key_fields_count] = Binary_Encoding.buffer_select_from_buffer(buf_key, arr_indexes, 1, 1);
//console.log('buf_selected_key_fields, total_key_fields_count', buf_selected_key_fields, total_key_fields_count);
// We need to have the encoding method of xas2 encoded alongside this data.
// The selected fields should be encoded alongside their types. Then before that is the length
// Need the right language for it too to avoid confusion.
//throw 'stop';
// then read the value part.
//throw 'stop';
} else {
throw 'select_indexes_buffer_from_kv_pair_buffer NYI';
}
if (buf_value) {
// Could have an empty buffer as a value.
// That seems wrong so far though.
if (key_1st_value === 0) {
//let decoded_key = Binary_Encoding.decode_buffer(buf_key, 1);
//console.log('decoded_key', decoded_key);
throw 'NYI';
/*
if (buf_value.length > 0) {
// Has difficulty doing this here.
value = Binary_Encoding.decode_first_value_xas2_from_buffer(buf_value);
} else {
//value = null;
console.trace();
throw 'stop';
}
*/
} else {
if (key_1st_value % 2 === 0) {
//console.log('buf_value', buf_value);
[buf_selected_value_fields, total_value_fields_count] = Binary_Encoding.buffer_select_from_buffer(buf_value, arr_indexes, 0, 0, total_key_fields_count);
//console.log('buf_selected_value_fields, total_value_fields_count', buf_selected_value_fields, total_value_fields_count);
//console.log('selected_value_fields', selected_value_fields);
//console.trace();
//throw 'stop';
//res = selected_key_fields.concat(selected_value_fields);
res = Buffer.concat([buf_selected_key_fields, buf_selected_value_fields]);
} else {
throw 'NYI';
// I think index records have values???
//value = Binary_Encoding.decode_buffer(buf_value);
}
// indexed lacking values?
}
}
//console.log('5) res', res);
//throw 'stop';
// use buffer_select_from_buffer
return res;
}
let select_indexes_from_kvp_buffer = (buf_kvp, remove_kp, arr_indexes) => {
// Want to build up a single buffer of the indexed data. Don't decode the data along the way. Just select the data from it.
// prob best to split it?
let bufs_kv = buffer_to_row_buffer_pairs(buf_kvp);
console.log('bufs_kv', bufs_kv);
}
// And a means to do this without decoding?
// Get that data back as binary encoded, ready to transmit?
// This looks like decode_select
// could have buffer_select
let select_indexes_from_model_row = (model_row, remove_kp, arr_indexes) => {
// Can consider the kp as the first index.
let res;
var buf_key = model_row[0];
var buf_value = model_row[1];
var key_1st_value = Binary_Encoding.decode_first_value_xas2_from_buffer(buf_key);
// Decode the key, and work out the number of key values
let selected_key_fields, total_key_fields_count, selected_value_fields, total_value_fields_count;
if (key_1st_value % 2 === 0 && key_1st_value > 0) {
// even, so it's a table, so 1 prefix
// Have incrementor work differently - just xas2s in keys and values.
//console.log('buf_key', buf_key);
// Seems like data was encoded wrong in the table buffer.
console.log('buf_key', buf_key);
// If we fail to decode the key?
// Leave out decoding errors...
//console.log('buf_key', buf_key);
console.log('arr_indexes', arr_indexes);
[selected_key_fields, total_key_fields_count] = Binary_Encoding.decode_buffer_select_by_index(buf_key, arr_indexes, 1, 1);
console.log('selected_key_fields, total_key_fields_count', selected_key_fields, total_key_fields_count);
// then read the value part.
//throw 'stop';
} else {
throw 'select_indexes_from_model_row NYI';
}
if (buf_value) {
// Could have an empty buffer as a value.
// That seems wrong so far though.
if (key_1st_value === 0) {
//let decoded_key = Binary_Encoding.decode_buffer(buf_key, 1);
//console.log('decoded_key', decoded_key);
throw 'NYI';
/*
if (buf_value.length > 0) {
// Has difficulty doing this here.
value = Binary_Encoding.decode_first_value_xas2_from_buffer(buf_value);
} else {
//value = null;
console.trace();
throw 'stop';
}
*/
} else {
if (key_1st_value % 2 === 0) {
// decode_buffer_select_by_index
//value = Binary_Encoding.decode_buffer(buf_value);
// Number of values to skip too...
// The key indexes are 0 based.
// The value indexes are based on the number of key fields.
[selected_value_fields, total_value_fields_count] = Binary_Encoding.decode_buffer_select_by_index(buf_value, arr_indexes, 0, 0, total_key_fields_count);
console.log('selected_value_fields, total_value_fields_count', selected_value_fields, total_value_fields_count);
//console.log('selected_value_fields', selected_value_fields);
//console.trace();
//throw 'stop';
res = selected_key_fields.concat(selected_value_fields);
} else {
throw 'NYI';
// I think index records have values???
//value = Binary_Encoding.decode_buffer(buf_value);
}
// indexed lacking values?
}
}
return res;
}
var decode_model_row = (model_row, remove_kp) => {
// The DB could have been started with broken xas2 values, possibly encoded wrong using an older version of the DB code.
// Not sure.
//console.log('model_row', model_row);
//console.log('B_Record', B_Record);
//let r = new B_Record(model_row);
/*
if (remove_kp) {
return r.decoded_no_kp;
} else {
return r.decoded;
}
*/
// if the model_row is a Record, then use the .decoded function
//console.log('decode_model_row model_row', model_row);
//console.log('B_Record', B_Record);
if (model_row.kvp_bufs) {
return model_row.decoded;
} else {
//console.log('remove_kp', remove_kp);
var buf_key = model_row[0];
var buf_value = model_row[1];
var value = null;
//console.log('buf_key', buf_key);
//console.log('buf_value', buf_value);
// Decode buffer could tell from odd or even.
var key_1st_value = Binary_Encoding.decode_first_value_xas2_from_buffer(buf_key);
//console.log('key_1st_value', key_1st_value);
if (buf_value) {
// Could have an empty buffer as a value.
// That seems wrong so far though.
if (key_1st_value === 0) {
//let decoded_key = Binary_Encoding.decode_buffer(buf_key, 1);
//console.log('decoded_key', decoded_key);
if (buf_value.length > 0) {
// Has difficulty doing this here.
value = Binary_Encoding.decode_first_value_xas2_from_buffer(buf_value);
} else {
//value = null;
// It's an incrementor row.
// Or an error making it appear that way
console.trace();
throw 'stop';
}
} else {
if (key_1st_value % 2 === 0) {
value = Binary_Encoding.decode_buffer(buf_value);
//console.log('value', value);
} else {
// I think index records have values???
//value = Binary_Encoding.decode_buffer(buf_value);
}
// indexed lacking values?
}
}
// not all of them are 2 deep for indexes though.
// That's where it's a bit tricky.
// Table records have got 1 xas2 prefix
// Table index records have got 2 xas prefixes.
// We need to know which is which.
// Could say one is odd, the other is even.
// That seems like the best way to differentiate between them for the moment.
// Just can't read it with 2 prefixes as we don't know it has that many.
//console.log('key_1st_value', key_1st_value);
var decoded_key;
//if (key_1st_value % 2 === 0) {
if (key_1st_value % 2 === 0 && key_1st_value > 0) {
// even, so it's a table, so 1 prefix
// Have incrementor work differently - just xas2s in keys and values.
//console.log('buf_key', buf_key);
// Seems like data was encoded wrong in the table buffer.
//console.log('buf_key', buf_key);
// If we fail to decode the key?
// Leave out decoding errors...
//console.log('buf_key', buf_key);
// Not sure why it's not able to decode the data in the buffer from the server.
// It's the key where there is the problem.
// Maybe it's missing a byte - not sure.
// Not sure if any dodgy keys have got into the db.
// Could have a maintenance procedure that checks if all keys can be decoded.
// Wonder if data has got corrupted again through bittrex adding a new coin.
// (another week's work???)
// Verification that indexes can decode seems like a useful way of doing it.
// could have a can_decode function.
// Client-side verification and fixing could prove to be useful.
// Tricky too maybe.
// Client-side verification that the DB is in good running order makes sense.
decoded_key = Binary_Encoding.decode_buffer(buf_key, 1);
/*
try {
// Maybe some records got written wrong in a db.
// Could do more of a safety check on startup, or something with a command, that removes any records that are mis-formed.
// Or just point them out.
//console.log('buf_key', buf_key);
decoded_key = Binary_Encoding.decode_buffer(buf_key, 1);
} catch (err) {
console.trace();
throw 'stop';
//decoded_key = '[DECODING ERROR: ' + err + ']';
return null;
}
*/
} else {
try {
//console.log('buf_key', buf_key);
decoded_key = Binary_Encoding.decode_buffer(buf_key, 2);
} catch (err) {
console.trace();
throw 'stop';
decoded_key = '[DECODING ERROR: ' + err + ']';
}
}
if (remove_kp) {
decoded_key.splice(0, remove_kp);
}
//console.log('[decoded_key, value]', [decoded_key, value]);
return [decoded_key, value];
}
//console.log('decoded_record', decoded_record);
}
/*
var from_buffer = (buf) => {
console.log('from_buffer\n----------\n');
Binary_Encoding.evented_get_row_buffers(buf, (arr_row) => {
//console.log('arr_row', arr_row);
var decoded_row = decode_model_row(arr_row);
console.log('decoded_row', decoded_row);
// However, may need to put together the system tables anyway.
// These rows could have values useful for setting some records, but there may be the most basic system model that we need first?
// It may be possible to recreate the core model out of the data we have been provided.
// However, we may not really want to recreate the core from a buffer.
// The core would be used to properly process other data that comes in.
// May want to ignore the core items...
// May be easiest to add the rest of the tables once the core is in place.
// The tables table, beyond the first three rows so far, contains non-system information.
})
}
*/
var decode_model_rows = (model_rows, remove_kp) => {
var res = [];
//console.log('model_rows', model_rows);
//console.log('model_rows.length', model_rows.length);
// if there is only one model row....? Or one buffer.
each(model_rows, (model_row) => {
//console.log('model_row', model_row);
// Incrementors look OK so far.
// Let's see how records (keys and values), as well as index records (keys and values) decode with the multi-decoder.
//console.log('pre decode');
let decoded = decode_model_row(model_row, remove_kp);
//console.log('decoded', decoded);
if (decoded) res.push(decoded);
//throw 'stop';
//console.log('post decode');
});
return res;
}
// Need to express more about key prefix handling.
// Convert array records to kp notation
// Add a given key prefix
//
// Need the right language to talk about the different arrangements of row information.
// DB Rows Encoding (records encoding)
// Array of KV buffer pairs
// Array of buffers (encoded records / rows)
// Single buffer
// DB Row Encoding (record encoding)
// buffer pair
// kv buffer
// k buffer
// Array Record
// db row (full)
// handle incrementors, table records, and indexes
// table row (without the table kp, but there needs to be some way to identify the table)
// index rows as arrays
// When encoding, may need to set the number of kps to encode, or whether or not to encode them.
// Making longer and clearer function names with the above vocabulary would be helpful/
// Encode = JS => Binary, Decode = Binary => JS.
// More clearly named functionality will help here.
// makes clear its a single buffer
// encode_rows_including_kps_to_buffer
//
// Determines how many kps there are based on whether the first kp is odd or even
// encode_kv_pair_to_kv_buffer_pair;
let encode_kv_pair_to_kv_buffer_pair = function (arr_pair, key_prefix) {
// If this is an incrementor though...
// May be best to throw an error because we don't use this type of encoding for incrementors
// don't allow key prefix of 0?
// Seem to have a problem when we get the incrementors as records, and then encode them.
//console.log('arr_pair, key_prefix', arr_pair, key_prefix);
// can't have the first prefix as 0 and use this.
let a = arguments;
if (a[1] === 0) {
throw 'Can not use encode_kv_pair_to_kv_buffer_pair to encode incrementor rows';
}
//console.log('a.length', a.length);
//console.log('a', a);
//console.log('arguments.length', arguments.length);
var arr_xas2_prefix_numbers = [];
if (a.length >= 2) {
for (var c = 1; c < a.length; c++) {
//console.log('c', c);
//console.log('a[c]', a[c])
if (is_defined(a[c])) arr_xas2_prefix_numbers.push(a[c]);
}
}
var prefix_buffers = [];
each(arr_xas2_prefix_numbers, (prefix_number) => {
prefix_buffers.push(xas2(prefix_number).buffer);
});
var res_key_0 = Binary_Encoding.encode_to_buffer(arr_pair[0]);
prefix_buffers.push(res_key_0);
//console.log('prefix_buffers', prefix_buffers);
var res_key = Buffer.concat(prefix_buffers);
// but for incrementors, it just encodes the value as xas2.
var res_val = Binary_Encoding.encode_to_buffer(arr_pair[1]);
var res = [res_key, res_val];
return res;
}
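// Illustrative sketch (comment only, example values made up): encoding a plain JS
// key/value pair for a table whose key prefix is 2. The prefix must not be 0 -
// incrementor rows are rejected by the guard above.
//
//   let kv_buf_pair = encode_kv_pair_to_kv_buffer_pair([['gbp'], ['Pound Sterling']], 2);
//   // kv_buf_pair[0] = xas2(2) followed by Binary_Encoding.encode_to_buffer(['gbp'])
//   // kv_buf_pair[1] = Binary_Encoding.encode_to_buffer(['Pound Sterling'])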
// Reading of incrementor records should always read them as XAS2.
// This seems like an odd place for a bug, or an inconsistency with the older codebase.
// Need to get this fully working.
// encode_arr_rows_to_buf
let encode_row_including_kps_to_buffer = row => {
// row even or odd.
let row_kp = row[0][0];
let res;
//console.log('row_kp', row_kp);
if (row_kp % 2 === 0) {
// even (including 0)
// if it's 0, it's just got one kp field to start.
// let row.unshift(row_kp);
if (row_kp === 0) {
// but for incrementors, it just encodes the value as xas2.
//console.log('row[0]', row[0]);
row_kp = row[0].shift();
//console.log('row_kp', row_kp);
let incrementor_id = row[0].shift();
//console.log('row_kp', row_kp);
//console.log('incrementor_id', incrementor_id);
//throw 'stop';
// Nope, it should encode the key, and just have an xas2 as the value.
// encode incrementor key
// Possibly the incrementor keys have been put into the DB wrong somehow.
//let enc_key = encode_incrementor_key(row[0], row_kp, incrementor_id);
//let enc_val = xas2(row[1][0]).buffer;
//console.log('enc_key', enc_key);
//console.log('enc_val', enc_val);
let kv_buffer_pair = encode_kv_pair_to_kv_buffer_pair(row, row_kp, incrementor_id);
//console.log('row', row);
//console.log('1) kv_buffer_pair', kv_buffer_pair);
//throw 'stop';
let row_enc = encode_kv_buffer_pair(kv_buffer_pair);
//console.log('row_enc', row_enc);
//throw 'stop';
return row_enc;
} else {
row_kp = row[0].shift();
let kv_buffer_pair = encode_kv_pair_to_kv_buffer_pair(row, row_kp);
//console.log('2) kv_buffer_pair', kv_buffer_pair);
let row_enc = encode_kv_buffer_pair(kv_buffer_pair);
//console.log('row_enc', row_enc);
return row_enc;
}
} else {
// odd, so it's an index row with 2 kps
//console.log('row', row);
//console.trace();
row_kp = row[0].shift();
let idx_id = row[0].shift();
let kv_buffer_pair = encode_kv_pair_to_kv_buffer_pair(row, row_kp, idx_id);
let row_enc = encode_kv_buffer_pair(kv_buffer_pair);
//console.log('row_enc', row_enc);
//throw 'stop';
return row_enc;
// It's an index, has a value too,
// has 2 kps.
//throw 'NYI';
}
}
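// Illustrative row shapes handled above (comment only, example values made up).
// Note that the kp (and the incrementor / index id) gets shifted off the key array,
// so the row passed in is mutated.
//
//   // table record: even kp, then key fields, then value fields
//   encode_row_including_kps_to_buffer([[2, 5], ['currencies', [9, 10, 11, 12]]]);
//   // index record: odd kp (table kp + 1), index id, index key fields, value fields
//   encode_row_including_kps_to_buffer([[3, 0, 'currencies'], [5]]);
//   // incrementor row: kp 0 then the incrementor id - though note that this path
//   // currently routes through encode_kv_pair_to_kv_buffer_pair, which rejects a 0 prefix.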
let encode_rows_including_kps_to_buffer = rows => {
let res = [];
each(rows, row => res.push(encode_row_including_kps_to_buffer(row)));
// flatten the array....
return Buffer.concat(res);
}
var buffer_to_row_buffer_pairs = (buf_encoded) => {
// read xas2, see the length of the row
let pos = 0,
length, l = buf_encoded.length;
let old_pos;
let done = false;
let res = [];
let arr_row;
let buf_key, buf_value;
//console.log('buf_encoded', buf_encoded);
while (!done) {
[length, pos] = xas2.read(buf_encoded, pos);
buf_key = Buffer.alloc(length);
buf_encoded.copy(buf_key, 0, pos, pos + length);
pos = pos + length;
[length, pos] = xas2.read(buf_encoded, pos);
buf_value = Buffer.alloc(length);
buf_encoded.copy(buf_value, 0, pos, pos + length);
pos = pos + length;
arr_row = [buf_key, buf_value];
//console.log('arr_row', arr_row);
//cb_row(arr_row);
res.push(arr_row);
if (pos >= l) {
done = true;
}
}
//var res = [buf_key, buf_value];
return res;
}
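// Illustrative round trip (comment only): the framing is xas2(key length), key,
// xas2(value length), value - so a buffer produced by encode_rows_including_kps_to_buffer
// can be split back into its [key_buffer, value_buffer] pairs here:
//
//   let buf = encode_rows_including_kps_to_buffer(rows);
//   let pairs = buffer_to_row_buffer_pairs(buf);
//   // pairs[i] = [key_buffer, value_buffer] for each encoded row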
// Get this working, then sync the database
// Will have a database that stays up-to-date locally with the data gathered.
// Then will do analysis and verification of the db.
// encode_kv_buffer_pair
// encode_kv_buffer_pairs
// This function should probably be renamed to something very specific.
var encode_kv_buffer_pair = (model_row) => {
//console.log('encode_kv_buffer_pair model_row', model_row);
// Option about encoding as key prefixes, using the db style of doing it.
// Key prefix style just puts in XAS2 values,
// Need to expand this so that it handles model rows in array format?
if (model_row instanceof Buffer) {
var arr_res = [xas2(model_row.length).buffer, model_row, xas2(0).buffer];
} else {
// look at the types of each of them.
// different if it's a buffer or an array.
// The array itself will need to be encoded.
// In this case, encode it to
//console.log('model_row[1]', model_row[1]); // The values
            if (model_row[1]) {
                // (An incrementor row would have model_row[0][0] === 0, but its key and value
                // buffers get the same length-prefixed framing as any other pair here.)
                var arr_res = [xas2(model_row[0].length).buffer, model_row[0], xas2(model_row[1].length).buffer, model_row[1]];
            } else {
                // Value is null / no value set, all index rows are like this.
                var arr_res = [xas2(model_row[0].length).buffer, model_row[0], xas2(0).buffer];
            }
}
//console.log('arr_res', arr_res);
return Buffer.concat(arr_res);
}
var encode_kv_buffer_pairs = (model_rows) => {
var res = [];
each(model_rows, (model_row) => {
res.push(encode_kv_buffer_pair(model_row));
});
return Buffer.concat(res);
}
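// For reference, the byte layout produced by encode_kv_buffer_pair:
//
//   [ xas2(key.length) | key bytes | xas2(value.length) | value bytes ]
//
// and for a key-only row (no value buffer, e.g. an index record given as a single buffer):
//
//   [ xas2(key.length) | key bytes | xas2(0) ]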
var get_arr_rows_as_buffer = (arr_rows) => {
// for every row, encode it using the Binary_Encoding.
// They may not have the table key prefix?
}
// Encode rows, but with the key prefix already there as the first 1 or 2 items
// Indexes (odd kp) have got 2 key prefixes. The second is the index number.
var encode_arr_rows_to_buf = (arr_rows, key_prefix) => {
var res = [];
each(arr_rows, (row) => {
        // Uses the local encode_kv_pair_to_kv_buffer_pair plus encode_kv_buffer_pair
        // (exported below as encode_model_row).
        res.push(encode_kv_buffer_pair(encode_kv_pair_to_kv_buffer_pair(row, key_prefix)));
});
return Buffer.concat(res);
}
let obs_decode = (obs) => {
//console.trace();
//throw 'NYI';
let res = new Evented_Class();
obs.on('next', data => {
//console.log('data', data);
// decode_buffer - if we give it an array structure containing buffers, then decode each within the buffer?
// or there must have been some minimal decoding for it to come to the client.
let decoded = Binary_Encoding.decode_buffer(data);
//console.log('**3 decoded', decoded);
res.raise('next', decoded);
});
obs.on('error', err => res.raise('error', err));
obs.on('complete', () => res.raise('complete'));
return res;
}
let kp_to_range = buf_kp => {
let buf_0 = Buffer.alloc(1);
buf_0.writeUInt8(0, 0);
let buf_1 = Buffer.alloc(1);
buf_1.writeUInt8(255, 0);
// and another 0 byte...?
return [Buffer.concat([buf_kp, buf_0]), Buffer.concat([buf_kp, buf_1])];
}
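// Illustrative sketch (comment only): building the bounds for a range scan over every key
// under a table's key prefix, e.g. the 'tables' table at kp 2:
//
//   let [gte, lte] = kp_to_range(xas2(2).buffer);
//   // gte = <kp bytes, 0x00>, lte = <kp bytes, 0xff>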
// Decoding observable data?
let Database_Encoding = {
'encode_model_row': encode_kv_buffer_pair,
'encode_model_rows': encode_kv_buffer_pairs,
'encode_kv_buffer_pair': encode_kv_buffer_pair,
'encode_kv_buffer_pairs': encode_kv_buffer_pairs,
'encode_arr_rows_to_buf': encode_arr_rows_to_buf,
'encode_kv_pair_to_kv_buffer_pair': encode_kv_pair_to_kv_buffer_pair,
'encode_pair_to_buffers': encode_kv_pair_to_kv_buffer_pair,
'encode_index_key': encode_index_key,
'encode_row_including_kps_to_buffer': encode_row_including_kps_to_buffer,
'encode_rows_including_kps_to_buffer': encode_rows_including_kps_to_buffer,
'decode_model_rows': decode_model_rows,
'decode_model_row': decode_model_row,
'encode_key': encode_key,
'decode_key': decode_key,
'decode_keys': decode_keys,
'buffer_to_buffer_pairs': buffer_to_row_buffer_pairs,
'buffer_to_row_buffer_pairs': buffer_to_row_buffer_pairs,
'select_indexes_from_model_row': select_indexes_from_model_row,
'select_indexes_buffer_from_kv_pair_buffer': select_indexes_buffer_from_kv_pair_buffer,
'key_length': key_length,
'key_value_at': key_value_at,
'obs_decode': obs_decode,
'kp_to_range': kp_to_range,
'encode': {
'key': encode_key,
'encode_kv_buf_pair': encode_kv_buffer_pair,
'encode_kv_buf_pairs': encode_kv_buffer_pairs,
'arr_rows': encode_arr_rows_to_buf
},
'decode': {
'key': decode_key,
'keys': decode_keys,
'model_row': decode_model_row,
'model_rows': decode_model_rows
}
}
module.exports = Database_Encoding;<file_sep>/buffer-backed/key.js
const Binary_Encoding = require('binary-encoding');
const xas2 = require('xas2');
const database_encoding = require('../encoding');
//const NONE = 0;
const RECORD = 200;
const KEY = 201;
const VALUE = 202;
class Key {
constructor() {
let a = arguments,
l = a.length;
if (l === 1) {
if (a[0] instanceof Buffer) {
this._buffer = a[0];
} else {
// Would be nice to take an array, and treat it as an index key if it starts with an odd number
// if it's an array, it's the values themselves
if (Array.isArray(a[0])) {
// then construct the buffer out of the values we have.
// encode key
//let encoded = database_encoding.encode_key(a[0]);
//console.log('encoded', encoded);
this._buffer = database_encoding.encode_key(a[0]);
} else {
throw 'NYI';
}
}
}
}
get key() {
return this;
}
get buffer() {
return this._buffer;
}
get decoded() {
//console.log('this._buffer.length', this._buffer.length);
if (this._buffer.length > 0) {
return database_encoding.decode_key(this._buffer);
} else {
return undefined;
}
}
get decoded_no_kp() {
let res = this.decoded;
res.shift();
return res;
}
// number of items here.
get length() {
//
return database_encoding.key_length(this._buffer);
}
get_value_at(idx) {
// Should maybe handle index keys too.
// this._pos_value_beginning
//console.log('KEY get_value_at', idx);
// But it's maybe an index record key. Can it handle getting the value like this?
return database_encoding.key_value_at(this._buffer, idx);
}
get buffer_xas2_prefix() {
return new xas2(KEY).buffer;
}
validate() {
try {
let d = this.decoded;
} catch (err) {
return false;
}
return true;
}
get kp() {
//console.log('xas2.read(this._buffer)', xas2.read(this._buffer));
return xas2.read(this._buffer);
}
get table_kp() {
let kp = this.kp;
if (kp % 2 === 0) {
return kp;
} else {
return kp - 1;
}
}
get table_id() {
return (this.table_kp - 2) / 2;
}
// then need to be able to get decoded value
}
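// Illustrative usage (comment only, values made up):
//
//   let k = new Key([2, 0]);          // decoded values, table kp first
//   k.buffer;                          // the encoded key buffer, as stored in the DB
//   k.decoded;                         // presumably [2, 0] again
//   k.decoded_no_kp;                   // the same with the leading kp removed
//   let k2 = new Key(buf_from_db);     // or wrap an existing key buffer
//
// The kp / table_kp / table_id getters expose the key prefix arithmetic
// (table kp = 2 * table id + 2; an odd kp belongs to an index of table kp - 1).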
Key.range = (buf_prefix) => {
var buf_0 = Buffer.alloc(1);
buf_0.writeUInt8(0, 0);
var buf_1 = Buffer.alloc(1);
buf_1.writeUInt8(255, 0);
// and another 0 byte...?
var buf_l = Buffer.concat([buf_prefix, buf_0]);
var buf_u = Buffer.concat([buf_prefix, buf_1]);
let res = [new Key(buf_l), new Key(buf_u)];
//console.log('res', res);
return res;
}
Key.buffer_range = (buf_prefix) => {
let kr = Key.range(buf_prefix);
return [kr[0].buffer, kr[1].buffer];
}
module.exports = Key;<file_sep>/queries/records_in_range.js
var Query = require('../query');
var xas2 = require('xas2');
var Binary_Encoding = require('binary-encoding');
var encode_item = Binary_Encoding.flexi_encode_item;
const i_query_type = 11;
class Records_In_Range_Query extends Query {
    constructor(id, page_size = 0, l_key, u_key) {
        // A derived class constructor must call super() before touching `this`.
        super();
        this.id = id;
// The lkey is its own binary blob.
// Need to be able to encode a buffer into Binary_Encoding
this.l_key = l_key;
this.u_key = u_key;
this.i_query_type = i_query_type;
}
to_buffer() {
        var res = Buffer.concat([xas2(this.id).buffer, xas2(this.i_query_type).buffer, xas2(this.page_size).buffer, encode_item(this.l_key), encode_item(this.u_key)]);
return res;
}
}
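// Illustrative usage (comment only): requesting every record in a key range, unpaged.
// l_key / u_key are assumed to be key buffers such as those produced by Key.buffer_range
// or kp_to_range in the encoding module (neither is required in this file).
//
//   let [l_key, u_key] = Key.buffer_range(xas2(2).buffer);   // hypothetical helper usage
//   let q = new Records_In_Range_Query(1, 0, l_key, u_key);
//   let buf = q.to_buffer();   // xas2 id, xas2 query type (11), xas2 page size, then the two flexi-encoded keys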
module.exports = Records_In_Range_Query;<file_sep>/buffer-backed/record-list.js
let Binary_Encoding = require('binary-encoding');
let database_encoding = require('../encoding');
let xas2 = require('xas2');
let Record = require('./record');
let lang = require('lang-mini');
let each = lang.each;
let get_a_sig = lang.get_a_sig;
// Could have option of encoding this as its own item type.
// However, by default will encode itself as an array of keys.
// It looks like a specific encoding item type for 'key' is necessary
// Would be a new type for Binary_Encoding. Wondering about plugins. Hardcoding is likely faster.
// Would be a somewhat significant change on the server. The code would become simpler. The protocol slightly longer, but not by much (1 byte).
// Would better enable keys, values and records to be constructed / deconstructed on the server.
// Its own encoding type may be useful, but would need an extended Binary_Encoding.
// Seems OK to say this is an array?
// Though decoding it as a Key_List makes the most sense.
const XAS2 = 0;
const STRING = 4;
const BUFFER = 9;
const ARRAY = 10;
// Could make it so that this can support index records, which are just indexes by themselves.
// Just stick a 0 on the end for the length of the value
// This will be able to accept Record objects, but get rid of them once they have been loaded into the Buffer.
var encode_kv_buffer_pair = (model_row) => {
let arr_res;
if (model_row instanceof Buffer) {
arr_res = [xas2(model_row.length).buffer, model_row, xas2(0).buffer];
} else {
if (model_row[1]) {
arr_res = [xas2(model_row[0].length).buffer, model_row[0], xas2(model_row[1].length).buffer, model_row[1]];
} else {
arr_res = [xas2(model_row[0].length).buffer, model_row[0], xas2(0).buffer];
}
}
return Buffer.concat(arr_res);
}
var encode_kv_buffer_pairs = (model_rows) => {
var res = [];
each(model_rows, (model_row) => {
res.push(encode_kv_buffer_pair(model_row));
});
return Buffer.concat(res);
}
// This will handle both encoding and decoding of records. Will be a useful and reliable way to store and pass around a bunch of records.
class Record_List {
constructor() {
let a = arguments,
l = a.length,
sig = get_a_sig(a);
// if we were given an array
// its an array of records
// each of them could be encoded as a single buffer.
// Need some flexibility in the data that can be supplied, then supplies the data in exactly the form needed.
//console.log('Record_List sig', sig);
// The whole thing encoded within a buffer?
// Can't be decoded as a normal array, so no.
// Seems like the whole things needs to be wrapped in a Buffer.
// Each record in the list to get encoded as a buffer.
if (sig === '[B]') {
this._buffer = a[0];
} else if (sig === '[a]') {
// Will be an array of buffers or arr records
let arr_bufs = [];
let arr_records = a[0],
l = arr_records.length;
//console.log('l', l);
for (let c = 0; c < l; c++) {
let item = arr_records[c];
if (item instanceof Buffer) {
// May be best to ensure encoding, but just put it for the moment.
arr_bufs.push(item);
} else {
if (item.buffer) {
arr_bufs.push(item.buffer);
} else {
//console.log('item', item);
//console.log('item.length', item.length);
// Is it an array?
// Is it length 2?
if (item.length === 2 && (item[0] instanceof Buffer) && (item[1] instanceof Buffer)) {
//console.log('buffer pair');
// Are they both buffers?
let enc = encode_kv_buffer_pair(item);
//console.log('1) enc', enc);
arr_bufs.push(enc);
//
} else {
if (item.length === 2 && (Array.isArray(item[0])) && (Array.isArray(item[1]))) {
// Are they both buffers?
//console.log('array pair');
//console.log('item', item);
let enc_key = Binary_Encoding.encode_to_buffer_use_kps(item[0], 1);
let enc_value = Binary_Encoding.encode_to_buffer(item[1]);
let enc = Buffer.concat([xas2(enc_key.length).buffer, enc_key, xas2(enc_value.length).buffer, enc_value]);
//console.log('enc', enc);
arr_bufs.push(enc);
// then the record is just the two of them joined together.
                            // maybe not the ideal way. Means the key needs to be read or skipped to read the value.
// and we need to know how many of the values are the key?
// Or better to encode them both as buffers, key and value, within an array.
// Encoding the key and the value, with the length, is a useful way of doing it.
// No, we have the length of each of them first.
// Length key, key, length value, value
//arr_bufs.push(encode_kv_buffer_pair(item));
//
} else {
if (Array.isArray(item)) {
//console.log('single array');
// encode the key
//let enc = database_encoding.encode_key(item);
// Meaning its an index record.
// The 0 on the end is the length of the value.
// Have the length first though
let enc_inner = Binary_Encoding.encode_to_buffer_use_kps(item, 2);
//console.log('enc_inner', enc_inner);
// An empty buffer... does that work
// an empty buffer?
// Just 0 at the end to signify 0 length...
// Should make this return an empty buffer?
let enc = Buffer.concat([xas2(enc_inner.length).buffer, enc_inner, xas2(0).buffer]);
//console.log('enc', enc);
arr_bufs.push(enc);
} else {
console.log('item', item);
console.trace();
throw 'NYI';
}
}
}
// If so, encode it as key and value.
}
}
}
//console.log('arr_bufs', arr_bufs);
// Looks like this means changing / simplifying the way that rows get encoded.
// Each row as its own encoded buffer.
// Maybe that's not the right way.
            // Not sure if we want a buffer around the whole thing.
// Maybe change encoding, but change decoding first.
// Better if this OO system encodes / decodes in the old way?
// However, that may mean that standard message decode is not possible.
// Possibly, moving to widescale change of the protocol and calling it would make sense here.
// Encode, decode.
//let buf_inner = Binary_Encoding.encode_to_buffer(arr_bufs);
let buf_inner = Buffer.concat(arr_bufs);
//let buf_inner = Binary_Encoding.encode_to_buffer([Buffer.concat(arr_bufs)])
this._buffer = buf_inner;
}
}
get_nth(n) {
// needs to skip through length item encoded buffer.
let pos = 0;
let item_length;
let c = 0;
let b_found;
while (c < n + 1) {
[item_length, pos] = xas2.read(this._buffer, pos);
//console.log('item_length, pos', item_length, pos);
if (c === n) {
b_found = Buffer.alloc(item_length);
this._buffer.copy(b_found, 0, pos, pos + item_length);
}
pos = pos + item_length;
c++;
}
//console.log('b_found', b_found);
return b_found;
}
get length() {
//
// let enc = Buffer.concat([xas2(enc_key.length).buffer, enc_key, xas2(enc_value.length).buffer, enc_value]);
let pos = 0;
let item_length;
let c = 0;
let b_found;
let l = this._buffer.length;
while (pos < l) {
[item_length, pos] = xas2.read(this._buffer, pos);
//console.log('item_length, pos', item_length, pos);
/*
if (c === n) {
b_found = Buffer.alloc(item_length);
this._buffer.copy(b_found, 0, pos, pos + item_length);
}
*/
pos = pos + item_length;
c++;
}
//console.log('c', c);
//console.log('c / 2', c / 2);
//throw 'stop';
return c / 2;
//
//throw 'NYI';
}
// just have to array
// so will just be an array of b_records
get arr() {
let pos = 0;
let complete = false;
let l = this._buffer.length;
let type_id, buf_l_key, buf_l_value;
let b = this._buffer;
//console.log('l', l);
//console.log('this._buffer', this._buffer);
//throw 'stop';
let res = [];
while (pos < l) {
//[type_]
//console.log('2) pos', pos);
//[type_id, pos] = xas2.read(b, pos);
[buf_l_key, pos] = xas2.read(b, pos);
// then can copy alloc and copy to the new buf
let key_buf = Buffer.alloc(buf_l_key);
b.copy(key_buf, 0, pos, pos + buf_l_key);
pos = pos + buf_l_key;
[buf_l_value, pos] = xas2.read(b, pos);
// then can copy alloc and copy to the new buf
let key_value = Buffer.alloc(buf_l_value);
b.copy(key_value, 0, pos, pos + buf_l_value);
pos = pos + buf_l_value;
//console.log('key_buf', key_buf);
//console.log('key_value', key_value);
//console.log('* item_buf', item_buf);
// Could yield a proper key instead.
let item = new Record([key_buf, key_value]);
//console.log('item', item);
//throw 'stop';
res.push(item);
//console.log('buf_l', buf_l);
//console.log('3) pos', pos);
}
return res;
}
// Need to iterate through the items.
// Iterate through buffer backed records.
* iterator() {
let pos = 0;
let complete = false;
let l = this._buffer.length;
let type_id, buf_l_key, buf_l_value;
let b = this._buffer;
//console.log('l', l);
//console.log('this._buffer', this._buffer);
//throw 'stop';
while (pos < l) {
//[type_]
//console.log('2) pos', pos);
//[type_id, pos] = xas2.read(b, pos);
[buf_l_key, pos] = xas2.read(b, pos);
// then can copy alloc and copy to the new buf
let key_buf = Buffer.alloc(buf_l_key);
b.copy(key_buf, 0, pos, pos + buf_l_key);
pos = pos + buf_l_key;
[buf_l_value, pos] = xas2.read(b, pos);
// then can copy alloc and copy to the new buf
let key_value = Buffer.alloc(buf_l_value);
b.copy(key_value, 0, pos, pos + buf_l_value);
pos = pos + buf_l_value;
//console.log('key_buf', key_buf);
//console.log('key_value', key_value);
//console.log('* item_buf', item_buf);
// Could yield a proper key instead.
let item = new Record([key_buf, key_value]);
//console.log('item', item);
//throw 'stop';
yield item;
//console.log('buf_l', buf_l);
//console.log('3) pos', pos);
}
//console.log('while complete');
}
[Symbol.iterator]() {
return this.iterator();
}
// each function...
each(handler) {
const iterator = this.iterator();
let value, done;
({ value, done } = iterator.next());
let i = 0;
while (!done) {
//console.log('v', v);
handler(value, i++);
({ value, done } = iterator.next());
}
}
get buffer() {
return this._buffer;
}
get decoded() {
let res = [];
//console.log('this._buffer', this._buffer);
// Seems like there are different ways this gets encoded.
// Maybe there is a more old-school way.
// Now I am optimising more for programming conciseness and simplicity, and making use of OO classes too.
// Not as keen on the older way that model rows have been encoded.
// Want to go for really simple encoding and decoding calls.
let kvps = Binary_Encoding.split_length_item_encoded_buffer_to_kv(this._buffer);
//console.log('kvps', kvps);
//throw 'stop';
//let buf_inner = Binary_Encoding.decode(this._buffer);
// Where each model row has its length given, but is not coded as a Buffer itself.
// A bit more encoding data in the protocol makes interpretation easier.
// However, it seems like being able to decode the old-style buffers could be useful.
// Maybe we need a widespread change throughout the system in how records (and indexes) get put into the DB.
// Or change the way that this does the encoding?
// Moving to a more standard encoding system makes sense however.
//console.log('buf_inner', buf_inner);
//console.log('buf_inner.length', buf_inner.length);
// then each of them is an encoded record.
// could use a buffer backed record.
// That would handle both key only records, as well as full records.
// then get the key value pairs out of it.
//let arr_buf_items = Binary_Encoding.split_length_item_encoded_buffer(buf_inner);
// Doesn't look right here.
// Need to be careful and specific in how records get sent to or from the server.
each(kvps, kvp => {
// split it as a key value pair.
//console.log('buf', buf);
// split length en
//let kv = database_encoding.decode_model_row(buf);
//let kv = Binary_Encoding.decode()
//let kv = Binary_Encoding.split_length_item_encoded_buffer(buf);
//console.log('kv', kv);
let mr = database_encoding.decode_model_row(kvp);
//console.log('mr', mr);
res.push(mr);
});
//console.log('arr_buf_items', arr_buf_items);
//console.log('arr_buf_items.length', arr_buf_items.length);
// then decode these...
// Not encoded as model rows though.
//let mrs = database_encoding.decode_model_rows(buf_inner);
//console.log('mrs', mrs);
// split them as key value pairs.
//console.log('res', res);
//throw 'stop';
// then
//
return res;
}
}
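// Illustrative usage (comment only, values made up):
//
//   let rl = new Record_List([
//       [[2, 0], ['tables', [2, 3, 4, 1]]],   // a [key array, value array] pair (key includes the kp)
//       [buf_key, buf_value]                   // or a [key_buffer, value_buffer] pair
//   ]);
//   rl.length;                                 // number of records held in the backing buffer
//   for (let record of rl) { /* buffer-backed Record objects */ }
//   rl.decoded;                                // the records decoded as model rows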
module.exports = Record_List;<file_sep>/buffer-backed/index-record-key.js
const Binary_Encoding = require('binary-encoding');
const xas2 = require('xas2');
// Maybe phase this out.
class Index_Record_Key {
constructor(spec) {
// Will take a buffer, or the index record key in array form.
//let a = arguments;
if (spec instanceof Buffer) {
this._buffer = spec;
} else if (Array.isArray(spec)) {
// encode idx key arr to buffer
this._buffer = Binary_Encoding.encode_to_buffer_use_kps(spec, 2);
}
}
get buffer() {
return this._buffer;
}
validate() {
try {
let d = this.decoded;
} catch (err) {
return false;
}
return true;
}
get decoded() {
return Binary_Encoding.decode_buffer(this._buffer, 2);
}
get kp() {
//console.log('xas2.read(this._buffer)', xas2.read(this._buffer));
return xas2.read(this._buffer);
}
get table_kp() {
return this.kp - 1;
}
get table_id() {
return (this.table_kp - 2) / 2;
}
get index_id() {
return xas2.read_nth(this._buffer, 0, 2);
}
get fields() {
// decoded
let pos = xas2.skip_n(this._buffer, 0, 2);
return Binary_Encoding.decode_buffer(this._buffer, 0, pos);
}
}
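// Illustrative usage (comment only): an index record key for index 0 of the table at kp 2
// (so the index kp is 3), indexing a name value:
//
//   let irk = new Index_Record_Key([3, 0, 'tables']);
//   irk.buffer;      // xas2(3), xas2(0), then the encoded field(s)
//   irk.index_id;    // intended to read the second kp, i.e. 0
//   irk.table_kp;    // intended as kp - 1 === 2
//   irk.table_id;    // intended as (table_kp - 2) / 2 === 0
//   irk.fields;      // the decoded index fields, e.g. ['tables']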
module.exports = Index_Record_Key;<file_sep>/database.js
// The Database abstraction.
// Much will happen within the Model. It will have the means to produce records that fit in within the DB structure, and be useful for normalization.
//
// This will help to get the theory of what gets added to the database more testable and explainable.
// Before a db is set up, it will be possible to tell how many records get added to LevelDB.
// May be worth making this into another module, that can be accessed by clients as well.
// Want it to be possible to use these models server-side as setup, but they could be useful on client-side to define databases.
// Could possibly upload the model to the server, or it could send the lower level setup / ensure instructions to the server.
// There is a bit more to do to get a database definition that works in theory, with clearly defined spaces for records to go.
// It would be nice if it generated getting started / help files.
// Next stage is to make more tables that are like plugins.
// Ideally want to define some data and record types.
// 07/08/2017 - May be best to put the indexes into the database in terms of how they are defined.
// Could require different encoding?
// Also will reconstruct model database from system rows.
// Recreating the Table objects will be useful.
// Will enable further data to be loaded into that model database, which would then assign it the key-value encoding to go into the database.
// Seems like a good few more days of programming.
// Want there to be a fairly seamless process where we have records, know what table they belong to, the data transformation pipeline is specified, then we put that data into the
// database in the right kind of formatting.
// Trades that get put into the db will be sequential, if they have trade ids.
// Also need to consider latest price snapshots when they are available.
// Different exchanges will provide data in different ways, some better than others.
// Decoding the tables and fields from db records seems useful.
// Would then be able to normalize and index data as it comes in.
// Then it would be easy to connect to an existing database, load the system model, then load some other tables (like currencies) into that model,
// then trading data records can be made within that model, and their keys and values sent to the live db. The live db would operate with its own model to create the proper index records for the trade / price data.
/*
1. connect to an existing database
2. load the system model
3. then load some other tables (like currencies and markets) into system model
4. trading data records can be made within model
5. their keys and values sent to the live db (encoded as binary)
6. live db would operate with its own model (system model + lookup tables) to create the proper index records for the trade / price data
Seems like quite a lot more work to do, but this way helps to guarantee consistency, and once it's made, ease of use.
Should be fairly performant, with some lookup data held within JavaScript objects.
Once this is done:
1. Capture live prices of currencies
2. Make an OO class to load up historical data and to provide updates
Connected to the DB, but keeps the values in-memory.
(More OO data structures that keep a bunch of time-value sequences in memory)
Once we have the classes that connect up the DB, sync with it (loading data to memory) and then have that data simply arranged in RAM for analysis, there will be a lot more potential.
Mathematical analysis
GUI programming displaying graphs
GUI programming displaying the contents of the tables
This seems important to push progress on the Data_Grid.
Displaying the last day or whatever of price movements using a Data_Grid along with a Line_Chart would look nice.
Very much worth making some kind of connected_db, connected_table (and connected_record classes later?)
With connected_table, we should be able to input a record with one simple command.
Would handle some normalization through key lookup itself.
Then will be able to accept the large number of records that need to be put in place.
Indexing records by [market, time] and / or [time] and putting those records into buckets as multiple can happen at the same time.
Or have a unique index by time [market, time, pk] or [time, pk], as that would be conceptually similar to buckets apart from using multiple lower level records.
Buckets on one record could be more efficient for getting the set of ids quickly.
Index bucketing seems like a useful technique for some situations, but at present not necessary.
Could be a decent efficiency improvement, could use a bucketed index, and define that as another index type (that does not require unique index keys)
Want there to be simple APIs to use in terms of getting the data, then storing it.
Then retrieving it.
Want an OO system that presents the data in a form that's easy to use for the programmer.
// Having a working CockroachDB would help with loading the data into it, then getting the data out of it.
// could have CockroachDB_Assets_Client
*/
var lang = require('lang-mini');
/*
var each = lang.each;
var get_a_sig = lang.get_a_sig;
var clone = lang.clone;
var tof = lang.tof;
*/
const {each, get_a_sig, clone, tof, Evented_Class} = lang;
var Incrementor = require('./incrementor');
var Table = require('./table');
var Record = require('./record');
const deep_equal = require('deep-equal');
var Binary_Encoding = require('binary-encoding');
var xas2 = require('xas2');
// should have some connected model classes, extending these.
let database_encoding = require('./encoding');
let decode_model_rows = database_encoding.decode_model_rows;
let encode_model_row = database_encoding.encode_model_row;
const deep_diff = require('deep-object-diff').diff;
const Record_List = require('./buffer-backed/record-list');
// However, we should include the native types.
// Don't completely correspond to the encoding number
// could have the type and the value encoded. Eg true, false, positive int 0, positive int 1
// 0 - xas2 number
// 1 - 64 bit BE float
// 2 - unix time in ms t
// 3 - unix time range in ms [t, t]
// 4 - string [xas2(l), str]
// 5 - indexed xas2 number, representing a string
// 6 - bool, 1 byte
// 7 - null. No further data
// 8 - buffer of binary data
const NT_XAS2_NUMBER = 1;
const NT_DATE = 2;
const NT_TIME = 3;
const NT_STRING = 4;
const NT_FLOAT32_LE = 5;
const map_core_table_names = {
'tables': true,
'native types': true,
'table fields': true,
'table indexes': true
}
const add_table_event_listeners = (db, table) => {
// listen for changes where the table has new foreign key fields (that refer elsewhere)
table.on('change', e_change => {
//console.log('add_table_event_listeners change', e_change);
});
}
class Database extends Evented_Class {
// Database could have a name.
// Storing a name as a DB Property would be useful.
// A System Properties table would be of use.
constructor(spec) {
super();
this.__type_name = 'database';
// Such a core part of the Model that we'll do it here.
var map_incrementors = this.map_incrementors = {};
var incrementors = this.incrementors = [];
var tables = this.tables = [];
var map_tables = this.map_tables = {};
let map_tables_by_id = this.map_tables_by_id = {};
// map indexes by fields.
if (typeof spec === 'undefined') {
//throw 'stop';
this.create_db_core_model();
}
if (spec === false) {
} else {
var t_spec = tof(spec);
if (t_spec === 'array') {
// load the db def.
this.load_db_arr_def(spec);
} else {
}
}
}
get arr_table_ids_and_names() {
var tables = this.tables,
l = tables.length;
var res = new Array(l);
each(tables, (table, i) => {
res[i] = [table.id, table.name];
})
return res;
}
get map_table_kps() {
var tables = this.tables,
l = tables.length;
var res = {};
each(tables, (table) => {
res[table.key_prefix] = table;
})
return res;
}
// show tables?
get description() {
var tables = this.tables,
l = tables.length;
var res = [];
each(tables, (table, i) => {
//res[i] = [table.id, table.name];
res.push(table.name + '\n');
res.push('-'.repeat(table.name.length) + '\n\n');
res.push('fields\n');
each(table.fields, (field) => {
//res.push('\t', field.description);
res.push(' ', field.description + '\n');
});
// and the table indexes
res.push('indexes\n');
each(table.indexes, (index) => {
//res.push('\t', field.description);
res.push(' ', index.description + '\n');
});
res.push('\n');
})
return res.join('');
}
get_obj_map(table_name, field_name) {
return this.map_tables[table_name].get_map_lookup(field_name);
}
view_decoded_rows() {
var model_rows = this.get_model_rows();
each(model_rows, (model_row) => {
//console.log('1) model_row', model_row);
console.log('model_row', Database.decode_model_row(model_row));
});
console.log('\n\n\n');
}
load_db_arr_def(arr_def) {
// Core model is important for some things, but it's got in the way of loading.
// May need to be careful to set some table ids etc.
// Definition is a list of tables.
this.create_db_core_model();
// definition supposes core model already exists.
var tables = arr_def;
//var that = this;
each(tables, (table) => {
//var table_name = table[0];
//var table_def = table[1];
//console.log('\n\n\n');
//console.log('table', table);
this.add_table(table);
});
}
load_db_def(def) {
var t_def = tof(def);
if (t_def === 'array') {
            return this.load_db_arr_def(def);
}
}
// Worth having the full database creation here.
// Create the full rows / values of an initial database, and output that without needing to use any DB software.
// Should be able to test an existing model against an existing database.
// Show which keys from the model are there.
// The keys from the model will show the necessary database
create_db_core_model() {
// Maybe core model should not be created before loading.
// Or need to change loading code to avoid messing it up.
//console.log('create_db_core_model');
//console.trace();
this._init = true;
let incrementors = this.incrementors;
let map_incrementors = this.map_incrementors
let tables = this.tables;
let map_tables = this.map_tables;
let map_tables_by_id = this.map_tables_by_id;
//let map_tables_incoming_fks = {};
// target table, source table (with the field)
// want to quickly lookup when a table has got records that refer to it.
// then with any record, we can find the references using the db structure and the key, maybe index lookups on that field.
// [target table id, source table id, source field id]
let inc_incrementor = this.inc_incrementor = new Incrementor('incrementor', 0, 1);
incrementors.push(inc_incrementor);
let inc_table = this.inc_table = this.new_incrementor('table');
each(incrementors, (incrementor) => {
map_incrementors[incrementor.name] = incrementor;
});
// Only creates the model, rather than does anything connected directly with the db.
// Much of the core is created using lower level operations.
// This is because it is a platform that some higher level operations rely on.
// The platform for the higher level commands / oo is not fully in place before the core db has been created.
// Seems like it would need to get the id through using the incrementor.
let tbl_tables = new Table('tables', this);
// Don't use add_table, because it will create the relevant table record and table field records. These tables don't yet exist.
//this.add_table(tbl_tables);
tables.push(tbl_tables);
//inc_table.increment();
map_tables[tbl_tables.name] = tbl_tables;
map_tables_by_id[tbl_tables.id] = tbl_tables;
this.tbl_tables = tbl_tables;
// add event listeners for the tables.
tbl_tables.set_pk('+id');
tbl_tables.add_field('name', -1, NT_STRING);
tbl_tables.add_index([
['name'],
['id']
]);
var tbl_native_types = new Table('native types', this);
tbl_native_types.add_field('+id', -1);
tbl_native_types.add_field('name', -1, NT_STRING);
map_tables[tbl_native_types.name] = tbl_native_types;
map_tables_by_id[tbl_native_types.id] = tbl_native_types;
this.tbl_native_types = tbl_native_types;
tbl_native_types.add_index([
['name'],
['id']
]);
tbl_native_types.add_records([
[[0], ['xas2']],
[[1], ['date']],
[[2], ['string']],
[[3], ['float32le']]
]);
tables.push(tbl_native_types);
//inc_table.increment();
tbl_native_types.pk_incrementor.value = 4;
//map_tables[tbl_native_types.name] = tbl_native_types;
//this.add_table(tbl_native_types);
//this.tbl_native_types = tbl_native_types;
var tbl_fields = new Table('table fields', this);
tables.push(tbl_fields);
//inc_table.increment();
map_tables[tbl_fields.name] = tbl_fields;
map_tables_by_id[tbl_fields.id] = tbl_fields;
// Should not have its own autoincrementing id, apart from
var tbl_table_indexes = this.tbl_indexes = new Table('table indexes', this);
tables.push(tbl_table_indexes);
//inc_table.increment();
map_tables[tbl_table_indexes.name] = tbl_table_indexes;
map_tables_by_id[tbl_table_indexes.id] = tbl_table_indexes;
//
tbl_fields.set_pk(['table_id', 'id']);
tbl_fields.set_fk('table_id', tbl_tables);
tbl_fields.add_field('name', -1, NT_STRING);
this.tbl_fields = tbl_fields;
//this.tbl_fields = tbl_fields;
var add_table_table_record = (table) => {
//console.log('add_table_table_record table.name', table.name);
//console.log('table.inc_foreign_keys.id', table.inc_foreign_keys.id);
//console.log('table.inc_foreign_keys.id',)
// Need more work on binary encoding array items.
// Maybe need more work on binary decoding these embedded arrays.
//console.log('[table.inc_fields.id, table.inc_indexes.id, table.inc_foreign_keys.id]', [table.inc_fields.id, table.inc_indexes.id, table.inc_foreign_keys.id]);
//throw 'stop';
//
if (table.pk_incrementor) {
tbl_tables.add_record([
[table.id],
[table.name, [table.inc_fields.id, table.inc_indexes.id, table.inc_foreign_keys.id, table.pk_incrementor.id]]
]);
} else {
tbl_tables.add_record([
[table.id],
[table.name, [table.inc_fields.id, table.inc_indexes.id, table.inc_foreign_keys.id]]
]);
}
tbl_tables.pk_incrementor.increment();
}
//this.tbl_tables.add_record([[table.id], [table.name, [table.incrementors[0].id, table.incrementors[1].id, table.incrementors[2].id]]]);
add_table_table_record(tbl_tables);
add_table_table_record(tbl_native_types);
add_table_table_record(tbl_fields);
add_table_table_record(tbl_table_indexes);
this._init = false;
// Adding the record to the tables table.
// That should maybe be done later, or after changes to the table object.
this.add_tables_fields_to_fields_table(tbl_tables);
this.add_tables_fields_to_fields_table(tbl_native_types);
this.add_tables_fields_to_fields_table(tbl_fields);
this.add_tables_fields_to_fields_table(tbl_table_indexes);
this.add_tables_indexes_to_indexes_table(tbl_tables);
this.add_tables_indexes_to_indexes_table(tbl_native_types);
this.add_tables_indexes_to_indexes_table(tbl_fields);
this.add_tables_indexes_to_indexes_table(tbl_table_indexes);
// no, the table incrementor
// Seems more of an issue upon loading.
// this seems to make problems in some cases.
// This does cause some problems.
// It's worth making a fix specifically for this.
//inc_table.increment(6); // Space for more system tables.
//tbl_tables.pk_incrementor.increment(6);
}
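    // Key prefix layout produced by the core model above - each table gets
    // kp = 2 * table id + 2, its index records use kp + 1, and incrementors live under kp 0:
    //
    //   kp 0 : incrementors
    //   kp 2 : 'tables'        (table id 0), index kp 3
    //   kp 4 : 'native types'  (table id 1), index kp 5
    //   kp 6 : 'table fields'  (table id 2), index kp 7
    //   kp 8 : 'table indexes' (table id 3), index kp 9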
add_incrementor() {
// Use the incrementor incrementor to get the new incrementor id?
var a = arguments;
a.l = arguments.length;
var sig = get_a_sig(a);
//console.log('add_incrementor sig', sig);
//throw 'stop';
if (sig === '[n,s,n]') {
var id = a[0],
name = a[1],
value = a[2];
var res = new Incrementor(name, id, value);
this.incrementors.push(res);
//console.log('this', this);
this.map_incrementors[name] = res;
return res;
} else {
console.trace();
throw 'Unexpected incrementor signature, ' + sig;
}
}
new_incrementor(name) {
var id = this.inc_incrementor.increment();
var res = new Incrementor(name, id, 0);
this.incrementors.push(res);
//console.log('this', this);
this.map_incrementors[name] = res;
return res;
}
//get tables() {
// return this.record_def.tables;
//}
add_tables_indexes_to_indexes_table(table) {
var tbl_indexes = this.tbl_indexes;
var that = this;
//console.log('table.fields.length', table.fields.length);
//throw 'stop';
each(table.indexes, (index) => {
// Store a bit of info alongside.
// Is it a primary key
// Info on it being a foreign key - what table it refers to.
// This is to do with the fields table's fields. Need to be somewhat careful with this.
var arr_kv_index_record = index.get_kv_record();
var ti_record = tbl_indexes.add_record(arr_kv_index_record);
});
//throw 'stop';
}
add_tables_fields_to_fields_table(table) {
//console.log('add_tables_fields_to_fields_table table.name', table.name);
var tbl_fields = this.tbl_fields;
//console.log('table.fields.length', table.fields.length);
//throw 'stop';
each(table.fields, (field) => {
var arr_kv_field_record = field.get_kv_record();
var tf_record = tbl_fields.add_record(arr_kv_field_record);
field.db_record = tf_record;
});
}
add_table(table_def) {
var a = arguments;
var tables = this.tables,
map_tables = this.map_tables,
map_tables_by_id = this.map_tables_by_id;
var table, name;
var sig = get_a_sig(a);
// , add_fields_and_indexes_table_records = false
// Long complex sig now.
let add_fields_and_indexes_table_records = false;
// Should probably get the table id here from using the incrementor, rather than from within the table constructor.
//console.log('add_table sig', sig);
//console.log('a', a);
//console.log('table_def', table_def);
// the table def maybe does not contain a reference to the database.
// That should be added.
//
// Could check that an added table has its fields set up right.
// A more thorough test procedure could do this, all within the model.
// Create a model with core rows, add a table, then check its autoincremented fields are set up right.
// Maybe they had been right all along, just it had not looked up FK references to find the type of the field.
//
if (sig === '[s,a]') {
name = a[0];
var spec_record_def = a[1];
table = new Table(name, this, spec_record_def);
} else if (table_def instanceof Table) {
table = table_def;
        } else if (sig === '[a]') {
            var a_sig = get_a_sig(a[0]);
            //console.log('a_sig', a_sig);
            //throw 'stop';
            // a_sig is only meaningful inside this branch, so the '[s,a]' check needs to be nested here.
            if (a_sig === '[s,a]') {
                var table_name = a[0][0];
                var table_inner_def = a[0][1];
                table = new Table(table_name, this, table_inner_def);
            }
        } else if (sig === '[s]') {
table = new Table(a[0], this);
} else if (sig === '[s,n]') {
table = new Table(a[0], this, a[1]);
} else if (sig === '[s,n,a]') {
table = new Table(a[0], this, a[1], a[2]);
} else if (sig === '[s,n,a,b]') {
add_fields_and_indexes_table_records = a[3];
table = new Table(a[0], this, a[1], a[2]);
} else {
table = new Table(table_def, this);
}
each(table.fields, field => {
if (field.fk_to_table) {
//console.log('fk field', field);
let to_table_id = field.fk_to_table.id;
// and it goes to the pk of the table
let from_table_id = field.table.id;
let from_field_id = field.id;
console.log('foreign key: [to_table_id, from_table_id, from_field_id]', [to_table_id, from_table_id, from_field_id]);
this.map_tables_incoming_fks = this.map_tables_incoming_fks || {};
this.map_tables_incoming_fks[to_table_id] = this.map_tables_incoming_fks[to_table_id] || {};
this.map_tables_incoming_fks[to_table_id][from_table_id] = from_field_id;
//this.map_tables_incoming_fks[[to_table_id, from_table_id].toString()] = from_field_id;
}
// if it points towards another field, then we want to put it in a map of incoming fk refs
// With some tables, we may want to find every field that refers to it.
});
// Assign it to the db.
// name, db, record_def
//console.log('add_table table.name', table.name);
        // We don't get these change listeners as it's building the table.
        // They would be useful, as we want to notice when the table adds a field with a foreign key.
// At this point we could scan the tables to see which fields have got foreign keys.
// Iterating all fields would help.
//console.log('pre atl');
add_table_event_listeners(this, table);
tables.push(table);
map_tables[table.name] = table;
map_tables_by_id[table.id] = table;
// add_fields_and_indexes_table_records
if (add_fields_and_indexes_table_records || !this._init) {
this.add_tables_fields_to_fields_table(table);
// and assign the field records to the fields while doing this.
// add record to tables table
// Want to encode an array within a record. Should be OK.
var arr_inc_ids = table.own_incrementor_ids;
// ensure record?
this.tbl_tables.add_record([
[table.id],
[table.name, arr_inc_ids]
]);
this.add_tables_indexes_to_indexes_table(table);
}
//console.log('this', this);
this.raise('change', {
'name': 'add_table',
'value': table
});
return table;
}
iterate_all_fields(handler) {
each(this.arr_tables, table => {
each(table.fields, field => {
handler(field);
});
});
}
table_exists(table_name) {
return !!this.map_tables[table_name];
}
ensure_table(table_def) {
var sig = get_a_sig(table_def);
//console.log('Database ensure_table table_def sig', sig);
if (sig === '[s,a]') {
            let name = table_def[0];
if (this.table_exists(name)) {
// nothing to do
return true;
} else {
                var spec_record_def = table_def[1];
return this.add_table(table_def);
}
// check if the table exists.
//var spec_record_def = a[1];
//table = new Table(name, this, spec_record_def);
}
}
each_record(cb_record) {
each(this.tables, (table) => {
//console.log('table', table);
each(table.records, cb_record);
})
}
records_to_buffer() {
var arr_res = [];
this.each_record((record) => {
arr_res.push(record.to_buffer());
})
return Buffer.concat(arr_res);
}
// to_buffer?
records_with_indexes_to_buffer() {
var arr_res = [];
// Why are there empty records?
this.each_record((record) => {
if (!record) {
console.log('record.table.name', record.table.name);
console.trace();
console.log('empty record');
throw 'empty record';
}
arr_res.push(record.to_buffer_with_indexes());
})
return Buffer.concat(arr_res);
}
// to_buffer
// Maybe worth retiring or renaming this.
// Gets an array of encoded rows.
get_arr_model_rows() {
var incrementors = this.incrementors;
var tables = this.tables;
var res = [];
each(incrementors, (incrementor) => {
var incrementor_db_records = incrementor.get_all_db_records();
each(incrementor_db_records, (incrementor_db_record) => {
res.push(incrementor_db_record);
});
});
each(tables, (table) => {
var table_all_db_records = table.get_all_db_records();
each(table_all_db_records, (table_db_record) => {
res.push(table_db_record);
});
});
return res;
}
// Is there a way to get these decoded to start with, rather than getting all db records bin
// Gets them as binary
get_model_rows() {
// Still seems like the tables have been put together wrong.
// incrementor rows...
var incrementors = this.incrementors;
var tables = this.tables;
var res = [];
each(incrementors, (incrementor) => {
var incrementor_db_records = incrementor.get_all_db_records_bin();
each(incrementor_db_records, (incrementor_db_record) => {
res.push(incrementor_db_record);
});
});
        // Tables should be in order. Not sure why it's not.
// Could look into the ordering of tables here.
//console.log('this.table_names', this.table_names);
each(tables, (table) => {
//console.log('get_model_rows table.name', table.name);
var table_all_db_records = table.get_all_db_records_bin();
each(table_all_db_records, (table_db_record) => {
res.push(table_db_record);
});
});
//throw 'stop';
return res;
}
// or records?
get rows() {
var incrementors = this.incrementors;
var tables = this.tables;
var res = [];
// these are definitions, not the table records themselves.
each(incrementors, (incrementor) => res.push(incrementor.record));
// invluding the index records of that table.
// should generate those index records too.
// but the model should have loaded the indexes
each(tables, table => res.push.apply(res, table.b_records));
//
return res;
}
get_table_records_length(table_name) {
var table = this.map_tables[table_name];
return table.records.length;
}
get_table_records(table_name) {
var table = this.map_tables[table_name];
return table.records;
}
get_idx_records_by_record(arr_record) {
let kp = arr_record[0][0];
let table_id = (kp - 2) / 2;
let table = this.map_tables_by_id[table_id];
arr_record[0].shift();
let record = new Record(arr_record, table);
let indexes = record.get_arr_index_records();
return indexes;
}
get_table_kv_field_names(table_name) {
return this.map_tables[table_name].kv_field_names;
}
get non_core_tables() {
var res = [];
each(this.tables, (table) => {
if (!map_core_table_names[table.name]) {
res.push(table);
}
})
return res;
}
get table_names() {
var res = [];
each(this.tables, (table) => {
res.push(table.name);
})
return res;
}
get_model_rows_decoded() {
var model_rows = this.get_model_rows();
return (decode_model_rows(model_rows));
}
get_model_rows_encoded() {
        // Think this is all the model rows though...?
var model_rows = this.get_model_rows();
//console.log('model_rows.length', model_rows.length);
//throw 'stop';
var arr_simple_encoded = [];
each(model_rows, (model_row) => {
// model_rows
//console.log('model_row', model_row);
arr_simple_encoded.push(encode_model_row(model_row));
});
var buf_simple_encoded = Buffer.concat(arr_simple_encoded);
return buf_simple_encoded;
//var res = new Array(model_rows.length);
}
encode_table_model_rows(table_name, arr_rows) {
var key_prefix = this.map_tables[table_name].key_prefix;
var res = [];
each(arr_rows, (row) => {
res.push(encode_model_row(database_encoding.encode_pair_to_buffers(row, key_prefix)));
});
return Buffer.concat(res);
//res.concat();
}
ensure_table_records_no_overwrite(table_name, arr_records) {
var table = this.map_tables[table_name];
// Don't overwrite the keys or the values
// Can't have matching names.
// Enforcing unique constraints while putting records in the normal way should be enough.
return table.ensure_records_no_overwrite(arr_records);
}
diff(other_model) {
let my_model_rows = this.get_model_rows_decoded();
let their_model_rows = other_model.get_model_rows_decoded();
//console.log('my_model_rows', my_model_rows);
//console.log('their_model_rows', their_model_rows);
let res = Database.diff_model_rows(my_model_rows, their_model_rows);
res.count = res.changed.length + res.added.length + res.deleted.length;
//res.orig = this;
//res.other = other_model;
return res;
}
get non_core_table_names() {
return this.table_names.filter(name => !map_core_table_names[name]);
}
get map_tables_fk_refs() {
let res = {};
each(this.tables, table => {
let outward_fk_refs = table.outward_fk_refs;
if (outward_fk_refs.length > 0) {
res[table.name] = outward_fk_refs;
}
});
return res;
}
table_id(table) {
let t_table = tof(table);
if (t_table === 'number') return table;
if (t_table === 'string') {
return this.map_tables[table].id
} else {
if (table.id !== undefined) return table.id;
}
return table;
}
// new_existing_record
//
create_index_records_by_record(arr_record) {
// generates them.
let table_pk = arr_record[0][0];
let table_id = (table_pk - 2) / 2;
//console.log('create_index_records_by_record table_pk', table_pk);
//console.log('table_id', table_id);
//
let table = this.map_tables_by_id[table_id];
//console.log
let record = new Record(arr_record, table);
//console.log('record', record);
let idxs = record.get_arr_index_records();
//console.log('idxs', idxs);
return idxs;
}
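    // Illustrative sketch (comment only, values made up): generating the index records for
    // one decoded record, where the first key field is the table kp (here 2, so table id 0):
    //
    //   let idx_records = db.create_index_records_by_record([[2, 5], ['some table name', [9, 10, 11, 12]]]);
    //   // => an array of index records built from that table's index definitions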
arr_records_to_records_with_index_records(arr_records) {
let res = [];
//console.log('arr_records', arr_records);
each(arr_records, record => {
//console.log('record', record);
res.push(record);
let index_records = this.create_index_records_by_record(record);
each(index_records, idx_record => res.push(idx_record));
})
return res;
}
get index_count_per_table() {
let res = [];
each(this.tables, table => {
res.push([table.name, table.indexes.length]);
})
return res;
}
}
// Should possibly be renamed
// More detail about what encoding it starts with, what the result is.
// This only does a partial encoding of already binary rows.
// filter out index rows.
var load_arr_core = (arr_core) => {
//console.log('load_arr_core');
//throw 'stop';
//console.log('arr_core', arr_core);
// Worth loading them up as OO rows.
//console.trace();
//var decoded_core = database_encoding.decode_model_rows(arr_core);
let record_list = new Record_List(arr_core);
//console.log('record_list', record_list);
//console.log('record_list.length', record_list.length);
//throw 'stop';
let db;
if (record_list.length > 0) {
var arr_by_prefix = [];
// simple to get the kp from each row now.
record_list.each(row => {
//console.log('load_arr_core row', row);
arr_by_prefix[row.kp] = arr_by_prefix[row.kp] || [];
// could keep the row here in the model in binary format and decode it as needed.
// for the moment, will use the decoded row, thats what it expects here.
//console.log('row.decoded_no_kp', row.decoded_no_kp);
arr_by_prefix[row.kp].push(row.decoded_no_kp);
});
//throw 'stop';
var arr_incrementor_rows = arr_by_prefix[0];
var arr_table_tables_rows = arr_by_prefix[2];
//console.log('arr_incrementor_rows', arr_incrementor_rows);
//console.log('arr_table_tables_rows', arr_table_tables_rows);
//console.log('arr_incrementor_rows.length', arr_incrementor_rows.length);
//console.log('arr_table_tables_rows.length', arr_table_tables_rows.length);
//throw 'stop';
var arr_table_native_types_rows = arr_by_prefix[4];
var arr_table_field_rows = arr_by_prefix[6];
var arr_table_index_rows = arr_by_prefix[8];
db = new Database(false);
db._init = true;
// May well be best to encode more data within each incrementor.
// What it does
// 0 incrementor incrementor
// 1 tables incrementor
// 2 table specific incrementor, table fields
// 3 table specific incrementor, table indexes
// 4 table specific incrementor, table fks
// 5 table specific incrementor, table specific field value autoincrement, field id
// Though the field can link back to the incrementor anyway.
        // When recreating rows, may need to avoid using an incrementor.
//console.log('arr_incrementor_rows', arr_incrementor_rows);
each(arr_incrementor_rows, (inc_row) => {
//console.log('inc_row', inc_row);
var inc_id = inc_row[0][0];
var inc_name = inc_row[0][1];
var inc_value = inc_row[1] || 0;
//var inc = new Incrementor
//console.log('incrementor: inc_id, inc_name, inc_value', inc_id, inc_name, inc_value);
db.add_incrementor(inc_id, inc_name, inc_value);
});
//throw 'stop';
db.inc_incrementor = db.incrementors[0];
db.inc_table = db.incrementors[1];
var arr_table_names = new Array(arr_table_tables_rows.length);
each(arr_table_tables_rows, (v, i) => {
//console.log('v', v);
//console.log('i', i);
arr_table_names[v[0][0]] = v[1][0];
});
//console.trace();
//throw 'stop';
let map_table_id_incrementors = {};
let arr_id_incrementors = [];
each(db.incrementors, db_incrementor => {
//console.log('db_incrementor', db_incrementor);
if (db_incrementor.name.lastIndexOf('_id') === db_incrementor.name.length - 3) {
arr_id_incrementors.push(db_incrementor);
let table_name = db_incrementor.name.substring(4, db_incrementor.name.length - 3);
//console.log('table_name', table_name);
map_table_id_incrementors[table_name] = db_incrementor;
}
});
//
//console.log('arr_table_names', arr_table_names);
//throw 'stop';
// Make a DB and autoconstruct the core?
// Probably best to reconstruct the core out of what's in the database.
// Possibly some types could be changed / added?
let map_system_table_names = {
'tables': true,
'native types': true,
'table fields': true,
'table indexes': true
}
// Give the table an array of its incrementors too.
// Don't have the table recreate its own.
//this._init = false;
// needs to be a map of tables.
// Tables can now skip IDs.
// Leaving space for more system tables.
//console.log('arr_table_tables_rows', arr_table_tables_rows);
// Go through the table table rows themselves instead
each(arr_table_tables_rows, table_table_row => {
//console.log('table_table_row', table_table_row);
            let arr_table_incrementor_ids = table_table_row[1][1];
//console.log('arr_table_incrementor_ids', arr_table_incrementor_ids);
var arr_table_incrementors = [];
each(arr_table_incrementor_ids, (id) => {
arr_table_incrementors.push(db.incrementors[id]);
});
//console.log('arr_table_incrementors', arr_table_incrementors);
let table_name = table_table_row[1][0];
//console.log('table_name', table_name);
let table_id = table_table_row[0][0];
let is_system_table = map_system_table_names[table_name];
//let table;
//if (is_system_table) {
// table = db.add_table(table_name, table_id, arr_table_incrementors);
//} else {
// table = db.add_table(table_name, table_id, arr_table_incrementors);
//}
let table = db.add_table(table_name, table_id, arr_table_incrementors);
//console.log('db.tables.length', db.tables.length);
if (table.name === 'tables') {
db.tbl_tables = table;
}
if (table.name === 'native types') {
db.tbl_native_types = table;
}
if (table.name === 'table fields') {
db.tbl_fields = table;
}
if (table.name === 'table indexes') {
//console.log('we are making the indexes table.')
db.tbl_indexes = table;
//console.log('table', table);
}
if (table.name === 'users') {
db.tbl_users = table;
}
if (!table.pk_incrementor) {
if (map_table_id_incrementors[table.name]) {
table.pk_incrementor = map_table_id_incrementors[table.name];
//console.log('table.name', table.name);
//console.log('table.pk_incrementor', table.pk_incrementor);
}
}
});
// so seems the primary key incrementors were not recorded.
// They are vital for some operations.
//console.log('db.incrementors', db.incrementors);
//throw 'stop';
// Not sure the incrementors got created in the DB properly.
//throw 'stop';
//console.log('db.incrementors', db.incrementors);
//throw 'stop';
//each(db.tables, (table) => {
// Then once
//});
// Quite possibly load the system tables first?
// Probably not necessary, should be possible to reconstruct the Model structure.
//console.log('arr_table_field_rows', arr_table_field_rows);
//console.log('arr_table_field_rows.length', arr_table_field_rows.length);
//console.log('db.tables.length', db.tables.length);
//console.log('1) db.tables', db.tables);
// Stop the initialisation at some point, as we need the rest of the tables added in normal mode.
// Add the fields to the tables.
each(arr_table_field_rows, (table_field_row) => {
//var table_id = (table_field_row[0][0] - 2) / 2;
//console.log('table_field_row', table_field_row);
// Parse it differently depending on length
var lv = table_field_row[1].length;
var table_id = table_field_row[0][0];
//console.log('table_id', table_id);
var field_id = table_field_row[0][1];
var field_name, data_type_id, is_pk, fk_to_table_id;
if (lv === 1) {
field_name = table_field_row[1][0];
} else if (lv === 2) {
field_name = table_field_row[1][0];
if (typeof table_field_row[1][1] === 'boolean') {
//console.log('table_field_row', table_field_row);
//throw 'stop';
console.log('reinterpreting malformatted field row', table_field_row);
is_pk = table_field_row[1][1];
} else {
data_type_id = table_field_row[1][1];
}
} else if (lv === 3) {
field_name = table_field_row[1][0];
data_type_id = table_field_row[1][1];
is_pk = table_field_row[1][2];
} else if (lv === 4) {
field_name = table_field_row[1][0];
// Bug fix for a field encoding problem in an early version; the writing bug has since been fixed.
if (typeof table_field_row[1][1] === 'boolean') {
//console.log('table_field_row', table_field_row);
//throw 'stop';
console.log('reinterpreting malformatted field row', table_field_row);
data_type_id = table_field_row[1][2];
//console.log('data_type_id', data_type_id);
is_pk = table_field_row[1][1];
fk_to_table_id = table_field_row[1][3];
} else {
data_type_id = table_field_row[1][1];
//console.log('data_type_id', data_type_id);
is_pk = table_field_row[1][2];
fk_to_table_id = table_field_row[1][3];
}
}
//
//var table = db.tables[table_id];
var table = db.map_tables_by_id[table_id];
//console.log('!!table', !!table);
//console.log('field read ' + field_name + ': data_type_id', data_type_id);
/*
console.log('table_id', table_id);
console.log('field_id', field_id);
console.log('field_name', field_name);
console.log('data_type_id', data_type_id);
console.log('is_pk', is_pk);
console.log('fk_to_table_id', fk_to_table_id);
*/
//console.log('table', table);
//console.log('db.tables.length ' + db.tables.length);
// Definitely need to set the field ID!
//console.log('1) data_type_id', data_type_id);
if (typeof data_type_id === 'boolean') {
console.trace();
console.log('lv', lv);
console.log('table_field_row', table_field_row);
throw ('data_type_id expected to be integer');
}
table.add_field(field_name, field_id, data_type_id, is_pk, fk_to_table_id);
// then need to make sure the field appears in the map.
// Then will test this reconstruction with a more advanced database structure, such as one to hold cryptocurrencies.
if (is_pk) {
//table.record_def.
} else {
}
});
// arr_table_index_rows
each(arr_table_index_rows, (table_index_row) => {
//console.log('table_index_row', table_index_row);
// Then reconstruct the index
// Create an index object with this specification...
// May need to look up the exact fields, create the object references.
// Then, shortly after this, work on getting cryptodb records made.
// Get the data coming in.
// Save it to a record stream.
// May be useful indexing trades by time, in buckets.
// Would get more than one trade at any point in time.
var ir_key = table_index_row[0];
var ir_value = table_index_row[1];
var table_id = ir_key[0];
var index_id = ir_key[1]; // index within table
// then all the fields that are being indexed
var index_keys = clone(ir_key);
index_keys.splice(0, 2);
//console.log('index_keys', index_keys);
//console.log('ir_value', ir_value);
// the value probably corresponds with the primary key of the table.
var table = db.map_tables_by_id[table_id];
var idx_kv = [index_keys, ir_value];
// the keys and values may need a lookup
//console.log('idx_kv', idx_kv);
var idx = table.add_index(index_id, idx_kv);
//console.log('idx', idx);
});
db.tbl_native_types.add_records(arr_table_native_types_rows);
db.tbl_tables.add_record([
[db.tbl_tables.id],
[db.tbl_tables.name, db.tbl_tables.own_incrementor_ids]
]);
db.tbl_tables.add_record([
[db.tbl_native_types.id],
[db.tbl_native_types.name, db.tbl_native_types.own_incrementor_ids]
]);
db.tbl_tables.add_record([
[db.tbl_fields.id],
[db.tbl_fields.name, db.tbl_fields.own_incrementor_ids]
]);
if (db.tbl_indexes) db.tbl_tables.add_record([
[db.tbl_indexes.id],
[db.tbl_indexes.name, db.tbl_indexes.own_incrementor_ids]
]);
// May redo the creation of db model from rows.
// At least make sure all bases are covered.
// So far, some records are missed from the model.
// Need to see why some records / incrementors have not been put in the db already.
// Hopefully there are not many fixes left to do until the data platform works.
each(db.tables, (table) => {
//console.log('db.tables table name', table.name);
let own_ttr = table.own_table_table_record;
//console.log('own_ttr', own_ttr);
let tr;
if (!own_ttr) {
// Should put the PK in there.
tr = db.tbl_tables.add_record([
[table.id],
[table.name, table.own_incrementor_ids]
]);
//console.log('[table.name, table.own_incrementor_ids]', [table.name, table.own_incrementor_ids]);
//console.log('tr', tr);
}
// db.table
//db.
// Ensure its in the tables table...
//this.add_tables_fields_to_fields_table(tbl_tables);
// OK but they should have been loaded already?
db.add_tables_fields_to_fields_table(table);
db.add_tables_indexes_to_indexes_table(table);
});
db._init = false;
}
// then go through the individual records.
/*
// When adding these, it will use the already high value of some incrementors.
db.add_tables_fields_to_fields_table(table);
each(record_list, row => {
console.log('row', row);
})
*/
//throw 'stop';
//console.log('decoded_core', decoded_core);
// The core should have been provided with the tables in the right order.
// Don't know why the order of tables has got jumbled.
// Should have the index table rows showing up in prefix 8
// then go through the table indexes.
// want to separate them by tables.
//
return db || new Database();
}
var load_buf = (buf) => {
//console.log('*load_buf');
//throw 'stop - likely to need fixing';
var arr_core = Binary_Encoding.split_length_item_encoded_buffer_to_kv(buf);
return load_arr_core(arr_core);
}
Database.load = (arr_core) => {
//console.log('arr_core', arr_core);
// Load it differently - in fact it would require less code as it's easier to decode now.
return load_arr_core(arr_core);
}
Database.kp_to_range = buf_kp => {
let buf_0 = Buffer.alloc(1);
buf_0.writeUInt8(0, 0);
let buf_1 = Buffer.alloc(1);
buf_1.writeUInt8(255, 0);
// and another 0 byte...?
return [Buffer.concat([buf_kp, buf_0]), Buffer.concat([buf_kp, buf_1])];
}
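// Hedged usage sketch (illustrative only, not part of the original code):
// kp_to_range brackets a key-prefix buffer with a trailing 0x00 and 0xFF byte so the
// pair can bound a lexicographic range scan in a LevelDB-style store. Assuming buf_kp
// is an encoded table key prefix:
//   const [range_start, range_end] = Database.kp_to_range(buf_kp);
//   // every key beginning with buf_kp sorts between range_start and range_end.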
Database.diff_model_rows = (orig, current) => {
let changed = [],
added = [],
deleted = [];
let map_orig = {},
map_current = {},
map_orig_records = {};
each(orig, (record) => {
//console.log('record', record);
// so make a record iterable, and it's just the key and the value.
let [key, value] = record;
//console.log('[key, value]', [key, value]);
map_orig[key.toString('hex')] = [value];
map_orig_records[key.toString('hex')] = record;
});
//console.log('map_orig', map_orig);
each(current, (record) => {
let [key, value] = record;
map_current[key.toString('hex')] = [value];
// does it appear in orig?
if (map_orig[key.toString('hex')]) {
if (deep_equal(map_orig[key.toString('hex')][0], value)) {
} else {
//changed.push([record]);
changed.push([map_orig_records[key.toString('hex')], record]);
}
} else {
added.push(record);
}
});
each(orig, (record) => {
let [key, value] = record;
//map_orig[key] = value;
if (map_current[key.toString('hex')]) {
} else {
deleted.push(record);
}
});
let res = {
changed: changed,
added: added,
deleted: deleted,
same: changed.length === 0 && added.length === 0 && deleted.length === 0
}
return res;
}
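// Hedged usage sketch (illustrative only): diff_model_rows compares two snapshots of
// model rows, where each row is a [key_buffer, value] pair and rows are matched on the
// key buffer's hex string.
//   const diff = Database.diff_model_rows(previous_model_rows, current_model_rows);
//   if (!diff.same) {
//       // diff.added and diff.deleted hold whole rows; diff.changed holds [original, current] pairs.
//   }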
Database.load_buf = load_buf;
Database.decode_key = database_encoding.decode_key;
Database.decode_keys = database_encoding.decode_keys;
Database.decode_model_row = database_encoding.decode_model_row;
Database.decode_model_rows = database_encoding.decode_model_rows;
Database.encode_model_row = database_encoding.encode_model_row;
Database.encode_model_rows = database_encoding.encode_model_rows;
Database.encode_arr_rows_to_buf = database_encoding.encode_arr_rows_to_buf;
Database.encode_index_key = database_encoding.encode_index_key;
Database.encode_key = database_encoding.encode_key;
var p = Database.prototype;
//p.encode_model_rows = encode_model_rows;
if (require.main === module) {
//setTimeout(() => {
var db = new Database();
// Gets creates automatically.
//db.create_db_core_model();
console.log('db.tables.length', db.tables.length);
var view_decoded_rows = () => {
var model_rows = db.get_model_rows();
//throw 'stop';
console.log('model_rows.length', model_rows.length);
each(model_rows, (model_row) => {
//console.log('model_row', model_row);
console.log('model_row', Database.decode_model_row(model_row));
});
console.log('\n\n\n');
//throw 'stop';
//var decoded_rows = crypto_db.get_model_rows_decoded();
//console.log('decoded_rows', decoded_rows);
}
var model_rows = db.get_model_rows();
console.log('model_rows.length', model_rows.length);
each(model_rows, (model_row) => {
console.log('model_row', model_row);
});
console.log('\n\n\n');
view_decoded_rows();
//throw 'stop';
var test_full_db_binary_encoding = () => {
//var decoded_rows = db.get_model_rows_decoded();
//console.log('decoded_rows', decoded_rows);
// Simpler encoding... Can get all the row kvps, and encode them along with some lengths.
var buf_simple_encoded = db.get_model_rows_encoded();
//console.log('buf_simple_encoded', buf_simple_encoded);
//console.log('buf_simple_encoded.length', buf_simple_encoded.length);
// Then use streaming / evented decoding.
// Functional event driven programming?
// Could be a job for Binary_Encoding
var buf_se_length = buf_simple_encoded.length;
Binary_Encoding.evented_get_row_buffers(buf_simple_encoded, (arr_row) => {
//console.log('arr_row', arr_row);
var decoded_row = database_encoding.decode_model_row(arr_row);
console.log('decoded_row', decoded_row);
});
// Can try serializing the model to binary, then unserialising / reconstructing it to a model.
// Then can compare values from the two.
var db2 = load_buf(buf_simple_encoded);
//console.log('db2', db2);
console.log('db2.tables.length', db2.tables.length);
var decoded = db2.get_model_rows_decoded();
//console.log('decoded', decoded);
view_decoded_rows();
// It looks like this reloaded database is capable of doing what is needed.
}
test_full_db_binary_encoding();
// A test to do with making a new DB and checking if the autoincrementing fields are typed correctly.
// Could include binary encoding and decoding.
// Will also be useful to specify field types so that values can be checked before they get persisted to make sure they are of the right type.
// specified field types would definitely help fk-> pk lookups.
// that code could be somewhat complex.
// would be useful for normalising values.
let test_autoincrement_field_types = () => {
}
} else {
//console.log('required as a module');
}
module.exports = Database;<file_sep>/paging.js
// Note: This has gone way beyond just describing paging options, it's a useful part of the system that's being refactored to handle a variety of options.
// Expanding this allows for protocol changes while retaining backwards compatibility.
// New, simpler calling conventions can be added, while the old code stays in use for the moment and can be removed later on.
// For example, specifying a 'limit' option with the count function.
// Want to make the paging system easy to use, reading from a binary buffer that represents a query.
// Using OO query parsing on the server may well be better.
// Easier to test. Queries get created on the client, and can be parsed there to see if the parsing works correctly.
var xas2 = require('xas2');
let x = xas2;
let Binary_Encoding = require('binary-encoding');
const NO_PAGING = 0;
const PAGING_RECORD_COUNT = 1;
const PAGING_KEY_COUNT = 2;
// Followed by p number
const PAGING_BYTE_COUNT = 3;
const PAGING_TIMED = 4;
const PAGING_AND_EXTENDED_OPTIONS = 5;
// This is going to allow extended options, such as limit and reverse.
// Want it so that it recognises that there are extended paging / results options, and then provides the data in that kind of way.
// Paging objects could raise events themselves. ???
// Reverse and limit within paging options would be cool.
// This will be used to make it easier to get the last value range.
// Allows more options to be encoded into the query, while maintaining backwards compatibility.
// Direction and limit are options that are worth having as parameters.
// Maybe better to change to OO paging reading right now.
// Seems less important with get_last_key_in_table but it would be a useful feature nevertheless.
// Server Return Processing.
// Will cover more than just paging, such as removal of KPs from keys / records.
// Set remove kp to be true, then it renders the buffer differently.
// On the server-side, it will be parsed differently to include more paging / return options.
// Will parse it as a Binary_Encoded array.
// This is part of the system that will save on code complexity in the server's binary response handler.
// Middleware using this will enable responses to be specified cleanly and in terms of the inner server function that gets called.
// A 'limit' arg for paging and options could be useful.
// page_size, remove_kp, decode, limit
// could all be booleans, with the values then given
// Easiest just to encode all of these into a buffer.
// Some more work on Paging / Options writing and parsing will be useful.
// Use this code on both the client and the server.
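// Hedged sketch of the wire layout produced by the `buffer` getter below (xas2 varints
// throughout; the bracketed names are descriptive only, not part of the format):
//   no paging:        [NO_PAGING]
//   basic paging:     [paging_type][page_size]
//   extended options: [PAGING_AND_EXTENDED_OPTIONS][paging_type][Binary_Encoding args: page_size, remove_kp(, limit)]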
class Paging {
constructor(spec) {
}
get buffer() {
let using_extended_options = false;
if (this.remove_kp !== undefined || this.remove_kps !== undefined || this.limit > 0) {
using_extended_options = true;
}
// Probably will need to extend the extended options some more?
// Right now, it is an array with a number of args.
// Moving away from record or key or binary paging?
// That helps us know what type of data it is.
// It's not really the type of paging.
if (using_extended_options) {
let ptb = xas2(PAGING_AND_EXTENDED_OPTIONS).buffer;
let ptb2 = xas2(this.paging_type).buffer;
let arr_args = [];
if (this.paging_type === NO_PAGING) {
//return xas2(NO_PAGING).buffer;
} else {
arr_args.push(this.page_size || 0);
//
//return Buffer.concat([xas2(this.paging_type).buffer, xas2(this.page_size).buffer]);
}
arr_args.push(this.remove_kp || false);
// optional 3rd argument being the limit?
// could leave it null if it's not there.
if (this.limit > 0) {
arr_args.push(this.limit);
}
let buf_args = Binary_Encoding.encode_to_buffer(arr_args);
//console.log('[ptb, ptb2, buf_args]', [ptb, ptb2, buf_args]);
return Buffer.concat([ptb, ptb2, buf_args]);
} else {
if (this.paging_type === NO_PAGING) {
return xas2(NO_PAGING).buffer;
} else {
//
// Can't include limit like this.
return Buffer.concat([xas2(this.paging_type).buffer, xas2(this.page_size).buffer]);
}
}
}
decode_inner() {
return Binary_Encoding.decode_buffer(this.buffer);
}
}
// Changing to item count paging may work better.
// Getting rid of key paging effectively.
// We get n of them, records or keys, and then sort out the paging as appropriate.
//
class No_Paging extends Paging {
constructor(num_records) {
super();
this.paging_type = NO_PAGING;
}
}
// Numbered paging.
// Either timed, or number of records.
// Record_Paging will change to Count_Paging
class Record_Paging extends Paging {
constructor(num_records) {
super();
this.num_records = num_records;
this.page_size = num_records;
this.paging_type = PAGING_RECORD_COUNT;
}
}
class Key_Paging extends Paging {
constructor(num_keys) {
console.log('DEPRECATION WARNING: Key_Paging');
super();
this.num_keys = num_keys;
this.page_size = num_keys;
this.paging_type = PAGING_KEY_COUNT;
}
}
// Byte paging will send complete records though.
class Byte_Paging extends Paging {
constructor(num_bytes) {
super();
this.num_bytes = num_bytes;
this.page_size = num_bytes;
this.paging_type = PAGING_BYTE_COUNT;
}
}
class Timed_Paging extends Paging {
constructor(ms_delay) {
super();
this.ms_delay = ms_delay;
this.page_size = ms_delay;
this.paging_type = PAGING_TIMED;
}
}
Paging.read_buffer = function (buf, pos = 0) {
//console.log('read_buffer buf', buf);
var paging_option, page_size = 0;
[paging_option, pos] = x.read(buf, pos);
//console.log('paging_option', paging_option);
if (paging_option > 0) {
if (paging_option === PAGING_AND_EXTENDED_OPTIONS) {
let sub_paging_option;
[sub_paging_option, pos] = x.read(buf, pos);
let decoded_args = Binary_Encoding.decode_buffer(buf, 0, pos);
//console.log('1) decoded_args', decoded_args);
page_size = decoded_args.shift();
let remove_kps = decoded_args.shift();
//console.log('page_size', page_size);
//console.log('remove_kps', remove_kps);
//console.log('2) decoded_args', decoded_args);
// These decoded args would provide non-paging args too.
pos = buf.length;
return [sub_paging_option, page_size, pos, remove_kps, decoded_args];
} else {
[page_size, pos] = x.read(buf, pos);
}
} else {
//pos++;
}
return [paging_option, page_size, pos, false];
}
let Paging_By_Option = {
0: No_Paging,
1: Record_Paging,
2: Key_Paging, // Will be deprecated
3: Byte_Paging,
4: Timed_Paging
}
Paging.read = function (buf, pos = 0) {
//console.log('read buf', buf);
let paging_option, page_size, remove_kps, decoded_args;
[paging_option, page_size, pos, remove_kps, decoded_args] = Paging.read_buffer(buf, pos);
//console.log('paging_option', paging_option);
//console.log('page_size', page_size);
//console.log('pos', pos);
//console.log('Paging_By_Option', Paging_By_Option);
//console.log('Paging_By_Option[paging_option]', Paging_By_Option[paging_option]);
//if ()
let paging = new Paging_By_Option[paging_option](page_size);
if (remove_kps) {
paging.remove_kps = true;
}
if (decoded_args) {
paging.args = decoded_args;
}
return [paging, pos];
}
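// Hedged round-trip sketch (illustrative only):
//   const paging = new Record_Paging(50);   // page after every 50 records
//   paging.remove_kp = true;                // forces the extended-options encoding
//   const [parsed, pos] = Paging.read(paging.buffer, 0);
//   // parsed.page_size === 50, parsed.remove_kps === true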
Paging.No_Paging = No_Paging;
Paging.No = No_Paging;
Paging.None = No_Paging;
// Going to remove Record Paging and Key Paging.
// Rather, change Record_Paging to Count_Paging and remove Key_Paging.
Paging.Record_Paging = Record_Paging;
Paging.Record = Record_Paging;
Paging.Count_Paging = Record_Paging;
Paging.Count = Record_Paging;
Paging.Key_Paging = Key_Paging;
Paging.Key = Key_Paging;
Paging.Byte_Paging = Byte_Paging;
Paging.Byte = Byte_Paging;
Paging.Timed_Paging = Timed_Paging;
Paging.Timed = Timed_Paging;
module.exports = Paging;<file_sep>/buffer-backed/row.js
/*
17/05/2018 - Maybe this is a 'row' rather than a 'record'. A record encompasses index rows as well.
// A record could be composed of its index rows too.
// Removal of index rows when the record changes may be the best approach.
*/
const lang = require('lang-mini');
const def = lang.is_defined;
const each = lang.each;
let Binary_Encoding = require('binary-encoding');
let xas2 = require('xas2');
let Key = require('./key');
let Value = require('./value');
const database_encoding = require('../encoding');
const XAS2 = 0;
const STRING = 4;
const BUFFER = 9;
const ARRAY = 10;
// Standard data 0. just normal decoding.
// ~-~-~-~-~-~-~-~-~-~-~-~-~-
// Supplementary encoding
//const NONE = 0;
const RECORD = 200;
const KEY = 201;
const VALUE = 202;
// Record_Row
// That should be what this is really called.
// A row is not necessarily a record row. A record itself has got index rows too sometimes.
// Read_fields_to_buffer
// Then when making index records from normal records it could carry that out.
class Row {
constructor() {
let a = arguments,
l = a.length;
//console.log('Record l', l);
//console.log('a', a);
// can construct record out of array of two values.
// Not using sig right now to save speed.
// non-enumerable prop?
// ne_prop
var _kvp_bufs;
Object.defineProperty(this, 'kvp_bufs', {
// Using shorthand method names (ES2015 feature).
// This is equivalent to:
// get: function() { return bValue; },
// set: function(newValue) { bValue = newValue; },
get() {
return _kvp_bufs;
},
set(value) {
_kvp_bufs = value;
},
enumerable: false,
configurable: true
});
var _decoded;
Object.defineProperty(this, 'decoded', {
// Using shorthand method names (ES2015 feature).
// This is equivalent to:
// get: function() { return bValue; },
// set: function(newValue) { bValue = newValue; },
get() {
if (this.kp === 0) {
// Incrementor records.
//console.log('this.value.buffer', this.value.buffer);
// should be able to return null.
return [this.key.decoded, xas2.read(this.value.buffer)];
} else {
return [this.key.decoded, this.value.decoded];
}
},
enumerable: true,
configurable: true
});
//let kvp_bufs;
if (l === 1) {
if (a[0] instanceof Buffer) {
// a buffer that can be split?
// one buffer
// odd kp, it has no 'value'
// has a key
// key is the buffer given
// Not so sure about this with the constructor.
// Need to split the values into key value pair buffers.
// Need to split / decode the buffer.
// We may have been given the value, not a key.
//console.trace();
//this.kvp_bufs = [a[0], Buffer.alloc(0)];
//throw 'stop';
// length encoded buffers.
//console.log('pre split', a[0]);
this.kvp_bufs = Binary_Encoding.split_length_item_encoded_buffer(a[0]);
//console.log('this.kvp_bufs', this.kvp_bufs);
//
//throw 'NYI'
} else {
//console.log('else condition');
//console.log('a[0]', a[0]);
//console.log('a[0].length', a[0].length);
if (Array.isArray(a[0])) {
//console.log('its an array');
//console.log('a[0][1]', a[0][1]);
// buffer and a
if (a[0].length === 2 && a[0][0] instanceof Key && a[0][1] instanceof Value) {
// Check they are both buffers.
this.kvp_bufs = [a[0][0].buffer, a[0][1].buffer];
} else if (a[0].length === 2 && a[0][0] instanceof Buffer && a[0][1] instanceof Buffer) {
// Check they are both buffers.
this.kvp_bufs = a[0];
} else if (a[0].length === 2 && a[0][0] instanceof Buffer && a[0][1] === null) {
// Check they are both buffers.
//this.kvp_bufs = a[0];
this.kvp_bufs = [a[0][0], Buffer.alloc(0)];
} else {
//console.log('else 2');
if (Array.isArray(a[0][0]) && Array.isArray(a[0][1])) {
//console.log('both arrays');
//this.kvp_bufs = database_encoding.encode_model_row(a[0]);
this.kvp_bufs = [database_encoding.encode_key(a[0][0]), Binary_Encoding.encode_to_buffer(a[0][1])];
//console.log('this.kvp_bufs', this.kvp_bufs);
} else {
// undefined key, but has value.
if (def(a[0][0])) {
console.log('a', a);
//console.log('key', key);
//console.log('value', value);
// encode key...
//console.log('a[0]', a[0]);
//console.log('a[0][0]', a[0][0]);
//this.kvp_bufs = [database_encoding.encode_key(a[0][0]), Binary_Encoding.encode_to_buffer(a[0][1] || [])];
console.trace();
throw 'NYI';
} else {
if (Array.isArray(a[0][1])) {
this.kvp_bufs = [undefined, Binary_Encoding.encode_to_buffer(a[0][1])];
} else {
console.trace();
throw 'NYI';
}
}
}
// an array of arrays.
// in that case, we will need to use database_encoding.encode_record
//throw 'NYI';
}
} else {
//console.log('a[0] is not an array');
if (a.length === 2) {
if (a[0] instanceof Buffer && a[1] instanceof Buffer) {
this.kvp_bufs = Array.from(a);
// copy it to an array?
// Maybe no need, arraylike will be fine?
} else {
console.trace();
throw 'NYI';
}
} else {
console.trace();
throw 'NYI';
}
}
}
} else {
console.trace();
throw 'NYI';
}
// Then the key will be using the key buffer.
// Could do with an OO value class.
// So, just 'key' and 'value' types are needed for good OO representation of these records.
// Can get the key or the value from each of those buffers.
}
toJSON() {
console.log('to json');
return this.decoded;
}
get record() {
return this;
}
get row() {
return this;
}
get key() {
if (this._key) {
return this._key;
} else {
return this._key = new Key(this.kvp_bufs[0]);
}
}
//
set key(value) {
//can't replace the key, because then it's a different record.
// keep that logic for the moment.
//console.log('this._key', this._key);
//console.log('value', value);
// use the getter so this._key is initialised from kvp_bufs before being inspected
if (this.key._buffer.length === 0) {
if (value instanceof Buffer) {
this._key._buffer = value;
} else {
console.trace();
throw 'NYI';
}
} else {
throw 'Cannot replace record\'s existing key';
}
}
get value() {
if (this._value) {
return this._value;
} else {
return this._value = new Value(this.kvp_bufs[1]);
}
}
// validate encoding...
get decoded_key_no_kp() {
let decoded_key = this.key.decoded;
decoded_key.shift();
return decoded_key;
}
get decoded_no_kp() {
let decoded = this.decoded;
decoded[0].shift();
return decoded;
}
get bpair() {
return this.kvp_bufs;
}
get kp() {
// read first xas2 from the key
//let res = xas2.read(this.kvp_bufs[0]);
//console.log('this.kvp_bufs', this.kvp_bufs);
//console.log('this', this);
//console.log('this.kvp_bufs[0]', this.kvp_bufs[0]);
if (this.kvp_bufs[0] && this.kvp_bufs[0].length > 0) {
return xas2.read(this.kvp_bufs[0]);
} else {
return undefined;
}
}
// get as a single buffer
// encoding:
// key length, key, value length, value.
get buffer() {
if (!def(this.kvp_bufs[0])) {
//console.log('key not defined');
// So, need to be able to read the record when there is not a key.
// First buffer has length 0.
return Buffer.concat([xas2(0).buffer, xas2(this.kvp_bufs[1].length).buffer, this.kvp_bufs[1]]);
} else {
//console.log('this.kvp_bufs[0]', this.kvp_bufs[0]);
//console.log('this.kvp_bufs[1]', this.kvp_bufs[1]);
return Buffer.concat([xas2(this.kvp_bufs[0].length).buffer, this.kvp_bufs[0], xas2(this.kvp_bufs[1].length).buffer, this.kvp_bufs[1]]);
}
}
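// Hedged layout sketch for the `buffer` getter above (xas2 varints; illustrative only):
//   [key length][key bytes][value length][value bytes]
// A row with no key is written with a key length of 0, which matches how the
// constructor splits a single buffer back apart with
// Binary_Encoding.split_length_item_encoded_buffer.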
get buffer_xas2_prefix() {
return new xas2(RECORD).buffer;
}
// get it to read the keys to find the number of items there.
// need to be able to identify the specific fields within the record.
get key_length() {
return this.key.length;
}
validate_encoding() {
let res = true;
try {
// don't want tracing done, not sure why it sometimes happens. Trace that!
let decoded = this.decoded;
} catch (err) {
//console.trace();
res = false;
}
return res;
}
to_obj(model_table) {
let fields = model_table.kv_fields.reduce((a, b) => a.concat(b));
let decoded = this.decoded_no_kp.reduce((a, b) => a.concat(b));
//console.log('fields', fields);
//console.log('decoded', decoded);
let res = {};
each(fields, (v, i) => {
if (def(decoded[i])) res[v] = decoded[i];
//if (v != undefined) res[v] = decoded[i];
//console.log('v', v);
});
//console.log('res', res);
return res;
//console.log('Array.prototype.flat', Array.prototype.flat);
}
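// Hedged usage sketch for to_obj (field and value names are illustrative only):
// given a model table whose kv_fields flatten to ['id', 'name'], a row whose
// decoded_no_kp form is [[5], ['alice']] comes back as { id: 5, name: 'alice' }.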
get_field_value(idx) {
let kl = this.key_length;
//console.log('this.key_length', kl);
//console.log('idx', idx);
//console.log('');
//console.log('idx', idx);
//console.log('kl', kl);
//console.log('');
if (idx < kl) {
return this.key.get_value_at(idx);
} else {
//console.log('this.value', this.value);
//let r_idx = idx - kl;
//let res = this.value.get_value_at(r_idx);
//console.log('r_idx', r_idx);
//console.log('res', res);
//return res;
return this.value.get_value_at(idx - kl);
}
}
// make iterable...
// just a key and value
* iterator() {
yield this.kvp_bufs[0];
yield this.kvp_bufs[1];
}
[Symbol.iterator]() {
return this.iterator();
}
}
module.exports = Row;<file_sep>/primary-key.js
var Field = require('./field');
var lang = require('lang-mini');
var each = lang.each;
var tof = lang.tof;
const XAS2_VALUE_TYPE = 0;
class Primary_Key {
constructor(table) {
this.fields = [];
this.map_fields = {};
if (!table) {
console.trace();
throw 'requires table reference';
}
this.table = table;
}
get length() {
return this.fields.length;
}
add_field(field) {
if (!(field instanceof Field)) {
throw 'stop';
}
//this.table.add_field(field);
if (!this.map_fields[field.name]) {
this.fields.push(field);
this.map_fields[field.name] = field;
}
if (typeof field.name === 'undefined') {
console.trace();
throw 'Field name expected';
}
//throw 'stop';
//
return field;
}
set_def(def) {
var field;
var table = this.table,
t_item;
var that = this;
var t_def = tof(def);
//console.log('pk set_def', def);
var set_string = (item) => {
field = table.map_fields[item];
if (!field) {
// need to create the field.
//field = new Field(item, table, table.inc_fields.increment(), true);
// We don't know the type of the pk.
// Assume it is type 0?
// Maybe assume that for primary key (constituent) fields.
// Maybe the wrong place / way to create the field.
field = table.add_field(item, -1, XAS2_VALUE_TYPE, true);
//field = table.add_field(item, null, true);
} else {
console.trace();
throw 'stop';
}
//console.log('field', field);
//console.log('table.map_fields', table.map_fields);
//field = new Field(item, table, table.inc_fields.increment(), true);
that.add_field(field, -1);
}
if (t_def === 'string') {
set_string(def);
}
if (t_def === 'array') {
each(def, (item) => {
// need to create a new field
/*
str_field = a[0];
var table = this.table = a[1];
var id = this.id = a[2];
this.is_pk = a[3];
*/
t_item = tof(item);
if (t_item === 'string') {
set_string(item);
} else {
console.log('item', item);
console.trace();
throw 'Item expected as string';
}
// don't actually add it to the table's fields array?
//table.add_field(field);
});
}
return this;
}
}
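// Hedged usage sketch (illustrative only): a Primary_Key is normally populated via
// set_def with a single field name or an array of field names; fields that do not yet
// exist on the owning table are created there with the default XAS2 value type.
//   pk.set_def('id');              // single-field primary key
//   pk.set_def(['user', 'time']);  // composite primary key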
module.exports = Primary_Key;<file_sep>/record-def.js
const lang = require('lang-mini');
const tof = lang.tof;
const xas2 = require('xas2');
const each = lang.each;
const is_array = lang.is_array;
const is_arr_of_strs = lang.is_arr_of_strs;
const is_arr_of_arrs = lang.is_arr_of_arrs;
const get_a_sig = lang.get_a_sig;
const Evented_Class = lang.Evented_Class;
const Incrementor = require('./incrementor');
const Record = require('./record');
const Field = require('./field');
const Index = require('./index-def');
const Foreign_Key = require('./foreign-key');
const Primary_Key = require('./primary-key');
const Binary_Encoding = require('binary-encoding');
const encode_to_buffer = Binary_Encoding.encode_to_buffer;
const Record_Value_Def = require('./record-value-def');
const NT_XAS2_NUMBER = 1;
const NT_DATE = 2;
const NT_TIME = 3;
const NT_STRING = 4;
const NT_FLOAT32_LE = 5;
//var Table = require('./table');
class Record_Def extends Evented_Class {
// Not sure I like this record def all that much?
// Hard to decide where to draw the line between Table and this.
//
// This will takes some of the parsing out of the Table constructor.
// I wonder if this is too big a change.
// Splitting core functionality out of Table?
// Not sure about the incrementor references.
// The incrementors are part of the table itself. They could be used here though.
// Should not rely on incrementors provided by the database.
// Link back to table, and use the incrementors there.
// The incrementors should belong to the table, not the record definition.
// The record definition should use the incrementors to get id values, such as the incrementing ids for indexes and fields.
constructor(obj_record_def, table) {
// should be given a table...
super();
// Maybe we build the key and value out of objects?
// Does the record definition contain info on indexing?
// Probably not, as that is a feature of how the table organises the records.
// Maybe it is easier to have the indexing here, as indexing info does get included in the definition of records.
// Putting quite a lot in the Record_Def.
// Going with the idea that Table will manage both Record_Def and actual records. Table manages that intersection, so less code there managing the specifics of either is an improvement.
this.table = table;
this.indexes = [];
this.map_indexes_by_field_names = {};
this.fields = [];
// All the fields in order here.
// Fields will also be in their orders within the key and value
this.foreign_keys = [];
this.map_foreign_keys = {};
this.map_fields = {};
// this map will combine the inner map fields.
//var that = this;
// There will be separate maps for the keys and values too.
var new_field;
var indexes = this.indexes;
// Map of fields belongs here
var pk = this.pk = new Primary_Key(table);
var value = this.value = new Record_Value_Def();
// add_field to the key or value.
//var storage = spec;
//var arr_fields = [];
var inc_fields = table.inc_fields;
var inc_indexes = table.inc_indexes;
var inc_foreign_keys = table.inc_foreign_keys;
//var inc_indexes = this.inc_indexes = db.new_incrementor('inc_idx_' + this.name);
//var inc_foreign_keys = this.inc_foreign_keys = db.new_incrementor('inc_fk_' + this.name);
//var inc_fields = this.inc_fields = db.new_incrementor('inc_field_' + this.name);
//var inc_indexes = this.inc_indexes = db.new_incrementor('inc_idx_' + this.name);
//var inc_foreign_keys = this.inc_foreign_keys = db.new_incrementor('inc_fk_' + this.name);
//console.log('is_arr_of_arrs(storage)', is_arr_of_arrs(storage));
if (obj_record_def) this.set_def(obj_record_def);
//console.log('fields_map', fields_map);
//console.log('map_fields', map_fields);
//throw 'stop';
// Only have the name->Field map.
//this.map_fields = map_fields;
}
set_fk(field, table) {
// fk gets set on a field once the field has been made?
var o_field, o_table, t_field, t_table;
var map_fields = this.map_fields;
if (field instanceof Field) {
o_field = field;
} else {
t_field = tof(field);
if (t_field === 'string') {
o_field = map_fields[field];
}
}
var Table = this.table.constructor;
if (table instanceof Table) {
//throw 'stop';
o_table = table;
} else {
t_table = tof(table);
if (t_table === 'string') {
o_table = this.table.db.map_tables[table];
}
}
if (o_field && o_table) {
o_field.fk_to_table = o_table;
// change event on the record def?
// then change on the table
// db hears the change, then updates the incoming fk references of the table which is being referred to.
// Being able to look this up quickly will help with getting a record and all of the associated records.
// Meaning joins can be avoided in queries, and this is a large auto outer join.
// If the field does not know its type, then it could lookup the type of the foreign key.
// May get on for foreign keys that have got two values encoded, ie a tuple.
// The binary encoding system should be able to store tuples, triples, arrays.
// Have not got tuple types defined yet.
// Could be a pair of xas2 numbers.
// Essentially, they need to match the primary key.
//console.log('o_table.pk', o_table.pk);
if (o_table.pk.fields.length === 1) {
// get the data type of that field.
var pk_field = o_table.pk.fields[0];
//console.log('pk_field.type_id', pk_field.type_id);
// type_id
//throw 'stop';
o_field.type_id = pk_field.type_id;
} else {
//console.log('this.table.name', this.table.name);
//console.log('field.name', field.name);
// the field for a foreign key would need to contain two values, if the pk contains two values.
// a field with two values seems fine.
// an array type of value.
console.log('previous throw exception here: Need to handle ' + o_table.pk.fields.length + ' fields');
//throw 'Need to handle ' + o_table.pk.fields.length + ' fields';
// Some kind of tuple data type for a value?
// Or just refer to part of the primary key?
// Need to look at the case.
}
//throw 'stop';
// raise an event saying that the def has changed, there is an fk reference.
// Then update the field record.
o_field.update_db_record();
//throw 'stop';
} else {
throw 'stop';
}
}
set_pk(pk) {
// could have an array of strings.
// in which case they are the field names of the pk.
// Needs to create the fields if they don't exist already.
if (pk instanceof Primary_Key) {
throw 'Dont use instance of Primary_Key here';
} else {
this.pk.set_def(pk);
}
//throw 'stop';
}
// Could be evented, so the DB can respond when a foreign key has been added / set
set_def(obj_record_def) {
// Not so sure that fields are being set in the model creation.
// Would add a table according to definition, and also make sure it's fields' types are in there properly.
// Possibly will result in creating a new pk incrementor for the table.
//console.log('set_def obj_record_def', obj_record_def);
//throw 'stop';
// is it an array, with 2 items?
// if each of those is an array, it's
// check if its an array, then deal with the items one by one
//
//console.trace();
//throw 'stop';
// It's an array, process the items in the array one by one.
// Deal with items that are specified differently in different ways.
// Some of them will have given types, as in [name, Type]
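// Hedged sketch of the shapes this method accepts, based on the branches below
// (field names are illustrative only):
//   set_def(['id', 'name'])                         // flat array of field-name strings
//   set_def([['id'], ['name', 'value']])            // [pk field names, value field names]
//   set_def([[['id'], ['name']], arr_index_defs])   // kv definition plus index definitions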
if (is_arr_of_strs(obj_record_def)) {
// Need to go through each line, adding the appropriate field.
// Worth adding indexes too.
// OO indexes would help...
// But maybe not yet.
// Makes a lot of sense with different ways that indexes can be specified.
// will need to put the system's storage into key value pairs.
// Use the table fields incrementor?
var kv_def = this.kv_def = [
[],
[]
];
each(obj_record_def, (item, i) => {
//first_char = item[0];
//console.log('first_char', first_char);
//console.log('item', item);
//throw 'stop';
let new_field = this.add_field(item);
// With autoincrement fields it should know the type.
// Should be one of the native types.
// Native types could be set up at the beginning anyway.
// Some fields have got built in indexes.
// Adding the field should also add the index where necessary.
// Add fields to the primary key...
//if (new_field.is_pk) {
//pk.add_field(new_field);
//}
// Already added to pk?
// Seems so, maybe its a side-effect elsewhere.
//this.add_field(new_field);
/*
if (new_field.is_pk) {
//kv_def[0].push(item);
pk.add_field(new_field);
} else {
value.add_field(new_field);
//kv_def[1].push(item);
}
*/
// Create kv_def object?
});
//throw 'stop';
} else if (is_arr_of_arrs(obj_record_def)) {
//console.log('arr of arrs');
//console.log('storage.length', storage.length);
// Could be key and values defined, without any indexes defined.
var kv_def;
var indexes_defs = [];
//console.log('obj_record_def.length', obj_record_def.length);
if (obj_record_def.length === 2) {
//kv_def = storage;
// The whole thing is a key value pair?
// Or it's key value pair, then an index definition.
// storage[0] is array of arrs
//console.log('is_arr_of_arrs(obj_record_def[0])', is_arr_of_arrs(obj_record_def[0]));
if (is_arr_of_arrs(obj_record_def[0])) {
kv_def = obj_record_def[0];
indexes_defs = obj_record_def[1];
} else {
kv_def = obj_record_def;
}
}
if (obj_record_def.length === 3) {
kv_def = obj_record_def[0];
indexes_defs = obj_record_def[1];
throw 'stop';
}
this.kv_def = kv_def;
//console.log('this.kv_def = kv_def', this.kv_def = kv_def);
if (tof(kv_def[0]) !== 'array') {
console.trace();
throw 'stop';
}
var f;
each(kv_def[0], (key_field, i) => {
//console.log('key_field', key_field);
// don't know the type
f = this.add_field(key_field, null, null, true);
// then add it to the pk
this.pk.add_field(f);
});
each(kv_def[1], (value_field, i) => {
//console.log('value_field', value_field);
this.add_field(value_field);
});
//console.log('indexes_defs', indexes_defs);
//console.log('kv_def', kv_def);
each(indexes_defs, (index_def) => {
//var new_index = new Index(index_def);
//console.log('index_def', index_def);
this.add_index(index_def);
//indexes.push(new_index);
});
//throw 'stop';
//console.log('kv_def', kv_def);
//console.log('kv_def[0]', kv_def[0]);
//console.log('kv_def[1]', kv_def[1]);
// Adding fields should maybe put the field into the fields table.
// check the typr of these...
//console.log('tof(kv_def[0])', tof(kv_def[0]));
}
}
// Seems like the field would be added to the record definition.
// add_field('type', XAS2_NUMBER, tbl_native_types);
// Second param is the type.
// Third param is is_pk
// Fourth param is a table it is a foreign key reference to.
/*
add_field_to_fields_table(field) {
var table_fields = this.table.db.map_tables['table fields'];
var field_record = field.
table_fields.add_record();
}
*/
// Possibility of creating a new pk incrementor in the table if it's a pk field.
// Generally would be better to set the field type.
add_field(field, id = -1, i_type = null, is_pk = false, fk_to_table) {
//console.log('add_field');
// make the id -1 for no id set here, use incrementor.
// want better parameter handling.
// maybe do that later.
//console.log('i_type', i_type);
// Make choosing the type optional.
// Less about enforcing types, more about being able to recognise that an xas2 number (or more than one of them) has been given for a field which is a foreign key; if it's the right type, put it in.
// Then if a string value is given, we can do a lookup. Would need to know something like the name is the identifier, or we are giving it the name value to look up.
var a = arguments;
//console.log('add_field arguments', arguments);
// Needs to determine if the field is a PK.
var item_field, field_name, first_char, field_incrementor;
var table = this.table;
// make use of own field incrementor.
// Should have an indexes incrementor too?
// Where the index gets an id?
// This is Field parsing.
// Could move this code to Field.ensure_is_field
// Depending on the name of the field, the type may be given.
// This is the point where we assign the type of the field if it is indicated in the name.
//let get_field_type_from_name
if (field instanceof Field) {
item_field = field;
} else {
//var id;
if (id === -1 || id === null) {
id = table.inc_fields.increment();
} else {
}
field_name = a[0];
// This does look like field parsing, so could make a Field object from the 'field' object.
//console.log('field', field);
//console.log('tof field', tof(field));
// This is tricky, because the table is not fully defined.
// Its record def could be in the midst of being constructed.
//console.log('field_name', field_name);
// Or if we give the field a null type, it
item_field = new Field(field_name, table, id, i_type, is_pk, fk_to_table);
// raise change to record def
// change name: add_field
// Then could receive something back from the field object saying that it has an index?
// Saying that it is unique, then we set up the unique index.
}
//console.log('field_name', field_name);
//console.log('!!item_field', !!item_field);
//throw 'stop';
if (item_field) {
field_name = item_field.name;
//console.log('field_name', field_name);
//console.log('this.map_fields[field_name]', this.map_fields[field_name]);
if (!this.map_fields[field_name]) {
this.fields.push(item_field);
this.map_fields[field_name] = item_field;
if (item_field.is_pk) {
this.pk.add_field(item_field);
// Ensure the table has got a pk incrementor?
} else {
this.value.add_field(item_field);
}
this.raise('change', {
'name': 'add_field',
'value': item_field
});
}
//this.add_field_to_fields_table(item_field);
//throw 'stop';
// May be best to add it to the actual fields table?
}
return item_field;
}
// Maybe should not be done here, but within the DB as a separate process in getting the model as db rows.
// Seems easier not to keep these records of index structure, and calculate them when needed.
/*
add_index_to_index_table(index) {
var table_indexes = this.table.db.map_tables['table indexes'];
var field_record_def = index.to_arr_record_def();
console.log('field_record_def', JSON.stringify(field_record_def));
table_indexes.add_record(field_record_def);
}
*/
// Validation that records are in the correct format could be useful, however the encoding system is flexible so that it's not necessary in order to get the data stored.
// get map fields merges the fields from the pk with the value...?
// but would need to set it either within the primary key or value section.
// Possibly represent foreign keys as part of the field.
add_foreign_key(field_name, foreign_table) {
// foreign table is a string name or the table itself?
// Table is not imported at module level; use the owning table's constructor (as set_fk does).
const Table = this.table.constructor;
if (!(foreign_table instanceof Table)) {
foreign_table = this.table.db.map_tables[foreign_table];
if (!foreign_table) {
throw 'Table not found';
}
};
// link to the actual fk field?
// May keep a map of the foreign keys by field name / field number
// This way, when a record is added, its values can be looked up against the foreign key.
var fk = new Foreign_Key(field_name, foreign_table);
this.foreign_keys.push(fk);
this.map_foreign_keys[field_name] = fk;
this.raise('change', {
'name': 'add_foreign_key',
'value': fk
});
// Then also set the field so that it's now labelled as a foreign key.
return fk;
}
get_arr_record_index_values(arr_record) {
//console.log('arr_record', arr_record);
var res = [];
each(this.indexes, index => {
//console.log('index', index);
var arr_rec_idx = index.arr_record_to_index_arr_data(arr_record);
res.push(arr_rec_idx);
});
// The index fields array daya
// won't have index prefix, index number,
//console.log('res', res);
//throw 'stop';
return res;
}
add_index(idx) {
//could give it more params.
var a = arguments;
a.l = a.length;
// Index should maybe use OO index objects, if it would help to structure the code better.
// Really want to use some autoincrementing records.
// As well as autoincrementing records, we need indexes that are specified to be unique.
// Making indexes OO would help them to have a 'unique' property specified.
// We can then use the Index class objects themselves to construct queries.
// Indexes will be defined in an OO way.
// Index ids could come about through an incrementor. Each table would have an index incrementor.
// Indexes need an id (within the table)
//console.log('add_index a', a);
//
var idx_2;
//console.log('idx instanceof Index', idx instanceof Index);
if (idx instanceof Index) {
idx_2 = idx;
} else {
var sig = get_a_sig(a);
if (a.l === 1) {
//console.log('this', this);
//console.log('this.table.db', this.table.db);
//console.log('this.table.name', this.table.name);
//console.log('this.table', this.table);
var id = this.table.inc_indexes.increment();
//console.log('** add_index id', id);
//console.log('idx', idx);
// Adds an index to a field?
// is it a field in an array?
//console.log('tof(idx)', tof(idx));
//console.log('idx[0]', idx[0]);
// Quite a hack here, solves Idx inside arr inside arr
if (tof(idx) === 'array' && idx.length === 1 && tof(idx[0]) === 'array' && idx[0][0].__type_name === 'index') {
idx = idx[0][0];
}
idx_2 = new Index(idx, this.table, id);
// index with idx spec, to this table with given id.
//console.log('idx_2', idx_2);
} else {
//console.log('add_index sig', sig);
if (sig === '[n,a]') {
//var table_id = a[0];
var field_id = a[0];
var arr_kv_index_record = a[1];
//console.log('field_id', field_id);
//console.log('arr_kv_index_record', arr_kv_index_record);
// Need to deal with these arr key values coming in as numbers, not string field names.
//var id = this.table.inc_indexes.increment();
idx_2 = new Index(arr_kv_index_record, this.table, field_id);
//console.log('idx_2', idx_2);
//throw 'stop';
}
if (sig === '[n,n,a]') {
throw 'stop';
//var table_id = a[0];
//var field_id = a[0];
//var arr_kv_index_record = a[1];
//var arr_kv_index_record = a[1];
//console.log('field_id', field_id);
//console.log('arr_kv_index_record', arr_kv_index_record);
// Need to deal with these arr key values coming in as numbers, not string field names.
var id = this.table.inc_indexes.increment();
idx_2 = new Index(arr_kv_index_record, this.table, id);
//console.log('idx_2', idx_2);
//throw 'stop';
}
// n,n,a
// don't autoincrement the table's incrementor.
//throw 'stop';
}
}
//ar index_key_def = idx[0];
//var index_value_field = idx[1]; // foreign key?
this.indexes.push(idx_2);
//console.log('JSON.stringify(idx_2.key_field_names)', JSON.stringify(idx_2.key_field_names));
this.map_indexes_by_field_names[JSON.stringify(idx_2.key_field_names)] = idx_2;
// if it's an unique index, then add the is_unique property to the field.
//this.add_index_to_index_table(idx_2);
// and maintain a map of indexes too?
// Worth making the record in the index table.
// Index links key and value pairs. Value should be a primary key.
// Sometimes a primary key would be made up of a key and value.
// [table_id, index_id][index_type, kv_fields]
// will need to encode the various key and value fields as array of ids.
// we keep track of the field ids.
// Reconstructing the indexing system will help to create the records properly, creating the indexing records.
// Possibility of having that done on the server
// index.to_table_record_def
// Indexes are essentially a bunch of xas2 numbers that get stored.
//
// this.indexes.push([index_key_def, index_value_field]);
// idx
return idx_2;
}
get_field_names() {
var res = [];
each(this.fields, (field) => {
res.push(field.name);
});
return res;
}
get unique_fields() {
if (this._unique_fields) {
return this._unique_fields;
} else {
//console.log('this.fields', this.fields);
return this._unique_fields = this.fields.filter(x => x.is_unique);
}
}
get kv_field_names() {
let res = [
[],
[]
];
each(this.pk.fields, pk_field => {
res[0].push(pk_field.name);
})
//console.log('this.pk.fields.length', this.pk.fields.length);
//console.log('this.value.fields.length', this.value.fields.length);
each(this.value.fields, value_field => {
res[1].push(value_field.name);
});
//console.log('kv_field_names res', res);
return res;
}
get kv_fields() {
return [this.pk.fields, this.value.fields];
}
// indexes with single field.
// single_field_index_field_names
get indexed_field_names() {
// just indexes with one field.
let map_indexed = {};
let res = [];
each(this.indexes, idx => {
//console.log('idx', idx);
if (idx.key_fields.length === 1) {
if (!map_indexed[idx.key_fields[0].name]) {
res.push(idx.key_fields[0].name);
}
map_indexed[idx.key_fields[0].name] = true;
}
});
return res;
}
get indexed_field_names_and_ids() {
// just indexes with one field.
let map_indexed = {};
let res = [];
each(this.indexes, idx => {
//console.log('idx', idx);
if (idx.key_fields.length === 1) {
if (!map_indexed[idx.key_fields[0].name]) {
res.push([idx.key_fields[0].name, idx.key_fields[0].id]);
}
map_indexed[idx.key_fields[0].name] = true;
}
})
return res;
}
}
module.exports = Record_Def;<file_sep>/_key-set.js
// Not sure about using this.
// Because of how it does the comparison.
// May make new classes for both key and record, binary backed.
class Key_Set extends Set {
constructor(spec) {
super();
}
}
// Takes keys, which have a binary representation.
module.exports = Key_Set;
if (require.main === module) {
let ks = new Key_Set();
} else {
}<file_sep>/model.js
var model = {
'Database': require('./database'),
'Table': require('./table'),
'Record': require('./record'),
'Incrementor': require('./incrementor'),
'Paging': require('./paging'),
'encoding': require('./encoding'),
// Buffer backed
'Command_Message': require('./command-message'),
'Command_Response_Message': require('./command-response-message'),
'Key_List': require('./buffer-backed/key-list'),
'Record_List': require('./buffer-backed/record-list'),
'Index_Record_Key': require('./buffer-backed/index-record-key'),
'BB_Row': require('./buffer-backed/row'),
'BB_Index_Row': require('./buffer-backed/index-row'),
'BB_Record': require('./buffer-backed/record'),
'BB_Key': require('./buffer-backed/key'),
'BB_Value': require('./buffer-backed/value'),
//
}
module.exports = model;<file_sep>/command-response-message.js
// Will be worth changing these names / values to make them less confusing.
// message_type_id
const BINARY_PAGING_NONE = 0;
const BINARY_PAGING_FLOW = 1;
const BINARY_PAGING_LAST = 2;
const RECORD_PAGING_NONE = 3;
const RECORD_PAGING_FLOW = 4;
const RECORD_PAGING_LAST = 5;
const RECORD_UNDEFINED = 6;
// A whole message type for undefined record?
const KEY_PAGING_NONE = 7;
const KEY_PAGING_FLOW = 8;
const KEY_PAGING_LAST = 9;
// Simplest error message.
// Could have a number, then could have encoded text.
//
const ERROR_MESSAGE = 10;
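// Hedged sketch of the overall response layout built by the constructor below (xas2
// varints; bracketed names are descriptive only):
//   unpaged:  [message_id][message_type_id][payload]
//   paged:    [message_id][message_type_id][page_number][record list buffer]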
// -~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~
const lang = require('lang-mini');
const each = lang.each;
const get_a_sig = lang.get_a_sig;
const clone = lang.clone;
const tof = lang.tof;
const Evented_Class = lang.Evented_Class;
const get_truth_map_from_arr = lang.get_truth_map_from_arr;
const Binary_Encoding = require('binary-encoding');
const xas2 = require('xas2');
const Paging = require('./paging');
const database_encoding = require('./database');
const B_Record = require('./buffer-backed/record');
const B_Record_List = require('./buffer-backed/record-list');
// Will be used for storage, encoding and decoding.
// Will only / mainly store the data as the buffer, will read from and write to it.
// This reads the data from the whole page (in some cases.)
// May need to tell it what type of message it is after all.
//
// This is going to handle unpaging as well.
// (maybe)
// Functionality to split up the internal buffer according to the message_type_id
// Easy encoding of a Record as a result through the constructor.
class Command_Response_Message {
constructor(spec) {
let a = arguments,
l = a.length;
if (l === 1) {
let t_spec = tof(spec);
if (t_spec === 'buffer') {
this._buffer = spec;
}
} else {
if (l === 2) {
// a message id and a record.
if (typeof a[0] === 'number' && a[1] instanceof B_Record) {
// not paged, record encoding
// RECORD_PAGING_NONE
//console.log('a[0]', a[0]);
//console.log('a[1]', a[1]);
let record_buf = a[1].buffer;
//console.log('record_buf', record_buf);
this._buffer = Buffer.concat([xas2(a[0]).buffer, xas2(RECORD_PAGING_NONE).buffer, record_buf]);
} else {
// a1 instaceof array
if (typeof a[0] === 'number' && Array.isArray(a[1])) {
let all_are_records = true;
each(a[1], item => {
all_are_records = all_are_records && item instanceof B_Record
});
if (all_are_records) {
let rl = new B_Record_List(a[1]);
//
this._buffer = Buffer.concat([xas2(a[0]).buffer, xas2(RECORD_PAGING_NONE).buffer, rl.buffer]);
}
// if they are all records...?
// are they all records?
}
// use binary encoding on them
// Array of objects - will need to encode them.
// Array of b_records?
// Want a standard / built-in way of encoding B_Records with Binary_Encoding.
// array of B_Records
// Encode them into a B_Record list.
//console.log('Command_Response_Message spec a', a);
//console.trace();
//throw 'NYI';
}
}
// Assume no paging?
// May want to include a binary record in this?
// Specific record encoding makes sense here.
// (message_id, BINARY_PAGING_NONE, Binary_Encoding.encode_to_buffer(res));
//
if (l === 3) {
// and the page number?
// need to be able to include the page number in the response.
let [message_id, message_type_id, buf_inner] = a;
//console.log('Command_Response_Message buf_inner', buf_inner);
if (message_type_id === BINARY_PAGING_NONE) {
this._buffer = Buffer.concat([xas2(message_id).buffer, xas2(message_type_id).buffer, buf_inner]);
} else {
throw 'NYI';
}
}
if (l === 4) {
let [message_id, message_type_id, page_number, data] = a;
if (Array.isArray(data) && data[0] instanceof B_Record) {
let rl = new B_Record_List(data);
//this._buffer = rl.bu
this._buffer = Buffer.concat([xas2(message_id).buffer, xas2(message_type_id).buffer, xas2(page_number).buffer, rl.buffer]);
} else {
console.trace();
throw 'NYI';
}
}
}
// Want this to hold the whole message to avoid problems.
// Make sure that this contains the message id.
}
get id() {
// maybe don't know the message id.
// It seems worthwhile (maybe from now on) keeping the message id.
let [id, pos] = xas2.read(this._buffer, 0);
return id;
}
/*
get command_id() {
let [id, pos] = xas2.skip(this._buffer, 0);
[id, pos] = xas2.read(this._buffer, pos);
return id;
}
*/
// then (possibly) the paging/message type.
// call it message_type
// it always starts with message_type_id
get message_type_id() {
let [id, pos] = xas2.skip(this._buffer, 0);
//[id, pos] = xas2.skip(this._buffer, pos);
[id, pos] = xas2.read(this._buffer, pos);
return id;
}
// get paged bool
get paged() {
return (this.message_type_id === BINARY_PAGING_FLOW || this.message_type_id === BINARY_PAGING_LAST ||
this.message_type_id === RECORD_PAGING_FLOW || this.message_type_id === RECORD_PAGING_LAST ||
this.message_type_id === KEY_PAGING_FLOW || this.message_type_id === KEY_PAGING_LAST
)
}
get is_last() {
//console.log('this.message_type_id', this.message_type_id);
//console.log('this._buffer', this._buffer);
// Maybe not when there is no paging.
// Though technically it is the last.
return (this.message_type_id === BINARY_PAGING_NONE || this.message_type_id === BINARY_PAGING_LAST || this.message_type_id === RECORD_PAGING_NONE || this.message_type_id === RECORD_PAGING_LAST || this.message_type_id === KEY_PAGING_NONE || this.message_type_id === KEY_PAGING_LAST);
}
//get buffer
get buffer() {
// The whole thing as a buffer.
// The message id, the message encoding type (message_type_id) ...
return this._buffer;
//return this.value_buffer;
}
get value_buffer() {
let [message_type_id, pos] = xas2.skip(this._buffer, 0);
let page_number;
//[id, pos] = xas2.skip(this._buffer, pos);
[message_type_id, pos] = xas2.read(this._buffer, pos);
if (message_type_id === RECORD_PAGING_LAST) {
// break it into records.
// num records here?
            [page_number, pos] = xas2.read(this._buffer, pos);
            let buf2 = Buffer.alloc(this._buffer.length - pos);
            this._buffer.copy(buf2, 0, pos);
return buf2;
} else {
throw 'NYI';
}
}
// Don't really just store the kv records in a single buffer.
// Always have used array kv pairs. Not sure that's most performant.
get kvp_buffers() {
let [message_type_id, pos] = xas2.skip(this._buffer, 0);
let page_number;
const remove_kp = false;
//[id, pos] = xas2.skip(this._buffer, pos);
[message_type_id, pos] = xas2.read(this._buffer, pos);
if (message_type_id === RECORD_PAGING_LAST) {
// break it into records.
// num records here?
[page_number, pos] = xas2.read(this._buffer, pos);
//console.log('page_number', page_number);
let buf2 = Buffer.alloc(this._buffer.length - pos);
this._buffer.copy(buf2, 0, pos);
return Binary_Encoding.split_length_item_encoded_buffer_to_kv(buf2, remove_kp);
        } else if (message_type_id === RECORD_PAGING_FLOW) {
// break it into records.
// num records here?
[page_number, pos] = xas2.read(this._buffer, pos);
//console.log('page_number', page_number);
let buf2 = Buffer.alloc(this._buffer.length - pos);
this._buffer.copy(buf2, 0, pos);
return Binary_Encoding.split_length_item_encoded_buffer_to_kv(buf2, remove_kp);
} else {
//console.log('message_type_id', message_type_id);
//throw 'NYI';
[page_number, pos] = xas2.read(this._buffer, pos);
//console.log('page_number', page_number);
let buf2 = Buffer.alloc(this._buffer.length - pos);
this._buffer.copy(buf2, 0, pos);
return Binary_Encoding.split_length_item_encoded_buffer_to_kv(buf2, remove_kp);
}
}
get page_number() {
let [message_type_id, pos] = xas2.skip(this._buffer, 0);
let page_number;
//[id, pos] = xas2.skip(this._buffer, pos);
[message_type_id, pos] = xas2.read(this._buffer, pos);
if (message_type_id === RECORD_PAGING_LAST) {
// break it into records.
// num records here?
[page_number, pos] = xas2.read(this._buffer, pos);
        } else if (message_type_id === RECORD_PAGING_FLOW) {
[page_number, pos] = xas2.read(this._buffer, pos);
} else {
[page_number, pos] = xas2.read(this._buffer, pos);
}
return page_number;
}
get unpaged() {
//let kvp_buffers = this.kvp_buffers;
// How about creating new Record objects...
return this.kvp_buffers.map(arr => new B_Record(arr));
}
// decoded?
// getting the value seems most important.
// value_buffers
get value() {
// message id
let message_type_id;
// response message does not yet contain the return message id?
//console.log('this._buffer', this._buffer);
let [message_id, pos] = xas2.read(this._buffer, 0);
//console.log('pos', pos);
//
let page_number;
const remove_kp = false;
//[id, pos] = xas2.skip(this._buffer, pos);
[message_type_id, pos] = xas2.read(this._buffer, pos);
console.log('message_type_id', message_type_id);
if (message_type_id === RECORD_PAGING_FLOW) {
// break it into records.
// num records here?
//console.log('RECORD_PAGING_FLOW');
[page_number, pos] = xas2.read(this._buffer, pos);
//console.log('page_number', page_number);
let buf2 = Buffer.alloc(this._buffer.length - pos);
this._buffer.copy(buf2, 0, pos);
return new B_Record_List(buf2).arr;
//return database_encoding.decode_model_rows(Binary_Encoding.split_length_item_encoded_buffer_to_kv(buf2), remove_kp);
} else if (message_type_id === RECORD_PAGING_LAST) {
// break it into records.
// num records here?
[page_number, pos] = xas2.read(this._buffer, pos);
//console.log('page_number', page_number);
let buf2 = Buffer.alloc(this._buffer.length - pos);
this._buffer.copy(buf2, 0, pos);
return new B_Record_List(buf2).arr;
} else if (message_type_id === RECORD_PAGING_NONE) {
// Just a single record?
//console.log('RECORD_PAGING_NONE');
// include buffer_xas2_prefix
//console.log('**a pos', pos);
let buf2 = Buffer.alloc(this._buffer.length - pos);
this._buffer.copy(buf2, 0, pos);
console.log('buf2', buf2);
let arr_records = new B_Record_List(buf2).arr;
//console.log('arr_records', arr_records);
console.log('arr_records.length', arr_records.length);
//console.log('arr_records[0].decoded', arr_records[0].decoded);
//console.log('arr_records[1].decoded', arr_records[1].decoded);
//console.log('arr_records[2].decoded', arr_records[2].decoded);
//console.log('arr_records[0].buffer', arr_records[0].buffer);
//console.log('arr_records[1].buffer', arr_records[1].buffer);
//console.log('arr_records[2].buffer', arr_records[2].buffer);
if (arr_records.length === 1) {
return arr_records[0];
} else {
return arr_records;
}
} else if (message_type_id === BINARY_PAGING_LAST) {
// BINARY_PAGING_LAST = 2
// read the page number
// decode it as binary.
// A way of saying that it's just a single value in the array?
[page_number, pos] = xas2.read(this._buffer, pos);
//console.log('page_number', page_number);
let buf2 = Buffer.alloc(this._buffer.length - pos);
this._buffer.copy(buf2, 0, pos);
// Decoding from position without buffer copy?
let res = Binary_Encoding.decode(buf2);
//console.log('res', res);
//console.log('this.singular_result', this.singular_result);
if (this.singular_result) {
return res[0];
} else {
return res;
}
} else {
throw 'NYI';
}
}
// This will also have
}
module.exports = Command_Response_Message;<file_sep>/buffer-backed/record.js
/*
17/05/2018 - Maybe this is a 'row' rather than a 'record'. A record encompasses index rows as well.
// A record could be composed of its index rows too.
// Removal of index rows when the record changes may be the best approach.
*/
const Row = require('./row');
const lang = require('lang-mini');
const def = lang.is_defined;
const each = lang.each;
let Binary_Encoding = require('binary-encoding');
let xas2 = require('xas2');
let Key = require('./key');
let Value = require('./value');
const database_encoding = require('../encoding');
const XAS2 = 0;
const STRING = 4;
const BUFFER = 9;
const ARRAY = 10;
// Standard data 0. just normal decoding.
// ~-~-~-~-~-~-~-~-~-~-~-~-~-
// Supplementary encoding
/*
const NONE = 0;
const RECORD = 1;
const KEY = 2;
const VALUE = 3;
*/
const RECORD = 200;
const KEY = 201;
const VALUE = 202;
const NONE = 210;
// Record_Row
// That should be what this is really called.
// A row is not necessarily a record row. A record itself has got index rows too sometimes.
class Record extends Row {
constructor(...args) {
super(...args);
//super.apply(this, a);
}
get table_id() {
let kp = this.kp;
if (kp === 0) {
throw 'NYI';
} else {
if (kp % 2 === 0) {
// even#
return (kp - 2) / 2;
// if its an incrementor, maybe we can know the table from it.
} else {
return (kp - 3) / 2;
}
}
}
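    // Worked example (added, hedged): the table layer assigns key_prefix = 2 + table_id * 2 and
    // indexes_key_prefix = key_prefix + 1, so the arithmetic above inverts that mapping:
    //   kp 2  -> table 0 (data rows),   kp 3  -> table 0 (index rows)
    //   kp 4  -> table 1 (data rows),   kp 5  -> table 1 (index rows)
    //   kp 10 -> table 4 (data rows),   kp 11 -> table 4 (index rows)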
get_index_rows(model_table) {
// Index row spec:
// index fields in order, then the key
// An Index_Row could be quite useful as a specific type of row.
// Record_List could produce these when asked.
// go through the indexes
//
// Different types of rows would have their own validation.
each(this.record_def.indexes.indexes, index => {
// then go through the index fields.
// use the field numbers / ids
// key fields and value fields.
// .kv_fields
// then use the ids.
// An Index_Row would help.
// get the keys and values for the index, then give them to the index row constructor.
// index row constructor will encode those double KPs in the constructor.
// index fields ids
let kv_field_ids = index.kv_field_ids;
// then read the field values into new kv arrays
let index_value_kv = [[], []];
// then go through each of them, putting them into the kv values.
// Extracting out the specific fields from the keys and the values.
// Database encoding get_values_at
// and give it a fwew indexes (in sorted order) to select from.
// it advances through those, skipping the ones that don't get selected from.
// There is buffer_select_from_buffer.
// it looks like it can be done.
// This will be a fairly high-performance way to get from the index kv defs and the b_record_row to the index b_rows
// [buf_selected_value_fields, total_value_fields_count] = Binary_Encoding.buffer_select_from_buffer(buf_value, arr_indexes, 0, 0, total_key_fields_count);
//
});
}
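    // Hypothetical sketch (added for illustration only - the exact layout is defined elsewhere,
    // e.g. in Index.bb_record_to_bb_index_record). Per the "index row spec" comment above, an
    // index row for a record keyed [table_kp, ...pk] with indexed field values [v0, v1] would be
    // expected to look roughly like:
    //   key:   [table_kp + 1, index_id, v0, v1, ...pk]
    //   value: [] (or whichever non-key fields the index chooses to carry)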
}
module.exports = Record;<file_sep>/table.js
const lang = require('lang-mini');
const tof = lang.tof;
const xas2 = require('xas2');
const each = lang.each;
const is_arr_of_strs = lang.is_arr_of_strs;
const is_arr_of_arrs = lang.is_arr_of_arrs;
const get_a_sig = lang.get_a_sig;
const Evented_Class = lang.Evented_Class;
const Incrementor = require('./incrementor');
const Record = require('./record');
const Field = require('./field');
const Index = require('./index-def');
const Foreign_Key = require('./foreign-key');
//var Database = require('./database');
const Binary_Encoding = require('binary-encoding');
const encode_to_buffer = Binary_Encoding.encode_to_buffer;
const Record_Def = require('./record-def');
const Table_Record_Collection = require('./table-record-collection');
//var Model_Database = require('./database');
const database_encoding = require('./encoding');
// Each table will have its lower level data in the DB, and means of interacting with it here.
const table_table_key_prefix = 2;
const special_characters = {
'!': true,
'+': true
}
let kp_to_range = buf_kp => {
let buf_0 = Buffer.alloc(1);
buf_0.writeUInt8(0, 0);
let buf_1 = Buffer.alloc(1);
buf_1.writeUInt8(255, 0);
return [Buffer.concat([buf_kp, buf_0]), Buffer.concat([buf_kp, buf_1])];
}
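// Illustrative example (added, hedged): kp_to_range brackets every key that starts with the given
// key-prefix buffer by appending 0x00 / 0xFF. Assuming xas2(4) encodes as the single byte 0x04:
//   kp_to_range(xas2(4).buffer)  ->  [<Buffer 04 00>, <Buffer 04 ff>]
// which is the [lower, upper] pair returned by the key_range getter below for a table whose
// key_prefix is 4.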
class Table extends Evented_Class {
constructor(spec) {
super(spec);
var a = arguments,
sig;
a.l = a.length;
sig = get_a_sig(a);
var id;
this.record_def = new Record_Def(null, this);
this.__type_name = 'table';
var that = this,
t, field_name, new_field;
// The table linking back to the db, so that it can get the global incrementor.
var name, db, storage;
var spec_record_def;
if (a.length === 1) {
var t_spec = tof(spec.name);
if (t_spec === 'string') this.name = spec.name;
}
if (a.length === 2) {
var ta0 = tof(a[0]);
//console.log('ta0', ta0);
if (ta0 === 'array') {
console.log('a[0]', a[0]);
console.trace();
throw 'stop';
}
name = a[0];
this.db = db = a[1];
}
if (a.length === 3) {
if (sig === '[s,?,n]') {
name = a[0];
this.db = db = a[1];
this.id = id = a[2];
} else {
name = a[0];
this.db = db = a[1];
spec_record_def = a[2];
}
}
if (sig === '[s,?,n,a]') {
name = a[0];
this.db = db = a[1];
this.id = id = a[2];
if (a[3].length === 3 || a[3].length === 4) {
if (tof(a[3][0]) === 'number') {
var inc_fields_id = a[3][0];
var inc_indexes_id = a[3][1];
var inc_foreign_keys_id = a[3][2];
this.inc_fields = db.incrementors[inc_fields_id];
this.inc_indexes = db.incrementors[inc_indexes_id];
this.inc_foreign_keys = db.incrementors[inc_foreign_keys_id];
} else {
this.inc_fields = a[3][0];
this.inc_indexes = a[3][1];
this.inc_foreign_keys = a[3][2];
}
}
if (a[3].length === 4) {
if (tof(a[3][0]) === 'number') {
var inc_pk_id = a[3][3];
this.pk_incrementor = db.incrementors[inc_pk_id];
} else {
this.pk_incrementor = a[3][3];
}
}
}
if (name) this.name = name;
if (db) {
if (!this.inc_fields) this.inc_fields = db.new_incrementor('inc_field_' + this.name);
if (!this.inc_indexes) this.inc_indexes = db.new_incrementor('inc_idx_' + this.name);
if (!this.inc_foreign_keys) this.inc_foreign_keys = db.new_incrementor('inc_fk_' + this.name);
if (typeof this.id === 'undefined') {
id = this.id = db.map_incrementors['table'].increment();
//console.log('id', id);
//console.log('name', name);
}
} else {
console.trace();
// Use a pseudo-incrementor?
// As in we don't add the incrementors to the database (yet)
// For the moment we just want to create the table object, not sure if we want it added to the db.
throw 'Create new incrementors not connected to db';
}
this.record_def.on('change', e_change => {
//console.log('record_def e_change', e_change);
// raise a change event on the Table
//console.log('pre raise table (this) change');
this.raise('change', e_change);
});
if (spec_record_def) {
//console.log('spec_record_def', spec_record_def);
this.record_def.set_def(spec_record_def);
//this.record_def = new Record_Def(spec_record_def, this);
} else {
//this.record_def = new Record_Def(null, this);
}
this.records = new Table_Record_Collection(this);
this.key_prefix = 2 + id * 2;
this.indexes_key_prefix = this.key_prefix + 1;
//throw 'stop';
var new_inc, is_key, first_char;
}
get inward_fk_tables() {
// search the db?
        console.log('this.db.map_tables_incoming_fks', this.db.map_tables_incoming_fks);
        console.log('this.db.map_tables_incoming_fks[this.id]', this.db.map_tables_incoming_fks[this.id]);
        return this.db.map_tables_incoming_fks[this.id];
}
get buf_kp() {
if (this._buf_kp) {
return this._buf_kp;
} else {
this._buf_kp = xas2(this.key_prefix).buffer;
return this._buf_kp;
}
}
set_fk() {
return this.record_def.set_fk.apply(this.record_def, arguments);
}
set_pk() {
return this.record_def.set_pk.apply(this.record_def, arguments);
}
add_index() {
//this.record_def.add_index.apply(this.record_def, arguments);
return this.record_def.add_index.apply(this.record_def, arguments);
//return this.record_def.add_index.apply(this, arguments);
}
/*
add_field() {
var args = Array.prototype.slice.call(arguments);
args.push(this);
return this.record_def.add_field.apply(this.record_def, args);
}
*/
get unique_fields() {
console.trace();
throw 'NYI';
// Can't specify unique field constraints / indexes right now.
//return this.record_def.fields.filter(field => field.is_unique);
return this.record_def.unique_fields;
}
get incrementors() {
var res;
if (this.pk_incrementor) {
res = [this.inc_fields, this.inc_indexes, this.inc_foreign_keys, this.pk_incrementor];
} else {
res = [this.inc_fields, this.inc_indexes, this.inc_foreign_keys];
}
return res;
}
get own_incrementor_ids() {
var res;
if (this.pk_incrementor) {
res = [this.inc_fields.id, this.inc_indexes.id, this.inc_foreign_keys.id, this.pk_incrementor.id];
} else {
res = [this.inc_fields.id, this.inc_indexes.id, this.inc_foreign_keys.id];
}
return res;
}
add_field() {
//var args = Array.prototype.slice.call(arguments);
//args.push(this);
//console.log('table add_field ', arguments);
//console.log('table add_field ');
return this.record_def.add_field.apply(this.record_def, arguments);
}
add_record() {
return this.records.add_record.apply(this.records, arguments);
}
add_records() {
//console.log('this.records', this.records);
return this.records.add_records.apply(this.records, arguments);
}
ensure_records_no_overwrite() {
// ensure_records_no_overwrite
return this.records.ensure_records_no_overwrite.apply(this.records, arguments);
}
add_records_including_table_id_in_key() {
return this.records.add_records_including_table_id_in_key.apply(this.records, arguments);
}
// add_records_including_table_id_in_key
new_record() {
return this.records.new_record.apply(this.records, arguments);
}
new_records() {
return this.records.new_records.apply(this.records, arguments);
}
add_arr_table_records(at_records) {
return this.records.add_arr_table_records.apply(this.records, arguments);
}
get primary_key() {
return this.record_def.pk;
}
get pk() {
return this.record_def.pk;
}
get kp() {
return this.key_prefix;
}
get map_fields() {
return this.record_def.map_fields;
}
get fields() {
//return Array.concat();
return this.record_def.fields;
}
get indexes() {
//return Array.concat();
return this.record_def.indexes;
}
get field_names() {
var res = [];
each(this.fields, (field) => {
res.push(field.name);
});
return res;
}
get kv_field_names() {
return this.record_def.kv_field_names;
}
get kv_fields() {
return this.record_def.kv_field_names;
}
get indexed_field_names() {
return this.record_def.indexed_field_names;
}
get indexed_field_names_and_ids() {
return this.record_def.indexed_field_names_and_ids;
}
get map_indexes_by_field_names() {
return this.record_def.map_indexes_by_field_names;
}
get inward_fk_refs() {
let res = [];
each(this.db.tables, table => {
let has_ref = false;
each(table.fields, field => {
if (field.fk_to_table === this) {
has_ref = true;
}
});
//if (table.)
if (has_ref) {
res.push(table.name);
}
})
return res;
}
get structure_record() {
let incrementor_ids;
//console.log('!!this.pk_incrementor', !!this.pk_incrementor);
if (this.pk_incrementor) {
incrementor_ids = [this.inc_fields.id, this.inc_indexes.id, this.inc_foreign_keys.id, this.pk_incrementor.id];
} else {
incrementor_ids = [this.inc_fields.id, this.inc_indexes.id, this.inc_foreign_keys.id];
}
let res = [
[table_table_key_prefix, this.id],
[this.name, incrementor_ids]
]
return res;
}
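    // Illustrative example (added, hedged): for a table named 'users' with id 1 whose own
    // incrementors happen to have ids [4, 5, 6], structure_record evaluates to the plain
    // [key, value] row
    //   [[2, 1], ['users', [4, 5, 6]]]
    // i.e. keyed under table_table_key_prefix (2) plus this table's id, with the table name and
    // its incrementor ids as the value.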
get buf_structure_record() {
var prefix = this.key_prefix;
var res = Binary_Encoding.encode_pair_to_buffers(this.structure_record, prefix);
return res;
}
get outward_fk_refs() {
let res = [];
let map = {};
each(this.fields, field => {
if (field.fk_to_table && !map[field.fk_to_table.name]) {
map[field.fk_to_table.name] = true;
res.push(field.fk_to_table.name);
}
});
return res;
}
get fields_info() {
let table = this;
//console.log('table', table);
let fields = table.fields;
//console.log('fields', fields);
let res = [];
each(fields, field => {
let id = field.id;
let name = field.name;
let fk_to_table = field.fk_to_table;
let type_id = field.type_id;
let obj_res = {
'id': id,
'name': name,
'type_id': type_id
}
if (fk_to_table) {
let fk_pk = fk_to_table.pk;
let fk_pk_fields = fk_pk.fields;
let fk_to_fields = [];
each(fk_pk_fields, fk_to_field => {
fk_to_fields.push([fk_to_field.id, fk_to_field.name, fk_to_field.type_id]);
})
obj_res.fk_to = {
'table_name': fk_to_table.name,
'table_id': fk_to_table.id,
'fields': fk_to_fields
}
}
res.push(obj_res);
});
return res;
}
get_map_lookup(field_name) {
var i_field = this.map_fields[field_name].id;
//console.log('i_field', i_field);
var res = {};
this.records.each((record) => {
var arr_rec = record.to_flat_arr();
var field_value = arr_rec[i_field];
res[field_value] = record.key;
});
return res;
}
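    // Usage sketch (added, hedged): builds a field-value -> record-key lookup over the in-memory
    // records, e.g.
    //   const by_name = table.get_map_lookup('name');
    //   // by_name['users'] would then hold the key of the record whose 'name' field is 'users'
    // (get_obj_map, aliased near module.exports below, is the same call.)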
get_all_db_records() {
var arr_records = this.records.get_all_db_records.apply(this.records, arguments);
return arr_records;
}
get b_records() {
let res = [];
each(this.records.arr_records, record => {
each(record.to_b_records(), b_record => {
res.push(b_record);
})
})
return res;
//return this.records.arr_records.map(x => x.to_b_records());
}
get_all_db_records_bin() {
var buf_records = this.records.get_all_db_records_bin.apply(this.records, arguments);
return buf_records;
}
get_arr_data_index_records() {
// Get it for a specific record...
return this.records.get_arr_data_index_records.apply(this.records, arguments);
}
get_record_bb_index_records(record) {
let i = this.indexes,
l = i.length;
let res = new Array(l);
for (let c = 0; c < l; c++) {
res[c] = i[c].bb_record_to_bb_index_record(record);
}
//console.log('get_record_bb_index_records res', res);
//throw 'stop';
return res;
}
get_index_id_by_field_name(field_name) {
//console.log('field_name', field_name);
        let field_id = this.map_fields[field_name].id;
return this.get_index_id_by_field_id(field_id);
}
get_index_id_by_field_id(field_id) {
let res;
each(this.indexes, (index, i, stop) => {
//console.log('index', index);
if (index.key_fields.length === 1) {
let kf0 = index.key_fields[0];
//console.log('Object.keys(kf0)', Object.keys(kf0));
if (kf0.id === field_id) {
res = index.id;
stop();
}
} else {
throw 'NYI';
}
})
return res;
}
buf_pk_query(arr_pk_part) {
var res = Binary_Encoding.encode_to_buffer(arr_pk_part, this.key_prefix);
return res;
}
get key_range() {
return kp_to_range(xas2(this.kp).buffer);
}
get buf_structure() {
// need the incrementor's records.
// table table record
// then the table field records
let all_buf_records = [];
let buf_inc_records = [];
let buf_kvs = [];
//let bufs_encoded_rows = [];
//throw 'stop';
// console.log('buf_inc_records', buf_inc_records);
let ttr = this.structure_record;
//console.log('ttr', ttr);
//throw 'stop';
// Model_Database.encode_arr_rows_to_buf
let rows = [];
rows.push(ttr);
//bufs_encoded_rows.push(encode_model_row(Binary_Encoding.encode_pair_to_buffers(ttr, 2)));
each(this.incrementors, incrementor => {
//let buf_inc = incrementor.get_all_db_records()[0];
//console.log('buf_inc', buf_inc);
let inc_row = incrementor.get_record();
rows.push(inc_row);
});
//throw 'stop';
each(this.fields, field => {
let kv_field = field.get_kv_record();
rows.push(kv_field);
//const kp_fields_table = 6;
//kv_field[0].unshift(kp_fields_table);
// Then encode these model rows, with that kp.
console.log('kv_field', kv_field);
//bufs_encoded_rows.push(encode_model_row(Binary_Encoding.encode_pair_to_buffers(kv_field, 6)));
})
each(rows, row => console.log('row', row));
let buf_encoded_rows = database_encoding.encode_rows_including_kps_to_buffer(rows);
//console.log('* buf_encoded_rows', buf_encoded_rows);
//throw 'stop';
return buf_encoded_rows;
}
validate_row(row) {
var res = true;
var r2 = [row[0].slice(1), row[1]];
if (r2[0].length + r2[1].length !== this.fields.length) {
res = false;
} else {
// check the fields of this to see if the types match.
}
return res;
}
// own table table record
// may be good to retain a link to it.
// maybe it does not exist...
get own_table_table_record() {
let tbl_tables = this.db.map_tables['tables'];
let own_index_record_retrieved = tbl_tables.records.indexes[0][JSON.stringify([this.name])];
return own_index_record_retrieved;
}
}
var p = Table.prototype;
p.get_obj_map = p.get_map_lookup;
module.exports = Table;<file_sep>/buffer-backed/key-list.js
//"use strict";
let Binary_Encoding = require('binary-encoding');
let xas2 = require('xas2');
let Record = require('./record');
let Key = require('./key');
// Could have option of encoding this as its own item type.
// However, by default will encode itself as an array of keys.
// It looks like a specific encoding item type for 'key' is necessary
// Would be a new type for Binary_Encoding. Wondering about plugins. Hardcoding is likely faster.
// Would be a somewhat significant change on the server. The code would become simpler. The protocol slightly longer, but not by much (1 byte).
// Would better enable keys, values and records to be constructed / deconstructed on the server.
// Its own encoding type may be useful, but would need an extended Binary_Encoding.
// Seems OK to say this is an array?
// Though decoding it as a Key_List makes the most sense.
const XAS2 = 0;
const STRING = 4;
const BUFFER = 9;
const ARRAY = 10;
class Key_List {
constructor() {
let a = arguments,
l = a.length;
// given an array
// array of key items, but then what type of key items are they
let arr_key_items;
// And key-list could take a record-list.
// Would extract or reference the keys.
if (l > 1) {
// have been given an array of key items
arr_key_items = a;
} else {
if (Array.isArray(a[0])) {
arr_key_items = a[0];
} else {
if (a[0] instanceof Buffer) {
this._buffer = a[0];
}
}
}
let process_key_item_to_buffer = (key_item) => {
//console.log('process_key_item_to_buffer', key_item);
// Will say each key is encoded as its own buffer.
// These key items should all decode to buffers, and then decode to individual rows.
// Iterate / read through the buffers. Could use generator. Then could decode each of them.
// then depending on the type of the key item.
// if it's an array
let res;
if (Array.isArray(key_item)) {
// depending on if it's an INCREMENTOR_KEY, RECORD_KEY, INDEX_KEY
// Inc key: var bufs_key = Buffer.concat([xas2(0).buffer, xas2(this.id).buffer, xas2(STRING).buffer, xas2(buf_name.length).buffer, buf_name]);
// 2 xas2s, then a string name
let kp0 = key_item[0];
// Specific type encoding for DB_KEY would be useful.
// Being able to get it back through Binary_Encoding or DB_Binary_Encoding, or DB_Encoding.
// Don't have this type within the db encoding system right now.
if (kp0 === 0) {
// incrementor
// Announce the type as an INCREMENTOR_KEY
// Individual buffers here.
//
// Don't say it's a string.
// The whole thing as a buffer.
// Each item should be encoded as a buffer first.
// Not making specific key encoding for the moment. Have what is available.
let buf_key = Buffer.concat([xas2(0).buffer, xas2(key_item[1]).buffer, xas2(STRING).buffer, xas2(key_item[2].length).buffer, Buffer.from(key_item[2])]);
let buf_item = Buffer.concat([xas2(BUFFER).buffer, xas2(buf_key.length).buffer, buf_key]);
res = buf_item;
} else {
if (kp0 % 2 === 0) {
// it's even, so a record key
// has one kp.
// want to be able to call encode_to_buffer, giving it the number of KPs to use.
//return Binary_Encoding.encode_to_buffer_use_kps(key_item, 1);
let buf_key = Binary_Encoding.encode_to_buffer_use_kps(key_item, 1);
let buf_item = Buffer.concat([xas2(BUFFER).buffer, xas2(buf_key.length).buffer, buf_key]);
res = buf_item;
//this._buffer = Binary_Encoding.encode_to_buffer()
} else {
// odd, so an index key
//return Binary_Encoding.encode_to_buffer_use_kps(key_item, 2);
let buf_key = Binary_Encoding.encode_to_buffer_use_kps(key_item, 2);
//console.log('[xas2(BUFFER).length, xas2(buf_key).length, buf_key]', [xas2(BUFFER), xas2(buf_key.length), buf_key]);
let buf_item = Buffer.concat([xas2(BUFFER).buffer, xas2(buf_key.length).buffer, buf_key]);
res = buf_item;
}
}
} else if (key_item instanceof Buffer) {
// Encode it as a buffer with length?
// Could check that it starts with BUFFER, then its length, and that it matches that length?
// If not, wrap that way.
// ensure that it is encoded with BUFFER TYPE, LENGTH, CONTENT
// Yes, encoding its length makes sense... but BE.ensure_length_encoded(key_item)
//let buf_item = Buffer.concat([xas2(BUFFER).buffer, xas2(buf_key.length).buffer, key_item]);
//res = buf_item;
throw 'NYI';
} else if (key_item instanceof Record) {
let buf_key = key_item.kvp_bufs[0];
res = Buffer.concat([xas2(BUFFER).buffer, xas2(buf_key.length).buffer, buf_key]);
} else {
throw 'NYI';
}
//console.log('key res', res);
return res;
}
let process_arr_key_items_to_buffer = (arr_key_items) => {
let item;
// then say its an array
let bufs_res = new Array(arr_key_items.length);
for (let c = 0, l = arr_key_items.length; c < l; c++) {
//item = arr_key_items[c];
bufs_res[c] = process_key_item_to_buffer(arr_key_items[c]);
}
return Buffer.concat(bufs_res);
}
if (arr_key_items) {
// but are they all buffers?
//console.log('arr_key_items', arr_key_items);
// what if they are of type 'Record'?
this._buffer = process_arr_key_items_to_buffer(arr_key_items);
}
}
get length() {
return Binary_Encoding.count_encoded_items(this._buffer);
}
get buffer() {
return this._buffer;
}
/*
[Symbol.iterator]() {
yield 1;
yield 2;
yield 3;
}
*/
* iterator() {
//for (let key in this.elements) {
// var value = this.elements[key];
// yield value;
//}
//yield 1;
//yield 2;
//yield 3;
// need to read through the items.
let pos = 0;
let complete = false;
let l = this._buffer.length;
let type_id, buf_l;
let b = this._buffer;
//console.log('l', l);
while (pos < l) {
//[type_]
//console.log('2) pos', pos);
[type_id, pos] = xas2.read(b, pos);
[buf_l, pos] = xas2.read(b, pos);
// then can copy alloc and copy to the new buf
let item_buf = Buffer.alloc(buf_l);
b.copy(item_buf, 0, pos, pos + buf_l);
//console.log('* item_buf', item_buf);
// Could yield a proper key instead.
let item = new Key(item_buf);
yield item;
pos = pos + buf_l;
//console.log('buf_l', buf_l);
//console.log('3) pos', pos);
}
//console.log('while complete');
}
[Symbol.iterator]() {
return this.iterator();
}
//
// Need to be able to iterate through these keys.
// buffer type code, buffer length, buffer itself
// Could get the buffer-backed keys as an array
// Iterate through them as an array
// Not getting the buffer-backed keys, getting the array items
get decoded() {
throw 'NYI';
// Need to process through the items encoded within this, one by one.
/*
var key_1st_value = Binary_Encoding.decode_first_value_xas2_from_buffer(buf_key);
var decoded_key;
//if (key_1st_value % 2 === 0) {
if (key_1st_value % 2 === 0 && key_1st_value > 0) {
// even, so it's a table, so 1 prefix
// Have incrementor work differently - just xas2s in keys and values.
console.log('buf_key', buf_key);
// Seems like a key has been put into the DB incorrectly in some cases.
// Checking for and correction various data errors / corruption makes sense.
if (remove_kp) {
decoded_key = Binary_Encoding.decode_buffer(buf_key, 1).slice(1);
} else {
decoded_key = Binary_Encoding.decode_buffer(buf_key, 1);
}
} else {
// odd, meaning indexes, so 2 prefixes. Includes the incrementors.
decoded_key = Binary_Encoding.decode_buffer(buf_key, 2);
}
return decoded_key;
*/
}
}
module.exports = Key_List;
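// Usage sketch (added for illustration; hedged - not part of the original module). A Key_List can
// be built from plain array keys and iterated, yielding buffer-backed Key instances:
//
//   const Key_List = require('./key-list');
//   // [0, 1, 'example_inc'] -> an incrementor key, [4, 123] -> a record key under key prefix 4
//   const kl = new Key_List([[0, 1, 'example_inc'], [4, 123]]);
//   for (const key of kl) {
//       console.log(key); // each entry is a Key built from that key's encoded bytes
//   }
//   console.log(kl.length); // 2, counted via Binary_Encoding.count_encoded_items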
|
3db62220cea3ca6b4e3c50aab3f7ac5e79512196
|
[
"JavaScript"
] | 21
|
JavaScript
|
metabench/nextleveldb-model
|
0faaa6d3319fbbc43e8076a013ff071b2b0952e9
|
988f5ccdcff31ebe8fc34e9bddb7413dba28fa4a
|
refs/heads/master
|
<file_sep>#include <iostream> // for std::cout
#include <utility> // for std::pair
#include <algorithm> // for std::for_each
#include <fstream>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <time.h>
#include <vector>
// boost library
#include <boost/graph/graph_traits.hpp>
#include <boost/graph/adjacency_list.hpp>
#include <boost/graph/dijkstra_shortest_paths.hpp>
#include <boost/graph/adj_list_serialize.hpp>
#include <boost/graph/bipartite.hpp>
// arhivers
#include <boost/archive/binary_iarchive.hpp>
#include <boost/archive/binary_oarchive.hpp>
using namespace boost;
using namespace std;
//#define DEBUG 0;
void selSort(int s[], int index[], int length);// function to sort the relations to start vertice in a descending order
static const char* DAT_FILE_NAME = "graph.dat";// file to store the graph struct
int main(int,char*[])
{
// create a typedef for the Graph type
typedef adjacency_list<vecS, vecS, bidirectionalS> Graph;
typedef std::pair<int, int> Edge;
vector<Edge> edgeVec;
int num_vertices;// total number of vertices in party one and party two
char tracefilename[30];
int A, B;//for the nodes in party A and party B
double Eweight;
int max_id_A;
int max_id_B;
int max_weight;
int i=0, j=0;
int m=0, n=0;
FILE *fp;
char buf[100];
// *********************************************************************************
// read in the trace file **********************************************************
printf("Please input trace file name:");
scanf("%s",&tracefilename);
fp = fopen(tracefilename, "r");
if(fp==NULL){
printf("Could not open the trace file!\n");
exit(1);
}
printf("Has open the trace sucessfully!\n");
while(fgets(buf,100,fp)){
if(buf[0]=='%') continue;
if(i==0){
sscanf(buf,"%d%d%d", &max_id_A, &max_id_B, &max_weight);
i++;
}else{
sscanf(buf, "%d%d", &A, &B);
edgeVec.push_back(Edge(A,B+max_id_A));
}
}
fclose(fp);
num_vertices = max_id_A + max_id_B;// the total number of vertices in both the two parties
// declare and add the edges to the graph object
Graph g(edgeVec.begin(), edgeVec.end(), num_vertices);
// test if the Graph is a bipartite Graph
if(!is_bipartite(g)){
std::cerr << "The graph is not a bipartite graph !!" << std::endl;
return EXIT_FAILURE;
}
// graph serialization
// serialize and save graph
std::ofstream ofs(DAT_FILE_NAME, std::ios::out | std::ios::binary);
if(!ofs.is_open()) {
std::cerr << "Can't open " << DAT_FILE_NAME << " file." << std::endl;
return EXIT_FAILURE;
}
boost::archive::binary_oarchive oa(ofs);
oa << g;
ofs.close();
    // reload the graph from the archive file
std::ifstream ifs(DAT_FILE_NAME, std::ios::in | std::ios::binary);
if(!ifs.is_open()) {
std::cerr << "Can't open " << DAT_FILE_NAME << " file." << std::endl;
return EXIT_FAILURE;
}
boost::archive::binary_iarchive ia(ifs);
Graph g1;
ia >> g1;
// Naive Graph collaborative Filtering
int v_start;// the start vertice for search
int k;// the k is the number of relational vertices need to be found
vector<int> final_result;// record the final result of collaborative filtering for visualization
vector<int>::iterator it;
std::cout << "Please input the start vertice as an interger number less than " << max_id_A << " :";
scanf("%d", &v_start);
while(v_start<0 || v_start > max_id_A){
cout << "Please input a valid start vertice less than " << max_id_A << " :";
scanf("%d", &v_start);
}
std::cout << "Please input the value of k as an interger number less than " << max_id_A << " :";
scanf("%d", &k);
while(k < 0 || k > max_id_A){
cout << "Please input a valid K less than " << max_id_A << " :";
scanf("%d", &k);
}
/* Define and initialize a two dimension dynamic matrix */
int **relation_count;// To record the relation between the vertices
int **index_vertice;
relation_count = (int **)malloc(sizeof(int *) * (k+1));
index_vertice = (int **)malloc(sizeof(int *) * (k+1));
for(i=0;i<k+1;i++){
relation_count[i] = (int *)malloc(sizeof(int) * max_id_A);
index_vertice[i] = (int *)malloc(sizeof(int) * max_id_A);
for(j=0;j<max_id_A;j++){
relation_count[i][j] = 0;
index_vertice[i][j] = j+1;
}
}
typedef graph_traits<Graph> GraphTraits;
GraphTraits::in_edge_iterator in_i, in_end; // in edges iterator for a vertice
GraphTraits::adjacency_iterator ai, ai_end;// adjacent vertices iterator
GraphTraits::edge_descriptor e; // edge
typedef graph_traits<Graph>::vertex_descriptor Vertex;
// get the property map for vertex indices
typedef property_map<Graph, vertex_index_t>::type IndexMap;
IndexMap index = get(vertex_index, g);
clock_t st = clock();
    for(tie(ai,ai_end) = adjacent_vertices(v_start,g1); ai != ai_end; ++ai) // get all the vertices adjacent to the start vertex
        for(tie(in_i,in_end) = in_edges(*ai,g1); in_i != in_end; ++in_i){ // if a vertex shares a neighbor with the start vertex, its relation counter is increased by one
e = *in_i;
Vertex src = source(e, g1);
if(src != v_start)
relation_count[0][index[src]-1]++;
}
st = clock() - st;
printf("Latency of relation count for naive filtering is %.5f\n", (float)st/CLOCKS_PER_SEC);
st = clock();
selSort(relation_count[0], index_vertice[0], max_id_A);// sort the relation in a descending order by selection sort algorithm
st = clock() - st;
printf("Latency of sorting for naive filtering is %.5f\n", (float)st/CLOCKS_PER_SEC);
#if 0
std::cout << "the " << k << " relevant vertices to " << v_start << " is: " << std::endl;
for(i=0;i<k;i++)
std::cout << index_vertice[0][i] << " ";
std::cout << std::endl;
#endif
// ************************************************************************************************************************************
// Collaborative Filtering for Visualization
// **************************************************************************************************************************************
/* calculate the relation betweent the start vertice and the other vertices */
st = clock();
for(i=0,j=1;i<k;i++,j++)
for(tie(ai,ai_end) = adjacent_vertices(index_vertice[0][i],g1);ai != ai_end; ++ai)
for(tie(in_i,in_end) = in_edges(*ai,g1); in_i != in_end; ++in_i){
e = *in_i;
Vertex src = source(e, g1);
if(src != index_vertice[0][i])
relation_count[j][index[src]-1]++;
}
st = clock() - st;
printf("Latency of relation count for full filtering is %.5f\n", (float)st/CLOCKS_PER_SEC);
/* sort the relation in a descending order */
st = clock();
for(i=1;i<k+1;i++)
selSort(relation_count[i], index_vertice[i], max_id_A);
st = clock() - st;
printf("Latency of sorting for full filtering is %.5f\n", (float)st/CLOCKS_PER_SEC);
#if 0
cout << "The k related vertices to each vertice are:" << endl;
for(i=0;i<k+1;i++){
for(j=0;j<k;j++){
cout << index_vertice[i][j] << " ";
}
cout << endl;
}
#endif
/* copy the index_vertice to the final_result vector */
for(i=0;i<k+1;i++)
for(j=0;j<k;j++)
final_result.push_back(index_vertice[i][j]);
    /* sort the final_result vector in ascending order */
st = clock();
sort(final_result.begin(), final_result.end());
/* remove the repeated vertices */
for(it=final_result.begin()+1, i=final_result.front();it!=final_result.end();){
if(i==*it)
            it = final_result.erase(it); // erase returns the iterator to the next valid element
else {
i = *it;
it++;
}
}
st = clock() - st;
printf("Latency of repeated vertices removal for full filtering is %.5f\n", (float)st/CLOCKS_PER_SEC);
#if 0
/* output the final result */
cout << "The final Collaborative Filtering result is:" << endl;
for(it=final_result.begin();it!=final_result.end();it++)
cout << *it << endl;
#endif
    for(i=0;i<k+1;i++){
        free((void *)relation_count[i]);
        free((void *)index_vertice[i]);
    }
    // also free the arrays of row pointers themselves
    free((void *)relation_count);
    free((void *)index_vertice);
return 0;
}
//Selection Sort
void selSort(int s[], int index[], int length)
{
int i, j, maxPos;
for(i=0;i<length-1;i++){
maxPos = i;
for(j=i+1;j<length;j++)
if(s[j] > s[maxPos])
maxPos = j;
if(i != maxPos){
swap(s[i],s[maxPos]);
swap(index[i],index[maxPos]);
}
}
}
|
6314a797e0fa84678d8580abab3f94c36e89bdcc
|
[
"C++"
] | 1
|
C++
|
dongwei-wang/bipartite_graph
|
a315f68272b78cbd4521525433b152979bf2f9d4
|
64a9e9f787f05a228d2e730da2315361ccd98b47
|
refs/heads/master
|
<repo_name>mjprude/dotfiles<file_sep>/scripts/macsetup.sh
#!/bin/bash
/usr/bin/ruby -e "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install)"
brew install git
git config --global user.name mjprude
git config --global user.email <EMAIL>
brew install vim
cd $HOME/dotfiles && ./install
brew install tmux
brew install reattach-to-user-namespace
brew install the_silver_searcher
defaults write com.apple.finder AppleShowAllFiles YES # show dotfiles in finder
defaults write NSGlobalDomain KeyRepeat -int 1 # increase key-repeat speed
|
a7fd8df6aa65ccdfbb4d65e2589b2c497913c9fc
|
[
"Shell"
] | 1
|
Shell
|
mjprude/dotfiles
|
5ca685806b0ffb4d6850d251f34b0a6b4f195827
|
836d74a90f9e70424f94c6a552c2b53cc5fc5a6a
|
refs/heads/master
|
<file_sep>// // "use strict";
// // console.log('Я учусь JS');
// // // 1.Ошибака! все открытое должно быть закрытым!
// // // 2.Ошибка! соблюдение регистра!
// // let boss = 'рефлежератор';
// // console.log(boss);
// // boss = 'холодильник';
// // console.log(boss);
// // let myAge = 35;
// // myAge = 18;
// // console.log(myAge);
// // let myNewAge = 33;
// // myAge = myNewAge;
// // console.log(myAge);
// // const userProfile = {
// // name: 'Alexander',
// // age: 35,
// // massage: 'Live, and work in your free time!'
// // }
// // console.log(userProfile);
// // userProfile.age = 18;
// // console.log(userProfile);
// // let user;
// // let userName = 'Petya';
// // user = userName;
// // console.log(user);
// let willYouMarryMe = true;
// if (willYouMarryMe) {
// console.log(':)');
// } else {
// console.log(':(');
// }
// let trueOrFalse = 58 < 18;
// if (trueOrFalse) {
// console.log('Correct');
// } else {
// console.log('Incorrect');
// }
// let getInfinity = 49 / 0;
// console.log(getInfinity);
// console.log(typeof getInfinity);
// let userAge1 = 50;
// console.log(userAge1);
// console.log(typeof userAge1);
// userAge1 = String(userAge1);
// console.log(userAge1);
// console.log(typeof userAge1);
// let userTrue = true;
// console.log(userTrue);
// console.log(typeof userTrue);
// userTrue = String(userTrue);
// console.log(userTrue);
// console.log(typeof userTrue);
// let userAge = 30;
// let userInfo = `Working ${userAge}`;
// console.log(userInfo);
let x;
x = 5 + 9;
console.log('addition result', x);
x = 40 - 27;
console.log(`Subtraction result ${x}`);
x = 34 % 5;
console.log(`Remainder of division ${x}`);
console.log(x);
let resultOne = "every hunter" + " " + "wants to know" + " " + "where the pheasant" + " sits";
console.log(resultOne);
let userCounter = 0;
let newUsers = 2 * ++userCounter;
console.log(newUsers);
let a = 1;
let b = 'sanya';
let c = 3;
const xyi = [a, b, c, 111111, 5, 'sdsdsd']
const obj = {
perviy: a,
name: b,
date: 2372637,
arr: xyi
}
console.log('!!!!!!!!!!!!!!!', obj);
const matrix = [[1, 2, 3], [1, 2, 3], [5, 8, 4]];
const userInfo = {
    name: 'Petya'
}
|
b9831a2eb76195469ae4249c101bac5402d704e3
|
[
"JavaScript"
] | 1
|
JavaScript
|
effect717/project
|
e13d6beb0e52acd119211ac86e6bcd6ef2c95619
|
8e6e3fc7efda04dc7a6fceb5e2ed7f6fcbf6a50d
|
refs/heads/master
|
<file_sep>import unittest
import numpy as np
from deepchem.utils import hash_utils
def random_string(length, chars=None):
import string
if chars is None:
chars = list(string.ascii_letters + string.ascii_letters +
'()[]+-.=#@/\\')
return ''.join(np.random.choice(chars, length))
class TestHashUtils(unittest.TestCase):
def test_hash_ecfp(self):
for power in (2, 16, 64):
for _ in range(10):
string = random_string(10)
string_hash = hash_utils.hash_ecfp(string, power)
self.assertIsInstance(string_hash, int)
self.assertLess(string_hash, 2**power)
self.assertGreaterEqual(string_hash, 0)
def test_hash_ecfp_pair(self):
for power in (2, 16, 64):
for _ in range(10):
string1 = random_string(10)
string2 = random_string(10)
pair_hash = hash_utils.hash_ecfp_pair((string1, string2), power)
self.assertIsInstance(pair_hash, int)
self.assertLess(pair_hash, 2**power)
self.assertGreaterEqual(pair_hash, 0)
def test_vectorize(self):
size = 16
feature_dict = {0: "C", 1: "CC", 2: "CCC"}
hash_function = hash_utils.hash_ecfp
vector = hash_utils.vectorize(hash_function, feature_dict, size)
assert vector.shape == (size,)
assert np.count_nonzero(vector) == len(feature_dict)
<file_sep>from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
import numpy as np
np.random.seed(123)
import tensorflow as tf
tf.set_random_seed(123)
import deepchem as dc
from hagcn_model import HAGCN
delaney_tasks, delaney_datasets, transformers = dc.molnet.load_delaney(
featurizer='GraphConv', split='index')
train_dataset, valid_dataset, test_dataset = delaney_datasets
# Fit models
metric = dc.metrics.Metric(dc.metrics.pearson_r2_score, np.mean)
max_train = max([mol.get_num_atoms() for mol in train_dataset.X])
max_valid = max([mol.get_num_atoms() for mol in valid_dataset.X])
max_test = max([mol.get_num_atoms() for mol in test_dataset.X])
max_atoms = max([max_train, max_valid, max_test])
# Args
n_atom_feat = 75
batch_size = 128
k_max = 4
model = HAGCN(
max_nodes=max_atoms,
n_tasks=len(delaney_tasks),
num_node_features=n_atom_feat,
batch_size=batch_size,
k_max=k_max)
model.fit(dataset=train_dataset, nb_epoch=80)
print("Evaluating model")
train_scores = model.evaluate(train_dataset, [metric], transformers)
valid_scores = model.evaluate(valid_dataset, [metric], transformers)
print("Train scores")
print(train_scores)
print("Validation scores")
print(valid_scores)
<file_sep># DeepChem Docs Overview
This directory contains the DeepChem docs. DeepChem's docs aim to
serve as another resource to complement our collection of tutorials
and examples.
## Building the Documentation
To build the docs, you can use the `Makefile` that's been added to
this directory. To generate the docs in HTML, run the following commands.
```
$ pip install -r requirements.txt
$ make html
# clean build
$ make clean html
$ open build/html/index.html
```
If you want to confirm logs in more details,
```
$ make clean html SPHINXOPTS=-vvv
```
If you want to confirm the example tests,
```
$ make doctest_examples
```<file_sep>"""
Script that trains Chemception models on delaney dataset.
"""
import numpy as np
np.random.seed(123)
import tensorflow as tf
tf.random.set_seed(123)
import deepchem as dc
# Load Delaney dataset
delaney_tasks, delaney_datasets, transformers = dc.molnet.load_delaney(
featurizer='smiles2img', split='index', img_spec="engd")
train_dataset, valid_dataset, test_dataset = delaney_datasets
# Get Metric
metric = dc.metrics.Metric(dc.metrics.pearson_r2_score, np.mean)
model = dc.models.ChemCeption(
img_spec="engd",
n_tasks=len(delaney_tasks),
model_dir="./model",
mode="regression")
# Fit trained model
model.fit(train_dataset, nb_epoch=1)
print("Evaluating model")
train_scores = model.evaluate(train_dataset, [metric], transformers)
valid_scores = model.evaluate(valid_dataset, [metric], transformers)
print("Train scores")
print(train_scores)
print("Validation scores")
print(valid_scores)
<file_sep>"""
Integration tests for multitask datasets.
"""
__author__ = "<NAME>"
__copyright__ = "Copyright 2016, Stanford University"
__license__ = "MIT"
import os
import numpy as np
import unittest
import deepchem as dc
class TestMultitask(unittest.TestCase):
"""
Sanity tests for multitask data.
"""
def setUp(self):
super(TestMultitask, self).setUp()
self.current_dir = os.path.dirname(os.path.abspath(__file__))
def test_multitask_order(self):
"""Test that order of tasks in multitask datasets is preserved."""
input_file = os.path.join(self.current_dir,
"assets/multitask_example.csv")
tasks = [
"task0", "task1", "task2", "task3", "task4", "task5", "task6",
"task7", "task8", "task9", "task10", "task11", "task12", "task13",
"task14", "task15", "task16"
]
featurizer = dc.feat.CircularFingerprint(size=1024)
loader = dc.data.CSVLoader(tasks=tasks,
feature_field="smiles",
featurizer=featurizer)
dataset = loader.create_dataset(input_file)
splitter = dc.splits.ScaffoldSplitter()
train_dataset, test_dataset = splitter.train_test_split(dataset)
assert np.array_equal(train_dataset.get_task_names(), tasks)
assert np.array_equal(test_dataset.get_task_names(), tasks)
def test_multitask_data(self):
"""Test that data associated with a tasks stays associated with it."""
tasks = ["task0", "task1"]
n_samples = 100
n_features = 3
n_tasks = len(tasks)
# Generate dummy dataset
ids = np.array(["C"] * n_samples, dtype=object)
X = np.random.rand(n_samples, n_features)
y = np.random.randint(2, size=(n_samples, n_tasks))
w = np.ones((n_samples, n_tasks))
dataset = dc.data.DiskDataset.from_numpy(X, y, w, ids, tasks)
np.testing.assert_allclose(X, dataset.X)
np.testing.assert_allclose(y, dataset.y)
np.testing.assert_allclose(w, dataset.w)
<file_sep>"""
Testing singletask/multitask dataset merging
"""
import os
import deepchem as dc
import numpy as np
def test_merge():
"""Test that datasets can be merged."""
current_dir = os.path.dirname(os.path.realpath(__file__))
dataset_file = os.path.join(current_dir,
"../../models/tests/assets/example.csv")
featurizer = dc.feat.CircularFingerprint(size=1024)
tasks = ["log-solubility"]
loader = dc.data.CSVLoader(tasks=tasks,
feature_field="smiles",
featurizer=featurizer)
first_dataset = loader.create_dataset(dataset_file)
first_dataset.reshard(10)
second_dataset = loader.create_dataset(dataset_file)
merged_dataset = dc.data.DiskDataset.merge([first_dataset, second_dataset])
assert len(merged_dataset) == len(first_dataset) + len(second_dataset)
assert merged_dataset.get_shard_size() == 10
# Test merging of numpy datasets
X1, y1 = np.random.rand(5, 3), np.random.randn(5, 1)
first_dataset = dc.data.NumpyDataset(X1, y1)
X2, y2 = np.random.rand(5, 3), np.random.randn(5, 1)
second_dataset = dc.data.NumpyDataset(X2, y2)
merged_dataset = dc.data.NumpyDataset.merge([first_dataset, second_dataset])
assert len(merged_dataset) == len(first_dataset) + len(second_dataset)
def test_subset():
"""Tests that subsetting of datasets works."""
current_dir = os.path.dirname(os.path.realpath(__file__))
dataset_file = os.path.join(current_dir,
"../../models/tests/assets/example.csv")
featurizer = dc.feat.CircularFingerprint(size=1024)
tasks = ["log-solubility"]
loader = dc.data.CSVLoader(tasks=tasks,
feature_field="smiles",
featurizer=featurizer)
dataset = loader.create_dataset(dataset_file, shard_size=2)
shard_nums = [1, 2]
orig_ids = dataset.ids
_, _, _, ids_1 = dataset.get_shard(1)
_, _, _, ids_2 = dataset.get_shard(2)
subset = dataset.subset(shard_nums)
after_ids = dataset.ids
assert len(subset) == 4
assert sorted(subset.ids) == sorted(np.concatenate([ids_1, ids_2]))
assert list(orig_ids) == list(after_ids)
<file_sep>"""
MUV dataset loader.
"""
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
import os
import numpy as np
import shutil
import deepchem as dc
def load_permeability(featurizer='ECFP', split='index'):
"""Load membrain permeability datasets. Does not do train/test split"""
print("About to load membrain permeability dataset.")
current_dir = os.path.dirname(os.path.realpath(__file__))
dataset_file = os.path.join(current_dir,
"../../datasets/membrane_permeability.sdf")
# Featurize permeability dataset
print("About to featurize membrain permeability dataset.")
if featurizer == 'ECFP':
featurizer_func = dc.feat.CircularFingerprint(size=1024)
elif featurizer == 'GraphConv':
featurizer_func = dc.feat.ConvMolFeaturizer()
permeability_tasks = sorted(['LogP(RRCK)'])
loader = dc.data.SDFLoader(
tasks=permeability_tasks, clean_mols=True, featurizer=featurizer_func)
dataset = loader.featurize(dataset_file)
splitters = {
'index': dc.splits.IndexSplitter(),
'random': dc.splits.RandomSplitter(),
'scaffold': dc.splits.ScaffoldSplitter()
}
splitter = splitters[split]
train, valid, test = splitter.train_valid_test_split(dataset)
return permeability_tasks, (train, valid, test), []
<file_sep>"""
Contains methods for generating a pdbbind dataset mapping
complexes (protein + ligand) to experimental binding measurement.
"""
from __future__ import print_function
import pickle
import os
import pandas as pd
from rdkit import Chem
from glob import glob
import re
from sklearn.externals import joblib
def extract_labels(pdbbind_label_file):
"""Extract labels from pdbbind label file."""
assert os.path.isfile(pdbbind_label_file)
labels = {}
with open(pdbbind_label_file) as f:
content = f.readlines()
for line in content:
if line[0] == "#":
continue
line = line.split()
# lines in the label file have format
# PDB-code Resolution Release-Year -logKd Kd reference ligand-name
#print line[0], line[3]
labels[line[0]] = line[3]
return labels
def construct_df(pdb_stem_directory, pdbbind_label_file, pdbbind_df_joblib):
"""
Takes as input a stem directory containing subdirectories with ligand
and protein pdb/mol2 files, a pdbbind_label_file containing binding
assay data for the co-crystallized ligand in each pdb file,
and a pdbbind_df_pkl to which will be saved a pandas DataFrame
where each row contains a pdb_id, smiles string, unique complex id,
ligand pdb as a list of strings per line in file, protein pdb as a list
of strings per line in file, ligand mol2 as a list of strings per line in
mol2 file, and a "label" containing the experimental measurement.
"""
labels = extract_labels(pdbbind_label_file)
df_rows = []
os.chdir(pdb_stem_directory)
pdb_directories = [pdb.replace('/', '') for pdb in glob('*/')]
for pdb_dir in pdb_directories:
print("About to extract ligand and protein input files")
pdb_id = os.path.basename(pdb_dir)
ligand_pdb = None
protein_pdb = None
for f in os.listdir(pdb_dir):
if re.search("_ligand_hyd.pdb$", f):
ligand_pdb = f
elif re.search("_protein_hyd.pdb$", f):
protein_pdb = f
elif re.search("_ligand.mol2$", f):
ligand_mol2 = f
print("Extracted Input Files:")
print (ligand_pdb, protein_pdb, ligand_mol2)
if not ligand_pdb or not protein_pdb or not ligand_mol2:
raise ValueError("Required files not present for %s" % pdb_dir)
ligand_pdb_path = os.path.join(pdb_dir, ligand_pdb)
protein_pdb_path = os.path.join(pdb_dir, protein_pdb)
ligand_mol2_path = os.path.join(pdb_dir, ligand_mol2)
with open(protein_pdb_path, "rb") as f:
protein_pdb_lines = f.readlines()
with open(ligand_pdb_path, "rb") as f:
ligand_pdb_lines = f.readlines()
try:
with open(ligand_mol2_path, "rb") as f:
ligand_mol2_lines = f.readlines()
except:
ligand_mol2_lines = []
print("About to compute ligand smiles string.")
ligand_mol = Chem.MolFromPDBFile(ligand_pdb_path)
if ligand_mol is None:
continue
smiles = Chem.MolToSmiles(ligand_mol)
complex_id = "%s%s" % (pdb_id, smiles)
label = labels[pdb_id]
df_rows.append([pdb_id, smiles, complex_id, protein_pdb_lines,
ligand_pdb_lines, ligand_mol2_lines, label])
pdbbind_df = pd.DataFrame(df_rows, columns=('pdb_id', 'smiles', 'complex_id',
'protein_pdb', 'ligand_pdb',
'ligand_mol2', 'label'))
joblib.dump(pdbbind_df, pdbbind_df_joblib)
<file_sep>import logging
import importlib.util
from typing import Optional, Union, List, Dict
logger = logging.getLogger(__name__)
def is_wandb_available():
return importlib.util.find_spec("wandb") is not None
class WandbLogger(object):
"""Weights & Biases Logger.
This is a logger class that can be passed into the initialization
of a KerasModel or TorchModel. It initializes and sets up a wandb logger which
will log the specified metrics calculated on the specific datasets
to the user's W&B dashboard.
If a WandbLogger is provided to the wandb_logger flag,
the metrics are logged to Weights & Biases, along with other information
such as epoch number, losses, sample counts, and model configuration data.
"""
def __init__(self,
name: Optional[str] = None,
entity: Optional[str] = None,
project: Optional[str] = "deepchem",
mode: Optional[str] = "online",
id: Optional[str] = None,
resume: Optional[Union[bool, str]] = None,
anonymous: Optional[str] = "never",
save_run_history: Optional[bool] = False,
**kwargs):
"""Creates a WandbLogger.
Parameters
----------
name: str
a display name for the run in the W&B dashboard
entity: str
an entity is a username or team name where you're sending the W&B run
project: str
the name of the project where you're sending the new W&B run
mode: str
W&B online or offline mode
id: str
a unique ID for this run, used for resuming
resume: bool or str
sets the resuming behavior
anonymous: str
controls anonymous data logging
save_run_history: bool
whether to save the run history to the logger at the end (for testing purposes)
"""
assert is_wandb_available(
), "WandbLogger requires wandb to be installed. Please run `pip install wandb --upgrade`"
import wandb
self._wandb = wandb
if mode == "offline":
logger.warning(
'Note: Model checkpoints will not be uploaded to W&B in offline mode.\n'
'Please set `mode="online"` if you need to log your model.')
self.save_run_history = save_run_history
# set wandb init arguments
self.wandb_init_params = dict(name=name,
project=project,
entity=entity,
mode=mode,
id=id,
resume=resume,
anonymous=anonymous)
self.wandb_init_params.update(**kwargs)
self.initialized = False
# Dataset ids are used to differentiate datasets seen by the logger
self.dataset_ids: List[Union[int, str]] = []
def setup(self):
"""Initializes a W&B run and create a run object.
If a pre-existing run is already initialized, use that instead.
"""
if self._wandb.run is None:
self.wandb_run = self._wandb.init(**self.wandb_init_params)
else:
self.wandb_run = self._wandb.run
self.initialized = True
def log_data(self,
data: Dict,
step: int,
dataset_id: Optional[Union[int, str]] = None):
"""Log data to W&B.
Parameters
----------
data: dict
the data to be logged to W&B
step: int
the step number at which the data is to be logged
dataset_id: int or str, optional (default None)
the unique id of the dataset to differentiate during logging.
Typically used when there are multiple ValidationCallbacks with
different datasets.
"""
if dataset_id is not None:
if dataset_id in self.dataset_ids:
for key in list(data.keys()):
idx = self.dataset_ids.index(dataset_id)
new_key = str(key) + "_(" + str(idx) + ")"
data[new_key] = data.pop(key)
else:
self.dataset_ids.append(dataset_id)
for key in list(data.keys()):
idx = self.dataset_ids.index(dataset_id)
new_key = str(key) + "_(" + str(idx) + ")"
data[new_key] = data.pop(key)
# log data
self.wandb_run.log(data, step=step)
def finish(self):
"""Finishes and closes the W&B run.
Save run history data as field if configured to do that.
"""
if self.save_run_history:
history = self.wandb_run.history._data
self.run_history = history
if self.wandb_run is not None:
self.wandb_run.finish()
def update_config(self, config_data):
"""Updates the W&B configuration.
Parameters
----------
config_data: dict
additional configuration data to add
"""
self.wandb_run.config.update(config_data)
<file_sep>"""
Featurizers for complex.
"""
# flake8: noqa
from deepchem.feat.complex_featurizers.rdkit_grid_featurizer import RdkitGridFeaturizer
from deepchem.feat.complex_featurizers.complex_atomic_coordinates import NeighborListAtomicCoordinates
from deepchem.feat.complex_featurizers.complex_atomic_coordinates import NeighborListComplexAtomicCoordinates
from deepchem.feat.complex_featurizers.complex_atomic_coordinates import AtomicConvFeaturizer
from deepchem.feat.complex_featurizers.complex_atomic_coordinates import ComplexNeighborListFragmentAtomicCoordinates
from deepchem.feat.complex_featurizers.contact_fingerprints import ContactCircularFingerprint
from deepchem.feat.complex_featurizers.contact_fingerprints import ContactCircularVoxelizer
from deepchem.feat.complex_featurizers.grid_featurizers import ChargeVoxelizer
from deepchem.feat.complex_featurizers.grid_featurizers import SaltBridgeVoxelizer
from deepchem.feat.complex_featurizers.grid_featurizers import CationPiVoxelizer
from deepchem.feat.complex_featurizers.grid_featurizers import PiStackVoxelizer
from deepchem.feat.complex_featurizers.grid_featurizers import HydrogenBondVoxelizer
from deepchem.feat.complex_featurizers.grid_featurizers import HydrogenBondCounter
from deepchem.feat.complex_featurizers.splif_fingerprints import SplifFingerprint
from deepchem.feat.complex_featurizers.splif_fingerprints import SplifVoxelizer
<file_sep>The Delaney dataset is a collection of 2874 aqueous solubility measurements from this paper:
Delaney, <NAME>. "ESOL: estimating aqueous solubility directly from molecular structure." Journal of chemical information and computer sciences 44.3 (2004): 1000-1005.
This dataset is commonly used since it's a small molecular
regression dataset that's convenient for benchmarking various
techniques. In this example, we train a series of different
DeepChem models against this task:
- `DAGModel`: In `delaney_DAG.py`. This model will train and
converge very slowly.
- `TextCNNModel`: In `delaney_textcnn.py`. This model featurizes compounds as SMILES strings directly and trains a convolutional network directly on the text.
- `WeaveModel`: In `delaney_weave.py`. This model trains a weave style convolution on Delaney.
- `ChemCeption`: In `delaney_chemception.py`. This model trains a variant of an Inception convolutional network on images generated from molecules.
- `MPNNModel`: In `delaney_MPNN.py`. This model trains somewhat slowly, but is still faster than `DAGModel`.
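
For reference, here is a minimal sketch (not taken from the scripts above) showing how the dataset is typically loaded through MoleculeNet and fit with a simple baseline model; the exact featurizer and model arguments may need adjusting for your setup:

```python
import deepchem as dc

# Load the Delaney (ESOL) dataset with circular-fingerprint features.
tasks, (train, valid, test), transformers = dc.molnet.load_delaney(
    featurizer="ECFP", splitter="random")

# Fit a simple fully-connected baseline regressor.
model = dc.models.MultitaskRegressor(n_tasks=len(tasks), n_features=1024)
model.fit(train, nb_epoch=10)

# Report Pearson R^2 on the test split.
metric = dc.metrics.Metric(dc.metrics.pearson_r2_score)
print(model.evaluate(test, [metric], transformers))
```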
<file_sep># -*- coding: utf-8 -*-
"""
Created on Mon Mar 13 22:31:24 2017
@author: <NAME>
"""
import torch
import time
import numpy as np
from deepchem.trans import undo_transforms
from deepchem.utils.save import log
from deepchem.models import Model
class TorchMultitaskModel(Model):
def __init__(self,
layer_sizes=[1000],
weight_init_stddevs=[.02],
bias_init_consts=[1.],
penalty=0.0,
penalty_type="l2",
dropouts=[0.5],
learning_rate=.001,
momentum=.9,
optimizer="adam",
batch_size=50,
pad_batches=False,
verbose=True,
seed=None,
**kwargs):
"""Constructs the computational graph.
    This function constructs the computational graph for the model. It relies
    on subclassed methods (build/cost) to construct specific graphs.
Parameters
----------
layer_sizes: list
List of layer sizes.
weight_init_stddevs: list
List of standard deviations for weights (sampled from zero-mean
gaussians). One for each layer.
bias_init_consts: list
List of bias initializations. One for each layer.
penalty: float
      Amount of penalty (l2 or l1) applied.
penalty_type: str
Either "l2" or "l1"
dropouts: list
List of dropout amounts. One for each layer.
learning_rate: float
Learning rate for model.
momentum: float
Momentum. Only applied if optimizer=="momentum"
optimizer: str
Type of optimizer applied.
batch_size: int
      Size of minibatches for training.
    verbose: bool
Perform logging.
seed: int
      If not None, used as the random seed.
"""
# Save hyperparameters
self.layer_sizes = layer_sizes
self.weight_init_stddevs = weight_init_stddevs
self.bias_init_consts = bias_init_consts
self.penalty = penalty
self.penalty_type = penalty_type
self.dropouts = dropouts
self.learning_rate = learning_rate
self.momentum = momentum
self.optimizer = optimizer
self.batch_size = batch_size
self.pad_batches = pad_batches
self.verbose = verbose
self.seed = seed
self.build()
self.optimizer = self.get_training_op()
def add_training_cost(self, outputs, labels, weights):
weighted_costs = [] # weighted costs for each example
for task in range(self.n_tasks):
weighted_cost = self.cost(outputs[task], labels[:, task],
weights[:, task])
weighted_costs.append(weighted_cost)
loss = torch.cat(weighted_costs).sum()
# weight decay
if self.penalty > 0.0:
for variable in self.regularizaed_variables:
loss += self.penalty * 0.5 * variable.mul(variable).sum()
return loss
def get_training_op(self):
"""Get training op for applying gradients to variables.
Subclasses that need to do anything fancy with gradients should override
this method.
Returns:
An optimizer
"""
if self.optimizer == "adam":
train_op = torch.optim.Adam(self.trainables, lr=self.learning_rate)
elif self.optimizer == 'adagrad':
train_op = torch.optim.Adagrad(self.trainables, lr=self.learning_rate)
elif self.optimizer == 'rmsprop':
train_op = torch.optim.RMSprop(
self.trainables, lr=self.learning_rate, momentum=self.momentum)
elif self.optimizer == 'sgd':
train_op = torch.optim.SGD(self.trainables, lr=self.learning_rate)
else:
raise NotImplementedError('Unsupported optimizer %s' % self.optimizer)
return train_op
def fit(self,
dataset,
nb_epoch=10,
max_checkpoints_to_keep=5,
log_every_N_batches=50,
checkpoint_interval=10,
**kwargs):
"""Fit the model.
Parameters
----------
dataset: dc.data.Dataset
Dataset object holding training data
    nb_epoch: int
Number of training epochs.
max_checkpoints_to_keep: int
Maximum number of checkpoints to keep; older checkpoints will be deleted.
log_every_N_batches: int
Report every N batches. Useful for training on very large datasets,
      where epochs can take a long time to finish.
checkpoint_interval: int
Frequency at which to write checkpoints, measured in epochs
Raises
------
AssertionError
If model is not in training mode.
"""
############################################################## TIMING
time1 = time.time()
############################################################## TIMING
log("Training for %d epochs" % nb_epoch, self.verbose)
for epoch in range(nb_epoch):
avg_loss, n_batches = 0., 0
for ind, (X_b, y_b, w_b, ids_b) in enumerate(
# Turns out there are valid cases where we don't want pad-batches
# on by default.
#dataset.iterbatches(batch_size, pad_batches=True)):
dataset.iterbatches(self.batch_size, pad_batches=self.pad_batches)):
if ind % log_every_N_batches == 0:
log("On batch %d" % ind, self.verbose)
# Run training op.
self.optimizer.zero_grad()
X_b_input = torch.autograd.Variable(torch.cuda.FloatTensor(X_b))
y_b_input = torch.autograd.Variable(torch.cuda.FloatTensor(y_b))
w_b_input = torch.autograd.Variable(torch.cuda.FloatTensor(w_b))
outputs = self.forward(X_b_input, training=True)
loss = self.add_training_cost(outputs, y_b_input, w_b_input)
loss.backward()
self.optimizer.step()
avg_loss += loss
n_batches += 1
avg_loss = float(avg_loss.data.cpu().numpy()) / n_batches
log('Ending epoch %d: Average loss %g' % (epoch, avg_loss), self.verbose)
time2 = time.time()
print("TIMING: model fitting took %0.3f s" % (time2 - time1), self.verbose)
############################################################## TIMING
def predict(self, dataset, transformers=[]):
"""
Uses self to make predictions on provided Dataset object.
Returns:
y_pred: numpy ndarray of shape (n_samples,)
"""
y_preds = []
n_tasks = self.n_tasks
for (X_batch, _, _, ids_batch) in dataset.iterbatches(
self.batch_size, deterministic=True):
n_samples = len(X_batch)
y_pred_batch = self.predict_on_batch(X_batch)
assert y_pred_batch.shape == (n_samples, n_tasks)
y_pred_batch = undo_transforms(y_pred_batch, transformers)
y_preds.append(y_pred_batch)
y_pred = np.vstack(y_preds)
# The iterbatches does padding with zero-weight examples on the last batch.
# Remove padded examples.
n_samples = len(dataset)
y_pred = np.reshape(y_pred, (n_samples, n_tasks))
# Special case to handle singletasks.
if n_tasks == 1:
y_pred = np.reshape(y_pred, (n_samples,))
return y_pred
def predict_proba(self, dataset, transformers=[], n_classes=2):
y_preds = []
n_tasks = self.n_tasks
for (X_batch, y_batch, w_batch, ids_batch) in dataset.iterbatches(
self.batch_size, deterministic=True):
n_samples = len(X_batch)
y_pred_batch = self.predict_proba_on_batch(X_batch)
assert y_pred_batch.shape == (n_samples, n_tasks, n_classes)
y_pred_batch = undo_transforms(y_pred_batch, transformers)
y_preds.append(y_pred_batch)
y_pred = np.vstack(y_preds)
# The iterbatches does padding with zero-weight examples on the last batch.
# Remove padded examples.
n_samples = len(dataset)
y_pred = y_pred[:n_samples]
y_pred = np.reshape(y_pred, (n_samples, n_tasks, n_classes))
return y_pred
def build(self):
raise NotImplementedError('Must be overridden by concrete subclass')
def forward(self, X, training=False):
raise NotImplementedError('Must be overridden by concrete subclass')
def cost(self, logit, label, weight):
raise NotImplementedError('Must be overridden by concrete subclass')
def predict_on_batch(self, X_batch):
raise NotImplementedError('Must be overridden by concrete subclass')
def predict_proba_on_batch(self, X_batch):
raise NotImplementedError('Must be overridden by concrete subclass')
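
# Example (illustrative sketch): this base class is abstract, so training goes
# through a concrete subclass that implements build/forward/cost. The subclass
# name below is hypothetical; the constructor arguments mirror the ones
# documented above.
#
#   model = MyTorchMultitaskSubclass(layer_sizes=[1000], dropouts=[0.5],
#                                    learning_rate=1e-3, batch_size=50)
#   model.fit(train_dataset, nb_epoch=10)
#   y_pred = model.predict(test_dataset, transformers)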
<file_sep>bace_user_specified_features = [
'MW', 'AlogP', 'HBA', 'HBD', 'RB', 'HeavyAtomCount', 'ChiralCenterCount',
'ChiralCenterCountAllPossible', 'RingCount', 'PSA', 'Estate', 'MR', 'Polar',
'sLi_Key', 'ssBe_Key', 'ssssBem_Key', 'sBH2_Key', 'ssBH_Key', 'sssB_Key',
'ssssBm_Key', 'sCH3_Key', 'dCH2_Key', 'ssCH2_Key', 'tCH_Key', 'dsCH_Key',
'aaCH_Key', 'sssCH_Key', 'ddC_Key', 'tsC_Key', 'dssC_Key', 'aasC_Key',
'aaaC_Key', 'ssssC_Key', 'sNH3_Key', 'sNH2_Key', 'ssNH2_Key', 'dNH_Key',
'ssNH_Key', 'aaNH_Key', 'tN_Key', 'sssNH_Key', 'dsN_Key', 'aaN_Key',
'sssN_Key', 'ddsN_Key', 'aasN_Key', 'ssssN_Key', 'daaN_Key', 'sOH_Key',
'dO_Key', 'ssO_Key', 'aaO_Key', 'aOm_Key', 'sOm_Key', 'sF_Key', 'sSiH3_Key',
'ssSiH2_Key', 'sssSiH_Key', 'ssssSi_Key', 'sPH2_Key', 'ssPH_Key',
'sssP_Key', 'dsssP_Key', 'ddsP_Key', 'sssssP_Key', 'sSH_Key', 'dS_Key',
'ssS_Key', 'aaS_Key', 'dssS_Key', 'ddssS_Key', 'ssssssS_Key', 'Sm_Key',
'sCl_Key', 'sGeH3_Key', 'ssGeH2_Key', 'sssGeH_Key', 'ssssGe_Key',
'sAsH2_Key', 'ssAsH_Key', 'sssAs_Key', 'dsssAs_Key', 'ddsAs_Key',
'sssssAs_Key', 'sSeH_Key', 'dSe_Key', 'ssSe_Key', 'aaSe_Key', 'dssSe_Key',
'ssssssSe_Key', 'ddssSe_Key', 'sBr_Key', 'sSnH3_Key', 'ssSnH2_Key',
'sssSnH_Key', 'ssssSn_Key', 'sI_Key', 'sPbH3_Key', 'ssPbH2_Key',
'sssPbH_Key', 'ssssPb_Key', 'sLi_Cnt', 'ssBe_Cnt', 'ssssBem_Cnt',
'sBH2_Cnt', 'ssBH_Cnt', 'sssB_Cnt', 'ssssBm_Cnt', 'sCH3_Cnt', 'dCH2_Cnt',
'ssCH2_Cnt', 'tCH_Cnt', 'dsCH_Cnt', 'aaCH_Cnt', 'sssCH_Cnt', 'ddC_Cnt',
'tsC_Cnt', 'dssC_Cnt', 'aasC_Cnt', 'aaaC_Cnt', 'ssssC_Cnt', 'sNH3_Cnt',
'sNH2_Cnt', 'ssNH2_Cnt', 'dNH_Cnt', 'ssNH_Cnt', 'aaNH_Cnt', 'tN_Cnt',
'sssNH_Cnt', 'dsN_Cnt', 'aaN_Cnt', 'sssN_Cnt', 'ddsN_Cnt', 'aasN_Cnt',
'ssssN_Cnt', 'daaN_Cnt', 'sOH_Cnt', 'dO_Cnt', 'ssO_Cnt', 'aaO_Cnt',
'aOm_Cnt', 'sOm_Cnt', 'sF_Cnt', 'sSiH3_Cnt', 'ssSiH2_Cnt', 'sssSiH_Cnt',
'ssssSi_Cnt', 'sPH2_Cnt', 'ssPH_Cnt', 'sssP_Cnt', 'dsssP_Cnt', 'ddsP_Cnt',
'sssssP_Cnt', 'sSH_Cnt', 'dS_Cnt', 'ssS_Cnt', 'aaS_Cnt', 'dssS_Cnt',
'ddssS_Cnt', 'ssssssS_Cnt', 'Sm_Cnt', 'sCl_Cnt', 'sGeH3_Cnt', 'ssGeH2_Cnt',
'sssGeH_Cnt', 'ssssGe_Cnt', 'sAsH2_Cnt', 'ssAsH_Cnt', 'sssAs_Cnt',
'dsssAs_Cnt', 'ddsAs_Cnt', 'sssssAs_Cnt', 'sSeH_Cnt', 'dSe_Cnt', 'ssSe_Cnt',
'aaSe_Cnt', 'dssSe_Cnt', 'ssssssSe_Cnt', 'ddssSe_Cnt', 'sBr_Cnt',
'sSnH3_Cnt', 'ssSnH2_Cnt', 'sssSnH_Cnt', 'ssssSn_Cnt', 'sI_Cnt',
'sPbH3_Cnt', 'ssPbH2_Cnt', 'sssPbH_Cnt', 'ssssPb_Cnt', 'sLi_Sum',
'ssBe_Sum', 'ssssBem_Sum', 'sBH2_Sum', 'ssBH_Sum', 'sssB_Sum', 'ssssBm_Sum',
'sCH3_Sum', 'dCH2_Sum', 'ssCH2_Sum', 'tCH_Sum', 'dsCH_Sum', 'aaCH_Sum',
'sssCH_Sum', 'ddC_Sum', 'tsC_Sum', 'dssC_Sum', 'aasC_Sum', 'aaaC_Sum',
'ssssC_Sum', 'sNH3_Sum', 'sNH2_Sum', 'ssNH2_Sum', 'dNH_Sum', 'ssNH_Sum',
'aaNH_Sum', 'tN_Sum', 'sssNH_Sum', 'dsN_Sum', 'aaN_Sum', 'sssN_Sum',
'ddsN_Sum', 'aasN_Sum', 'ssssN_Sum', 'daaN_Sum', 'sOH_Sum', 'dO_Sum',
'ssO_Sum', 'aaO_Sum', 'aOm_Sum', 'sOm_Sum', 'sF_Sum', 'sSiH3_Sum',
'ssSiH2_Sum', 'sssSiH_Sum', 'ssssSi_Sum', 'sPH2_Sum', 'ssPH_Sum',
'sssP_Sum', 'dsssP_Sum', 'ddsP_Sum', 'sssssP_Sum', 'sSH_Sum', 'dS_Sum',
'ssS_Sum', 'aaS_Sum', 'dssS_Sum', 'ddssS_Sum', 'ssssssS_Sum', 'Sm_Sum',
'sCl_Sum', 'sGeH3_Sum', 'ssGeH2_Sum', 'sssGeH_Sum', 'ssssGe_Sum',
'sAsH2_Sum', 'ssAsH_Sum', 'sssAs_Sum', 'dsssAs_Sum', 'ddsAs_Sum',
'sssssAs_Sum', 'sSeH_Sum', 'dSe_Sum', 'ssSe_Sum', 'aaSe_Sum', 'dssSe_Sum',
'ssssssSe_Sum', 'ddssSe_Sum', 'sBr_Sum', 'sSnH3_Sum', 'ssSnH2_Sum',
'sssSnH_Sum', 'ssssSn_Sum', 'sI_Sum', 'sPbH3_Sum', 'ssPbH2_Sum',
'sssPbH_Sum', 'ssssPb_Sum', 'sLi_Avg', 'ssBe_Avg', 'ssssBem_Avg',
'sBH2_Avg', 'ssBH_Avg', 'sssB_Avg', 'ssssBm_Avg', 'sCH3_Avg', 'dCH2_Avg',
'ssCH2_Avg', 'tCH_Avg', 'dsCH_Avg', 'aaCH_Avg', 'sssCH_Avg', 'ddC_Avg',
'tsC_Avg', 'dssC_Avg', 'aasC_Avg', 'aaaC_Avg', 'ssssC_Avg', 'sNH3_Avg',
'sNH2_Avg', 'ssNH2_Avg', 'dNH_Avg', 'ssNH_Avg', 'aaNH_Avg', 'tN_Avg',
'sssNH_Avg', 'dsN_Avg', 'aaN_Avg', 'sssN_Avg', 'ddsN_Avg', 'aasN_Avg',
'ssssN_Avg', 'daaN_Avg', 'sOH_Avg', 'dO_Avg', 'ssO_Avg', 'aaO_Avg',
'aOm_Avg', 'sOm_Avg', 'sF_Avg', 'sSiH3_Avg', 'ssSiH2_Avg', 'sssSiH_Avg',
'ssssSi_Avg', 'sPH2_Avg', 'ssPH_Avg', 'sssP_Avg', 'dsssP_Avg', 'ddsP_Avg',
'sssssP_Avg', 'sSH_Avg', 'dS_Avg', 'ssS_Avg', 'aaS_Avg', 'dssS_Avg',
'ddssS_Avg', 'ssssssS_Avg', 'Sm_Avg', 'sCl_Avg', 'sGeH3_Avg', 'ssGeH2_Avg',
'sssGeH_Avg', 'ssssGe_Avg', 'sAsH2_Avg', 'ssAsH_Avg', 'sssAs_Avg',
'dsssAs_Avg', 'ddsAs_Avg', 'sssssAs_Avg', 'sSeH_Avg', 'dSe_Avg', 'ssSe_Avg',
'aaSe_Avg', 'dssSe_Avg', 'ssssssSe_Avg', 'ddssSe_Avg', 'sBr_Avg',
'sSnH3_Avg', 'ssSnH2_Avg', 'sssSnH_Avg', 'ssssSn_Avg', 'sI_Avg',
'sPbH3_Avg', 'ssPbH2_Avg', 'sssPbH_Avg', 'ssssPb_Avg', 'First Zagreb (ZM1)',
'First Zagreb index by valence vertex degrees (ZM1V)',
'Second Zagreb (ZM2)',
'Second Zagreb index by valence vertex degrees (ZM2V)', 'Polarity (Pol)',
'Narumi Simple Topological (NST)', 'Narumi Harmonic Topological (NHT)',
'Narumi Geometric Topological (NGT)', 'Total structure connectivity (TSC)',
'Wiener (W)', 'Mean Wiener (MW)', 'Xu (Xu)', 'Quadratic (QIndex)',
'Radial centric (RC)', 'Mean Square Distance Balaban (MSDB)',
'Superpendentic (SP)', 'Harary (Har)', 'Log of product of row sums (LPRS)',
'Pogliani (Pog)', 'Schultz Molecular Topological (SMT)',
'Schultz Molecular Topological by valence vertex degrees (SMTV)',
'Mean Distance Degree Deviation (MDDD)', 'Ramification (Ram)',
'Gutman Molecular Topological (GMT)',
'Gutman MTI by valence vertex degrees (GMTV)',
'Average vertex distance degree (AVDD)', 'Unipolarity (UP)',
'Centralization (CENT)', 'Variation (VAR)',
'Molecular electrotopological variation (MEV)',
'Maximal electrotopological positive variation (MEPV)',
'Maximal electrotopological negative variation (MENV)',
'Eccentric connectivity (ECCc)', 'Eccentricity (ECC)',
'Average eccentricity (AECC)', 'Eccentric (DECC)',
'Valence connectivity index chi-0 (vX0)',
'Valence connectivity index chi-1 (vX1)',
'Valence connectivity index chi-2 (vX2)',
'Valence connectivity index chi-3 (vX3)',
'Valence connectivity index chi-4 (vX4)',
'Valence connectivity index chi-5 (vX5)',
'Average valence connectivity index chi-0 (AvX0)',
'Average valence connectivity index chi-1 (AvX1)',
'Average valence connectivity index chi-2 (AvX2)',
'Average valence connectivity index chi-3 (AvX3)',
'Average valence connectivity index chi-4 (AvX4)',
'Average valence connectivity index chi-5 (AvX5)', 'Quasi Wiener (QW)',
'First Mohar (FM)', 'Second Mohar (SM)', 'Spanning tree number (STN)',
'Kier benzene-likeliness index (KBLI)',
'Topological charge index of order 1 (TCI1)',
'Topological charge index of order 2 (TCI2)',
'Topological charge index of order 3 (TCI3)',
'Topological charge index of order 4 (TCI4)',
'Topological charge index of order 5 (TCI5)',
'Topological charge index of order 6 (TCI6)',
'Topological charge index of order 7 (TCI7)',
'Topological charge index of order 8 (TCI8)',
'Topological charge index of order 9 (TCI9)',
'Topological charge index of order 10 (TCI10)',
'Mean topological charge index of order 1 (MTCI1)',
'Mean topological charge index of order 2 (MTCI2)',
'Mean topological charge index of order 3 (MTCI3)',
'Mean topological charge index of order 4 (MTCI4)',
'Mean topological charge index of order 5 (MTCI5)',
'Mean topological charge index of order 6 (MTCI6)',
'Mean topological charge index of order 7 (MTCI7)',
'Mean topological charge index of order 8 (MTCI8)',
'Mean topological charge index of order 9 (MTCI9)',
'Mean topological charge index of order 10 (MTCI10)',
'Global topological charge (GTC)', 'Hyper-distance-path index (HDPI)',
'Reciprocal hyper-distance-path index (RHDPI)',
'Square reciprocal distance sum (SRDS)',
'Modified Randic connectivity (MRC)', 'Balaban centric (BC)',
'Lopping centric (LC)', 'Kier Hall electronegativity (KHE)',
'Sum of topological distances between N..N (STD(N N))',
'Sum of topological distances between N..O (STD(N O))',
'Sum of topological distances between N..S (STD(N S))',
'Sum of topological distances between N..P (STD(N P))',
'Sum of topological distances between N..F (STD(N F))',
'Sum of topological distances between N..Cl (STD(N Cl))',
'Sum of topological distances between N..Br (STD(N Br))',
'Sum of topological distances between N..I (STD(N I))',
'Sum of topological distances between O..O (STD(O O))',
'Sum of topological distances between O..S (STD(O S))',
'Sum of topological distances between O..P (STD(O P))',
'Sum of topological distances between O..F (STD(O F))',
'Sum of topological distances between O..Cl (STD(O Cl))',
'Sum of topological distances between O..Br (STD(O Br))',
'Sum of topological distances between O..I (STD(O I))',
'Sum of topological distances between S..S (STD(S S))',
'Sum of topological distances between S..P (STD(S P))',
'Sum of topological distances between S..F (STD(S F))',
'Sum of topological distances between S..Cl (STD(S Cl))',
'Sum of topological distances between S..Br (STD(S Br))',
'Sum of topological distances between S..I (STD(S I))',
'Sum of topological distances between P..P (STD(P P))',
'Sum of topological distances between P..F (STD(P F))',
'Sum of topological distances between P..Cl (STD(P Cl))',
'Sum of topological distances between P..Br (STD(P Br))',
'Sum of topological distances between P..I (STD(P I))',
'Sum of topological distances between F..F (STD(F F))',
'Sum of topological distances between F..Cl (STD(F Cl))',
'Sum of topological distances between F..Br (STD(F Br))',
'Sum of topological distances between F..I (STD(F I))',
'Sum of topological distances between Cl..Cl (STD(Cl Cl))',
'Sum of topological distances between Cl..Br (STD(Cl Br))',
'Sum of topological distances between Cl..I (STD(Cl I))',
'Sum of topological distances between Br..Br (STD(Br Br))',
'Sum of topological distances between Br..I (STD(Br I))',
'Sum of topological distances between I..I (STD(I I))',
'Wiener-type index from Z weighted distance matrix - Barysz matrix (WhetZ)',
'Wiener-type index from electronegativity weighted distance matrix (Whete)',
'Wiener-type index from mass weighted distance matrix (Whetm)',
'Wiener-type index from van der waals weighted distance matrix (Whetv)',
'Wiener-type index from polarizability weighted distance matrix (Whetp)',
'Balaban-type index from Z weighted distance matrix - Barysz matrix (JhetZ)',
'Balaban-type index from electronegativity weighted distance matrix (Jhete)',
'Balaban-type index from mass weighted distance matrix (Jhetm)',
'Balaban-type index from van der waals weighted distance matrix (Jhetv)',
'Balaban-type index from polarizability weighted distance matrix (Jhetp)',
'Topological diameter (TD)', 'Topological radius (TR)',
'Petitjean 2D shape (PJ2DS)', 'Balaban distance connectivity index (J)',
'Solvation connectivity index chi-0 (SCIX0)',
'Solvation connectivity index chi-1 (SCIX1)',
'Solvation connectivity index chi-2 (SCIX2)',
'Solvation connectivity index chi-3 (SCIX3)',
'Solvation connectivity index chi-4 (SCIX4)',
'Solvation connectivity index chi-5 (SCIX5)',
'Connectivity index chi-0 (CIX0)',
'Connectivity chi-1 [Randic connectivity] (CIX1)',
'Connectivity index chi-2 (CIX2)', 'Connectivity index chi-3 (CIX3)',
'Connectivity index chi-4 (CIX4)', 'Connectivity index chi-5 (CIX5)',
'Average connectivity index chi-0 (ACIX0)',
'Average connectivity index chi-1 (ACIX1)',
'Average connectivity index chi-2 (ACIX2)',
'Average connectivity index chi-3 (ACIX3)',
'Average connectivity index chi-4 (ACIX4)',
'Average connectivity index chi-5 (ACIX5)',
'reciprocal distance Randic-type index (RDR)',
'reciprocal distance square Randic-type index (RDSR)',
'1-path Kier alpha-modified shape index (KAMS1)',
'2-path Kier alpha-modified shape index (KAMS2)',
'3-path Kier alpha-modified shape index (KAMS3)', 'Kier flexibility (KF)',
'path/walk 2 - Randic shape index (RSIpw2)',
'path/walk 3 - Randic shape index (RSIpw3)',
'path/walk 4 - Randic shape index (RSIpw4)',
'path/walk 5 - Randic shape index (RSIpw5)',
'E-state topological parameter (ETP)', 'Ring Count 3 (RNGCNT3)',
'Ring Count 4 (RNGCNT4)', 'Ring Count 5 (RNGCNT5)',
'Ring Count 6 (RNGCNT6)', 'Ring Count 7 (RNGCNT7)',
'Ring Count 8 (RNGCNT8)', 'Ring Count 9 (RNGCNT9)',
'Ring Count 10 (RNGCNT10)', 'Ring Count 11 (RNGCNT11)',
'Ring Count 12 (RNGCNT12)', 'Ring Count 13 (RNGCNT13)',
'Ring Count 14 (RNGCNT14)', 'Ring Count 15 (RNGCNT15)',
'Ring Count 16 (RNGCNT16)', 'Ring Count 17 (RNGCNT17)',
'Ring Count 18 (RNGCNT18)', 'Ring Count 19 (RNGCNT19)',
'Ring Count 20 (RNGCNT20)', 'Atom Count (ATMCNT)', 'Bond Count (BNDCNT)',
'Atoms in Ring System (ATMRNGCNT)', 'Bonds in Ring System (BNDRNGCNT)',
'Cyclomatic number (CYCLONUM)', 'Number of ring systems (NRS)',
'Normalized number of ring systems (NNRS)', 'Ring Fusion degree (RFD)',
'Ring perimeter (RNGPERM)', 'Ring bridge count (RNGBDGE)',
'Molecule cyclized degree (MCD)', 'Ring Fusion density (RFDELTA)',
'Ring complexity index (RCI)', 'Van der Waals surface area (VSA)',
'MR1 (MR1)', 'MR2 (MR2)', 'MR3 (MR3)', 'MR4 (MR4)', 'MR5 (MR5)',
'MR6 (MR6)', 'MR7 (MR7)', 'MR8 (MR8)', 'ALOGP1 (ALOGP1)', 'ALOGP2 (ALOGP2)',
'ALOGP3 (ALOGP3)', 'ALOGP4 (ALOGP4)', 'ALOGP5 (ALOGP5)', 'ALOGP6 (ALOGP6)',
'ALOGP7 (ALOGP7)', 'ALOGP8 (ALOGP8)', 'ALOGP9 (ALOGP9)',
'ALOGP10 (ALOGP10)', 'PEOE1 (PEOE1)', 'PEOE2 (PEOE2)', 'PEOE3 (PEOE3)',
'PEOE4 (PEOE4)', 'PEOE5 (PEOE5)', 'PEOE6 (PEOE6)', 'PEOE7 (PEOE7)',
'PEOE8 (PEOE8)', 'PEOE9 (PEOE9)', 'PEOE10 (PEOE10)', 'PEOE11 (PEOE11)',
'PEOE12 (PEOE12)', 'PEOE13 (PEOE13)', 'PEOE14 (PEOE14)'
]
<file_sep>import os
import deepchem as dc
def test_sdf_load():
current_dir = os.path.dirname(os.path.realpath(__file__))
featurizer = dc.feat.CircularFingerprint(size=16)
loader = dc.data.SDFLoader(["LogP(RRCK)"],
featurizer=featurizer,
sanitize=True)
dataset = loader.create_dataset(
os.path.join(current_dir, "membrane_permeability.sdf"))
assert len(dataset) == 2
def test_singleton_sdf_load():
current_dir = os.path.dirname(os.path.realpath(__file__))
featurizer = dc.feat.CircularFingerprint(size=16)
loader = dc.data.SDFLoader(["LogP(RRCK)"],
featurizer=featurizer,
sanitize=True)
dataset = loader.create_dataset(os.path.join(current_dir, "singleton.sdf"))
assert len(dataset) == 1
def test_singleton_sdf_zip_load():
current_dir = os.path.dirname(os.path.realpath(__file__))
featurizer = dc.feat.CircularFingerprint(size=16)
loader = dc.data.SDFLoader(["LogP(RRCK)"],
featurizer=featurizer,
sanitize=True)
dataset = loader.create_dataset(os.path.join(current_dir, "singleton.zip"))
assert len(dataset) == 1
def test_sharded_sdf_load():
current_dir = os.path.dirname(os.path.realpath(__file__))
featurizer = dc.feat.CircularFingerprint(size=16)
loader = dc.data.SDFLoader(["LogP(RRCK)"],
featurizer=featurizer,
sanitize=True)
dataset = loader.create_dataset(os.path.join(current_dir,
"membrane_permeability.sdf"),
shard_size=1)
assert dataset.get_number_shards() == 2
assert len(dataset) == 2
def test_sharded_multi_file_sdf_load():
current_dir = os.path.dirname(os.path.realpath(__file__))
featurizer = dc.feat.CircularFingerprint(size=16)
loader = dc.data.SDFLoader(["LogP(RRCK)"],
featurizer=featurizer,
sanitize=True)
input_files = [
os.path.join(current_dir, "membrane_permeability.sdf"),
os.path.join(current_dir, "singleton.sdf")
]
dataset = loader.create_dataset(input_files, shard_size=1)
assert dataset.get_number_shards() == 3
assert len(dataset) == 3
def test_sharded_multi_file_sdf_zip_load():
current_dir = os.path.dirname(os.path.realpath(__file__))
featurizer = dc.feat.CircularFingerprint(size=16)
loader = dc.data.SDFLoader(["LogP(RRCK)"],
featurizer=featurizer,
sanitize=True)
dataset = loader.create_dataset(os.path.join(current_dir,
"multiple_sdf.zip"),
shard_size=1)
assert dataset.get_number_shards() == 3
assert len(dataset) == 3
def test_sdf_load_with_csv():
"""Test a case where SDF labels are in associated csv file"""
current_dir = os.path.dirname(os.path.realpath(__file__))
featurizer = dc.feat.CircularFingerprint(size=16)
loader = dc.data.SDFLoader(["atomization_energy"],
featurizer=featurizer,
sanitize=True)
dataset = loader.create_dataset(os.path.join(current_dir, "water.sdf"),
shard_size=1)
assert len(dataset) == 10
assert dataset.get_number_shards() == 10
assert dataset.get_task_names() == ["atomization_energy"]
<file_sep>"""
Tests for weave featurizer.
"""
import numpy as np
import deepchem as dc
from deepchem.feat.graph_features import max_pair_distance_pairs
def test_max_pair_distance_pairs():
"""Test that max pair distance pairs are computed properly."""
from rdkit import Chem
# Carbon
mol = Chem.MolFromSmiles('C')
# Test distance 1
pair_edges = max_pair_distance_pairs(mol, 1)
assert pair_edges.shape == (2, 1)
assert np.all(pair_edges.flatten() == np.array([0, 0]))
# Test distance 2
pair_edges = max_pair_distance_pairs(mol, 2)
assert pair_edges.shape == (2, 1)
assert np.all(pair_edges.flatten() == np.array([0, 0]))
# Test alkane
mol = Chem.MolFromSmiles('CCC')
# Test distance 1
pair_edges = max_pair_distance_pairs(mol, 1)
# 3 self connections and 2 bonds which are both counted twice because of
# symmetry for 7 total
assert pair_edges.shape == (2, 7)
# Test distance 2
pair_edges = max_pair_distance_pairs(mol, 2)
# Everything is connected at this distance
assert pair_edges.shape == (2, 9)
def test_max_pair_distance_infinity():
"""Test that max pair distance pairs are computed properly with infinity distance."""
from rdkit import Chem
# Test alkane
mol = Chem.MolFromSmiles('CCC')
# Test distance infinity
pair_edges = max_pair_distance_pairs(mol, None)
# Everything is connected at this distance
assert pair_edges.shape == (2, 9)
# Test pentane
mol = Chem.MolFromSmiles('CCCCC')
# Test distance infinity
pair_edges = max_pair_distance_pairs(mol, None)
# Everything is connected at this distance
assert pair_edges.shape == (2, 25)
def test_weave_single_carbon():
"""Test that single carbon atom is featurized properly."""
mols = ['C']
featurizer = dc.feat.WeaveFeaturizer()
mol_list = featurizer.featurize(mols)
mol = mol_list[0]
# Only one carbon
assert mol.get_num_atoms() == 1
# Test feature sizes
assert mol.get_num_features() == 75
# No bonds, so only 1 pair feature (for the self interaction)
assert mol.get_pair_features().shape == (1 * 1, 14)
def test_chiral_weave():
"""Test weave features on a molecule with chiral structure."""
mols = [r"F\C=C\F"]
featurizer = dc.feat.WeaveFeaturizer(use_chirality=True)
mol_list = featurizer.featurize(mols)
mol = mol_list[0]
# Only 4 atoms
assert mol.get_num_atoms() == 4
# Test feature sizes for chirality
assert mol.get_num_features() == 78
def test_weave_alkane():
"""Test on simple alkane"""
mols = ['CCC']
featurizer = dc.feat.WeaveFeaturizer()
mol_list = featurizer.featurize(mols)
mol = mol_list[0]
  # 3 carbons in alkane
assert mol.get_num_atoms() == 3
# Test feature sizes
assert mol.get_num_features() == 75
# Should be a 3x3 interaction grid
assert mol.get_pair_features().shape == (3 * 3, 14)
def test_weave_alkane_max_pairs():
"""Test on simple alkane with max pairs distance cutoff"""
mols = ['CCC']
featurizer = dc.feat.WeaveFeaturizer(max_pair_distance=1)
# mol_list = featurizer.featurize(mols)
# mol = mol_list[0]
from rdkit import Chem
mol = featurizer._featurize(Chem.MolFromSmiles(mols[0]))
  # 3 carbons in alkane
assert mol.get_num_atoms() == 3
# Test feature sizes
assert mol.get_num_features() == 75
# Should be a 7x14 interaction grid since there are 7 pairs within graph
# distance 1 (3 self interactions plus 2 bonds counted twice because of
# symmetry)
assert mol.get_pair_features().shape == (7, 14)
def test_carbon_nitrogen():
"""Test on carbon nitrogen molecule"""
# Note there is a central nitrogen of degree 4, with 4 carbons
# of degree 1 (connected only to central nitrogen).
mols = ['C[N+](C)(C)C']
# import rdkit.Chem
# mols = [rdkit.Chem.MolFromSmiles(s) for s in raw_smiles]
featurizer = dc.feat.WeaveFeaturizer()
mols = featurizer.featurize(mols)
mol = mols[0]
# 5 atoms in compound
assert mol.get_num_atoms() == 5
# Test feature sizes
assert mol.get_num_features() == 75
  # Should be a 5x5 interaction grid, since the molecule has 5 atoms
assert mol.get_pair_features().shape == (5 * 5, 14)
<file_sep>import numpy as np
import time
import logging
from collections.abc import Sequence as SequenceCollection
from deepchem.data import Dataset, NumpyDataset
from deepchem.metrics import Metric
from deepchem.models.models import Model
from deepchem.models.losses import Loss
from deepchem.models.optimizers import Optimizer, Adam
from deepchem.utils.evaluate import GeneratorEvaluator
from deepchem.trans.transformers import Transformer, undo_transforms
from typing import Any, Callable, Iterable, List, Optional, Tuple, Union, Sequence
from deepchem.utils.typing import LossFn, OneOrMany
# JAX depend
import jax.numpy as jnp
import jax
import haiku as hk
import optax
import warnings
logger = logging.getLogger(__name__)
def create_default_eval_fn(forward_fn, params):
"""
    Creates the (jit-compiled) function used to evaluate the model.
"""
@jax.jit
def eval_model(batch, rng=None):
predict = forward_fn(params, rng, batch)
return predict
return eval_model
def create_default_update_fn(optimizer, model_loss):
"""
    This function creates the update function used to implement backpropagation.
"""
@jax.jit
def update(params, opt_state, batch, target, weights,
rng) -> Tuple[hk.Params, optax.OptState, jnp.ndarray]:
batch_loss, grads = jax.value_and_grad(model_loss)(params, batch,
target, weights, rng)
updates, opt_state = optimizer.update(grads, opt_state)
new_params = optax.apply_updates(params, updates)
return new_params, opt_state, batch_loss
return update
def create_default_gradient_fn(forward_fn, loss_outputs, loss_fn):
"""
    This function creates the loss/gradient function used to implement backpropagation.
"""
@jax.jit
def model_loss(params, batch, target, weights, rng):
predict = forward_fn(params, rng, batch)
if loss_outputs is not None:
predict = [predict[i] for i in loss_outputs]
return loss_fn(predict, target, weights)
return model_loss
class JaxModel(Model):
"""This is a DeepChem model implemented by a Jax Model
    Here is a simple example that uses JaxModel to train a
    Haiku (a JAX neural network library) based model on a DeepChem
    dataset.
>>>
>> def forward_model(x):
>> net = hk.nets.MLP([512, 256, 128, 1])
>> return net(x)
>> def rms_loss(pred, tar, w):
>> return jnp.mean(optax.l2_loss(pred, tar))
>> params_init, forward_fn = hk.transform(forward_model)
>> rng = jax.random.PRNGKey(500)
>> inputs, _, _, _ = next(iter(dataset.iterbatches(batch_size=256)))
>> params = params_init(rng, inputs)
    >> j_m = JaxModel(forward_fn, params, rms_loss, batch_size=256, learning_rate=0.001, log_frequency=100)
>> j_m.fit(train_dataset)
All optimizations will be done using the optax library.
"""
def __init__(self,
forward_fn: hk.State,
params: hk.Params,
loss: Optional[Union[Loss, LossFn]],
output_types: Optional[List[str]] = None,
batch_size: int = 100,
learning_rate: float = 0.001,
optimizer: Optional[Union[optax.GradientTransformation,
Optimizer]] = None,
grad_fn: Callable = create_default_gradient_fn,
update_fn: Callable = create_default_update_fn,
eval_fn: Callable = create_default_eval_fn,
rng=jax.random.PRNGKey(1),
log_frequency: int = 100,
**kwargs):
"""
Create a new JaxModel
Parameters
----------
        forward_fn: hk.State or Function
          Any Jax based model that has an `apply` method for computing the network. Currently
          only Haiku models are supported.
params: hk.Params
The parameter of the Jax based networks
loss: dc.models.losses.Loss or function
a Loss or function defining how to compute the training loss for each
batch, as described above
output_types: list of strings, optional (default None)
the type of each output from the model, as described above
batch_size: int, optional (default 100)
default batch size for training and evaluating
learning_rate: float or LearningRateSchedule, optional (default 0.001)
the learning rate to use for fitting. If optimizer is specified, this is
ignored.
optimizer: optax object
For the time being, it is optax object
rng: jax.random.PRNGKey, optional (default 1)
A default global PRNG key to use for drawing random numbers.
log_frequency: int, optional (default 100)
The frequency at which to log data. Data is logged using
`logging` by default.
        Miscellaneous Parameters Yet To Add
        -----------------------------------
model_dir: str, optional (default None)
Will be added along with the save & load method
tensorboard: bool, optional (default False)
whether to log progress to TensorBoard during training
wandb: bool, optional (default False)
whether to log progress to Weights & Biases during training
Work in Progress
----------------
[1] Integrate the optax losses, optimizers, schedulers with Deepchem
[2] Support for saving & loading the model.
"""
super(JaxModel, self).__init__(model=(forward_fn, params), **kwargs)
warnings.warn(
'JaxModel is still in active development and all features may not yet be implemented'
)
self._loss_fn = loss # lambda pred, tar: jnp.mean(optax.l2_loss(pred, tar))
self.batch_size = batch_size
self.learning_rate = learning_rate
if optimizer is None:
optimizer = Adam(1e-3)
if not isinstance(optimizer, optax.GradientTransformation):
self.optimizer = optimizer._create_jax_optimizer()
else:
self.optimizer = optimizer
self.forward_fn = forward_fn
self.params = params
self._built = False
self.log_frequency = log_frequency
self.rng = rng
self._create_gradient_fn = grad_fn
self._create_update_fn = update_fn
self._create_eval_fn = eval_fn
if output_types is None:
self._prediction_outputs = None
self._loss_outputs = None
self._variance_outputs = None
self._other_outputs = None
else:
self._prediction_outputs = []
self._loss_outputs = []
self._variance_outputs = []
self._other_outputs = []
for i, type in enumerate(output_types):
if type == 'prediction':
self._prediction_outputs.append(i)
elif type == 'loss':
self._loss_outputs.append(i)
elif type == 'variance':
self._variance_outputs.append(i)
else:
self._other_outputs.append(i)
if len(self._loss_outputs) == 0:
self._loss_outputs = self._prediction_outputs
def _ensure_built(self):
"""The first time this is called, create internal data structures.
Work in Progress
----------------
        [1] Integrate the optax losses, optimizers, schedulers with DeepChem
"""
if self._built:
return
self._built = True
self._global_step = 0
self.opt_state = self.optimizer.init(self.params)
def fit(self,
dataset: Dataset,
nb_epochs: int = 10,
deterministic: bool = False,
loss: Optional[Union[Loss, LossFn]] = None,
callbacks: Union[Callable, List[Callable]] = [],
all_losses: Optional[List[float]] = None) -> float:
"""Train this model on a dataset.
Parameters
----------
dataset: Dataset
the Dataset to train on
        nb_epochs: int
the number of epochs to train for
deterministic: bool
if True, the samples are processed in order. If False, a different random
order is used for each epoch.
loss: function
a function of the form f(outputs, labels, weights) that computes the loss
for each batch. If None (the default), the model's standard loss function
is used.
callbacks: function or list of functions
one or more functions of the form f(model, step) that will be invoked after
every step. This can be used to perform validation, logging, etc.
all_losses: Optional[List[float]], optional (default None)
If specified, all logged losses are appended into this list. Note that
you can call `fit()` repeatedly with the same list and losses will
continue to be appended.
Returns
-------
The average loss over the most recent checkpoint interval
        Miscellaneous Parameters Yet To Add
        -----------------------------------
max_checkpoints_to_keep: int
the maximum number of checkpoints to keep. Older checkpoints are discarded.
checkpoint_interval: int
the frequency at which to write checkpoints, measured in training steps.
Set this to 0 to disable automatic checkpointing.
restore: bool
if True, restore the model from the most recent checkpoint and continue training
from there. If False, retrain the model from scratch.
variables: list of hk.Variable
the variables to train. If None (the default), all trainable variables in
the model are used.
Work in Progress
----------------
        [1] Integrate the optax losses, optimizers, schedulers with DeepChem
[2] Support for saving & loading the model.
[3] Adding support for output types (choosing only self._loss_outputs)
"""
return self.fit_generator(
self.default_generator(dataset,
epochs=nb_epochs,
deterministic=deterministic), loss,
callbacks, all_losses)
def fit_generator(self,
generator: Iterable[Tuple[Any, Any, Any]],
loss: Optional[Union[Loss, LossFn]] = None,
callbacks: Union[Callable, List[Callable]] = [],
all_losses: Optional[List[float]] = None) -> float:
if not isinstance(callbacks, SequenceCollection):
callbacks = [callbacks]
self._ensure_built()
avg_loss = 0.0
last_avg_loss = 0.0
averaged_batches = 0
if loss is None:
loss = self._loss_fn
model_loss_fn = self._create_gradient_fn(self.forward_fn,
self._loss_outputs, loss)
grad_update = self._create_update_fn(self.optimizer, model_loss_fn)
params, opt_state = self._get_trainable_params()
rng = self.rng
time1 = time.time()
# Main training loop
for batch in generator:
inputs, labels, weights = self._prepare_batch(batch)
if isinstance(inputs, list) and len(inputs) == 1:
inputs = inputs[0]
if isinstance(labels, list) and len(labels) == 1:
labels = labels[0]
if isinstance(weights, list) and len(weights) == 1:
weights = weights[0]
params, opt_state, batch_loss = grad_update(params,
opt_state,
inputs,
labels,
weights,
rng=rng)
rng, _ = jax.random.split(rng)
avg_loss += jax.device_get(batch_loss)
self._global_step += 1
current_step = self._global_step
averaged_batches += 1
should_log = (current_step % self.log_frequency == 0)
if should_log:
avg_loss = float(avg_loss) / averaged_batches
logger.info('Ending global_step %d: Average loss %g' %
(current_step, avg_loss))
if all_losses is not None:
all_losses.append(avg_loss)
# Capture the last avg_loss in case of return since we're resetting to 0 now
last_avg_loss = avg_loss
avg_loss = 0.0
averaged_batches = 0
for c in callbacks:
c(self, current_step)
# Report final results.
if averaged_batches > 0:
avg_loss = float(avg_loss) / averaged_batches
logger.info('Ending global_step %d: Average loss %g' %
(current_step, avg_loss))
if all_losses is not None:
all_losses.append(avg_loss)
last_avg_loss = avg_loss
time2 = time.time()
logger.info("TIMING: model fitting took %0.3f s" % (time2 - time1))
self._set_trainable_params(params, opt_state)
return last_avg_loss
def _predict(self, generator: Iterable[Tuple[Any, Any, Any]],
transformers: List[Transformer], uncertainty: bool,
other_output_types: Optional[OneOrMany[str]]):
"""
Predict outputs for data provided by a generator.
This is the private implementation of prediction. Do not
call it directly. Instead call one of the public prediction
methods.
Parameters
----------
generator: generator
this should generate batches, each represented as a tuple of the form
(inputs, labels, weights).
transformers: List[dc.trans.Transformers]
Transformers that the input data has been transformed by. The output
is passed through these transformers to undo the transformations.
uncertainty: bool
specifies whether this is being called as part of estimating uncertainty.
If True, it sets the training flag so that dropout will be enabled, and
returns the values of the uncertainty outputs.
other_output_types: list, optional
Provides a list of other output_types (strings) to predict from model.
Returns
-------
A NumpyArray if the model produces a single output, or a list of arrays otherwise.
"""
results: Optional[List[List[np.ndarray]]] = None
variances: Optional[List[List[np.ndarray]]] = None
if uncertainty and (other_output_types is not None):
raise ValueError(
'This model cannot compute uncertainties and other output types simultaneously. Please invoke one at a time.'
)
if uncertainty:
if self._variance_outputs is None or len(
self._variance_outputs) == 0:
raise ValueError('This model cannot compute uncertainties')
if len(self._variance_outputs) != len(self._prediction_outputs):
raise ValueError(
'The number of variances must exactly match the number of outputs'
)
if other_output_types:
if self._other_outputs is None or len(self._other_outputs) == 0:
raise ValueError(
'This model cannot compute other outputs since no other output_types were specified.'
)
self._ensure_built()
eval_fn = self._create_eval_fn(self.forward_fn, self.params)
rng = self.rng
for batch in generator:
inputs, _, _ = self._prepare_batch(batch)
if isinstance(inputs, list) and len(inputs) == 1:
inputs = inputs[0]
output_values = eval_fn(inputs, rng)
if isinstance(output_values, jnp.ndarray):
output_values = [output_values]
output_values = [jax.device_get(t) for t in output_values]
            # Apply transformers and record results.
if uncertainty:
var = [output_values[i] for i in self._variance_outputs]
if variances is None:
variances = [var]
else:
for i, t in enumerate(var):
variances[i].append(t)
access_values = []
if other_output_types:
access_values += self._other_outputs
elif self._prediction_outputs is not None:
access_values += self._prediction_outputs
if len(access_values) > 0:
output_values = [output_values[i] for i in access_values]
if len(transformers) > 0:
if len(output_values) > 1:
raise ValueError(
"predict() does not support Transformers for models with multiple outputs."
)
elif len(output_values) == 1:
output_values = [
undo_transforms(output_values[0], transformers)
]
if results is None:
results = [[] for i in range(len(output_values))]
for i, t in enumerate(output_values):
results[i].append(t)
# Concatenate arrays to create the final results.
final_results = []
final_variances = []
if results is not None:
for r in results:
final_results.append(np.concatenate(r, axis=0))
if uncertainty and variances is not None:
for v in variances:
final_variances.append(np.concatenate(v, axis=0))
return zip(final_results, final_variances)
if len(final_results) == 1:
return final_results[0]
else:
return final_results
def predict_on_generator(
self,
generator: Iterable[Tuple[Any, Any, Any]],
transformers: List[Transformer] = [],
output_types: Optional[OneOrMany[str]] = None
) -> OneOrMany[np.ndarray]:
"""
Parameters
----------
generator: generator
this should generate batches, each represented as a tuple of the form
(inputs, labels, weights).
transformers: List[dc.trans.Transformers]
Transformers that the input data has been transformed by. The output
is passed through these transformers to undo the transformations.
output_types: String or list of Strings
If specified, all outputs of this type will be retrieved
from the model. If output_types is specified, outputs must
be None.
Returns
-------
        a NumPy array if the model produces a single output, or a list of arrays
if it produces multiple outputs
"""
return self._predict(generator, transformers, False, output_types)
def predict_on_batch(
self,
X: np.typing.ArrayLike,
transformers: List[Transformer] = []) -> OneOrMany[np.ndarray]:
"""Generates predictions for input samples, processing samples in a batch.
Parameters
----------
X: ndarray
the input data, as a Numpy array.
transformers: List[dc.trans.Transformers]
Transformers that the input data has been transformed by. The output
is passed through these transformers to undo the transformations.
Returns
-------
        a NumPy array if the model produces a single output, or a list of arrays
if it produces multiple outputs
"""
dataset = NumpyDataset(X=X, y=None)
return self.predict(dataset, transformers)
def predict_uncertainty_on_batch(
self,
X: Sequence,
masks: int = 50) -> OneOrMany[Tuple[np.ndarray, np.ndarray]]:
raise NotImplementedError(
            'Predicting uncertainty on a batch is not supported currently for JAX models'
)
def predict(
self,
dataset: Dataset,
transformers: List[Transformer] = [],
output_types: Optional[List[str]] = None) -> OneOrMany[np.ndarray]:
"""
Uses self to make predictions on provided Dataset object.
Parameters
----------
dataset: dc.data.Dataset
Dataset to make prediction on
transformers: List[dc.trans.Transformers]
Transformers that the input data has been transformed by. The output
is passed through these transformers to undo the transformations.
output_types: String or list of Strings
If specified, all outputs of this type will be retrieved
from the model. If output_types is specified, outputs must
be None.
Returns
-------
        a NumPy array if the model produces a single output, or a list of arrays
if it produces multiple outputs
"""
generator = self.default_generator(dataset,
mode='predict',
pad_batches=False)
return self.predict_on_generator(generator,
transformers=transformers,
output_types=output_types)
def get_global_step(self) -> int:
"""Get the number of steps of fitting that have been performed."""
return self._global_step
def predict_embedding(self, dataset: Dataset) -> OneOrMany[np.ndarray]:
raise NotImplementedError(
'Predicting embedding is not supported currently for JAX models')
# def predict_uncertainty(self, dataset: Dataset, masks: int = 50
# ) -> OneOrMany[Tuple[np.ndarray, np.ndarray]]:
# """
# Predict the model's outputs, along with the uncertainty in each one.
# The uncertainty is computed as described in https://arxiv.org/abs/1703.04977.
# It involves repeating the prediction many times with different dropout masks.
# The prediction is computed as the average over all the predictions. The
# uncertainty includes both the variation among the predicted values (epistemic
# uncertainty) and the model's own estimates for how well it fits the data
# (aleatoric uncertainty). Not all models support uncertainty prediction.
# Parameters
# ----------
# dataset: dc.data.Dataset
# Dataset to make prediction on
# masks: int
# the number of dropout masks to average over
# Returns
# -------
# for each output, a tuple (y_pred, y_std) where y_pred is the predicted
# value of the output, and each element of y_std estimates the standard
# deviation of the corresponding element of y_pred
# """
# sum_pred: List[np.ndarray] = []
# sum_sq_pred: List[np.ndarray] = []
# sum_var: List[np.ndarray] = []
# for i in range(masks):
# generator = self.default_generator(
# dataset, mode='uncertainty', pad_batches=False)
# results = self._predict(generator, [], True, None)
# if len(sum_pred) == 0:
# for p, v in results:
# sum_pred.append(p)
# sum_sq_pred.append(p * p)
# sum_var.append(v)
# else:
# for j, (p, v) in enumerate(results):
# sum_pred[j] += p
# sum_sq_pred[j] += p * p
# sum_var[j] += v
# output = []
# std = []
# for i in range(len(sum_pred)):
# p = sum_pred[i] / masks
# output.append(p)
# std.append(np.sqrt(sum_sq_pred[i] / masks - p * p + sum_var[i] / masks))
# if len(output) == 1:
# return (output[0], std[0])
# else:
# return list(zip(output, std))
def evaluate_generator(self,
generator: Iterable[Tuple[Any, Any, Any]],
metrics: List[Metric],
transformers: List[Transformer] = [],
per_task_metrics: bool = False):
"""Evaluate the performance of this model on the data produced by a generator.
Parameters
----------
generator: generator
this should generate batches, each represented as a tuple of the form
(inputs, labels, weights).
        metrics: list of deepchem.metrics.Metric
Evaluation metric
transformers: List[dc.trans.Transformers]
Transformers that the input data has been transformed by. The output
is passed through these transformers to undo the transformations.
per_task_metrics: bool
If True, return per-task scores.
Returns
-------
dict
Maps tasks to scores under metric.
"""
evaluator = GeneratorEvaluator(self, generator, transformers)
return evaluator.compute_model_performance(metrics, per_task_metrics)
def _get_trainable_params(self):
"""
Will be used to seperate freezing parameters while transfer learning
"""
return self.params, self.opt_state
def _set_trainable_params(self, params: hk.Params,
opt_state: optax.OptState):
"""
A functional approach to setting the final parameters after training
"""
self.params = params
self.opt_state = opt_state
def _prepare_batch(self, batch):
inputs, labels, weights = batch
inputs = [
x.astype(np.float32) if x.dtype == np.float64 else x for x in inputs
]
if labels is not None:
labels = [
x.astype(np.float32) if x.dtype == np.float64 else x
for x in labels
]
else:
labels = []
if weights is not None:
weights = [
x.astype(np.float32) if x.dtype == np.float64 else x
for x in weights
]
else:
weights = []
return (inputs, labels, weights)
def default_generator(
self,
dataset: Dataset,
epochs: int = 1,
mode: str = 'fit',
deterministic: bool = True,
pad_batches: bool = True) -> Iterable[Tuple[List, List, List]]:
"""Create a generator that iterates batches for a dataset.
Subclasses may override this method to customize how model inputs are
generated from the data.
Parameters
----------
dataset: Dataset
the data to iterate
epochs: int
the number of times to iterate over the full dataset
mode: str
allowed values are 'fit' (called during training), 'predict' (called
during prediction), and 'uncertainty' (called during uncertainty
prediction)
deterministic: bool
whether to iterate over the dataset in order, or randomly shuffle the
data for each epoch
pad_batches: bool
whether to pad each batch up to this model's preferred batch size
Returns
-------
a generator that iterates batches, each represented as a tuple of lists:
([inputs], [outputs], [weights])
"""
for epoch in range(epochs):
for (X_b, y_b, w_b,
_) in dataset.iterbatches(batch_size=self.batch_size,
deterministic=deterministic,
pad_batches=pad_batches):
yield ([X_b], [y_b], [w_b])
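
# Example (illustrative sketch, continuing the Haiku example from the JaxModel
# class docstring; `forward_fn`, `params`, `rms_loss`, and the datasets are
# assumed to be defined as in that example):
#
#   model = JaxModel(forward_fn, params, rms_loss,
#                    batch_size=256, learning_rate=1e-3)
#   model.fit(train_dataset, nb_epochs=10)
#   y_pred = model.predict(test_dataset)
#   generator = model.default_generator(test_dataset, mode='predict',
#                                       pad_batches=False)
#   scores = model.evaluate_generator(generator, [Metric(...)])  # any dc Metric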
<file_sep>"""
This file contains utilities for molecular docking.
"""
from typing import List, Optional, Tuple
import os
import numpy as np
from deepchem.utils.typing import RDKitMol
from deepchem.utils.pdbqt_utils import pdbqt_to_pdb
def write_vina_conf(protein_filename: str,
ligand_filename: str,
centroid: np.ndarray,
box_dims: np.ndarray,
conf_filename: str,
num_modes: int = 9,
exhaustiveness: Optional[int] = None) -> None:
"""Writes Vina configuration file to disk.
Autodock Vina accepts a configuration file which provides options
under which Vina is invoked. This utility function writes a vina
configuration file which directs Autodock vina to perform docking
under the provided options.
Parameters
----------
protein_filename: str
Filename for protein
ligand_filename: str
Filename for the ligand
centroid: np.ndarray
A numpy array with shape `(3,)` holding centroid of system
box_dims: np.ndarray
A numpy array of shape `(3,)` holding the size of the box to dock
conf_filename: str
Filename to write Autodock Vina configuration to.
num_modes: int, optional (default 9)
The number of binding modes Autodock Vina should find
exhaustiveness: int, optional
The exhaustiveness of the search to be performed by Vina
"""
with open(conf_filename, "w") as f:
f.write("receptor = %s\n" % protein_filename)
f.write("ligand = %s\n\n" % ligand_filename)
f.write("center_x = %f\n" % centroid[0])
f.write("center_y = %f\n" % centroid[1])
f.write("center_z = %f\n\n" % centroid[2])
f.write("size_x = %f\n" % box_dims[0])
f.write("size_y = %f\n" % box_dims[1])
f.write("size_z = %f\n\n" % box_dims[2])
f.write("num_modes = %d\n\n" % num_modes)
if exhaustiveness is not None:
f.write("exhaustiveness = %d\n" % exhaustiveness)
def write_gnina_conf(protein_filename: str,
ligand_filename: str,
conf_filename: str,
num_modes: int = 9,
exhaustiveness: Optional[int] = None,
**kwargs) -> None:
"""Writes GNINA configuration file to disk.
GNINA accepts a configuration file which provides options
under which GNINA is invoked. This utility function writes a
configuration file which directs GNINA to perform docking
under the provided options.
Parameters
----------
protein_filename: str
Filename for protein
ligand_filename: str
Filename for the ligand
conf_filename: str
    Filename to write the GNINA configuration to.
num_modes: int, optional (default 9)
The number of binding modes GNINA should find
exhaustiveness: int, optional
The exhaustiveness of the search to be performed by GNINA
kwargs:
Args supported by GNINA documented here
https://github.com/gnina/gnina#usage
"""
with open(conf_filename, "w") as f:
f.write("receptor = %s\n" % protein_filename)
f.write("ligand = %s\n\n" % ligand_filename)
f.write("autobox_ligand = %s\n\n" % protein_filename)
if exhaustiveness is not None:
f.write("exhaustiveness = %d\n" % exhaustiveness)
f.write("num_modes = %d\n\n" % num_modes)
for k, v in kwargs.items():
f.write("%s = %s\n" % (str(k), str(v)))
def read_gnina_log(log_file: str) -> np.ndarray:
"""Read GNINA logfile and get docking scores.
GNINA writes computed binding affinities to a logfile.
Parameters
----------
log_file: str
Filename of logfile generated by GNINA.
Returns
-------
scores: np.array, dimension (num_modes, 3)
Array of binding affinity (kcal/mol), CNN pose score,
and CNN affinity for each binding mode.
"""
scores = []
lines = open(log_file).readlines()
mode_start = np.inf
for idx, line in enumerate(lines):
if line[:6] == '-----+':
mode_start = idx
if idx > mode_start:
mode = line.split()
score = [float(x) for x in mode[1:]]
scores.append(score)
return np.array(scores)
def load_docked_ligands(
pdbqt_output: str) -> Tuple[List[RDKitMol], List[float]]:
"""This function loads ligands docked by autodock vina.
Autodock vina writes outputs to disk in a PDBQT file format. This
PDBQT file can contain multiple docked "poses". Recall that a pose
is an energetically favorable 3D conformation of a molecule. This
utility function reads and loads the structures for multiple poses
from vina's output file.
Parameters
----------
pdbqt_output: str
Should be the filename of a file generated by autodock vina's
docking software.
Returns
-------
Tuple[List[rdkit.Chem.rdchem.Mol], List[float]]
Tuple of `molecules, scores`. `molecules` is a list of rdkit
molecules with 3D information. `scores` is the associated vina
score.
Notes
-----
This function requires RDKit to be installed.
"""
try:
from rdkit import Chem
except ModuleNotFoundError:
raise ImportError("This function requires RDKit to be installed.")
lines = open(pdbqt_output).readlines()
molecule_pdbqts = []
scores = []
current_pdbqt: Optional[List[str]] = None
for line in lines:
if line[:5] == "MODEL":
current_pdbqt = []
elif line[:19] == "REMARK VINA RESULT:":
words = line.split()
# the line has format
# REMARK VINA RESULT: score ...
# There is only 1 such line per model so we can append it
scores.append(float(words[3]))
elif line[:6] == "ENDMDL":
molecule_pdbqts.append(current_pdbqt)
current_pdbqt = None
else:
# FIXME: Item "None" of "Optional[List[str]]" has no attribute "append"
current_pdbqt.append(line) # type: ignore
molecules = []
for pdbqt_data in molecule_pdbqts:
pdb_block = pdbqt_to_pdb(pdbqt_data=pdbqt_data)
mol = Chem.MolFromPDBBlock(str(pdb_block),
sanitize=False,
removeHs=False)
molecules.append(mol)
return molecules, scores
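
# Example (illustrative sketch; the output filename is hypothetical):
#
#   molecules, scores = load_docked_ligands("ligand_docked.pdbqt")
#   best_pose, best_score = molecules[0], scores[0]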
def prepare_inputs(protein: str,
ligand: str,
replace_nonstandard_residues: bool = True,
remove_heterogens: bool = True,
remove_water: bool = True,
add_hydrogens: bool = True,
pH: float = 7.0,
optimize_ligand: bool = True,
pdb_name: Optional[str] = None) -> Tuple[RDKitMol, RDKitMol]:
"""This prepares protein-ligand complexes for docking.
Autodock Vina requires PDB files for proteins and ligands with
sensible inputs. This function uses PDBFixer and RDKit to ensure
that inputs are reasonable and ready for docking. Default values
are given for convenience, but fixing PDB files is complicated and
human judgement is required to produce protein structures suitable
for docking. Always inspect the results carefully before trying to
perform docking.
Parameters
----------
protein: str
Filename for protein PDB file or a PDBID.
ligand: str
Either a filename for a ligand PDB file or a SMILES string.
replace_nonstandard_residues: bool (default True)
Replace nonstandard residues with standard residues.
remove_heterogens: bool (default True)
Removes residues that are not standard amino acids or nucleotides.
remove_water: bool (default True)
Remove water molecules.
add_hydrogens: bool (default True)
Add missing hydrogens at the protonation state given by `pH`.
pH: float (default 7.0)
Most common form of each residue at given `pH` value is used.
optimize_ligand: bool (default True)
If True, optimize ligand with RDKit. Required for SMILES inputs.
pdb_name: Optional[str]
If given, write sanitized protein and ligand to files called
"pdb_name.pdb" and "ligand_pdb_name.pdb"
Returns
-------
Tuple[RDKitMol, RDKitMol]
Tuple of `protein_molecule, ligand_molecule` with 3D information.
Note
----
This function requires RDKit and OpenMM to be installed.
Read more about PDBFixer here: https://github.com/openmm/pdbfixer.
Examples
--------
>>> p, m = prepare_inputs('3cyx', 'CCC')
>> p.GetNumAtoms()
>> m.GetNumAtoms()
>>> p, m = prepare_inputs('3cyx', 'CCC', remove_heterogens=False)
>> p.GetNumAtoms()
"""
try:
from rdkit import Chem
from rdkit.Chem import AllChem
from pdbfixer import PDBFixer
from simtk.openmm.app import PDBFile
except ModuleNotFoundError:
raise ImportError(
"This function requires RDKit and OpenMM to be installed.")
if protein.endswith('.pdb'):
fixer = PDBFixer(protein)
else:
fixer = PDBFixer(url='https://files.rcsb.org/download/%s.pdb' %
(protein))
if ligand.endswith('.pdb'):
m = Chem.MolFromPDBFile(ligand)
else:
m = Chem.MolFromSmiles(ligand, sanitize=True)
# Apply common fixes to PDB files
if replace_nonstandard_residues:
fixer.findMissingResidues()
fixer.findNonstandardResidues()
fixer.replaceNonstandardResidues()
if remove_heterogens and not remove_water:
fixer.removeHeterogens(True)
if remove_heterogens and remove_water:
fixer.removeHeterogens(False)
if add_hydrogens:
fixer.addMissingHydrogens(pH)
PDBFile.writeFile(fixer.topology, fixer.positions, open('tmp.pdb', 'w'))
p = Chem.MolFromPDBFile('tmp.pdb', sanitize=True)
os.remove('tmp.pdb')
# Optimize ligand
if optimize_ligand:
m = Chem.AddHs(m) # need hydrogens for optimization
AllChem.EmbedMolecule(m)
AllChem.MMFFOptimizeMolecule(m)
if pdb_name:
Chem.rdmolfiles.MolToPDBFile(p, '%s.pdb' % (pdb_name))
Chem.rdmolfiles.MolToPDBFile(m, 'ligand_%s.pdb' % (pdb_name))
return (p, m)
<file_sep>
#Processing of ToxCast data
#Author - <NAME>
import pandas as pd
import numpy as np
#Loading dataframes and editing indices
path_to_casn_smiles = "./casn_to_smiles.csv.gz"
path_to_code_casn = "./code_to_casn.csv.gz"
path_to_hitc_code = "./code_to_hitc.csv.gz"
casn_smiles_df = pd.read_csv(path_to_casn_smiles)
code_casn_df = pd.read_csv(path_to_code_casn)
hitc_code_df = pd.read_csv(path_to_hitc_code)
casn_smiles_df = casn_smiles_df[['Substance_CASRN', 'Structure_SMILES']]
code_casn_df = code_casn_df[['casn', 'code']]
hitc_code_df.rename(columns = {'Unnamed: 0': 'code'}, inplace = True)
casn_smiles_df.rename(columns = {'Substance_CASRN': 'casn', 'Structure_SMILES': 'smiles'}, inplace = True)
code_casn_df.set_index('code', inplace = True)
casn_smiles_df.set_index('casn', inplace= True)
#Loop through rows of hitc matrix and replace codes with smiles strings
badCounter = 0 #keep track of rows with no corresponding smiles strings
for index, data in hitc_code_df.iterrows():
rowList = data.values.tolist()
code = rowList[0]
#get corresponding casn
try:
casn = code_casn_df.loc[code, 'casn']
except KeyError:
badCounter+=1
continue
#get corresponding smiles
try:
smiles = casn_smiles_df.loc[casn, 'smiles']
except KeyError:
badCounter+=1
continue
#write to cell
hitc_code_df.loc[index, 'code'] = smiles
#Tidy up and write to csv
hitc_code_df.rename(columns = {'code': 'smiles'}, inplace = True)
hitc_code_df.dropna(subset = ['smiles'], inplace = True)
hitc_code_df.reset_index(inplace = True, drop = True)
hitc_code_df.to_csv("./reprocessed_tox_cast.csv", index=False)
<file_sep>MoleculeNet
===========
The DeepChem library is packaged alongside the MoleculeNet suite of datasets.
One of the most important parts of machine learning applications is finding a suitable dataset.
The MoleculeNet suite has curated a whole range of datasets and loaded them into DeepChem
:code:`dc.data.Dataset` objects for convenience.
.. include:: moleculenet_cheatsheet.rst
Contributing a new dataset to MoleculeNet
-----------------------------------------
If you are proposing a new dataset to be included in the
MoleculeNet benchmarking suite, please follow the instructions below.
Please review the `datasets already available in MolNet`_ before contributing.
0. Read the `Contribution guidelines`_.
1. Open an `issue`_ to discuss the dataset you want to add to MolNet.
2. Write a `DatasetLoader` class that inherits from `deepchem.molnet.load_function.molnet_loader._MolnetLoader`_ and implements a `create_dataset` method. See the `_QM9Loader`_ for a simple example; a minimal sketch is also shown after this list.
3. Write a `load_dataset` function that documents the dataset and add your load function to `deepchem.molnet.__init__.py`_ for easy importing.
4. Prepare your dataset as a .tar.gz or .zip file. Accepted filetypes include CSV, JSON, and SDF.
5. Ask a member of the technical steering committee to add your .tar.gz or .zip file to the DeepChem AWS bucket. Modify your load function to pull down the dataset from AWS.
6. Add documentation for your loader to the `MoleculeNet docs`_.
7. Submit a [WIP] PR (Work in progress pull request) following the PR `template`_.
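The sketch below is only illustrative — the loader name, URL, tasks, and file name are placeholders rather than a real MolNet dataset — but it shows the general shape of a loader class and its `load_dataset` wrapper. Consult the `_QM9Loader`_ and the molnet_loader module for the authoritative pattern.

::

    import os
    import deepchem as dc
    from deepchem.data import Dataset
    from deepchem.molnet.load_function.molnet_loader import _MolnetLoader

    MYDATA_URL = "https://example.com/mydata.csv"  # placeholder URL
    MYDATA_TASKS = ["task1", "task2"]  # placeholder task names


    class _MyDataLoader(_MolnetLoader):

        def create_dataset(self) -> Dataset:
            dataset_file = os.path.join(self.data_dir, "mydata.csv")
            if not os.path.exists(dataset_file):
                dc.utils.data_utils.download_url(url=MYDATA_URL,
                                                 dest_dir=self.data_dir)
            loader = dc.data.CSVLoader(tasks=self.tasks,
                                       feature_field="smiles",
                                       featurizer=self.featurizer)
            return loader.create_dataset(dataset_file, shard_size=8192)


    def load_mydata(featurizer='ECFP',
                    splitter='scaffold',
                    transformers=['balancing'],
                    reload=True,
                    data_dir=None,
                    save_dir=None,
                    **kwargs):
        """Load the hypothetical MyData dataset."""
        loader = _MyDataLoader(featurizer, splitter, transformers, MYDATA_TASKS,
                               data_dir, save_dir, **kwargs)
        return loader.load_dataset('mydata', reload)
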
Example Usage
-------------
Below is an example of how to load a MoleculeNet dataset and featurizer. This approach will work for any dataset in MoleculeNet by changing the load function and featurizer. For more details on the featurizers, see the `Featurizers` section.
::
import deepchem as dc
from deepchem.feat.molecule_featurizers import MolGraphConvFeaturizer
featurizer = MolGraphConvFeaturizer(use_edges=True)
dataset_dc = dc.molnet.load_qm9(featurizer=featurizer)
tasks, dataset, transformers = dataset_dc
train, valid, test = dataset
x,y,w,ids = train.X, train.y, train.w, train.ids
Note that the "w" matrix represents the weight of each sample. Some assays may have missing values, in which case the weight is 0. Otherwise, the weight is 1.
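For instance, the weights can be used to mask out missing labels when computing a per-task statistic. A minimal sketch, continuing from the arrays loaded above:

::

    import numpy as np

    task = 0
    mask = w[:, task] != 0           # samples actually measured for this task
    observed_labels = y[mask, task]
    print("fraction measured:", np.mean(mask))
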
Additionally, the environment variable ``DEEPCHEM_DATA_DIR`` can be set like ``os.environ['DEEPCHEM_DATA_DIR'] = path/to/store/featurized/dataset``. When the ``DEEPCHEM_DATA_DIR`` environment variable is set, molnet loader stores the featurized dataset in the specified directory and when the dataset has to be reloaded the next time, it will be fetched from the data directory directly rather than featurizing the raw dataset from scratch.
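A minimal sketch of that workflow is shown below (the directory path is illustrative; any writable directory works):

::

    import os
    os.environ['DEEPCHEM_DATA_DIR'] = '/tmp/deepchem_data'  # illustrative path

    import deepchem as dc
    tasks, datasets, transformers = dc.molnet.load_tox21(featurizer='ECFP')
    # The featurized dataset is cached under /tmp/deepchem_data; calling
    # load_tox21 again with the same arguments reloads it from disk instead
    # of re-featurizing from scratch.
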
BACE Dataset
------------
.. autofunction:: deepchem.molnet.load_bace_classification
.. autofunction:: deepchem.molnet.load_bace_regression
BBBC Datasets
-------------
.. autofunction:: deepchem.molnet.load_bbbc001
.. autofunction:: deepchem.molnet.load_bbbc002
BBBP Datasets
-------------
BBBP stands for Blood-Brain-Barrier Penetration
.. autofunction:: deepchem.molnet.load_bbbp
Cell Counting Datasets
----------------------
.. autofunction:: deepchem.molnet.load_cell_counting
Chembl Datasets
---------------
.. autofunction:: deepchem.molnet.load_chembl
Chembl25 Datasets
-----------------
.. autofunction:: deepchem.molnet.load_chembl25
Clearance Datasets
------------------
.. autofunction:: deepchem.molnet.load_clearance
Clintox Datasets
----------------
.. autofunction:: deepchem.molnet.load_clintox
Delaney Datasets
----------------
.. autofunction:: deepchem.molnet.load_delaney
Factors Datasets
----------------
.. autofunction:: deepchem.molnet.load_factors
Freesolv Dataset
----------------------
.. autofunction:: deepchem.molnet.load_freesolv
HIV Datasets
------------
.. autofunction:: deepchem.molnet.load_hiv
HOPV Datasets
-------------
HOPV stands for the Harvard Organic Photovoltaic Dataset.
.. autofunction:: deepchem.molnet.load_hopv
HPPB Datasets
-------------
.. autofunction:: deepchem.molnet.load_hppb
KAGGLE Datasets
---------------
.. autofunction:: deepchem.molnet.load_kaggle
Kinase Datasets
---------------
.. autofunction:: deepchem.molnet.load_kinase
Lipo Datasets
-------------
.. autofunction:: deepchem.molnet.load_lipo
Materials Datasets
------------------
Materials datasets include inorganic crystal structures, chemical
compositions, and target properties like formation energies and band
gaps. Machine learning problems in materials science commonly include
predicting the value of a continuous (regression) or categorical
(classification) property of a material based on its chemical composition
or crystal structure. "Inverse design" is also of great interest, in which
ML methods generate crystal structures that have a desired property.
Other areas where ML is applicable in materials include: discovering new
or modified phenomenological models that describe material behavior.
.. autofunction:: deepchem.molnet.load_bandgap
.. autofunction:: deepchem.molnet.load_perovskite
.. autofunction:: deepchem.molnet.load_mp_formation_energy
.. autofunction:: deepchem.molnet.load_mp_metallicity
MUV Datasets
------------
.. autofunction:: deepchem.molnet.load_muv
NCI Datasets
------------
.. autofunction:: deepchem.molnet.load_nci
PCBA Datasets
-------------
.. autofunction:: deepchem.molnet.load_pcba
PDBBIND Datasets
----------------
.. autofunction:: deepchem.molnet.load_pdbbind
PPB Datasets
------------
.. autofunction:: deepchem.molnet.load_ppb
QM7 Datasets
------------
.. autofunction:: deepchem.molnet.load_qm7
QM8 Datasets
------------
.. autofunction:: deepchem.molnet.load_qm8
QM9 Datasets
------------
.. autofunction:: deepchem.molnet.load_qm9
SAMPL Datasets
--------------
.. autofunction:: deepchem.molnet.load_sampl
SIDER Datasets
--------------
.. autofunction:: deepchem.molnet.load_sider
Thermosol Datasets
------------------
.. autofunction:: deepchem.molnet.load_thermosol
Tox21 Datasets
--------------
.. autofunction:: deepchem.molnet.load_tox21
Toxcast Datasets
----------------
.. autofunction:: deepchem.molnet.load_toxcast
USPTO Datasets
--------------
.. autofunction:: deepchem.molnet.load_uspto
UV Datasets
-----------
.. autofunction:: deepchem.molnet.load_uv
.. _`datasets already available in MolNet`: https://moleculenet.org/datasets-1
.. _`Contribution guidelines`: https://github.com/deepchem/deepchem/blob/master/CONTRIBUTING.md
.. _`issue`: https://github.com/deepchem/deepchem/issues
.. _`_QM9Loader`: https://github.com/deepchem/deepchem/blob/master/deepchem/molnet/load_function/qm9_datasets.py
.. _`deepchem.molnet.load_function.molnet_loader._MolnetLoader`: https://github.com/deepchem/deepchem/blob/master/deepchem/molnet/load_function/molnet_loader.py#L82
.. _`deepchem.molnet.load_function`: https://github.com/deepchem/deepchem/tree/master/deepchem/molnet/load_function
.. _`deepchem.molnet.load_function.load_dataset_template`: https://github.com/deepchem/deepchem/blob/master/deepchem/molnet/load_function/load_dataset_template.py
.. _`deepchem.molnet.defaults`: https://github.com/deepchem/deepchem/tree/master/deepchem/molnet/defaults.py
.. _`deepchem.molnet.__init__.py`: https://github.com/deepchem/deepchem/blob/master/deepchem/molnet/__init__.py
.. _`MoleculeNet docs`: https://github.com/deepchem/deepchem/blob/master/docs/source/api_reference/moleculenet.rst
.. _`template`: https://github.com/deepchem/deepchem/blob/master/.github/MOLNET_PR_TEMPLATE.md
ZINC15 Datasets
---------------
.. autofunction:: deepchem.molnet.load_zinc15
Platinum Adsorption Dataset
---------------------------
.. autofunction:: deepchem.molnet.load_Platinum_Adsorption
<file_sep>"""
Script that trains Atomic Conv models on PDBbind dataset.
"""
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
__author__ = "<NAME>"
__copyright__ = "Copyright 2016, Stanford University"
__license__ = "MIT"
import deepchem as dc
import numpy as np
from deepchem.molnet import load_pdbbind
from deepchem.feat import AtomicConvFeaturizer
# For stable runs
np.random.seed(123)
frag1_num_atoms = 70 # for ligand atoms
frag2_num_atoms = 24000 # for protein atoms
complex_num_atoms = frag1_num_atoms + frag2_num_atoms
max_num_neighbors = 12
acf = AtomicConvFeaturizer(
frag1_num_atoms=frag1_num_atoms,
frag2_num_atoms=frag2_num_atoms,
complex_num_atoms=complex_num_atoms,
max_num_neighbors=max_num_neighbors,
neighbor_cutoff=4)
pdbbind_tasks, pdbbind_datasets, transformers = load_pdbbind(
featurizer=acf, split="random", subset="core")
train_dataset, valid_dataset, test_dataset = pdbbind_datasets
metric = dc.metrics.Metric(dc.metrics.pearson_r2_score)
model = dc.models.AtomicConvModel(
n_tasks=len(pdbbind_tasks),
frag1_num_atoms=frag1_num_atoms,
frag2_num_atoms=frag2_num_atoms,
complex_num_atoms=complex_num_atoms)
# Fit trained model
print("Fitting model on train dataset")
model.fit(train_dataset)
# TODO The line below should be fixed
# See: https://github.com/deepchem/deepchem/issues/2373
# model.save()
print("Evaluating model")
train_scores = model.evaluate(train_dataset, [metric], transformers)
valid_scores = model.evaluate(valid_dataset, [metric], transformers)
print("Train scores")
print(train_scores)
print("Validation scores")
print(valid_scores)
<file_sep># The number of elements to print for dataset ids/tasks
_print_threshold = 10
def get_print_threshold() -> int:
"""Return the printing threshold for datasets.
The print threshold is the number of elements from ids/tasks to
print when printing representations of `Dataset` objects.
Returns
-------
threshold: int
Number of elements that will be printed
"""
return _print_threshold
def set_print_threshold(threshold: int):
"""Set print threshold
The print threshold is the number of elements from ids/tasks to
print when printing representations of `Dataset` objects.
Parameters
----------
threshold: int
Number of elements to print.
"""
global _print_threshold
_print_threshold = threshold
# If a dataset contains more than this number of elements, it won't
# print any dataset ids
_max_print_size = 1000
def get_max_print_size() -> int:
"""Return the max print size for a dataset.
If a dataset is large, printing `self.ids` as part of a string
representation can be very slow. This field controls the maximum
size for a dataset before ids are no longer printed.
Returns
-------
max_print_size: int
Maximum length of a dataset for ids to be printed in string
representation.
"""
return _max_print_size
def set_max_print_size(max_print_size: int):
"""Set max_print_size
If a dataset is large, printing `self.ids` as part of a string
representation can be very slow. This field controls the maximum
size for a dataset before ids are no longer printed.
Parameters
----------
max_print_size: int
Maximum length of a dataset for ids to be printed in string
representation.
"""
global _max_print_size
_max_print_size = max_print_size
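

# A minimal usage sketch (illustrative only) exercising the getters and
# setters defined above when this module is run directly.
if __name__ == "__main__":
    set_print_threshold(20)
    assert get_print_threshold() == 20
    set_max_print_size(500)
    assert get_max_print_size() == 500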
<file_sep># Written by <NAME> and <NAME>
# Modified by <NAME> to make python2 compatible
import h5py
import numpy as np
import platform
import os
PY_VERSION = int(platform.python_version().split('.')[0]) >= 3  # True on Python 3 and later
class datapacker(object):
def __init__(self, store_file, mode='w-', complib='gzip', complevel=6):
"""Wrapper to store arrays within HFD5 file
"""
# opening file
self.store = h5py.File(store_file, mode=mode)
self.clib = complib
self.clev = complevel
def store_data(self, store_loc, **kwargs):
"""Put arrays to store
"""
#print(store_loc)
g = self.store.create_group(store_loc)
for k, v, in kwargs.items():
#print(type(v[0]))
#print(k)
if type(v) == list:
if len(v) != 0:
if type(v[0]) is np.str_ or type(v[0]) is str:
v = [a.encode('utf8') for a in v]
g.create_dataset(
k, data=v, compression=self.clib, compression_opts=self.clev)
def cleanup(self):
"""Wrapper to close HDF5 file
"""
self.store.close()
class anidataloader(object):
''' Constructor '''
def __init__(self, store_file):
if not os.path.exists(store_file):
exit('Error: file not found - ' + store_file)
self.store = h5py.File(store_file, 'r')  # open read-only
''' Group recursive iterator (iterate through all groups in all branches and return datasets in dicts) '''
def h5py_dataset_iterator(self, g, prefix=''):
for key in g.keys():
item = g[key]
path = '{}/{}'.format(prefix, key)
keys = [i for i in item.keys()]
if isinstance(item[keys[0]], h5py.Dataset): # test for dataset
data = {'path': path}
for k in keys:
if not isinstance(item[k], h5py.Group):
dataset = np.array(item[k][()])  # .value was removed in h5py 3.x
if type(dataset) is np.ndarray:
if dataset.size != 0:
if type(dataset[0]) is np.bytes_:
dataset = [a.decode('ascii') for a in dataset]
data.update({k: dataset})
yield data
else: # test for group (go down)
for s in self.h5py_dataset_iterator(item, path):
yield s
''' Default class iterator (iterate through all data) '''
def __iter__(self):
for data in self.h5py_dataset_iterator(self.store):
yield data
''' Returns a list of all groups in the file '''
def get_group_list(self):
return [g for g in self.store.values()]
''' Allows iteration through the data in a given group '''
def iter_group(self, g):
for data in self.h5py_dataset_iterator(g):
yield data
''' Returns the requested dataset '''
def get_data(self, path, prefix=''):
item = self.store[path]
path = '{}/{}'.format(prefix, path)
keys = [i for i in item.keys()]
data = {'path': path}
# print(path)
for k in keys:
if not isinstance(item[k], h5py.Group):
dataset = np.array(item[k][()])  # .value was removed in h5py 3.x
if type(dataset) is np.ndarray:
if dataset.size != 0:
if type(dataset[0]) is np.bytes_:
dataset = [a.decode('ascii') for a in dataset]
data.update({k: dataset})
return data
''' Returns the number of groups '''
def group_size(self):
return len(self.get_group_list())
def size(self):
count = 0
for g in self.store.values():
count = count + len(g.items())
return count
''' Close the HDF5 file '''
def cleanup(self):
self.store.close()
if __name__ == "__main__":
base_dir = os.environ["ROITBERG_ANI"]
# Number of conformations in each file increases exponentially.
# Start with a smaller dataset before continuing. Use all of them
# for production
hdf5files = [
'ani_gdb_s01.h5', 'ani_gdb_s02.h5', 'ani_gdb_s03.h5', 'ani_gdb_s04.h5',
'ani_gdb_s05.h5', 'ani_gdb_s06.h5', 'ani_gdb_s07.h5', 'ani_gdb_s08.h5'
]
hdf5files = [os.path.join(base_dir, f) for f in hdf5files]
for hdf5file in hdf5files:
print("processing", hdf5file)
adl = anidataloader(hdf5file)
for data in adl:
# Extract the data
P = data['path']
R = data['coordinates']
E = data['energies']
S = data['species']
smi = data['smiles']
<file_sep>"""
Sample supports from datasets.
"""
import logging
import time
import numpy as np
from deepchem.data import NumpyDataset
logger = logging.getLogger(__name__)
def remove_dead_examples(dataset):
"""Removes compounds with no weight.
Parameters
----------
dataset: dc.data.Dataset
Source dataset.
"""
w = dataset.w
nonzero_inds = np.nonzero(np.sum(w, axis=1))
# Keep only the examples with nonzero weight
X = dataset.X[nonzero_inds]
y = dataset.y[nonzero_inds]
w = dataset.w[nonzero_inds]
ids = dataset.ids[nonzero_inds]
return NumpyDataset(X, y, w, ids)
def dataset_difference(dataset, remove):
"""Removes the compounds in remove from dataset.
Parameters
----------
dataset: dc.data.Dataset
Source dataset.
remove: dc.data.Dataset
Dataset whose overlap will be removed.
"""
remove_ids = set(remove.ids)
keep_inds = [
ind for ind in range(len(dataset)) if dataset.ids[ind] not in remove_ids
]
# Keep only the examples that do not appear in `remove`
X = dataset.X[keep_inds]
y = dataset.y[keep_inds]
w = dataset.w[keep_inds]
ids = dataset.ids[keep_inds]
return NumpyDataset(X, y, w, ids)
def get_task_dataset_minus_support(dataset, support, task):
"""Gets data for specified task, minus support points.
Useful for evaluating model performance once trained (so that
test compounds can be ensured distinct from support.)
Parameters
----------
dataset: dc.data.Dataset
Source dataset.
support: dc.data.Dataset
The support dataset
task: int
Task number of task to select.
"""
support_ids = set(support.ids)
non_support_inds = [
ind for ind in range(len(dataset))
if dataset.ids[ind] not in support_ids
]
# Remove support indices
X = dataset.X[non_support_inds]
y = dataset.y[non_support_inds]
w = dataset.w[non_support_inds]
ids = dataset.ids[non_support_inds]
# Get task specific entries
w_task = w[:, task]
X_task = X[w_task != 0]
y_task = np.expand_dims(y[w_task != 0, task], 1)
ids_task = ids[w_task != 0]
# Now just get weights for this task
w_task = np.expand_dims(w[w_task != 0, task], 1)
return NumpyDataset(X_task, y_task, w_task, ids_task)
def get_task_dataset(dataset, task):
"""Selects out entries for a particular task."""
X, y, w, ids = dataset.X, dataset.y, dataset.w, dataset.ids
# Get task specific entries
w_task = w[:, task]
X_task = X[w_task != 0]
y_task = np.expand_dims(y[w_task != 0, task], 1)
ids_task = ids[w_task != 0]
# Now just get weights for this task
w_task = np.expand_dims(w[w_task != 0, task], 1)
return NumpyDataset(X_task, y_task, w_task, ids_task)
def get_task_test(dataset, n_episodes, n_test, task, log_every_n=50):
"""Gets test set from specified task.
Parameters
----------
dataset: dc.data.Dataset
Dataset from which to sample.
n_episodes: int
Number of episodes to sample test sets for.
n_test: int
Number of compounds per test set.
log_every_n: int, optional
Prints every log_every_n supports sampled.
"""
w_task = dataset.w[:, task]
X_task = dataset.X[w_task != 0]
y_task = dataset.y[w_task != 0]
ids_task = dataset.ids[w_task != 0]
# Now just get weights for this task
w_task = dataset.w[w_task != 0]
n_samples = len(X_task)
ids = np.random.choice(np.arange(n_samples), (n_episodes, n_test))
tests = []
for episode in range(n_episodes):
if episode % log_every_n == 0:
logger.info("Sampling test %d" % episode)
inds = ids[episode]
X_batch = X_task[inds]
y_batch = np.squeeze(y_task[inds, task])
w_batch = np.squeeze(w_task[inds, task])
ids_batch = ids_task[inds]
tests.append(NumpyDataset(X_batch, y_batch, w_batch, ids_batch))
return tests
def get_single_task_test(dataset, batch_size, task, replace=True):
"""Gets test set from specified task.
Samples random subset of size batch_size from specified task of dataset.
Ensures that sampled points have measurements for this task.
"""
w_task = dataset.w[:, task]
X_task = dataset.X[w_task != 0]
y_task = dataset.y[w_task != 0]
ids_task = dataset.ids[w_task != 0]
# Now just get weights for this task
w_task = dataset.w[w_task != 0]
inds = np.random.choice(np.arange(len(X_task)), batch_size, replace=replace)
X_batch = X_task[inds]
y_batch = np.squeeze(y_task[inds, task])
w_batch = np.squeeze(w_task[inds, task])
ids_batch = ids_task[inds]
return NumpyDataset(X_batch, y_batch, w_batch, ids_batch)
def get_single_task_support(dataset, n_pos, n_neg, task, replace=True):
"""Generates one support set purely for specified task.
Parameters
----------
datasets: dc.data.Dataset
Dataset from which supports are sampled.
n_pos: int
Number of positive samples in support.
n_neg: int
Number of negative samples in support.
task: int
Index of current task.
replace: bool, optional
Whether or not to use replacement when sampling supports.
Returns
-------
list
List of NumpyDatasets, each of which is a support set.
"""
return get_task_support(dataset, 1, n_pos, n_neg, task)[0]
def get_task_support(dataset, n_episodes, n_pos, n_neg, task, log_every_n=50):
"""Generates one support set purely for specified task.
Parameters
----------
datasets: dc.data.Dataset
Dataset from which supports are sampled.
n_episodes: int
Number of episodes for which supports have to be sampled from this task.
n_pos: int
Number of positive samples in support.
n_neg: int
Number of negative samples in support.
task: int
Index of current task.
log_every_n: int, optional
Prints every log_every_n supports sampled.
Returns
-------
list
List of NumpyDatasets, each of which is a support set.
"""
y_task = dataset.y[:, task]
w_task = dataset.w[:, task]
# Split data into pos and neg lists.
pos_mols = np.where(np.logical_and(y_task == 1, w_task != 0))[0]
neg_mols = np.where(np.logical_and(y_task == 0, w_task != 0))[0]
supports = []
for episode in range(n_episodes):
if episode % log_every_n == 0:
logger.info("Sampling support %d" % episode)
# No replacement allowed for supports
pos_ids = np.random.choice(len(pos_mols), (n_pos,), replace=False)
neg_ids = np.random.choice(len(neg_mols), (n_neg,), replace=False)
pos_inds, neg_inds = pos_mols[pos_ids], neg_mols[neg_ids]
# Handle one-d vs. non one-d feature matrices
one_dimensional_features = (len(dataset.X.shape) == 1)
if not one_dimensional_features:
X = np.vstack([dataset.X[pos_inds], dataset.X[neg_inds]])
else:
X = np.concatenate([dataset.X[pos_inds], dataset.X[neg_inds]])
y = np.expand_dims(
np.concatenate(
[dataset.y[pos_inds, task], dataset.y[neg_inds, task]]), 1)
w = np.expand_dims(
np.concatenate(
[dataset.w[pos_inds, task], dataset.w[neg_inds, task]]), 1)
ids = np.concatenate([dataset.ids[pos_inds], dataset.ids[neg_inds]])
supports.append(NumpyDataset(X, y, w, ids))
return supports
class EpisodeGenerator(object):
"""Generates (support, test) pairs for episodic training.
Precomputes all (support, test) pairs at construction. Allows to reduce
overhead from computation.
"""
def __init__(self, dataset, n_pos, n_neg, n_test, n_episodes_per_task):
"""
Parameters
----------
dataset: dc.data.Dataset
Holds dataset from which support sets will be sampled.
n_pos: int
Number of positive samples
n_neg: int
Number of negative samples.
n_test: int
Number of samples in test set.
n_episodes_per_task: int
Number of (support, task) pairs to sample per task.
replace: bool
Whether to use sampling with or without replacement.
"""
time_start = time.time()
self.tasks = range(len(dataset.get_task_names()))
self.n_tasks = len(self.tasks)
self.n_episodes_per_task = n_episodes_per_task
self.dataset = dataset
self.n_pos = n_pos
self.n_neg = n_neg
self.task_episodes = {}
for task in range(self.n_tasks):
task_supports = get_task_support(self.dataset, n_episodes_per_task,
n_pos, n_neg, task)
task_tests = get_task_test(self.dataset, n_episodes_per_task,
n_test, task)
self.task_episodes[task] = (task_supports, task_tests)
# Init the iterator
self.perm_tasks = np.random.permutation(self.tasks)
# Set initial iterator state
self.task_num = 0
self.trial_num = 0
time_end = time.time()
logger.info("Constructing EpisodeGenerator took %s seconds" %
str(time_end - time_start))
def __iter__(self):
return self
def next(self):
"""Sample next (support, test) pair.
Return from internal storage.
"""
if self.trial_num == self.n_episodes_per_task:
raise StopIteration
else:
task = self.perm_tasks[self.task_num] # Get id from permutation
# support = self.supports[task][self.trial_num]
task_supports, task_tests = self.task_episodes[task]
support, test = (task_supports[self.trial_num],
task_tests[self.trial_num])
# Increment and update logic
self.task_num += 1
if self.task_num == self.n_tasks:
self.task_num = 0 # Reset
self.perm_tasks = np.random.permutation(
self.tasks) # Permute again
self.trial_num += 1 # Upgrade trial index
return (task, support, test)
__next__ = next # Python 3.X compatibility
class SupportGenerator(object):
"""Generate support sets from a dataset.
Iterates over tasks and trials. For each trial, picks one support from
each task, and returns in a randomized order
"""
def __init__(self, dataset, n_pos, n_neg, n_trials):
"""
Parameters
----------
dataset: dc.data.Dataset
Holds dataset from which support sets will be sampled.
n_pos: int
Number of positive samples
n_neg: int
Number of negative samples.
n_trials: int
Number of passes over dataset to make. In total, n_tasks*n_trials
support sets will be sampled by algorithm.
"""
self.tasks = range(len(dataset.get_task_names()))
self.n_tasks = len(self.tasks)
self.n_trials = n_trials
self.dataset = dataset
self.n_pos = n_pos
self.n_neg = n_neg
# Init the iterator
self.perm_tasks = np.random.permutation(self.tasks)
# Set initial iterator state
self.task_num = 0
self.trial_num = 0
def __iter__(self):
return self
def next(self):
"""Sample next support.
Supports are sampled from the tasks in a random order. Each support is
drawn entirely from within one task.
"""
if self.trial_num == self.n_trials:
raise StopIteration
else:
task = self.perm_tasks[self.task_num] # Get id from permutation
# support = self.supports[task][self.trial_num]
support = get_single_task_support(self.dataset,
n_pos=self.n_pos,
n_neg=self.n_neg,
task=task,
replace=False)
# Increment and update logic
self.task_num += 1
if self.task_num == self.n_tasks:
self.task_num = 0 # Reset
self.perm_tasks = np.random.permutation(
self.tasks) # Permute again
self.trial_num += 1 # Upgrade trial index
return (task, support)
__next__ = next # Python 3.X compatibility
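

# A minimal, self-contained sketch (illustrative only) of sampling supports
# from a toy single-task dataset using the utilities defined above.
if __name__ == "__main__":
    n_samples, n_features = 20, 8
    X = np.random.rand(n_samples, n_features)
    # First half positives, second half negatives; a single binary task.
    y = np.concatenate([np.ones((10, 1)), np.zeros((10, 1))])
    w = np.ones((n_samples, 1))
    ids = np.arange(n_samples)
    dataset = NumpyDataset(X, y, w, ids)
    # Three passes over the single task, 2 positives and 2 negatives per support.
    for task, support in SupportGenerator(dataset, n_pos=2, n_neg=2, n_trials=3):
        print("task %d: sampled support of size %d" % (task, len(support)))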
<file_sep>import deepchem as dc
import numpy as np
def test_inmemory_features():
smiles = ["C", "CC", "CCC", "CCCC"]
featurizer = dc.feat.CircularFingerprint(size=1024)
loader = dc.data.InMemoryLoader(tasks=["task1"], featurizer=featurizer)
dataset = loader.create_dataset(smiles, shard_size=2)
assert len(dataset) == 4
assert dataset.X.shape == (4, 1024)
assert dataset.get_number_shards() == 2
assert (dataset.ids == np.arange(4)).all()
def test_inmemory_features_and_labels():
smiles = ["C", "CC", "CCC", "CCCC"]
labels = [1, 0, 1, 0]
featurizer = dc.feat.CircularFingerprint(size=1024)
loader = dc.data.InMemoryLoader(tasks=["task1"], featurizer=featurizer)
dataset = loader.create_dataset(zip(smiles, labels), shard_size=2)
assert len(dataset) == 4
assert dataset.X.shape == (4, 1024)
assert (dataset.y == np.array(labels)).all()
assert dataset.get_number_shards() == 2
assert (dataset.ids == np.arange(4)).all()
def test_inmemory_features_and_labels_and_weights():
smiles = ["C", "CC", "CCC", "CCCC"]
labels = [1, 0, 1, 0]
weights = [1.5, 1.5, 1, 1]
featurizer = dc.feat.CircularFingerprint(size=1024)
loader = dc.data.InMemoryLoader(tasks=["task1"], featurizer=featurizer)
dataset = loader.create_dataset(zip(smiles, labels, weights), shard_size=2)
assert len(dataset) == 4
assert dataset.X.shape == (4, 1024)
assert (dataset.y == np.array(labels)).all()
assert (dataset.w == np.array(weights)).all()
assert (dataset.ids == np.arange(4)).all()
assert dataset.get_number_shards() == 2
def test_inmemory_features_and_labels_and_weights_and_ids():
smiles = ["C", "CC", "CCC", "CCCC"]
labels = [1, 0, 1, 0]
weights = [1.5, 1.5, 1, 1]
ids = smiles
featurizer = dc.feat.CircularFingerprint(size=1024)
loader = dc.data.InMemoryLoader(tasks=["task1"], featurizer=featurizer)
dataset = loader.create_dataset(zip(smiles, labels, weights, ids),
shard_size=2)
assert len(dataset) == 4
assert dataset.X.shape == (4, 1024)
assert (dataset.y == np.array(labels)).all()
assert (dataset.w == np.array(weights)).all()
assert (dataset.ids == np.array(ids)).all()
assert dataset.get_number_shards() == 2
<file_sep>"""Interface for reinforcement learning."""
try:
from deepchem.rl.a2c import A2C # noqa: F401
from deepchem.rl.ppo import PPO # noqa: F401
except ModuleNotFoundError:
pass
class Environment(object):
"""An environment in which an actor performs actions to accomplish a task.
An environment has a current state, which is represented as either a single NumPy
array, or optionally a list of NumPy arrays. When an action is taken, that causes
the state to be updated. The environment also computes a reward for each action,
and reports when the task has been terminated (meaning that no more actions may
be taken).
Two types of actions are supported. For environments with discrete action spaces,
the action is an integer specifying the index of the action to perform (out of a
fixed list of possible actions). For environments with continuous action spaces,
the action is a NumPy array.
Environment objects should be written to support pickle and deepcopy operations.
Many algorithms involve creating multiple copies of the Environment, possibly
running in different processes or even on different computers.
"""
def __init__(self,
state_shape,
n_actions=None,
state_dtype=None,
action_shape=None):
"""Subclasses should call the superclass constructor in addition to doing their own initialization.
A value should be provided for either n_actions (for discrete action spaces)
or action_shape (for continuous action spaces), but not both.
Parameters
----------
state_shape: tuple or list of tuples
the shape(s) of the array(s) making up the state
n_actions: int
the number of discrete actions that can be performed. If the action space
is continuous, this should be None.
state_dtype: dtype or list of dtypes
the type(s) of the array(s) making up the state. If this is None, all
arrays are assumed to be float32.
action_shape: tuple
the shape of the array describing an action. If the action space
is discrete, this should be None.
"""
self._state_shape = state_shape
self._n_actions = n_actions
self._action_shape = action_shape
self._state = None
self._terminated = None
if state_dtype is None:
# Assume all arrays are float32.
import numpy
try:
from collections.abc import Sequence as SequenceCollection
except:
from collections import Sequence as SequenceCollection
if isinstance(state_shape[0], SequenceCollection):
self._state_dtype = [numpy.float32] * len(state_shape)
else:
self._state_dtype = numpy.float32
else:
self._state_dtype = state_dtype
@property
def state(self):
"""The current state of the environment, represented as either a NumPy array or list of arrays.
If reset() has not yet been called at least once, this is undefined.
"""
return self._state
@property
def terminated(self):
"""Whether the task has reached its end.
If reset() has not yet been called at least once, this is undefined.
"""
return self._terminated
@property
def state_shape(self):
"""The shape of the arrays that describe a state.
If the state is a single array, this returns a tuple giving the shape of that array.
If the state is a list of arrays, this returns a list of tuples where each tuple is
the shape of one array.
"""
return self._state_shape
@property
def state_dtype(self):
"""The dtypes of the arrays that describe a state.
If the state is a single array, this returns the dtype of that array. If the state
is a list of arrays, this returns a list containing the dtypes of the arrays.
"""
return self._state_dtype
@property
def n_actions(self):
"""The number of possible actions that can be performed in this Environment.
If the environment uses a continuous action space, this returns None.
"""
return self._n_actions
@property
def action_shape(self):
"""The expected shape of NumPy arrays representing actions.
If the environment uses a discrete action space, this returns None.
"""
return self._action_shape
def reset(self):
"""Initialize the environment in preparation for doing calculations with it.
This must be called before calling step() or querying the state. You can call it
again later to reset the environment back to its original state.
"""
raise NotImplementedError("Subclasses must implement this")
def step(self, action):
"""Take a time step by performing an action.
This causes the "state" and "terminated" properties to be updated.
Parameters
----------
action: object
an object describing the action to take
Returns
-------
the reward earned by taking the action, represented as a floating point number
(higher values are better)
"""
raise NotImplementedError("Subclasses must implement this")
class GymEnvironment(Environment):
"""This is a convenience class for working with environments from OpenAI Gym."""
def __init__(self, name):
"""Create an Environment wrapping the OpenAI Gym environment with a specified name."""
import gym
self.env = gym.make(name)
self.name = name
space = self.env.action_space
if 'n' in dir(space):
super(GymEnvironment,
self).__init__(self.env.observation_space.shape, space.n)
else:
super(GymEnvironment,
self).__init__(self.env.observation_space.shape,
action_shape=space.shape)
def reset(self):
self._state = self.env.reset()
self._terminated = False
def step(self, action):
self._state, reward, self._terminated, info = self.env.step(action)
return reward
def __deepcopy__(self, memo):
return GymEnvironment(self.name)
class Policy(object):
"""A policy for taking actions within an environment.
A policy is defined by a tf.keras.Model that takes the current state as input
and performs the necessary calculations. There are many algorithms for
reinforcement learning, and they differ in what values they require a policy to
compute. That makes it impossible to define a single interface allowing any
policy to be optimized with any algorithm. Instead, this interface just tries
to be as flexible and generic as possible. Each algorithm must document what
values it expects the model to output.
Special handling is needed for models that include recurrent layers. In that
case, the model has its own internal state which the learning algorithm must
be able to specify and query. To support this, the Policy must do three things:
1. The Model must take additional inputs that specify the initial states of
all its recurrent layers. These will be appended to the list of arrays
specifying the environment state.
2. The Model must also return the final states of all its recurrent layers as
outputs.
3. The constructor argument rnn_initial_states must be specified to define
the states to use for the Model's recurrent layers at the start of a new
rollout.
Policy objects should be written to support pickling. Many algorithms involve
creating multiple copies of the Policy, possibly running in different processes
or even on different computers.
"""
def __init__(self, output_names, rnn_initial_states=[]):
"""Subclasses should call the superclass constructor in addition to doing
their own initialization.
Parameters
----------
output_names: list of strings
the names of the Model's outputs, in order. It is up to each reinforcement
learning algorithm to document what outputs it expects policies to compute.
Outputs that return the final states of recurrent layers should have the
name 'rnn_state'.
rnn_initial_states: list of NumPy arrays
the initial states of the Model's recurrent layers at the start of a new
rollout
"""
self.output_names = output_names
self.rnn_initial_states = rnn_initial_states
def create_model(self, **kwargs):
"""Construct and return a tf.keras.Model that computes the policy.
The inputs to the model consist of the arrays representing the current state
of the environment, followed by the initial states for all recurrent layers.
Depending on the algorithm being used, other inputs might get passed as
well. It is up to each algorithm to document that.
"""
raise NotImplementedError("Subclasses must implement this")
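

# A minimal sketch (illustrative only, not part of the library API) of a
# Policy subclass for a discrete-action environment. The output names
# 'action_prob' and 'value' follow the convention used by algorithms such as
# A2C; check the documentation of the algorithm you use for the outputs it
# actually expects.
if __name__ == "__main__":
    import numpy as np
    import tensorflow as tf

    class _ExamplePolicy(Policy):

        def __init__(self, state_size, n_actions):
            super(_ExamplePolicy, self).__init__(['action_prob', 'value'])
            self._state_size = state_size
            self._n_actions = n_actions

        def create_model(self, **kwargs):
            state = tf.keras.Input(shape=(self._state_size,))
            hidden = tf.keras.layers.Dense(16, activation='relu')(state)
            action_prob = tf.keras.layers.Dense(self._n_actions,
                                                activation='softmax')(hidden)
            value = tf.keras.layers.Dense(1)(hidden)
            return tf.keras.Model(inputs=state, outputs=[action_prob, value])

    policy = _ExamplePolicy(state_size=4, n_actions=2)
    model = policy.create_model()
    action_prob, value = model(np.zeros((1, 4), dtype=np.float32))
    print(action_prob.shape, value.shape)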
<file_sep>import pytest
import tempfile
from flaky import flaky
import numpy as np
import deepchem as dc
from deepchem.feat import MolGraphConvFeaturizer
from deepchem.models.tests.test_graph_models import get_dataset
try:
import dgl # noqa: F401
import dgllife # noqa: F401
import torch # noqa: F401
from deepchem.models import GCNModel
has_torch_and_dgl = True
except:
has_torch_and_dgl = False
@pytest.mark.torch
def test_gcn_regression():
# load datasets
featurizer = MolGraphConvFeaturizer()
tasks, dataset, transformers, metric = get_dataset('regression',
featurizer=featurizer)
# initialize models
n_tasks = len(tasks)
model = GCNModel(mode='regression',
n_tasks=n_tasks,
number_atom_features=30,
batch_size=10,
learning_rate=0.001)
# overfit test
model.fit(dataset, nb_epoch=300)
scores = model.evaluate(dataset, [metric], transformers)
assert scores['mean_absolute_error'] < 0.5
# test on a small MoleculeNet dataset
from deepchem.molnet import load_delaney
tasks, all_dataset, transformers = load_delaney(featurizer=featurizer)
train_set, _, _ = all_dataset
model = dc.models.GCNModel(n_tasks=len(tasks),
graph_conv_layers=[2],
residual=False,
predictor_hidden_feats=2)
model.fit(train_set, nb_epoch=1)
@flaky
@pytest.mark.torch
def test_gcn_classification():
# load datasets
featurizer = MolGraphConvFeaturizer()
tasks, dataset, transformers, metric = get_dataset('classification',
featurizer=featurizer)
# initialize models
n_tasks = len(tasks)
model = GCNModel(mode='classification',
n_tasks=n_tasks,
number_atom_features=30,
batch_size=10,
learning_rate=0.0003)
# overfit test
model.fit(dataset, nb_epoch=70)
scores = model.evaluate(dataset, [metric], transformers)
assert scores['mean-roc_auc_score'] >= 0.85
# test on a small MoleculeNet dataset
from deepchem.molnet import load_bace_classification
tasks, all_dataset, transformers = load_bace_classification(
featurizer=featurizer)
train_set, _, _ = all_dataset
model = dc.models.GCNModel(mode='classification',
n_tasks=len(tasks),
graph_conv_layers=[2],
residual=False,
predictor_hidden_feats=2)
model.fit(train_set, nb_epoch=1)
@flaky
@pytest.mark.torch
def test_gcn_reload():
# load datasets
featurizer = MolGraphConvFeaturizer()
tasks, dataset, transformers, metric = get_dataset('classification',
featurizer=featurizer)
# initialize models
n_tasks = len(tasks)
model_dir = tempfile.mkdtemp()
model = GCNModel(mode='classification',
n_tasks=n_tasks,
number_atom_features=30,
model_dir=model_dir,
batch_size=10,
learning_rate=0.0003)
model.fit(dataset, nb_epoch=70)
scores = model.evaluate(dataset, [metric], transformers)
assert scores['mean-roc_auc_score'] >= 0.85
reloaded_model = GCNModel(mode='classification',
n_tasks=n_tasks,
number_atom_features=30,
model_dir=model_dir,
batch_size=10,
learning_rate=0.0003)
reloaded_model.restore()
pred_mols = ["CCCC", "CCCCCO", "CCCCC"]
X_pred = featurizer(pred_mols)
random_dataset = dc.data.NumpyDataset(X_pred)
original_pred = model.predict(random_dataset)
reload_pred = reloaded_model.predict(random_dataset)
assert np.all(original_pred == reload_pred)
<file_sep># Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
import inspect
sys.path.insert(0, os.path.abspath('../..'))
import sphinx_rtd_theme # noqa
import deepchem # noqa
# -- Project information -----------------------------------------------------
project = 'deepchem'
copyright = '2022, deepchem-contributors'
author = 'deepchem-contributors'
# The full version, including alpha/beta/rc tags
version = deepchem.__version__
release = deepchem.__version__
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.napoleon',
'sphinx.ext.doctest',
'sphinx.ext.linkcode',
'sphinx.ext.mathjax',
'sphinx.ext.autosectionlabel',
'sphinx_copybutton',
]
# Options for autodoc directives
autodoc_default_options = {
'member-order':
'bysource',
'special-members':
True,
'exclude-members':
'__repr__, __str__, __weakref__, __hash__, __eq__, __call__, __dict__',
}
# How to represents typehints
autodoc_typehints = "signature"
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# autosectionlabel setting
autosectionlabel_prefix_document = True
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = False
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = '_static/logo.png'
# Customize the sphinx theme
html_theme_options = {
'collapse_navigation': False,
'display_version': True,
}
copybutton_remove_prompts = True
# -- Source code links ---------------------------------------------------
# Resolve function for the linkcode extension.
def linkcode_resolve(domain, info):
def find_source():
# try to find the file and line number, based on code from numpy:
# https://github.com/numpy/numpy/blob/master/doc/source/conf.py#L286
obj = sys.modules[info['module']]
for part in info['fullname'].split('.'):
obj = getattr(obj, part)
fn = inspect.getsourcefile(obj)
fn = os.path.relpath(fn, start=os.path.dirname(deepchem.__file__))
source, lineno = inspect.getsourcelines(obj)
return fn, lineno, lineno + len(source) - 1
if domain != 'py' or not info['module']:
return None
try:
filename = 'deepchem/%s#L%d-L%d' % find_source()
except Exception:
filename = info['module'].replace('.', '/') + '.py'
tag = 'master' if 'dev' in release else release
return "https://github.com/deepchem/deepchem/blob/%s/%s" % (tag, filename)
<file_sep>"""
Train low-data Tox21 models with graph-convolution. Test last fold only.
"""
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
import numpy as np
np.random.seed(123)
import tensorflow as tf
tf.random.set_seed(123)
import deepchem as dc
from datasets import load_tox21_convmol
# 4-fold splits
K = 4
# num positive/negative ligands
n_pos = 10
n_neg = 10
n_trials = 20
tox21_tasks, dataset, transformers = load_tox21_convmol()
# Define metric
metric = dc.metrics.Metric(dc.metrics.roc_auc_score, mode="classification")
task_splitter = dc.splits.TaskSplitter()
fold_datasets = task_splitter.k_fold_split(dataset, K)
train_folds = fold_datasets[:-1]
train_dataset = dc.splits.merge_fold_datasets(train_folds)
test_dataset = fold_datasets[-1]
# Get supports on test-set
support_generator = dc.data.SupportGenerator(test_dataset, n_pos, n_neg,
n_trials)
# Compute accuracies
task_scores = {task: [] for task in range(len(test_dataset.get_task_names()))}
for trial_num, (task, support) in enumerate(support_generator):
print("Starting trial %d" % trial_num)
# Number of features on conv-mols
n_feat = 75
# Batch size of models
batch_size = 50
graph_model = dc.nn.SequentialGraph(n_feat)
graph_model.add(dc.nn.GraphConv(64, n_feat, activation='relu'))
graph_model.add(dc.nn.GraphPool())
graph_model.add(dc.nn.GraphConv(128, 64, activation='relu'))
graph_model.add(dc.nn.GraphPool())
graph_model.add(dc.nn.GraphConv(64, 128, activation='relu'))
graph_model.add(dc.nn.GraphPool())
graph_model.add(dc.nn.Dense(128, 64, activation='tanh'))
graph_model.add(dc.nn.GraphGather(batch_size, activation="tanh"))
model = dc.models.MultitaskGraphClassifier(
graph_model,
1,
n_feat,
batch_size=batch_size,
learning_rate=1e-3,
learning_rate_decay_time=1000,
optimizer_type="adam",
beta1=.9,
beta2=.999)
# Fit trained model
model.fit(support, nb_epoch=10)
# Test model
task_dataset = dc.data.get_task_dataset_minus_support(test_dataset, support,
task)
y_pred = model.predict(task_dataset)
score = metric.compute_metric(task_dataset.y, y_pred, task_dataset.w)
print("Score on task %s is %s" % (str(task), str(score)))
task_scores[task].append(score)
# Join information for all tasks.
mean_task_scores = {}
std_task_scores = {}
for task in range(len(test_dataset.get_task_names())):
mean_task_scores[task] = np.mean(np.array(task_scores[task]))
std_task_scores[task] = np.std(np.array(task_scores[task]))
print("Mean scores")
print(mean_task_scores)
print("Standard Deviations")
print(std_task_scores)
print("Median of Mean Scores")
print(np.median(np.array(list(mean_task_scores.values()))))
<file_sep>echo "Pulling qm8 dataset from deepchem"
wget http://deepchem.io.s3-website-us-west-1.amazonaws.com/datasets/gdb8.tar.gz
echo "Extracting qm8 structures"
tar -zxvf gdb8.tar.gz
<file_sep>import pytest
try:
import torch
except ModuleNotFoundError:
pass
@pytest.mark.torch
def testGroverReadout():
from deepchem.models.torch_models.readout import GroverReadout
n_nodes, n_features = 6, 32
readout_mean = GroverReadout(rtype="mean")
# testing a simple scenario where each embedding corresponds to an unique graph
embedding = torch.ones(n_nodes, n_features)
scope = [(0, 1), (1, 1), (2, 1), (3, 1), (4, 1), (5, 1)]
readout = readout_mean(embedding, scope)
assert readout.shape == (n_nodes, n_features)
assert (readout == torch.ones(n_nodes, n_features)).all().tolist()
# here embeddings 0, 1 belong to a scope, 2, 3 to another scope and 4, 5 to another scope
# thus, we still have 3 graphs
n_graphs = n_nodes // 2
scope = [(0, 2), (2, 2), (4, 2)]
embedding[torch.tensor([0, 2, 4])] = torch.zeros_like(
embedding[torch.tensor([0, 2, 4])])
readout = readout_mean(embedding, scope)
assert readout.shape == (n_graphs, n_features)
assert (readout == torch.ones(n_graphs, n_features) / 2).all().tolist()
attn_out = 8
readout_attn = GroverReadout(rtype="self_attention",
in_features=n_features,
attn_hidden_size=32,
attn_out_size=attn_out)
readout = readout_attn(embedding, scope)
assert readout.shape == (n_graphs, attn_out * n_features)
<file_sep>from typing import Callable, List, Union
import dgl
import dgl.function as fn
import torch
from torch import nn
from torch.nn import functional as F
from deepchem.models.losses import NTXentMultiplePositives
from deepchem.models.torch_models import ModularTorchModel
from deepchem.models.torch_models.layers import MultilayerPerceptron
from deepchem.models.torch_models.pna_gnn import PNA, AtomEncoder
from deepchem.utils.graph_utils import fourier_encode_dist
class Net3DLayer(nn.Module):
"""
Net3DLayer is a single layer of a 3D graph neural network based on the 3D Infomax architecture [1].
This class expects a DGL graph with node features stored under the name 'feat' and edge features stored under the name 'd' (representing 3D distances). The edge features are updated by the message network and the node features are updated by the update network.
Parameters
----------
edge_dim : int
The dimension of the edge features.
hidden_dim : int
The dimension of the hidden layers.
reduce_func : str
The reduce function to use for aggregating messages. Can be either 'sum' or 'mean'.
batch_norm : bool, optional (default=False)
Whether to use batch normalization.
batch_norm_momentum : float, optional (default=0.1)
The momentum for the batch normalization layers.
dropout : float, optional (default=0.0)
The dropout rate for the layers.
message_net_layers : int, optional (default=2)
The number of message network layers.
update_net_layers : int, optional (default=2)
The number of update network layers.
References
----------
.. [1] <NAME>. et al. 3D Infomax improves GNNs for Molecular Property Prediction. Preprint at https://doi.org/10.48550/arXiv.2110.04126 (2022).
Examples
--------
>>> net3d_layer = Net3DLayer(edge_dim=3, hidden_dim=3)
>>> graph = dgl.graph(([0, 1], [1, 2]))
>>> graph.ndata['feat'] = torch.tensor([[1., 2., 3.], [4., 5., 6.], [7., 8., 9.]])
>>> graph.edata['d'] = torch.tensor([[0.5, 0.6, 0.7], [0.8, 0.9, 1.0]])
>>> output = net3d_layer(graph)
"""
def __init__(self,
edge_dim: int,
hidden_dim: int,
reduce_func: str = 'sum',
batch_norm: bool = False,
batch_norm_momentum: float = 0.1,
dropout: float = 0.0,
message_net_layers: int = 2,
update_net_layers: int = 2):
super(Net3DLayer, self).__init__()
self.message_network = nn.Sequential(
MultilayerPerceptron(d_input=hidden_dim * 2 + edge_dim,
d_output=hidden_dim,
d_hidden=(hidden_dim,) *
(message_net_layers - 1),
batch_norm=batch_norm,
batch_norm_momentum=batch_norm_momentum,
dropout=dropout), torch.nn.SiLU())
if reduce_func == 'sum':
self.reduce_func = fn.sum
elif reduce_func == 'mean':
self.reduce_func = fn.mean
else:
raise ValueError('reduce function not supported: ', reduce_func)
self.update_network = MultilayerPerceptron(
d_input=hidden_dim,
d_hidden=(hidden_dim,) * (update_net_layers - 1),
d_output=hidden_dim,
batch_norm=True,
batch_norm_momentum=batch_norm_momentum)
self.soft_edge_network = nn.Linear(hidden_dim, 1)
def forward(self, input_graph: dgl.DGLGraph):
"""Perform a forward pass on the given graph.
Parameters
----------
input_graph : dgl.DGLGraph
The graph to perform the forward pass on.
Returns
-------
dgl.DGLGraph
The updated graph after the forward pass.
"""
# copy the input graph to avoid in-place operations
graph = input_graph.local_var()
graph.ndata['feat'] = input_graph.ndata['feat'].clone()
graph.edata['d'] = input_graph.edata['d'].clone()
graph.update_all(message_func=self.message_function,
reduce_func=self.reduce_func(msg='m', out='m_sum'),
apply_node_func=self.update_function)
return graph
def message_function(self, edges):
"""Computes the message and edge weight for a given set of edges.
Parameters
----------
edges : dgl.EdgeBatch
A dgl.EdgeBatch object containing the edges information (data, batch size, etc.).
Returns
-------
dict
A dictionary containing the message multiplied by the edge weight.
"""
message_input = torch.cat(
[edges.src['feat'], edges.dst['feat'], edges.data['d']], dim=-1)
message = self.message_network(message_input)
edges.data['d'] += message
edge_weight = torch.sigmoid(self.soft_edge_network(message))
return {'m': message * edge_weight}
def update_function(self, nodes):
"""
Update function for updating node features based on the aggregated messages.
This function is used in the forward method to perform a forward pass on the graph.
Parameters
----------
nodes : dgl.NodeBatch
A node batch object containing the nodes information (data, batch size, etc.).
Returns
-------
dict
A dictionary containing the updated features.
"""
h = nodes.data['feat']
input = torch.cat([nodes.data['m_sum'] + nodes.data['feat']], dim=-1)
h_new = self.update_network(input)
output = h_new + h
return {'feat': output}
class Net3D(nn.Module):
"""
Net3D is a 3D graph neural network that expects a DGL graph input with 3D coordinates stored under the name 'd' and node features stored under the name 'feat'. It is based on the 3D Infomax architecture [1].
Parameters
----------
hidden_dim : int
The dimension of the hidden layers.
target_dim : int
The dimension of the output layer.
readout_aggregators : List[str]
A list of aggregator functions for the readout layer. Options are 'sum', 'max', 'min', 'mean'.
batch_norm : bool, optional (default=True)
Whether to use batch normalization.
node_wise_output_layers : int, optional (default=2)
The number of output layers for each node.
batch_norm_momentum : float, optional (default=0.1)
The momentum for the batch normalization layers.
reduce_func : str, optional (default='sum')
The reduce function to use for aggregating messages.
dropout : float, optional (default=0.0)
The dropout rate for the layers.
propagation_depth : int, optional (default=4)
The number of propagation layers in the network.
readout_layers : int, optional (default=2)
The number of readout layers in the network.
readout_hidden_dim : int, optional (default=None)
The dimension of the hidden layers in the readout network.
fourier_encodings : int, optional (default=4)
The number of Fourier encodings to use.
update_net_layers : int, optional (default=2)
The number of update network layers.
message_net_layers : int, optional (default=2)
The number of message network layers.
use_node_features : bool, optional (default=False)
Whether to use node features as input.
Examples
--------
>>> from deepchem.feat.molecule_featurizers.conformer_featurizer import RDKitConformerFeaturizer
>>> from deepchem.models.torch_models.gnn3d import Net3D
>>> smiles = ["C[C@H](F)Cl", "C[C@@H](F)Cl"]
>>> featurizer = RDKitConformerFeaturizer(num_conformers=2)
>>> data = featurizer.featurize(smiles)
>>> dgldata = [[graph.to_dgl_graph() for graph in conf] for conf in data]
>>> net3d = Net3D(hidden_dim=3, target_dim=2, readout_aggregators=['sum', 'mean'])
>>> output = [[net3d(graph) for graph in conf] for conf in dgldata]
References
----------
.. [1] <NAME>. et al. 3D Infomax improves GNNs for Molecular Property Prediction. Preprint at https://doi.org/10.48550/arXiv.2110.04126 (2022).
"""
def __init__(self,
hidden_dim,
target_dim,
readout_aggregators: List[str],
node_wise_output_layers=2,
batch_norm=True,
batch_norm_momentum=0.1,
reduce_func='sum',
dropout=0.0,
propagation_depth: int = 4,
readout_layers: int = 2,
readout_hidden_dim=None,
fourier_encodings=4,
update_net_layers=2,
message_net_layers=2,
use_node_features=False):
super(Net3D, self).__init__()
self.fourier_encodings = fourier_encodings
edge_in_dim = 1 if fourier_encodings == 0 else 2 * fourier_encodings + 1
self.edge_input = nn.Sequential(
MultilayerPerceptron(d_input=edge_in_dim,
d_output=hidden_dim,
d_hidden=(hidden_dim,),
batch_norm=batch_norm,
batch_norm_momentum=batch_norm_momentum),
torch.nn.SiLU())
self.use_node_features = use_node_features
if self.use_node_features:
self.atom_encoder = AtomEncoder(hidden_dim)
else:
self.node_embedding = nn.Parameter(torch.empty((hidden_dim,)))
nn.init.normal_(self.node_embedding)
self.mp_layers = nn.ModuleList()
for _ in range(propagation_depth):
self.mp_layers.append(
Net3DLayer(edge_dim=hidden_dim,
hidden_dim=hidden_dim,
batch_norm=batch_norm,
batch_norm_momentum=batch_norm_momentum,
dropout=dropout,
reduce_func=reduce_func,
message_net_layers=message_net_layers,
update_net_layers=update_net_layers))
self.node_wise_output_layers = node_wise_output_layers
if self.node_wise_output_layers > 0:
self.node_wise_output_network = MultilayerPerceptron(
d_input=hidden_dim,
d_output=hidden_dim,
d_hidden=(hidden_dim,),
batch_norm=batch_norm,
batch_norm_momentum=batch_norm_momentum)
if readout_hidden_dim is None:
readout_hidden_dim = hidden_dim
self.readout_aggregators = readout_aggregators
self.output = MultilayerPerceptron(
d_input=hidden_dim * len(self.readout_aggregators),
d_output=target_dim,
d_hidden=(readout_hidden_dim,) *
(readout_layers -
1), # -1 because the input layer is not considered a hidden layer
batch_norm=False)
def forward(self, graph: dgl.DGLGraph):
"""
Forward pass of the Net3D model.
Parameters
----------
graph : dgl.DGLGraph
The input graph with node features stored under the key 'x' and edge distances stored under the key 'd'.
Returns
-------
torch.Tensor
The graph representation tensor of shape (1, target_dim).
"""
if self.use_node_features:
graph.ndata['feat'] = self.atom_encoder(graph.ndata['x'])
else:
graph.ndata['feat'] = self.node_embedding[None, :].expand(
graph.number_of_nodes(), -1)
if self.fourier_encodings > 0:
graph.edata['d'] = fourier_encode_dist(
graph.edata['d'], num_encodings=self.fourier_encodings)
graph.apply_edges(self.input_edge_func)
for mp_layer in self.mp_layers:
graph = mp_layer(graph)
if self.node_wise_output_layers > 0:
graph.apply_nodes(self.output_node_func)
readouts_to_cat = [
dgl.readout_nodes(graph, 'feat', op=aggr)
for aggr in self.readout_aggregators
]
readout = torch.cat(readouts_to_cat, dim=-1)
return self.output(readout.squeeze())
def output_node_func(self, nodes):
"""
Apply the node-wise output network to the node features.
Parameters
----------
nodes : dgl.NodeBatch
A batch of nodes with features stored under the key 'feat'.
Returns
-------
dict
A dictionary with the updated node features under the key 'feat'.
"""
return {'feat': self.node_wise_output_network(nodes.data['feat'])}
def input_edge_func(self, edges):
"""
Apply the edge input network to the edge features.
Parameters
----------
edges : dgl.EdgeBatch
A batch of edges with distances stored under the key 'd'.
Returns
-------
dict
A dictionary with the updated edge features under the key 'd'.
"""
return {'d': F.silu(self.edge_input(edges.data['d']))}
class InfoMax3DModular(ModularTorchModel):
"""
InfoMax3DModular is a modular torch model that uses a 2D PNA model and a 3D Net3D model to maximize the mutual information between their representations. The 2D model can then be used for downstream tasks without the need for 3D coordinates. This is based off the work in [1].
This class expects data featurized by the RDKitConformerFeaturizer. This featurizer produces features of the type Array[Array[List[GraphData]]].
The outermost array is the dataset, the second array is the molecule, the list holds the conformers of that molecule, and each GraphData object is the featurized graph for one conformer, with node_pos_features holding the 3D coordinates.
If you are not using RDKitConformerFeaturizer, your input data features should look like this: Dataset[Molecule[Conformers[GraphData]]].
Parameters
----------
hidden_dim : int
The dimension of the hidden layers.
target_dim : int
The dimension of the output layer.
aggregators : List[str]
A list of aggregator functions for the PNA model. Options are 'mean', 'sum', 'min', 'max', 'std', 'var', 'moment3', 'moment4', 'moment5'.
readout_aggregators : List[str]
A list of aggregator functions for the readout layer. Options are 'sum', 'max', 'min', 'mean'.
scalers : List[str]
A list of scaler functions for the PNA model. Options are 'identity', 'amplification', 'attenuation'.
residual : bool, optional (default=True)
Whether to use residual connections in the PNA model.
node_wise_output_layers : int, optional (default=2)
The number of output layers for each node in the Net3D model.
pairwise_distances : bool, optional (default=False)
Whether to use pairwise distances in the PNA model.
activation : Union[Callable, str], optional (default="relu")
The activation function to use in the PNA model.
reduce_func : str, optional (default='sum')
The reduce function to use for aggregating messages in the Net3D model.
batch_norm : bool, optional (default=True)
Whether to use batch normalization in the PNA model.
batch_norm_momentum : float, optional (default=0.1)
The momentum for the batch normalization layers.
propagation_depth : int, optional (default=5)
The number of propagation layers in the PNA and Net3D models.
dropout : float, optional (default=0.0)
The dropout rate for the layers in the PNA and Net3D models.
readout_layers : int, optional (default=2)
The number of readout layers in the PNA and Net3D models.
readout_hidden_dim : int, optional (default=1)
The dimension of the hidden layers in the readout network.
fourier_encodings : int, optional (default=4)
The number of Fourier encodings to use in the Net3D model.
update_net_layers : int, optional (default=2)
The number of update network layers in the Net3D model.
message_net_layers : int, optional (default=2)
The number of message network layers in the Net3D model.
use_node_features : bool, optional (default=False)
Whether to use node features as input in the Net3D model.
posttrans_layers : int, optional (default=1)
The number of post-transformation layers in the PNA model.
pretrans_layers : int, optional (default=1)
The number of pre-transformation layers in the PNA model.
kwargs : dict
Additional keyword arguments.
References
----------
.. [1] <NAME>. 3D Infomax improves GNNs for Molecular Property Prediction. Preprint at https://doi.org/10.48550/arXiv.2110.04126 (2022).
Examples
--------
>>> from deepchem.feat.graph_data import BatchGraphData
>>> from deepchem.feat.molecule_featurizers.conformer_featurizer import RDKitConformerFeaturizer
>>> from deepchem.models.torch_models.gnn3d import InfoMax3DModular
>>> import numpy as np
>>> import deepchem as dc
>>> from deepchem.data.datasets import NumpyDataset
>>> smiles = ["C[C@H](F)Cl", "C[C@@H](F)Cl"]
>>> featurizer = RDKitConformerFeaturizer(num_conformers=2)
>>> data = featurizer.featurize(smiles)
>>> dataset = NumpyDataset(X=data)
>>> model = InfoMax3DModular(hidden_dim=64,
... target_dim=10,
... aggregators=['max'],
... readout_aggregators=['mean'],
... scalers=['identity'])
>>> loss = model.fit(dataset, nb_epoch=1)
"""
def __init__(self,
hidden_dim,
target_dim,
aggregators: List[str],
readout_aggregators: List[str],
scalers: List[str],
residual: bool = True,
node_wise_output_layers: int = 2,
pairwise_distances: bool = False,
activation: Union[Callable, str] = "relu",
reduce_func: str = 'sum',
batch_norm: bool = True,
batch_norm_momentum: float = 0.1,
propagation_depth: int = 5,
dropout: float = 0.0,
readout_layers: int = 2,
readout_hidden_dim: int = 1,
fourier_encodings: int = 4,
update_net_layers: int = 2,
message_net_layers: int = 2,
use_node_features: bool = False,
posttrans_layers: int = 1,
pretrans_layers: int = 1,
**kwargs):
self.hidden_dim = hidden_dim
self.target_dim = target_dim
self.aggregators = aggregators
self.readout_aggregators = readout_aggregators
self.scalers = scalers
self.residual = residual
self.node_wise_output_layers = node_wise_output_layers
self.pairwise_distances = pairwise_distances
self.activation = activation
self.reduce_func = reduce_func
self.batch_norm = batch_norm
self.batch_norm_momentum = batch_norm_momentum
self.propagation_depth = propagation_depth
self.dropout = dropout
self.readout_layers = readout_layers
self.readout_hidden_dim = readout_hidden_dim
self.fourier_encodings = fourier_encodings
self.update_net_layers = update_net_layers
self.message_net_layers = message_net_layers
self.use_node_features = use_node_features
self.posttrans_layers = posttrans_layers
self.pretrans_layers = pretrans_layers
self.kwargs = kwargs
self.criterion = NTXentMultiplePositives()._create_pytorch_loss()
self.components = self.build_components()
self.model = self.build_model()
super().__init__(self.model, self.components, **kwargs)
def build_components(self):
"""
Build the components of the InfoMax3DModular model.
Returns
-------
dict
A dictionary containing the '2d' PNA model and the '3d' Net3D model.
"""
return {
'2d':
PNA(hidden_dim=self.hidden_dim,
target_dim=self.target_dim,
aggregators=self.aggregators,
scalers=self.scalers,
readout_aggregators=self.readout_aggregators,
readout_hidden_dim=self.readout_hidden_dim,
readout_layers=self.readout_layers,
residual=self.residual,
pairwise_distances=self.pairwise_distances,
activation=self.activation,
batch_norm=self.batch_norm,
batch_norm_momentum=self.batch_norm_momentum,
propagation_depth=self.propagation_depth,
dropout=self.dropout,
posttrans_layers=self.posttrans_layers,
pretrans_layers=self.pretrans_layers,
**self.kwargs),
'3d':
Net3D(hidden_dim=self.hidden_dim,
target_dim=self.target_dim,
readout_aggregators=self.readout_aggregators,
node_wise_output_layers=self.node_wise_output_layers,
batch_norm=True,
batch_norm_momentum=self.batch_norm_momentum,
reduce_func=self.reduce_func,
dropout=self.dropout,
propagation_depth=self.propagation_depth,
readout_layers=self.readout_layers,
readout_hidden_dim=self.readout_hidden_dim,
fourier_encodings=self.fourier_encodings,
update_net_layers=self.update_net_layers,
message_net_layers=self.message_net_layers,
use_node_features=self.use_node_features),
}
def build_model(self):
"""
Build the InfoMax3DModular model. This is the 2D network which is meant to be used for inference.
Returns
-------
PNA
The 2D PNA model component.
"""
return self.components['2d']
def loss_func(self, inputs, labels, weights):
"""
Compute the loss function for the InfoMax3DModular model.
Parameters
----------
inputs : list
A list of molecules, where each molecule is a list of conformer DGL graphs (as produced by `_prepare_batch`).
labels : torch.Tensor
The ground truth labels.
weights : torch.Tensor
The weights for each sample.
Returns
-------
torch.Tensor
The computed loss value.
"""
encodings2d = []
encodings3d = []
for conformers in inputs:
# 2d model takes only the first conformer
encodings2d.append(self.components['2d'](conformers[0]))
# 3d model takes all conformers
encodings3d.append(
[self.components['3d'](conf) for conf in conformers])
# concat the lists such that the 2d encodings is of shape batch_size x target_dim
# and the 3d encodings is of shape batch_size*num_conformers x target_dim
encodings2d = torch.cat(encodings2d, dim=0)
encodings3d = torch.cat(
[torch.cat(conf, dim=0) for conf in encodings3d], dim=0)
loss = self.criterion(encodings2d, encodings3d)
return loss
def _prepare_batch(self, batch):
"""
Prepare a batch of data for the InfoMax3DModular model.
Parameters
----------
batch : tuple
A tuple containing the inputs, labels, and weights.
Returns
-------
tuple
A tuple containing the prepared batch graph, labels, and weights.
"""
inputs, labels, weights = batch
inputs = inputs[0]
# convert the GraphData objects to DGL graphs
graphs = [[
graph_data.to_dgl_graph().to(self.device) for graph_data in row
] for row in inputs]
return graphs, labels, weights
<file_sep>Model Cheatsheet
----------------
If you're just getting started with DeepChem, you're probably interested in the
basics. The place to get started is this "model cheatsheet" that lists various
types of custom DeepChem models. Note that some wrappers like :code:`SklearnModel`
and :code:`GBDTModel` which wrap external machine learning libraries are excluded,
but this table should otherwise be complete.
As a note on how to read these tables: each row describes what's needed to
invoke a given model. Some models must be used with particular :code:`Transformer` or
:code:`Featurizer` objects. Most models can be trained by calling :code:`model.fit`;
otherwise, the name of the fit method is given in the Comment column.
In order to run the models, make sure that the corresponding backend (TensorFlow/Keras,
PyTorch, or JAX) is installed.
You can thus read off what's needed to train the model from the table below.
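As a concrete illustration, the sketch below shows the workflow these tables assume.
It uses :code:`dc.molnet.load_delaney` with the GraphConv featurizer and
:code:`GraphConvModel` purely as one illustrative pairing from the molecular table;
substitute the featurizer and model from the row you are interested in.

.. code-block:: python

    import deepchem as dc

    # One illustrative loader/featurizer/model combination; pick yours from the tables.
    tasks, (train, valid, test), transformers = dc.molnet.load_delaney(
        featurizer='GraphConv', splitter='random')

    # Build and train the model named in the table.
    model = dc.models.GraphConvModel(n_tasks=len(tasks), mode='regression')
    model.fit(train, nb_epoch=10)

    # Evaluate on the validation split.
    metric = dc.metrics.Metric(dc.metrics.pearson_r2_score)
    print(model.evaluate(valid, [metric], transformers))
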
**General purpose**
.. csv-table:: General purpose models
:file: ./general_purpose_models.csv
:width: 100%
:header-rows: 1
**Molecules**
Many models implemented in DeepChem were designed for small to medium-sized organic molecules,
most often drug-like compounds.
If your data is very different (e.g. molecules contain 'exotic' elements not present in the original dataset)
or cannot be represented well using SMILES (e.g. metal complexes, crystals), some adaptations to the
featurization and/or model might be needed to get reasonable results.
.. csv-table:: Molecular models
:file: ./molecular_models.csv
:width: 100%
:header-rows: 1
**Materials**
The following models were designed specifically for (inorganic) materials.
.. csv-table:: Material models
:file: ./material_models.csv
:width: 100%
:header-rows: 1
<file_sep>"""
TOXCAST dataset loader.
"""
import os
import deepchem as dc
from deepchem.molnet.load_function.molnet_loader import TransformerGenerator, _MolnetLoader
from deepchem.data import Dataset
from typing import List, Optional, Tuple, Union
TOXCAST_URL = "https://deepchemdata.s3-us-west-1.amazonaws.com/datasets/toxcast_data.csv.gz"
TOXCAST_TASKS = [
'ACEA_T47D_80hr_Negative', 'ACEA_T47D_80hr_Positive',
'APR_HepG2_CellCycleArrest_24h_dn', 'APR_HepG2_CellCycleArrest_24h_up',
'APR_HepG2_CellCycleArrest_72h_dn', 'APR_HepG2_CellLoss_24h_dn',
'APR_HepG2_CellLoss_72h_dn', 'APR_HepG2_MicrotubuleCSK_24h_dn',
'APR_HepG2_MicrotubuleCSK_24h_up', 'APR_HepG2_MicrotubuleCSK_72h_dn',
'APR_HepG2_MicrotubuleCSK_72h_up', 'APR_HepG2_MitoMass_24h_dn',
'APR_HepG2_MitoMass_24h_up', 'APR_HepG2_MitoMass_72h_dn',
'APR_HepG2_MitoMass_72h_up', 'APR_HepG2_MitoMembPot_1h_dn',
'APR_HepG2_MitoMembPot_24h_dn', 'APR_HepG2_MitoMembPot_72h_dn',
'APR_HepG2_MitoticArrest_24h_up', 'APR_HepG2_MitoticArrest_72h_up',
'APR_HepG2_NuclearSize_24h_dn', 'APR_HepG2_NuclearSize_72h_dn',
'APR_HepG2_NuclearSize_72h_up', 'APR_HepG2_OxidativeStress_24h_up',
'APR_HepG2_OxidativeStress_72h_up', 'APR_HepG2_StressKinase_1h_up',
'APR_HepG2_StressKinase_24h_up', 'APR_HepG2_StressKinase_72h_up',
'APR_HepG2_p53Act_24h_up', 'APR_HepG2_p53Act_72h_up',
'APR_Hepat_Apoptosis_24hr_up', 'APR_Hepat_Apoptosis_48hr_up',
'APR_Hepat_CellLoss_24hr_dn', 'APR_Hepat_CellLoss_48hr_dn',
'APR_Hepat_DNADamage_24hr_up', 'APR_Hepat_DNADamage_48hr_up',
'APR_Hepat_DNATexture_24hr_up', 'APR_Hepat_DNATexture_48hr_up',
'APR_Hepat_MitoFxnI_1hr_dn', 'APR_Hepat_MitoFxnI_24hr_dn',
'APR_Hepat_MitoFxnI_48hr_dn', 'APR_Hepat_NuclearSize_24hr_dn',
'APR_Hepat_NuclearSize_48hr_dn', 'APR_Hepat_Steatosis_24hr_up',
'APR_Hepat_Steatosis_48hr_up', 'ATG_AP_1_CIS_dn', 'ATG_AP_1_CIS_up',
'ATG_AP_2_CIS_dn', 'ATG_AP_2_CIS_up', 'ATG_AR_TRANS_dn', 'ATG_AR_TRANS_up',
'ATG_Ahr_CIS_dn', 'ATG_Ahr_CIS_up', 'ATG_BRE_CIS_dn', 'ATG_BRE_CIS_up',
'ATG_CAR_TRANS_dn', 'ATG_CAR_TRANS_up', 'ATG_CMV_CIS_dn', 'ATG_CMV_CIS_up',
'ATG_CRE_CIS_dn', 'ATG_CRE_CIS_up', 'ATG_C_EBP_CIS_dn', 'ATG_C_EBP_CIS_up',
'ATG_DR4_LXR_CIS_dn', 'ATG_DR4_LXR_CIS_up', 'ATG_DR5_CIS_dn',
'ATG_DR5_CIS_up', 'ATG_E2F_CIS_dn', 'ATG_E2F_CIS_up', 'ATG_EGR_CIS_up',
'ATG_ERE_CIS_dn', 'ATG_ERE_CIS_up', 'ATG_ERRa_TRANS_dn',
'ATG_ERRg_TRANS_dn', 'ATG_ERRg_TRANS_up', 'ATG_ERa_TRANS_up',
'ATG_E_Box_CIS_dn', 'ATG_E_Box_CIS_up', 'ATG_Ets_CIS_dn', 'ATG_Ets_CIS_up',
'ATG_FXR_TRANS_up', 'ATG_FoxA2_CIS_dn', 'ATG_FoxA2_CIS_up',
'ATG_FoxO_CIS_dn', 'ATG_FoxO_CIS_up', 'ATG_GAL4_TRANS_dn',
'ATG_GATA_CIS_dn', 'ATG_GATA_CIS_up', 'ATG_GLI_CIS_dn', 'ATG_GLI_CIS_up',
'ATG_GRE_CIS_dn', 'ATG_GRE_CIS_up', 'ATG_GR_TRANS_dn', 'ATG_GR_TRANS_up',
'ATG_HIF1a_CIS_dn', 'ATG_HIF1a_CIS_up', 'ATG_HNF4a_TRANS_dn',
'ATG_HNF4a_TRANS_up', 'ATG_HNF6_CIS_dn', 'ATG_HNF6_CIS_up',
'ATG_HSE_CIS_dn', 'ATG_HSE_CIS_up', 'ATG_IR1_CIS_dn', 'ATG_IR1_CIS_up',
'ATG_ISRE_CIS_dn', 'ATG_ISRE_CIS_up', 'ATG_LXRa_TRANS_dn',
'ATG_LXRa_TRANS_up', 'ATG_LXRb_TRANS_dn', 'ATG_LXRb_TRANS_up',
'ATG_MRE_CIS_up', 'ATG_M_06_TRANS_up', 'ATG_M_19_CIS_dn',
'ATG_M_19_TRANS_dn', 'ATG_M_19_TRANS_up', 'ATG_M_32_CIS_dn',
'ATG_M_32_CIS_up', 'ATG_M_32_TRANS_dn', 'ATG_M_32_TRANS_up',
'ATG_M_61_TRANS_up', 'ATG_Myb_CIS_dn', 'ATG_Myb_CIS_up', 'ATG_Myc_CIS_dn',
'ATG_Myc_CIS_up', 'ATG_NFI_CIS_dn', 'ATG_NFI_CIS_up', 'ATG_NF_kB_CIS_dn',
'ATG_NF_kB_CIS_up', 'ATG_NRF1_CIS_dn', 'ATG_NRF1_CIS_up',
'ATG_NRF2_ARE_CIS_dn', 'ATG_NRF2_ARE_CIS_up', 'ATG_NURR1_TRANS_dn',
'ATG_NURR1_TRANS_up', 'ATG_Oct_MLP_CIS_dn', 'ATG_Oct_MLP_CIS_up',
'ATG_PBREM_CIS_dn', 'ATG_PBREM_CIS_up', 'ATG_PPARa_TRANS_dn',
'ATG_PPARa_TRANS_up', 'ATG_PPARd_TRANS_up', 'ATG_PPARg_TRANS_up',
'ATG_PPRE_CIS_dn', 'ATG_PPRE_CIS_up', 'ATG_PXRE_CIS_dn', 'ATG_PXRE_CIS_up',
'ATG_PXR_TRANS_dn', 'ATG_PXR_TRANS_up', 'ATG_Pax6_CIS_up',
'ATG_RARa_TRANS_dn', 'ATG_RARa_TRANS_up', 'ATG_RARb_TRANS_dn',
'ATG_RARb_TRANS_up', 'ATG_RARg_TRANS_dn', 'ATG_RARg_TRANS_up',
'ATG_RORE_CIS_dn', 'ATG_RORE_CIS_up', 'ATG_RORb_TRANS_dn',
'ATG_RORg_TRANS_dn', 'ATG_RORg_TRANS_up', 'ATG_RXRa_TRANS_dn',
'ATG_RXRa_TRANS_up', 'ATG_RXRb_TRANS_dn', 'ATG_RXRb_TRANS_up',
'ATG_SREBP_CIS_dn', 'ATG_SREBP_CIS_up', 'ATG_STAT3_CIS_dn',
'ATG_STAT3_CIS_up', 'ATG_Sox_CIS_dn', 'ATG_Sox_CIS_up', 'ATG_Sp1_CIS_dn',
'ATG_Sp1_CIS_up', 'ATG_TAL_CIS_dn', 'ATG_TAL_CIS_up', 'ATG_TA_CIS_dn',
'ATG_TA_CIS_up', 'ATG_TCF_b_cat_CIS_dn', 'ATG_TCF_b_cat_CIS_up',
'ATG_TGFb_CIS_dn', 'ATG_TGFb_CIS_up', 'ATG_THRa1_TRANS_dn',
'ATG_THRa1_TRANS_up', 'ATG_VDRE_CIS_dn', 'ATG_VDRE_CIS_up',
'ATG_VDR_TRANS_dn', 'ATG_VDR_TRANS_up', 'ATG_XTT_Cytotoxicity_up',
'ATG_Xbp1_CIS_dn', 'ATG_Xbp1_CIS_up', 'ATG_p53_CIS_dn', 'ATG_p53_CIS_up',
'BSK_3C_Eselectin_down', 'BSK_3C_HLADR_down', 'BSK_3C_ICAM1_down',
'BSK_3C_IL8_down', 'BSK_3C_MCP1_down', 'BSK_3C_MIG_down',
'BSK_3C_Proliferation_down', 'BSK_3C_SRB_down',
'BSK_3C_Thrombomodulin_down', 'BSK_3C_Thrombomodulin_up',
'BSK_3C_TissueFactor_down', 'BSK_3C_TissueFactor_up', 'BSK_3C_VCAM1_down',
'BSK_3C_Vis_down', 'BSK_3C_uPAR_down', 'BSK_4H_Eotaxin3_down',
'BSK_4H_MCP1_down', 'BSK_4H_Pselectin_down', 'BSK_4H_Pselectin_up',
'BSK_4H_SRB_down', 'BSK_4H_VCAM1_down', 'BSK_4H_VEGFRII_down',
'BSK_4H_uPAR_down', 'BSK_4H_uPAR_up', 'BSK_BE3C_HLADR_down',
'BSK_BE3C_IL1a_down', 'BSK_BE3C_IP10_down', 'BSK_BE3C_MIG_down',
'BSK_BE3C_MMP1_down', 'BSK_BE3C_MMP1_up', 'BSK_BE3C_PAI1_down',
'BSK_BE3C_SRB_down', 'BSK_BE3C_TGFb1_down', 'BSK_BE3C_tPA_down',
'BSK_BE3C_uPAR_down', 'BSK_BE3C_uPAR_up', 'BSK_BE3C_uPA_down',
'BSK_CASM3C_HLADR_down', 'BSK_CASM3C_IL6_down', 'BSK_CASM3C_IL6_up',
'BSK_CASM3C_IL8_down', 'BSK_CASM3C_LDLR_down', 'BSK_CASM3C_LDLR_up',
'BSK_CASM3C_MCP1_down', 'BSK_CASM3C_MCP1_up', 'BSK_CASM3C_MCSF_down',
'BSK_CASM3C_MCSF_up', 'BSK_CASM3C_MIG_down',
'BSK_CASM3C_Proliferation_down', 'BSK_CASM3C_Proliferation_up',
'BSK_CASM3C_SAA_down', 'BSK_CASM3C_SAA_up', 'BSK_CASM3C_SRB_down',
'BSK_CASM3C_Thrombomodulin_down', 'BSK_CASM3C_Thrombomodulin_up',
'BSK_CASM3C_TissueFactor_down', 'BSK_CASM3C_VCAM1_down',
'BSK_CASM3C_VCAM1_up', 'BSK_CASM3C_uPAR_down', 'BSK_CASM3C_uPAR_up',
'BSK_KF3CT_ICAM1_down', 'BSK_KF3CT_IL1a_down', 'BSK_KF3CT_IP10_down',
'BSK_KF3CT_IP10_up', 'BSK_KF3CT_MCP1_down', 'BSK_KF3CT_MCP1_up',
'BSK_KF3CT_MMP9_down', 'BSK_KF3CT_SRB_down', 'BSK_KF3CT_TGFb1_down',
'BSK_KF3CT_TIMP2_down', 'BSK_KF3CT_uPA_down', 'BSK_LPS_CD40_down',
'BSK_LPS_Eselectin_down', 'BSK_LPS_Eselectin_up', 'BSK_LPS_IL1a_down',
'BSK_LPS_IL1a_up', 'BSK_LPS_IL8_down', 'BSK_LPS_IL8_up',
'BSK_LPS_MCP1_down', 'BSK_LPS_MCSF_down', 'BSK_LPS_PGE2_down',
'BSK_LPS_PGE2_up', 'BSK_LPS_SRB_down', 'BSK_LPS_TNFa_down',
'BSK_LPS_TNFa_up', 'BSK_LPS_TissueFactor_down', 'BSK_LPS_TissueFactor_up',
'BSK_LPS_VCAM1_down', 'BSK_SAg_CD38_down', 'BSK_SAg_CD40_down',
'BSK_SAg_CD69_down', 'BSK_SAg_Eselectin_down', 'BSK_SAg_Eselectin_up',
'BSK_SAg_IL8_down', 'BSK_SAg_IL8_up', 'BSK_SAg_MCP1_down',
'BSK_SAg_MIG_down', 'BSK_SAg_PBMCCytotoxicity_down',
'BSK_SAg_PBMCCytotoxicity_up', 'BSK_SAg_Proliferation_down',
'BSK_SAg_SRB_down', 'BSK_hDFCGF_CollagenIII_down', 'BSK_hDFCGF_EGFR_down',
'BSK_hDFCGF_EGFR_up', 'BSK_hDFCGF_IL8_down', 'BSK_hDFCGF_IP10_down',
'BSK_hDFCGF_MCSF_down', 'BSK_hDFCGF_MIG_down', 'BSK_hDFCGF_MMP1_down',
'BSK_hDFCGF_MMP1_up', 'BSK_hDFCGF_PAI1_down',
'BSK_hDFCGF_Proliferation_down', 'BSK_hDFCGF_SRB_down',
'BSK_hDFCGF_TIMP1_down', 'BSK_hDFCGF_VCAM1_down', 'CEETOX_H295R_11DCORT_dn',
'CEETOX_H295R_ANDR_dn', 'CEETOX_H295R_CORTISOL_dn', 'CEETOX_H295R_DOC_dn',
'CEETOX_H295R_DOC_up', 'CEETOX_H295R_ESTRADIOL_dn',
'CEETOX_H295R_ESTRADIOL_up', 'CEETOX_H295R_ESTRONE_dn',
'CEETOX_H295R_ESTRONE_up', 'CEETOX_H295R_OHPREG_up',
'CEETOX_H295R_OHPROG_dn', 'CEETOX_H295R_OHPROG_up', 'CEETOX_H295R_PROG_up',
'CEETOX_H295R_TESTO_dn', 'CLD_ABCB1_48hr', 'CLD_ABCG2_48hr',
'CLD_CYP1A1_24hr', 'CLD_CYP1A1_48hr', 'CLD_CYP1A1_6hr', 'CLD_CYP1A2_24hr',
'CLD_CYP1A2_48hr', 'CLD_CYP1A2_6hr', 'CLD_CYP2B6_24hr', 'CLD_CYP2B6_48hr',
'CLD_CYP2B6_6hr', 'CLD_CYP3A4_24hr', 'CLD_CYP3A4_48hr', 'CLD_CYP3A4_6hr',
'CLD_GSTA2_48hr', 'CLD_SULT2A_24hr', 'CLD_SULT2A_48hr', 'CLD_UGT1A1_24hr',
'CLD_UGT1A1_48hr', 'NCCT_HEK293T_CellTiterGLO', 'NCCT_QuantiLum_inhib_2_dn',
'NCCT_QuantiLum_inhib_dn', 'NCCT_TPO_AUR_dn', 'NCCT_TPO_GUA_dn',
'NHEERL_ZF_144hpf_TERATOSCORE_up', 'NVS_ADME_hCYP19A1', 'NVS_ADME_hCYP1A1',
'NVS_ADME_hCYP1A2', 'NVS_ADME_hCYP2A6', 'NVS_ADME_hCYP2B6',
'NVS_ADME_hCYP2C19', 'NVS_ADME_hCYP2C9', 'NVS_ADME_hCYP2D6',
'NVS_ADME_hCYP3A4', 'NVS_ADME_hCYP4F12', 'NVS_ADME_rCYP2C12',
'NVS_ENZ_hAChE', 'NVS_ENZ_hAMPKa1', 'NVS_ENZ_hAurA', 'NVS_ENZ_hBACE',
'NVS_ENZ_hCASP5', 'NVS_ENZ_hCK1D', 'NVS_ENZ_hDUSP3', 'NVS_ENZ_hES',
'NVS_ENZ_hElastase', 'NVS_ENZ_hFGFR1', 'NVS_ENZ_hGSK3b', 'NVS_ENZ_hMMP1',
'NVS_ENZ_hMMP13', 'NVS_ENZ_hMMP2', 'NVS_ENZ_hMMP3', 'NVS_ENZ_hMMP7',
'NVS_ENZ_hMMP9', 'NVS_ENZ_hPDE10', 'NVS_ENZ_hPDE4A1', 'NVS_ENZ_hPDE5',
'NVS_ENZ_hPI3Ka', 'NVS_ENZ_hPTEN', 'NVS_ENZ_hPTPN11', 'NVS_ENZ_hPTPN12',
'NVS_ENZ_hPTPN13', 'NVS_ENZ_hPTPN9', 'NVS_ENZ_hPTPRC', 'NVS_ENZ_hSIRT1',
'NVS_ENZ_hSIRT2', 'NVS_ENZ_hTrkA', 'NVS_ENZ_hVEGFR2', 'NVS_ENZ_oCOX1',
'NVS_ENZ_oCOX2', 'NVS_ENZ_rAChE', 'NVS_ENZ_rCNOS', 'NVS_ENZ_rMAOAC',
'NVS_ENZ_rMAOAP', 'NVS_ENZ_rMAOBC', 'NVS_ENZ_rMAOBP', 'NVS_ENZ_rabI2C',
'NVS_GPCR_bAdoR_NonSelective', 'NVS_GPCR_bDR_NonSelective',
'NVS_GPCR_g5HT4', 'NVS_GPCR_gH2', 'NVS_GPCR_gLTB4', 'NVS_GPCR_gLTD4',
'NVS_GPCR_gMPeripheral_NonSelective', 'NVS_GPCR_gOpiateK',
'NVS_GPCR_h5HT2A', 'NVS_GPCR_h5HT5A', 'NVS_GPCR_h5HT6', 'NVS_GPCR_h5HT7',
'NVS_GPCR_hAT1', 'NVS_GPCR_hAdoRA1', 'NVS_GPCR_hAdoRA2a',
'NVS_GPCR_hAdra2A', 'NVS_GPCR_hAdra2C', 'NVS_GPCR_hAdrb1',
'NVS_GPCR_hAdrb2', 'NVS_GPCR_hAdrb3', 'NVS_GPCR_hDRD1', 'NVS_GPCR_hDRD2s',
'NVS_GPCR_hDRD4.4', 'NVS_GPCR_hH1', 'NVS_GPCR_hLTB4_BLT1', 'NVS_GPCR_hM1',
'NVS_GPCR_hM2', 'NVS_GPCR_hM3', 'NVS_GPCR_hM4', 'NVS_GPCR_hNK2',
'NVS_GPCR_hOpiate_D1', 'NVS_GPCR_hOpiate_mu', 'NVS_GPCR_hTXA2',
'NVS_GPCR_p5HT2C', 'NVS_GPCR_r5HT1_NonSelective',
'NVS_GPCR_r5HT_NonSelective', 'NVS_GPCR_rAdra1B',
'NVS_GPCR_rAdra1_NonSelective', 'NVS_GPCR_rAdra2_NonSelective',
'NVS_GPCR_rAdrb_NonSelective', 'NVS_GPCR_rNK1', 'NVS_GPCR_rNK3',
'NVS_GPCR_rOpiate_NonSelective', 'NVS_GPCR_rOpiate_NonSelectiveNa',
'NVS_GPCR_rSST', 'NVS_GPCR_rTRH', 'NVS_GPCR_rV1', 'NVS_GPCR_rabPAF',
'NVS_GPCR_rmAdra2B', 'NVS_IC_hKhERGCh', 'NVS_IC_rCaBTZCHL',
'NVS_IC_rCaDHPRCh_L', 'NVS_IC_rNaCh_site2', 'NVS_LGIC_bGABARa1',
'NVS_LGIC_h5HT3', 'NVS_LGIC_hNNR_NBungSens', 'NVS_LGIC_rGABAR_NonSelective',
'NVS_LGIC_rNNR_BungSens', 'NVS_MP_hPBR', 'NVS_MP_rPBR', 'NVS_NR_bER',
'NVS_NR_bPR', 'NVS_NR_cAR', 'NVS_NR_hAR', 'NVS_NR_hCAR_Antagonist',
'NVS_NR_hER', 'NVS_NR_hFXR_Agonist', 'NVS_NR_hFXR_Antagonist', 'NVS_NR_hGR',
'NVS_NR_hPPARa', 'NVS_NR_hPPARg', 'NVS_NR_hPR', 'NVS_NR_hPXR',
'NVS_NR_hRAR_Antagonist', 'NVS_NR_hRARa_Agonist', 'NVS_NR_hTRa_Antagonist',
'NVS_NR_mERa', 'NVS_NR_rAR', 'NVS_NR_rMR', 'NVS_OR_gSIGMA_NonSelective',
'NVS_TR_gDAT', 'NVS_TR_hAdoT', 'NVS_TR_hDAT', 'NVS_TR_hNET', 'NVS_TR_hSERT',
'NVS_TR_rNET', 'NVS_TR_rSERT', 'NVS_TR_rVMAT2', 'OT_AR_ARELUC_AG_1440',
'OT_AR_ARSRC1_0480', 'OT_AR_ARSRC1_0960', 'OT_ER_ERaERa_0480',
'OT_ER_ERaERa_1440', 'OT_ER_ERaERb_0480', 'OT_ER_ERaERb_1440',
'OT_ER_ERbERb_0480', 'OT_ER_ERbERb_1440', 'OT_ERa_EREGFP_0120',
'OT_ERa_EREGFP_0480', 'OT_FXR_FXRSRC1_0480', 'OT_FXR_FXRSRC1_1440',
'OT_NURR1_NURR1RXRa_0480', 'OT_NURR1_NURR1RXRa_1440',
'TOX21_ARE_BLA_Agonist_ch1', 'TOX21_ARE_BLA_Agonist_ch2',
'TOX21_ARE_BLA_agonist_ratio', 'TOX21_ARE_BLA_agonist_viability',
'TOX21_AR_BLA_Agonist_ch1', 'TOX21_AR_BLA_Agonist_ch2',
'TOX21_AR_BLA_Agonist_ratio', 'TOX21_AR_BLA_Antagonist_ch1',
'TOX21_AR_BLA_Antagonist_ch2', 'TOX21_AR_BLA_Antagonist_ratio',
'TOX21_AR_BLA_Antagonist_viability', 'TOX21_AR_LUC_MDAKB2_Agonist',
'TOX21_AR_LUC_MDAKB2_Antagonist', 'TOX21_AR_LUC_MDAKB2_Antagonist2',
'TOX21_AhR_LUC_Agonist', 'TOX21_Aromatase_Inhibition',
'TOX21_AutoFluor_HEK293_Cell_blue', 'TOX21_AutoFluor_HEK293_Media_blue',
'TOX21_AutoFluor_HEPG2_Cell_blue', 'TOX21_AutoFluor_HEPG2_Cell_green',
'TOX21_AutoFluor_HEPG2_Media_blue', 'TOX21_AutoFluor_HEPG2_Media_green',
'TOX21_ELG1_LUC_Agonist', 'TOX21_ERa_BLA_Agonist_ch1',
'TOX21_ERa_BLA_Agonist_ch2', 'TOX21_ERa_BLA_Agonist_ratio',
'TOX21_ERa_BLA_Antagonist_ch1', 'TOX21_ERa_BLA_Antagonist_ch2',
'TOX21_ERa_BLA_Antagonist_ratio', 'TOX21_ERa_BLA_Antagonist_viability',
'TOX21_ERa_LUC_BG1_Agonist', 'TOX21_ERa_LUC_BG1_Antagonist',
'TOX21_ESRE_BLA_ch1', 'TOX21_ESRE_BLA_ch2', 'TOX21_ESRE_BLA_ratio',
'TOX21_ESRE_BLA_viability', 'TOX21_FXR_BLA_Antagonist_ch1',
'TOX21_FXR_BLA_Antagonist_ch2', 'TOX21_FXR_BLA_agonist_ch2',
'TOX21_FXR_BLA_agonist_ratio', 'TOX21_FXR_BLA_antagonist_ratio',
'TOX21_FXR_BLA_antagonist_viability', 'TOX21_GR_BLA_Agonist_ch1',
'TOX21_GR_BLA_Agonist_ch2', 'TOX21_GR_BLA_Agonist_ratio',
'TOX21_GR_BLA_Antagonist_ch2', 'TOX21_GR_BLA_Antagonist_ratio',
'TOX21_GR_BLA_Antagonist_viability', 'TOX21_HSE_BLA_agonist_ch1',
'TOX21_HSE_BLA_agonist_ch2', 'TOX21_HSE_BLA_agonist_ratio',
'TOX21_HSE_BLA_agonist_viability', 'TOX21_MMP_ratio_down',
'TOX21_MMP_ratio_up', 'TOX21_MMP_viability', 'TOX21_NFkB_BLA_agonist_ch1',
'TOX21_NFkB_BLA_agonist_ch2', 'TOX21_NFkB_BLA_agonist_ratio',
'TOX21_NFkB_BLA_agonist_viability', 'TOX21_PPARd_BLA_Agonist_viability',
'TOX21_PPARd_BLA_Antagonist_ch1', 'TOX21_PPARd_BLA_agonist_ch1',
'TOX21_PPARd_BLA_agonist_ch2', 'TOX21_PPARd_BLA_agonist_ratio',
'TOX21_PPARd_BLA_antagonist_ratio', 'TOX21_PPARd_BLA_antagonist_viability',
'TOX21_PPARg_BLA_Agonist_ch1', 'TOX21_PPARg_BLA_Agonist_ch2',
'TOX21_PPARg_BLA_Agonist_ratio', 'TOX21_PPARg_BLA_Antagonist_ch1',
'TOX21_PPARg_BLA_antagonist_ratio', 'TOX21_PPARg_BLA_antagonist_viability',
'TOX21_TR_LUC_GH3_Agonist', 'TOX21_TR_LUC_GH3_Antagonist',
'TOX21_VDR_BLA_Agonist_viability', 'TOX21_VDR_BLA_Antagonist_ch1',
'TOX21_VDR_BLA_agonist_ch2', 'TOX21_VDR_BLA_agonist_ratio',
'TOX21_VDR_BLA_antagonist_ratio', 'TOX21_VDR_BLA_antagonist_viability',
'TOX21_p53_BLA_p1_ch1', 'TOX21_p53_BLA_p1_ch2', 'TOX21_p53_BLA_p1_ratio',
'TOX21_p53_BLA_p1_viability', 'TOX21_p53_BLA_p2_ch1',
'TOX21_p53_BLA_p2_ch2', 'TOX21_p53_BLA_p2_ratio',
'TOX21_p53_BLA_p2_viability', 'TOX21_p53_BLA_p3_ch1',
'TOX21_p53_BLA_p3_ch2', 'TOX21_p53_BLA_p3_ratio',
'TOX21_p53_BLA_p3_viability', 'TOX21_p53_BLA_p4_ch1',
'TOX21_p53_BLA_p4_ch2', 'TOX21_p53_BLA_p4_ratio',
'TOX21_p53_BLA_p4_viability', 'TOX21_p53_BLA_p5_ch1',
'TOX21_p53_BLA_p5_ch2', 'TOX21_p53_BLA_p5_ratio',
'TOX21_p53_BLA_p5_viability', 'Tanguay_ZF_120hpf_AXIS_up',
'Tanguay_ZF_120hpf_ActivityScore', 'Tanguay_ZF_120hpf_BRAI_up',
'Tanguay_ZF_120hpf_CFIN_up', 'Tanguay_ZF_120hpf_CIRC_up',
'Tanguay_ZF_120hpf_EYE_up', 'Tanguay_ZF_120hpf_JAW_up',
'Tanguay_ZF_120hpf_MORT_up', 'Tanguay_ZF_120hpf_OTIC_up',
'Tanguay_ZF_120hpf_PE_up', 'Tanguay_ZF_120hpf_PFIN_up',
'Tanguay_ZF_120hpf_PIG_up', 'Tanguay_ZF_120hpf_SNOU_up',
'Tanguay_ZF_120hpf_SOMI_up', 'Tanguay_ZF_120hpf_SWIM_up',
'Tanguay_ZF_120hpf_TRUN_up', 'Tanguay_ZF_120hpf_TR_up',
'Tanguay_ZF_120hpf_YSE_up'
]
class _ToxcastLoader(_MolnetLoader):
def create_dataset(self) -> Dataset:
dataset_file = os.path.join(self.data_dir, "toxcast_data.csv.gz")
if not os.path.exists(dataset_file):
dc.utils.data_utils.download_url(url=TOXCAST_URL,
dest_dir=self.data_dir)
loader = dc.data.CSVLoader(tasks=self.tasks,
feature_field="smiles",
featurizer=self.featurizer)
return loader.create_dataset(dataset_file, shard_size=8192)
def load_toxcast(
featurizer: Union[dc.feat.Featurizer, str] = 'ECFP',
splitter: Union[dc.splits.Splitter, str, None] = 'scaffold',
transformers: List[Union[TransformerGenerator, str]] = ['balancing'],
reload: bool = True,
data_dir: Optional[str] = None,
save_dir: Optional[str] = None,
**kwargs
) -> Tuple[List[str], Tuple[Dataset, ...], List[dc.trans.Transformer]]:
"""Load Toxcast dataset
ToxCast is an extended data collection from the same
initiative as Tox21, providing toxicology data for a large
library of compounds based on in vitro high-throughput
screening. The processed collection includes qualitative
results of over 600 experiments on 8k compounds.
Random splitting is recommended for this dataset.
The raw data csv file contains columns below:
- "smiles": SMILES representation of the molecular structure
- "ACEA_T47D_80hr_Negative" ~ "Tanguay_ZF_120hpf_YSE_up": Bioassays results.
Please refer to the section "high-throughput assay information" at
https://www.epa.gov/chemical-research/toxicity-forecaster-toxcasttm-data
for details.
Parameters
----------
featurizer: Featurizer or str
the featurizer to use for processing the data. Alternatively you can pass
one of the names from dc.molnet.featurizers as a shortcut.
splitter: Splitter or str
the splitter to use for splitting the data into training, validation, and
test sets. Alternatively you can pass one of the names from
dc.molnet.splitters as a shortcut. If this is None, all the data
will be included in a single dataset.
transformers: list of TransformerGenerators or strings
the Transformers to apply to the data. Each one is specified by a
TransformerGenerator or, as a shortcut, one of the names from
dc.molnet.transformers.
reload: bool
if True, the first call for a particular featurizer and splitter will cache
the datasets to disk, and subsequent calls will reload the cached datasets.
data_dir: str
a directory to save the raw data in
save_dir: str
a directory to save the dataset in
References
----------
.. [1] Richard, <NAME>., et al. "ToxCast chemical landscape: paving the road
to 21st century toxicology." Chemical research in toxicology 29.8 (2016):
1225-1251.
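Examples
--------
A minimal sketch of a typical call, assuming the loader is accessed through
dc.molnet as usual; it is marked to be skipped as a doctest because it
downloads the full ToxCast csv on first use:
>>> import deepchem as dc
>>> # featurizer and splitter are simply the defaults documented above
>>> tasks, datasets, transformers = dc.molnet.load_toxcast()  # doctest: +SKIP
>>> train_dataset, valid_dataset, test_dataset = datasets  # doctest: +SKIP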
"""
loader = _ToxcastLoader(featurizer, splitter, transformers, TOXCAST_TASKS,
data_dir, save_dir, **kwargs)
return loader.load_dataset('toxcast', reload)
<file_sep>"""
Tests for hyperparam optimization.
"""
import unittest
import sklearn.ensemble
import deepchem as dc
class TestHyperparamOpt(unittest.TestCase):
"""
Test abstract superclass behavior.
"""
def test_cant_be_initialized(self):
"""Test HyperparamOpt can't be initialized."""
initialized = True
def rf_model_builder(model_params, model_dir):
sklearn_model = sklearn.ensemble.RandomForestRegressor(
**model_params)
return dc.models.SklearnModel(sklearn_model, model_dir)
try:
_ = dc.hyper.HyperparamOpt(rf_model_builder)
except ValueError:
initialized = False
assert not initialized
<file_sep>echo "Pulling pdbbind dataset from deepchem"
wget -c http://deepchem.io.s3-website-us-west-1.amazonaws.com/datasets/pdbbind_v2015.tar.gz
echo "Extracting pdbbind structures"
tar -zxvf pdbbind_v2015.tar.gz
<file_sep>"""
Created on Thu Sep 28 15:17:50 2017
@author: zqwu
"""
import numpy as np
import tensorflow as tf
import copy
import sys
from deepchem.metrics import to_one_hot
from deepchem.models import KerasModel, layers
from deepchem.models.losses import L2Loss, SoftmaxCrossEntropy
from tensorflow.keras.layers import Input, Dense, Reshape, Softmax, Dropout, Conv1D, Concatenate, Lambda
# Common symbols in SMILES, note that Cl and Br are regarded as single symbol
default_dict = {
'#': 1,
'(': 2,
')': 3,
'+': 4,
'-': 5,
'/': 6,
'1': 7,
'2': 8,
'3': 9,
'4': 10,
'5': 11,
'6': 12,
'7': 13,
'8': 14,
'=': 15,
'C': 16,
'F': 17,
'H': 18,
'I': 19,
'N': 20,
'O': 21,
'P': 22,
'S': 23,
'[': 24,
'\\': 25,
']': 26,
'_': 27,
'c': 28,
'Cl': 29,
'Br': 30,
'n': 31,
'o': 32,
's': 33
}
class TextCNNModel(KerasModel):
""" A Convolutional neural network on smiles strings
Reimplementation of the discriminator module in ORGAN [1]_ .
Originated from [2]_.
This model applies multiple 1D convolutional filters to
the padded strings, then max-over-time pooling is applied on
all filters, extracting one feature per filter. All
features are concatenated and transformed through several
hidden layers to form predictions.
This model is initially developed for sentence-level
classification tasks, with words represented as vectors. In
this implementation, SMILES strings are dissected into
characters and transformed to one-hot vectors in a similar
way. The model can be used for general molecular-level
classification or regression tasks. It is also used in the
ORGAN model as discriminator.
Training of the model only requires SMILES strings as input;
all featurized datasets that include SMILES in the `ids`
attribute are accepted. PDBbind, QM7 and QM7b are not
supported. To use the model, `build_char_dict` should be
called first, before defining the model, to build the
character dict of the input dataset. A full example can be
found in examples/delaney/delaney_textcnn.py, and a short
sketch is given in the Examples section below.
References
----------
.. [1] Guimaraes, <NAME>, et al. "Objective-reinforced generative adversarial networks (ORGAN) for sequence generation models." arXiv preprint arXiv:1705.10843 (2017).
.. [2] <NAME>. "Convolutional neural networks for sentence classification." arXiv preprint arXiv:1408.5882 (2014).
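Examples
--------
A minimal sketch using a small in-memory dataset whose `ids` are SMILES
strings (any featurized DeepChem dataset with SMILES ids works the same way);
it assumes TextCNNModel is importable from deepchem.models:
>>> import numpy as np
>>> import deepchem as dc
>>> from deepchem.models import TextCNNModel
>>> # ids hold the SMILES strings; X and y are just placeholders for this sketch
>>> dataset = dc.data.NumpyDataset(X=np.zeros((2, 1)), y=np.zeros((2, 1)), ids=['CCO', 'CC(=O)O'])
>>> char_dict, seq_length = TextCNNModel.build_char_dict(dataset)
>>> model = TextCNNModel(n_tasks=1, char_dict=char_dict, seq_length=seq_length, mode="regression")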
"""
def __init__(self,
n_tasks,
char_dict,
seq_length,
n_embedding=75,
kernel_sizes=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 15, 20],
num_filters=[
100, 200, 200, 200, 200, 100, 100, 100, 100, 100, 160, 160
],
dropout=0.25,
mode="classification",
**kwargs):
"""
Parameters
----------
n_tasks: int
Number of tasks
char_dict: dict
Mapping from characters in smiles to integers
seq_length: int
Length of sequences(after padding)
n_embedding: int, optional
Length of embedding vector
kernel_sizes: list of int, optional
Widths of the 1D convolution kernels used in the conv net
num_filters: list of int, optional
Number of filters for each kernel width in `kernel_sizes`
dropout: float, optional
Dropout rate
mode: str
Either "classification" or "regression" for type of model.
"""
self.n_tasks = n_tasks
self.char_dict = char_dict
self.seq_length = max(seq_length, max(kernel_sizes))
self.n_embedding = n_embedding
self.kernel_sizes = kernel_sizes
self.num_filters = num_filters
self.dropout = dropout
self.mode = mode
# Build the model.
smiles_seqs = Input(shape=(self.seq_length,), dtype=tf.int32)
# Character embedding
embedding = layers.DTNNEmbedding(
n_embedding=self.n_embedding,
periodic_table_length=len(self.char_dict.keys()) + 1)(smiles_seqs)
pooled_outputs = []
conv_layers = []
for filter_size, num_filter in zip(self.kernel_sizes, self.num_filters):
# Multiple convolutional layers with different filter widths
conv_layers.append(
Conv1D(kernel_size=filter_size,
filters=num_filter,
padding='valid')(embedding))
# Max-over-time pooling
reduced = Lambda(lambda x: tf.reduce_max(x, axis=1))(
conv_layers[-1])
pooled_outputs.append(reduced)
# Concat features from all filters (one feature per filter)
concat_outputs = Concatenate(axis=1)(pooled_outputs)
dropout = Dropout(rate=self.dropout)(concat_outputs)
dense = Dense(200, activation=tf.nn.relu)(dropout)
# Highway layer from https://arxiv.org/pdf/1505.00387.pdf
gather = layers.Highway()(dense)
if self.mode == "classification":
logits = Dense(self.n_tasks * 2)(gather)
logits = Reshape((self.n_tasks, 2))(logits)
output = Softmax()(logits)
outputs = [output, logits]
output_types = ['prediction', 'loss']
loss = SoftmaxCrossEntropy()
else:
output = Dense(self.n_tasks * 1)(gather)
output = Reshape((self.n_tasks, 1))(output)
outputs = [output]
output_types = ['prediction']
loss = L2Loss()
model = tf.keras.Model(inputs=[smiles_seqs], outputs=outputs)
super(TextCNNModel, self).__init__(model,
loss,
output_types=output_types,
**kwargs)
@staticmethod
def build_char_dict(dataset, default_dict=default_dict):
""" Collect all unique characters(in smiles) from the dataset.
This method should be called before defining the model to build appropriate char_dict
"""
# SMILES strings
X = dataset.ids
# Maximum length is expanded to allow length variation during train and inference
seq_length = int(max([len(smile) for smile in X]) * 1.2)
# '_' served as delimiter and padding
all_smiles = '_'.join(X)
tot_len = len(all_smiles)
# Initialize common characters as keys
keys = list(default_dict.keys())
out_dict = copy.deepcopy(default_dict)
current_key_val = len(keys) + 1
# Include space to avoid extra keys
keys.extend([' '])
extra_keys = []
i = 0
while i < tot_len:
# For 'Cl', 'Br', etc.
if all_smiles[i:i + 2] in keys:
i = i + 2
elif all_smiles[i:i + 1] in keys:
i = i + 1
else:
# Character not recognized, add to extra_keys
extra_keys.append(all_smiles[i])
keys.append(all_smiles[i])
i = i + 1
# Add all extra_keys to char_dict
for extra_key in extra_keys:
out_dict[extra_key] = current_key_val
current_key_val += 1
return out_dict, seq_length
@staticmethod
def convert_bytes_to_char(s):
s = ''.join(chr(b) for b in s)
return s
def smiles_to_seq_batch(self, ids_b):
"""Converts SMILES strings to np.array sequence.
A tf.py_func wrapper is written around this when creating the input_fn for make_estimator
"""
if isinstance(ids_b[0], bytes) and sys.version_info[
0] != 2: # Python 2.7 bytes and string are analogous
ids_b = [
TextCNNModel.convert_bytes_to_char(smiles) for smiles in ids_b
]
smiles_seqs = [self.smiles_to_seq(smiles) for smiles in ids_b]
smiles_seqs = np.vstack(smiles_seqs)
return smiles_seqs
def default_generator(self,
dataset,
epochs=1,
mode='fit',
deterministic=True,
pad_batches=True):
"""Transfer smiles strings to fixed length integer vectors"""
for epoch in range(epochs):
for (X_b, y_b, w_b,
ids_b) in dataset.iterbatches(batch_size=self.batch_size,
deterministic=deterministic,
pad_batches=pad_batches):
if y_b is not None:
if self.mode == 'classification':
y_b = to_one_hot(y_b.flatten(),
2).reshape(-1, self.n_tasks, 2)
# Transform SMILES sequence to integers
X_b = self.smiles_to_seq_batch(ids_b)
yield ([X_b], [y_b], [w_b])
def smiles_to_seq(self, smiles):
""" Tokenize characters in smiles to integers
"""
smiles_len = len(smiles)
seq = [0]
keys = self.char_dict.keys()
i = 0
while i < smiles_len:
# Skip all spaces
if smiles[i:i + 1] == ' ':
i = i + 1
# For 'Cl', 'Br', etc.
elif smiles[i:i + 2] in keys:
seq.append(self.char_dict[smiles[i:i + 2]])
i = i + 2
elif smiles[i:i + 1] in keys:
seq.append(self.char_dict[smiles[i:i + 1]])
i = i + 1
else:
raise ValueError('character not found in dict')
for i in range(self.seq_length - len(seq)):
# Padding with '_'
seq.append(self.char_dict['_'])
return np.array(seq, dtype=np.int32)
#################### Deprecation warnings for renamed TensorGraph models #################### # noqa: E266
import warnings # noqa: E402
TENSORGRAPH_DEPRECATION = "{} is deprecated and has been renamed to {} and will be removed in DeepChem 3.0."
class TextCNNTensorGraph(TextCNNModel):
def __init__(self, *args, **kwargs):
warnings.warn(
TENSORGRAPH_DEPRECATION.format("TextCNNTensorGraph",
"TextCNNModel"), FutureWarning)
super(TextCNNTensorGraph, self).__init__(*args, **kwargs)
<file_sep># flake8:noqa
import logging
logger = logging.getLogger(__name__)
from deepchem.models.torch_models.torch_model import TorchModel
from deepchem.models.torch_models.modular import ModularTorchModel
from deepchem.models.torch_models.attentivefp import AttentiveFP, AttentiveFPModel
from deepchem.models.torch_models.cgcnn import CGCNN, CGCNNModel
from deepchem.models.torch_models.gat import GAT, GATModel
from deepchem.models.torch_models.gcn import GCN, GCNModel
from deepchem.models.torch_models.infograph import InfoGraphStar, InfoGraphStarModel, InfoGraphEncoder, GINEncoder, InfoGraph, InfoGraphModel
from deepchem.models.torch_models.mpnn import MPNN, MPNNModel
from deepchem.models.torch_models.lcnn import LCNN, LCNNModel
from deepchem.models.torch_models.pagtn import Pagtn, PagtnModel
from deepchem.models.torch_models.mat import MAT, MATModel
from deepchem.models.torch_models.megnet import MEGNetModel
from deepchem.models.torch_models.normalizing_flows_pytorch import NormalizingFlow
from deepchem.models.torch_models.layers import MultilayerPerceptron, CNNModule, CombineMeanStd, WeightedLinearCombo, AtomicConvolution, NeighborList, SetGather, EdgeNetwork, WeaveLayer, WeaveGather, MolGANConvolutionLayer, MolGANAggregationLayer, MolGANMultiConvolutionLayer, MolGANEncoderLayer, EncoderRNN
from deepchem.models.torch_models.cnn import CNN
from deepchem.models.torch_models.attention import ScaledDotProductAttention, SelfAttention
from deepchem.models.torch_models.grover import GroverModel, GroverPretrain, GroverFinetune
from deepchem.models.torch_models.readout import GroverReadout
from deepchem.models.torch_models.dtnn import DTNN, DTNNModel
try:
from deepchem.models.torch_models.dmpnn import DMPNN, DMPNNModel
from deepchem.models.torch_models.gnn import GNN, GNNHead, GNNModular
from deepchem.models.torch_models.pna_gnn import AtomEncoder, BondEncoder, PNALayer, PNAGNN, PNA
from deepchem.models.torch_models.gnn3d import Net3D, InfoMax3DModular
except ModuleNotFoundError as e:
logger.warning(
f'Skipped loading modules with pytorch-geometric dependency, missing a dependency. {e}'
)
try:
from deepchem.models.torch_models.hf_models import HuggingFaceModel
from deepchem.models.torch_models.chemberta import Chemberta
except ModuleNotFoundError as e:
logger.warning(f'Skipped loading modules with transformers dependency. {e}')
<file_sep>from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
__author__ = "<NAME>"
__copyright__ = "Copyright 2017, Stanford University"
__license__ = "MIT"
import os
import sys
import deepchem as dc
import numpy as np
import tensorflow as tf
sys.path.append("../../models")
from atomicnet_ops import create_symmetry_parameters
from atomicnet import TensorflowFragmentRegressor
seed = 123
np.random.seed(seed)
tf.set_random_seed(seed)
base_dir = os.getcwd()
data_dir = os.path.join(base_dir, "datasets")
train_dir = os.path.join(data_dir, "random_train")
test_dir = os.path.join(data_dir, "random_test")
model_dir = os.path.join(base_dir, "random_model")
frag1_num_atoms = 70
frag2_num_atoms = 634
complex_num_atoms = 701
max_num_neighbors = 12
neighbor_cutoff = 12.0
train_dataset = dc.data.DiskDataset(train_dir)
test_dataset = dc.data.DiskDataset(test_dir)
pdbbind_tasks = ["-logKd/Ki"]
transformers = []
#transformers = [dc.trans.NormalizationTransformer(transform_y=True, dataset=train_dataset)]
#for transformer in transformers:
# train_dataset = transformer.transform(train_dataset)
# test_dataset = transformer.transform(test_dataset)
y_train = train_dataset.y
y_train *= -1 * 2.479 / 4.184
train_dataset = dc.data.DiskDataset.from_numpy(
train_dataset.X,
y_train,
train_dataset.w,
train_dataset.ids,
tasks=pdbbind_tasks)
y_test = test_dataset.y
y_test *= -1 * 2.479 / 4.184
test_dataset = dc.data.DiskDataset.from_numpy(
test_dataset.X,
y_test,
test_dataset.w,
test_dataset.ids,
tasks=pdbbind_tasks)
at = [6, 7., 8., 9., 11., 12., 15., 16., 17., 20., 25., 30., 35., 53.]
radial = [[
1.5, 2.0, 2.5, 3.0, 3.5, 4.0, 4.5, 5.0, 5.5, 6.0, 6.5, 7.0, 7.5, 8.0, 8.5,
9.0, 9.5, 10.0, 10.5, 11.0, 11.5, 12.0
], [0.0, 4.0, 8.0], [0.4]]
#radial = [[12.0], [0.0, 4.0, 8.0], [0.4]]
rp = create_symmetry_parameters(radial)
layer_sizes = [32, 32, 16]
weight_init_stddevs = [
1 / np.sqrt(layer_sizes[0]), 1 / np.sqrt(layer_sizes[1]),
1 / np.sqrt(layer_sizes[2])
]
dropouts = [0.3, 0.3, 0.05]
penalty_type = "l2"
penalty = 0.
model = TensorflowFragmentRegressor(
len(pdbbind_tasks),
rp,
at,
frag1_num_atoms,
frag2_num_atoms,
complex_num_atoms,
max_num_neighbors,
logdir=model_dir,
layer_sizes=layer_sizes,
weight_init_stddevs=weight_init_stddevs,
bias_init_consts=[0., 0., 0.],
penalty=penalty,
penalty_type=penalty_type,
dropouts=dropouts,
learning_rate=0.002,
momentum=0.8,
optimizer="adam",
batch_size=24,
conv_layers=1,
boxsize=None,
verbose=True,
seed=seed)
model.fit(train_dataset, nb_epoch=10)
metric = [
dc.metrics.Metric(dc.metrics.mean_absolute_error, mode="regression"),
dc.metrics.Metric(dc.metrics.pearson_r2_score, mode="regression")
]
train_evaluator = dc.utils.evaluate.Evaluator(model, train_dataset,
transformers)
train_scores = train_evaluator.compute_model_performance(
metric,
csv_out="train_predict_ac_random.csv",
stats_out="train_stats_ac_random.csv")
print("Train scores")
print(train_scores)
test_evaluator = dc.utils.evaluate.Evaluator(model, test_dataset, transformers)
test_scores = test_evaluator.compute_model_performance(
metric,
csv_out="test_predict_ac_random.csv",
stats_out="test_stats_ac_random.csv")
print("Test scores")
print(test_scores)
<file_sep>"""
ZINC15 commercially-available compounds for virtual screening.
"""
import os
import deepchem as dc
from deepchem.molnet.load_function.molnet_loader import TransformerGenerator, _MolnetLoader
from deepchem.data import Dataset
from typing import List, Optional, Tuple, Union
ZINC15_URL = "https://deepchemdata.s3-us-west-1.amazonaws.com/datasets/"
ZINC15_TASKS = ['mwt', 'logp', 'reactive']
class _Zinc15Loader(_MolnetLoader):
def __init__(self, *args, dataset_size: str, dataset_dimension: str,
**kwargs):
super(_Zinc15Loader, self).__init__(*args, **kwargs)
self.dataset_size = dataset_size
self.dataset_dimension = dataset_dimension
self.name = 'zinc15_' + dataset_size + '_' + dataset_dimension
def create_dataset(self) -> Dataset:
if self.dataset_size not in ['250K', '1M', '10M', '270M']:
raise ValueError(
"Only '250K', '1M', '10M', and '270M' are supported for dataset_size."
)
if self.dataset_dimension != '2D':
raise ValueError(
"Currently, only '2D' is supported for dataset_dimension.")
if self.dataset_size == '270M':
answer = ''
while answer not in ['y', 'n']:
answer = input("""You're about to download 270M SMILES strings.
This dataset is 23GB. Are you sure you want to continue? (Y/N)"""
).lower()
if answer == 'n':
raise ValueError('Choose a smaller dataset_size.')
filename = self.name + '.csv'
dataset_file = os.path.join(self.data_dir, filename)
if not os.path.exists(dataset_file):
compressed_file = self.name + '.tar.gz'
if not os.path.exists(os.path.join(self.data_dir, compressed_file)):
dc.utils.download_url(url=ZINC15_URL + compressed_file,
dest_dir=self.data_dir)
dc.utils.untargz_file(os.path.join(self.data_dir, compressed_file),
self.data_dir)
loader = dc.data.CSVLoader(tasks=self.tasks,
feature_field="smiles",
id_field="zinc_id",
featurizer=self.featurizer)
return loader.create_dataset(dataset_file, shard_size=8192)
def load_zinc15(
featurizer: Union[dc.feat.Featurizer, str] = 'OneHot',
splitter: Union[dc.splits.Splitter, str, None] = 'random',
transformers: List[Union[TransformerGenerator, str]] = ['normalization'],
reload: bool = True,
data_dir: Optional[str] = None,
save_dir: Optional[str] = None,
dataset_size: str = '250K',
dataset_dimension: str = '2D',
tasks: List[str] = ZINC15_TASKS,
**kwargs
) -> Tuple[List[str], Tuple[Dataset, ...], List[dc.trans.Transformer]]:
"""Load zinc15.
ZINC15 is a dataset of over 230 million purchasable compounds for
virtual screening of small molecules to identify structures that
are likely to bind to drug targets. ZINC15 data is currently available
in 2D (SMILES string) format.
MolNet provides subsets of 250K, 1M, and 10M "lead-like" compounds
from ZINC15. The full dataset of 270M "goldilocks" compounds is also
available. Compounds in ZINC15 are labeled by their molecular weight
and LogP (solubility) values. Each compound also has information about how
readily available (purchasable) it is and its reactivity. Lead-like
compounds have molecular weight between 300 and 350 Daltons and LogP
between -1 and 3.5. Goldilocks compounds are lead-like compounds with
LogP values further restricted to between 2 and 3.
If `reload = True` and `data_dir` (`save_dir`) is specified, the loader
will attempt to load the raw dataset (featurized dataset) from disk.
Otherwise, the dataset will be downloaded from the DeepChem AWS bucket.
For more information on ZINC15, please see [1]_ and
https://zinc15.docking.org/.
Parameters
----------
featurizer: Featurizer or str
the featurizer to use for processing the data. Alternatively you can pass
one of the names from dc.molnet.featurizers as a shortcut.
splitter: Splitter or str
the splitter to use for splitting the data into training, validation, and
test sets. Alternatively you can pass one of the names from
dc.molnet.splitters as a shortcut. If this is None, all the data
will be included in a single dataset.
transformers: list of TransformerGenerators or strings
the Transformers to apply to the data. Each one is specified by a
TransformerGenerator or, as a shortcut, one of the names from
dc.molnet.transformers.
reload: bool
if True, the first call for a particular featurizer and splitter will cache
the datasets to disk, and subsequent calls will reload the cached datasets.
data_dir: str
a directory to save the raw data in
save_dir: str
a directory to save the dataset in
dataset_size : str (default '250K')
Size of dataset to download. '250K', '1M', '10M', and '270M' are supported.
dataset_dimension : str (default '2D')
Format of data to download. Currently only '2D' (SMILES strings) is supported.
tasks: List[str], (optional) default: `['mwt', 'logp', 'reactive']`
Specify the set of tasks to load. If no task is specified, then it loads
the default set of tasks, which are mwt, logp, reactive.
Returns
-------
tasks, datasets, transformers : tuple
tasks : list
Column names corresponding to machine learning target variables.
datasets : tuple
train, validation, test splits of data as
``deepchem.data.datasets.Dataset`` instances.
transformers : list
``deepchem.trans.transformers.Transformer`` instances applied
to dataset.
Notes
-----
The total ZINC dataset with SMILES strings contains hundreds of millions
of compounds and is over 100GB! ZINC250K is recommended for experimentation.
The full set of 270M goldilocks compounds is 23GB.
References
----------
.. [1] <NAME> Irwin. J. Chem. Inf. Model, 2015 http://pubs.acs.org/doi/abs/10.1021/acs.jcim.5b00559.
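Examples
--------
A minimal sketch of loading the 1M lead-like subset, assuming the loader is
accessed through dc.molnet as usual; it is marked to be skipped as a doctest
because it downloads several hundred megabytes of data:
>>> import deepchem as dc
>>> # 'OneHot' is simply the default featurizer documented above
>>> tasks, datasets, transformers = dc.molnet.load_zinc15(
...     featurizer='OneHot', dataset_size='1M', dataset_dimension='2D')  # doctest: +SKIP
>>> train_dataset, valid_dataset, test_dataset = datasets  # doctest: +SKIP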
"""
for task in tasks:
assert task in ZINC15_TASKS, f'Invalid task name {task}. Task should be one of logp, mwt, reactive'
loader = _Zinc15Loader(featurizer,
splitter,
transformers,
tasks,
data_dir,
save_dir,
dataset_size=dataset_size,
dataset_dimension=dataset_dimension,
**kwargs)
return loader.load_dataset(loader.name, reload)
<file_sep>"""
Convenience classes for assembling graph models.
"""
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
__author__ = "<NAME> and <NAME>"
__copyright__ = "Copyright 2016, Stanford University"
__license__ = "MIT"
import warnings
import tensorflow as tf
from deepchem.nn.layers import GraphGather
from deepchem.models.tf_new_models.graph_topology import GraphTopology, DTNNGraphTopology, DAGGraphTopology, WeaveGraphTopology, AlternateWeaveGraphTopology
class SequentialGraph(object):
"""An analog of Keras Sequential class for Graph data.
Like the Sequential class from Keras, but automatically passes topology
placeholders from GraphTopology to each graph layer (from deepchem.nn.layers)
added to the network. Non-graph layers don't get the extra placeholders.
"""
def __init__(self, n_feat):
"""
Parameters
----------
n_feat: int
Number of features per atom.
"""
warnings.warn("SequentialGraph is deprecated. "
"Will be removed in DeepChem 1.4.", DeprecationWarning)
self.graph = tf.Graph()
with self.graph.as_default():
self.graph_topology = GraphTopology(n_feat)
self.output = self.graph_topology.get_atom_features_placeholder()
# Keep track of the layers
self.layers = []
def add(self, layer):
"""Adds a new layer to model."""
with self.graph.as_default():
# For graphical layers, add connectivity placeholders
if type(layer).__name__ in ['GraphConv', 'GraphGather', 'GraphPool']:
if (len(self.layers) > 0 and hasattr(self.layers[-1], "__name__")):
assert self.layers[-1].__name__ != "GraphGather", \
'Cannot use GraphConv or GraphGather layers after a GraphGather'
self.output = layer([self.output] +
self.graph_topology.get_topology_placeholders())
else:
self.output = layer(self.output)
# Add layer to the layer list
self.layers.append(layer)
def get_graph_topology(self):
return self.graph_topology
def get_num_output_features(self):
"""Gets the output shape of the featurization layers of the network"""
return self.layers[-1].output_shape[1]
def return_outputs(self):
return self.output
def return_inputs(self):
return self.graph_topology.get_input_placeholders()
def get_layer(self, layer_id):
return self.layers[layer_id]
class SequentialDTNNGraph(SequentialGraph):
"""An analog of Keras Sequential class for Coulomb Matrix data.
automatically generates and passes topology placeholders to each layer.
"""
def __init__(self, n_distance=100, distance_min=-1., distance_max=18.):
"""
Parameters
----------
n_distance: int, optional
granularity of distance matrix
step size will be (distance_max-distance_min)/n_distance
distance_min: float, optional
minimum distance of atom pairs, default = -1 Angstrom
distance_max: float, optional
maximum distance of atom pairs, default = 18 Angstrom
"""
warnings.warn("SequentialDTNNGraph is deprecated. "
"Will be removed in DeepChem 1.4.", DeprecationWarning)
self.graph = tf.Graph()
with self.graph.as_default():
self.graph_topology = DTNNGraphTopology(
n_distance, distance_min=distance_min, distance_max=distance_max)
self.output = self.graph_topology.get_atom_number_placeholder()
# Keep track of the layers
self.layers = []
def add(self, layer):
"""Adds a new layer to model."""
with self.graph.as_default():
if type(layer).__name__ in ['DTNNStep']:
self.output = layer([self.output] +
self.graph_topology.get_topology_placeholders())
elif type(layer).__name__ in ['DTNNGather']:
self.output = layer(
[self.output, self.graph_topology.atom_membership_placeholder])
else:
self.output = layer(self.output)
self.layers.append(layer)
class SequentialDAGGraph(SequentialGraph):
"""SequentialGraph for DAG models
"""
def __init__(self, n_atom_feat=75, max_atoms=50):
"""
Parameters
----------
n_atom_feat: int, optional
Number of features per atom.
max_atoms: int, optional
Maximum number of atoms in a molecule, should be defined based on dataset
"""
warnings.warn("SequentialDAGGraph is deprecated. "
"Will be removed in DeepChem 1.4.", DeprecationWarning)
self.graph = tf.Graph()
with self.graph.as_default():
self.graph_topology = DAGGraphTopology(
n_atom_feat=n_atom_feat, max_atoms=max_atoms)
self.output = self.graph_topology.get_atom_features_placeholder()
self.layers = []
def add(self, layer):
"""Adds a new layer to model."""
with self.graph.as_default():
if type(layer).__name__ in ['DAGLayer']:
self.output = layer([self.output] +
self.graph_topology.get_topology_placeholders())
elif type(layer).__name__ in ['DAGGather']:
self.output = layer(
[self.output, self.graph_topology.membership_placeholder])
else:
self.output = layer(self.output)
self.layers.append(layer)
class SequentialWeaveGraph(SequentialGraph):
"""SequentialGraph for Weave models
"""
def __init__(self, max_atoms=50, n_atom_feat=75, n_pair_feat=14):
"""
Parameters
----------
max_atoms: int, optional
Maximum number of atoms in a molecule, should be defined based on dataset
n_atom_feat: int, optional
Number of features per atom.
n_pair_feat: int, optional
Number of features per pair of atoms.
"""
warnings.warn("SequentialWeaveGraph is deprecated. "
"Will be removed in DeepChem 1.4.", DeprecationWarning)
self.graph = tf.Graph()
self.max_atoms = max_atoms
self.n_atom_feat = n_atom_feat
self.n_pair_feat = n_pair_feat
with self.graph.as_default():
self.graph_topology = WeaveGraphTopology(self.max_atoms, self.n_atom_feat,
self.n_pair_feat)
self.output = self.graph_topology.get_atom_features_placeholder()
self.output_P = self.graph_topology.get_pair_features_placeholder()
self.layers = []
def add(self, layer):
"""Adds a new layer to model."""
with self.graph.as_default():
if type(layer).__name__ in ['WeaveLayer']:
self.output, self.output_P = layer([
self.output, self.output_P
] + self.graph_topology.get_topology_placeholders())
elif type(layer).__name__ in ['WeaveConcat']:
self.output = layer(
[self.output, self.graph_topology.atom_mask_placeholder])
elif type(layer).__name__ in ['WeaveGather']:
self.output = layer(
[self.output, self.graph_topology.membership_placeholder])
else:
self.output = layer(self.output)
self.layers.append(layer)
class AlternateSequentialWeaveGraph(SequentialGraph):
"""Alternate implementation of SequentialGraph for Weave models
"""
def __init__(self, batch_size, max_atoms=50, n_atom_feat=75, n_pair_feat=14):
"""
Parameters
----------
batch_size: int
number of molecules in a batch
max_atoms: int, optional
Maximum number of atoms in a molecule, should be defined based on dataset
n_atom_feat: int, optional
Number of features per atom.
n_pair_feat: int, optional
Number of features per pair of atoms.
"""
warnings.warn("AlternateSequentialWeaveGraph is deprecated. "
"Will be removed in DeepChem 1.4.", DeprecationWarning)
self.graph = tf.Graph()
self.batch_size = batch_size
self.max_atoms = max_atoms
self.n_atom_feat = n_atom_feat
self.n_pair_feat = n_pair_feat
with self.graph.as_default():
self.graph_topology = AlternateWeaveGraphTopology(
self.batch_size, self.max_atoms, self.n_atom_feat, self.n_pair_feat)
self.output = self.graph_topology.get_atom_features_placeholder()
self.output_P = self.graph_topology.get_pair_features_placeholder()
self.layers = []
def add(self, layer):
"""Adds a new layer to model."""
with self.graph.as_default():
if type(layer).__name__ in ['AlternateWeaveLayer']:
self.output, self.output_P = layer([
self.output, self.output_P
] + self.graph_topology.get_topology_placeholders())
elif type(layer).__name__ in ['AlternateWeaveGather']:
self.output = layer(
[self.output, self.graph_topology.atom_split_placeholder])
else:
self.output = layer(self.output)
self.layers.append(layer)
class SequentialSupportGraph(object):
"""An analog of Keras Sequential model for test/support models."""
def __init__(self, n_feat):
"""
Parameters
----------
n_feat: int
Number of atomic features.
"""
warnings.warn("SequentialSupportWeaveGraph is deprecated. "
"Will be removed in DeepChem 1.4.", DeprecationWarning)
self.graph = tf.Graph()
with self.graph.as_default():
# Create graph topology and x
self.test_graph_topology = GraphTopology(n_feat, name='test')
self.support_graph_topology = GraphTopology(n_feat, name='support')
self.test = self.test_graph_topology.get_atom_features_placeholder()
self.support = self.support_graph_topology.get_atom_features_placeholder()
# Keep track of the layers
self.layers = []
# Whether or not we have used the GraphGather layer yet
self.bool_pre_gather = True
def add(self, layer):
"""Adds a layer to both test/support stacks.
Note that the layer transformation is performed independently on the
test/support tensors.
"""
with self.graph.as_default():
self.layers.append(layer)
# Update new value of x
if type(layer).__name__ in ['GraphConv', 'GraphGather', 'GraphPool']:
assert self.bool_pre_gather, "Cannot apply graphical layers after gather."
self.test = layer([self.test] + self.test_graph_topology.topology)
self.support = layer([self.support] +
self.support_graph_topology.topology)
else:
self.test = layer(self.test)
self.support = layer(self.support)
if type(layer).__name__ == 'GraphGather':
self.bool_pre_gather = False # Set flag to stop adding topology
def add_test(self, layer):
"""Adds a layer to test."""
with self.graph.as_default():
self.layers.append(layer)
# Update new value of x
if type(layer).__name__ in ['GraphConv', 'GraphPool', 'GraphGather']:
self.test = layer([self.test] + self.test_graph_topology.topology)
else:
self.test = layer(self.test)
def add_support(self, layer):
"""Adds a layer to support."""
with self.graph.as_default():
self.layers.append(layer)
# Update new value of x
if type(layer).__name__ in ['GraphConv', 'GraphPool', 'GraphGather']:
self.support = layer([self.support] +
self.support_graph_topology.topology)
else:
self.support = layer(self.support)
def join(self, layer):
"""Joins test and support to a two input two output layer"""
with self.graph.as_default():
self.layers.append(layer)
self.test, self.support = layer([self.test, self.support])
def get_test_output(self):
return self.test
def get_support_output(self):
return self.support
def return_outputs(self):
return [self.test] + [self.support]
def return_inputs(self):
return (self.test_graph_topology.get_inputs() +
self.support_graph_topology.get_inputs())
<file_sep>"""
Tests for metrics.
"""
import numpy as np
import deepchem as dc
def test_kappa_score():
y_true = [1, 0, 1, 0]
y_pred = [0.8, 0.2, 0.3, 0.4] # [1, 0, 0, 0] with 0.5 threshold
kappa = dc.metrics.kappa_score(y_true, np.greater(y_pred, 0.5))
observed_agreement = 3.0 / 4.0
expected_agreement = ((2 * 1) + (2 * 3)) / 4.0**2
expected_kappa = np.true_divide(observed_agreement - expected_agreement,
1.0 - expected_agreement)
np.testing.assert_almost_equal(kappa, expected_kappa)
def test_one_sample():
"""Test that the metrics won't raise error even in an extreme condition
where there is only one sample with w > 0.
"""
np.random.seed(123)
n_samples = 2
y_true = np.random.randint(2, size=(n_samples,))
y_pred = np.random.randint(2, size=(n_samples,))
w = np.array([0, 1])
all_metrics = [
dc.metrics.Metric(dc.metrics.recall_score),
dc.metrics.Metric(dc.metrics.matthews_corrcoef),
dc.metrics.Metric(dc.metrics.roc_auc_score)
]
for metric in all_metrics:
_ = metric.compute_singletask_metric(y_true, y_pred, w)
def test_pearsonr():
"""Test the Pearson correlation coefficient is correct."""
metric = dc.metrics.Metric(dc.metrics.pearsonr)
r = metric.compute_metric(np.array([1.0, 2.0, 3.0]),
np.array([2.0, 3.0, 4.0]))
np.testing.assert_almost_equal(1.0, r)
r = metric.compute_metric(np.array([1.0, 2.0, 3.0]),
np.array([-2.0, -3.0, -4.0]))
np.testing.assert_almost_equal(-1.0, r)
r = metric.compute_metric(np.array([1.0, 2.0, 3.0, 4.0]),
np.array([1.0, 2.0, 2.0, 1.0]))
np.testing.assert_almost_equal(0.0, r)
def test_r2_score():
"""Test that R^2 metric passes basic sanity tests"""
np.random.seed(123)
n_samples = 10
y_true = np.random.rand(n_samples,)
y_pred = np.random.rand(n_samples,)
regression_metric = dc.metrics.Metric(dc.metrics.r2_score, n_tasks=1)
assert np.isclose(dc.metrics.r2_score(y_true, y_pred),
regression_metric.compute_metric(y_true, y_pred))
def test_bedroc_score():
"""Test BEDROC."""
num_actives = 20
num_total = 400
y_true_actives = np.ones(num_actives)
y_true_inactives = np.zeros(num_total - num_actives)
y_true = np.concatenate([y_true_actives, y_true_inactives])
# Best score case
y_pred_best = dc.metrics.to_one_hot(
np.concatenate([y_true_actives, y_true_inactives]))
best_score = dc.metrics.bedroc_score(y_true, y_pred_best)
np.testing.assert_almost_equal(best_score, 1.0)
# Worst score case
worst_pred_actives = np.zeros(num_actives)
worst_pred_inactives = np.ones(num_total - num_actives)
y_pred_worst = dc.metrics.to_one_hot(
np.concatenate([worst_pred_actives, worst_pred_inactives]))
worst_score = dc.metrics.bedroc_score(y_true, y_pred_worst)
np.testing.assert_almost_equal(worst_score, 0.0, 4)
def test_concordance_index():
"""Test concordance index."""
metric = dc.metrics.Metric(dc.metrics.concordance_index)
y_true = np.array([1, 3, 5, 4, 2])
y_pred = np.array([3, 1, 5, 4, 2])
assert metric.compute_singletask_metric(y_true, y_pred) == 0.7
# best case
y_true = np.array([1, 3, 5, 4, 2])
y_pred = np.array([1, 3, 5, 4, 2])
assert metric.compute_singletask_metric(y_true, y_pred) == 1.0
# duplicate prediction value
y_true = np.array([1, 3, 5, 4, 2])
y_pred = np.array([1, 3, 4, 4, 2])
assert metric.compute_singletask_metric(y_true, y_pred) == 0.95
<file_sep>"""
Tests that FASTA files can be loaded.
"""
import os
import unittest
import deepchem as dc
from deepchem.feat.molecule_featurizers import OneHotFeaturizer
class TestFASTALoader(unittest.TestCase):
"""
Test FASTALoader
"""
def setUp(self):
super(TestFASTALoader, self).setUp()
self.current_dir = os.path.dirname(os.path.abspath(__file__))
def test_legacy_fasta_one_hot(self):
input_file = os.path.join(self.current_dir, "example.fasta")
loader = dc.data.FASTALoader(legacy=True)
sequences = loader.create_dataset(input_file)
# example.fasta contains 3 sequences each of length 58.
        # The one-hot encoding turns each base into a vector of length 5 (ATCGN).
# There is one "image channel".
assert sequences.X.shape == (3, 5, 58, 1)
def test_fasta_one_hot(self):
input_file = os.path.join(self.current_dir, "example.fasta")
loader = dc.data.FASTALoader(legacy=False)
sequences = loader.create_dataset(input_file)
# Due to FASTALoader redesign, expected shape is now (3, 58, 5)
assert sequences.X.shape == (3, 58, 5)
def test_fasta_one_hot_big(self):
protein = [
'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M',
'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z',
'*', '-'
]
input_file = os.path.join(self.current_dir, "uniprot_truncated.fasta")
loader = dc.data.FASTALoader(OneHotFeaturizer(charset=protein,
max_length=1000),
legacy=False)
sequences = loader.create_dataset(input_file)
assert sequences.X.shape
# TODO: test with full uniprot file once sharding support is added.
<file_sep>Transformers
============
DeepChem :code:`dc.trans.Transformer` objects are another core
building block of DeepChem programs. Oftentimes, machine learning
systems are very delicate. They need their inputs and outputs to fit
within a pre-specified range or follow a clean mathematical
distribution. Real data, of course, is wild and hard to control. What do
you do if you have an unruly dataset and need to bring its statistics to
heel? Fear not, for you have :code:`Transformer` objects.
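As a quick sketch of the typical workflow (the tiny in-memory dataset below
is purely illustrative), you fit a transformer against a dataset's statistics
and then apply it to that dataset:

.. code-block:: python

    import numpy as np
    import deepchem as dc

    # Build a small in-memory dataset with arbitrary regression labels.
    X = np.random.rand(10, 5)
    y = np.random.rand(10, 1)
    dataset = dc.data.NumpyDataset(X, y)

    # Fit the transformer to the dataset statistics, then transform it.
    transformer = dc.trans.NormalizationTransformer(transform_y=True,
                                                    dataset=dataset)
    dataset = transformer.transform(dataset)

    # Model outputs on the transformed scale can be mapped back with
    # transformer.untransform(...).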
.. contents:: Contents
:local:
General Transformers
--------------------
NormalizationTransformer
^^^^^^^^^^^^^^^^^^^^^^^^
.. autoclass:: deepchem.trans.NormalizationTransformer
:members:
:inherited-members:
MinMaxTransformer
^^^^^^^^^^^^^^^^^
.. autoclass:: deepchem.trans.MinMaxTransformer
:members:
:inherited-members:
ClippingTransformer
^^^^^^^^^^^^^^^^^^^
.. autoclass:: deepchem.trans.ClippingTransformer
:members:
:inherited-members:
LogTransformer
^^^^^^^^^^^^^^
.. autoclass:: deepchem.trans.LogTransformer
:members:
:inherited-members:
CDFTransformer
^^^^^^^^^^^^^^
.. autoclass:: deepchem.trans.CDFTransformer
:members:
:inherited-members:
PowerTransformer
^^^^^^^^^^^^^^^^
.. autoclass:: deepchem.trans.PowerTransformer
:members:
:inherited-members:
BalancingTransformer
^^^^^^^^^^^^^^^^^^^^
.. autoclass:: deepchem.trans.BalancingTransformer
:members:
:inherited-members:
DuplicateBalancingTransformer
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
.. autoclass:: deepchem.trans.DuplicateBalancingTransformer
:members:
:inherited-members:
ImageTransformer
^^^^^^^^^^^^^^^^
.. autoclass:: deepchem.trans.ImageTransformer
:members:
:inherited-members:
FeaturizationTransformer
^^^^^^^^^^^^^^^^^^^^^^^^
.. autoclass:: deepchem.trans.FeaturizationTransformer
:members:
:inherited-members:
Specific Use Case Transformers
------------------------------
CoulombFitTransformer
^^^^^^^^^^^^^^^^^^^^^
.. autoclass:: deepchem.trans.CoulombFitTransformer
:members:
:inherited-members:
IRVTransformer
^^^^^^^^^^^^^^
.. autoclass:: deepchem.trans.IRVTransformer
:members:
:inherited-members:
DAGTransformer
^^^^^^^^^^^^^^
.. autoclass:: deepchem.trans.DAGTransformer
:members:
:inherited-members:
RxnSplitTransformer
^^^^^^^^^^^^^^^^^^^
.. autoclass:: deepchem.trans.RxnSplitTransformer
:members:
:inherited-members:
Base Transformer (for development)
----------------------------------
The :code:`dc.trans.Transformer` class is the abstract parent class
for all transformers. This class should never be directly initialized,
but contains a number of useful method implementations.
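As a rough sketch of what a subclass looks like (this assumes the standard
:code:`transform_array` hook; the shift-by-one behavior is purely
illustrative):

.. code-block:: python

    import deepchem as dc

    class AddOneTransformer(dc.trans.Transformer):
        """Toy transformer that shifts every feature value up by one."""

        def __init__(self, dataset=None):
            super(AddOneTransformer, self).__init__(transform_X=True,
                                                    dataset=dataset)

        def transform_array(self, X, y, w, ids):
            # Only X is transformed; y, w and ids pass through unchanged.
            return X + 1, y, w, ids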
.. autoclass:: deepchem.trans.Transformer
:members:
<file_sep>import unittest
import pytest
import deepchem as dc
import numpy as np
try:
import torch
class MLP(dc.models.TorchModel):
def __init__(self,
n_tasks=1,
feature_dim=100,
hidden_layer_size=64,
**kwargs):
pytorch_model = torch.nn.Sequential(
torch.nn.Linear(feature_dim, hidden_layer_size),
torch.nn.ReLU(), torch.nn.Linear(hidden_layer_size, n_tasks),
torch.nn.Sigmoid())
loss = dc.models.losses.BinaryCrossEntropy()
super(MLP, self).__init__(model=pytorch_model, loss=loss, **kwargs)
has_pytorch = True
except:
has_pytorch = False
@unittest.skipIf(not has_pytorch, 'PyTorch is not installed')
class TestPretrainedTorch(unittest.TestCase):
@pytest.mark.torch
def setUp(self):
self.feature_dim = 2
self.hidden_layer_size = 10
data_points = 10
X = np.random.randn(data_points, self.feature_dim)
y = (X[:, 0] > X[:, 1]).astype(np.float32)
self.dataset = dc.data.NumpyDataset(X, y)
@pytest.mark.torch
def test_load_from_pretrained(self):
"""Tests loading pretrained model."""
source_model = MLP(hidden_layer_size=self.hidden_layer_size,
feature_dim=self.feature_dim,
batch_size=10)
source_model.fit(self.dataset, nb_epoch=1000, checkpoint_interval=0)
dest_model = MLP(feature_dim=self.feature_dim,
hidden_layer_size=self.hidden_layer_size,
n_tasks=10)
assignment_map = dict()
value_map = dict()
source_vars = list(source_model.model.parameters())
dest_vars = list(dest_model.model.parameters())[:-2]
for idx, dest_var in enumerate(dest_vars):
source_var = source_vars[idx]
assignment_map[source_var] = dest_var
value_map[source_var] = source_var.detach().cpu().numpy()
dest_model.load_from_pretrained(source_model=source_model,
assignment_map=assignment_map,
value_map=value_map)
for source_var, dest_var in assignment_map.items():
source_val = source_var.detach().cpu().numpy()
dest_val = dest_var.detach().cpu().numpy()
np.testing.assert_array_almost_equal(source_val, dest_val)
@pytest.mark.torch
def test_restore_equivalency(self):
"""Test for restore based pretrained model loading."""
source_model = MLP(feature_dim=self.feature_dim,
hidden_layer_size=self.hidden_layer_size,
learning_rate=0.003)
source_model.fit(self.dataset, nb_epoch=1000)
dest_model = MLP(feature_dim=self.feature_dim,
hidden_layer_size=self.hidden_layer_size)
dest_model.load_from_pretrained(source_model=source_model,
assignment_map=None,
value_map=None,
model_dir=None,
include_top=True)
predictions = np.squeeze(dest_model.predict_on_batch(self.dataset.X))
np.testing.assert_array_almost_equal(self.dataset.y,
np.round(predictions))
<file_sep>"""
Experimental bandgaps for inorganic crystals.
"""
import os
import deepchem as dc
from deepchem.molnet.load_function.molnet_loader import TransformerGenerator, _MolnetLoader
from deepchem.data import Dataset
from typing import List, Optional, Tuple, Union
BANDGAP_URL = 'https://deepchemdata.s3-us-west-1.amazonaws.com/datasets/expt_gap.tar.gz'
BANDGAP_TASKS = ['experimental_bandgap']
class _BandgapLoader(_MolnetLoader):
def create_dataset(self) -> Dataset:
dataset_file = os.path.join(self.data_dir, 'expt_gap.json')
targz_file = os.path.join(self.data_dir, 'expt_gap.tar.gz')
if not os.path.exists(dataset_file):
if not os.path.exists(targz_file):
dc.utils.data_utils.download_url(url=BANDGAP_URL,
dest_dir=self.data_dir)
dc.utils.data_utils.untargz_file(targz_file, self.data_dir)
loader = dc.data.JsonLoader(tasks=self.tasks,
feature_field="composition",
label_field="experimental_bandgap",
featurizer=self.featurizer)
return loader.create_dataset(dataset_file)
def load_bandgap(
featurizer: Union[dc.feat.Featurizer,
str] = dc.feat.ElementPropertyFingerprint(),
splitter: Union[dc.splits.Splitter, str, None] = 'random',
transformers: List[Union[TransformerGenerator, str]] = ['normalization'],
reload: bool = True,
data_dir: Optional[str] = None,
save_dir: Optional[str] = None,
**kwargs
) -> Tuple[List[str], Tuple[Dataset, ...], List[dc.trans.Transformer]]:
"""Load band gap dataset.
Contains 4604 experimentally measured band gaps for inorganic
crystal structure compositions. In benchmark studies, random forest
    models achieved a mean absolute error of 0.45 eV during five-fold
nested cross validation on this dataset.
For more details on the dataset see [1]_. For more details
on previous benchmarks for this dataset, see [2]_.
Parameters
----------
featurizer: Featurizer or str
the featurizer to use for processing the data. Alternatively you can pass
one of the names from dc.molnet.featurizers as a shortcut.
splitter: Splitter or str
the splitter to use for splitting the data into training, validation, and
test sets. Alternatively you can pass one of the names from
dc.molnet.splitters as a shortcut. If this is None, all the data
will be included in a single dataset.
transformers: list of TransformerGenerators or strings
the Transformers to apply to the data. Each one is specified by a
TransformerGenerator or, as a shortcut, one of the names from
dc.molnet.transformers.
reload: bool
if True, the first call for a particular featurizer and splitter will cache
the datasets to disk, and subsequent calls will reload the cached datasets.
data_dir: str
a directory to save the raw data in
save_dir: str
a directory to save the dataset in
Returns
-------
tasks, datasets, transformers : tuple
tasks : list
Column names corresponding to machine learning target variables.
datasets : tuple
train, validation, test splits of data as
``deepchem.data.datasets.Dataset`` instances.
transformers : list
``deepchem.trans.transformers.Transformer`` instances applied
to dataset.
References
----------
.. [1] <NAME>. et al. "Predicting the Band Gaps of Inorganic Solids by Machine Learning."
J. Phys. Chem. Lett. (2018) DOI: 10.1021/acs.jpclett.8b00124.
.. [2] <NAME>. et al. "Benchmarking Materials Property Prediction Methods: The Matbench Test Set
and Automatminer Reference Algorithm." https://arxiv.org/abs/2005.00707 (2020)
Examples
--------
>>>
>> import deepchem as dc
>> tasks, datasets, transformers = dc.molnet.load_bandgap()
>> train_dataset, val_dataset, test_dataset = datasets
>> n_tasks = len(tasks)
>> n_features = train_dataset.get_data_shape()[0]
>> model = dc.models.MultitaskRegressor(n_tasks, n_features)
"""
loader = _BandgapLoader(featurizer, splitter, transformers, BANDGAP_TASKS,
data_dir, save_dir, **kwargs)
return loader.load_dataset('bandgap', reload)
<file_sep>from deepchem.feat import Featurizer
from typing import List
try:
from transformers import BertTokenizerFast
except ModuleNotFoundError:
raise ImportError(
'Transformers must be installed for BertFeaturizer to be used!')
class BertFeaturizer(Featurizer):
"""Bert Featurizer.
The Bert Featurizer is a wrapper class for HuggingFace's BertTokenizerFast.
This class intends to allow users to use the BertTokenizer API while
remaining inside the DeepChem ecosystem.
Examples
--------
>>> from deepchem.feat import BertFeaturizer
>>> from transformers import BertTokenizerFast
>>> tokenizer = BertTokenizerFast.from_pretrained("Rostlab/prot_bert", do_lower_case=False)
>>> featurizer = BertFeaturizer(tokenizer)
>>> feats = featurizer.featurize('D L I P [MASK] L V T')
Notes
-----
Examples are based on RostLab's ProtBert documentation.
"""
def __init__(self, tokenizer: BertTokenizerFast):
if not isinstance(tokenizer, BertTokenizerFast):
raise TypeError(
f"""`tokenizer` must be a constructed `BertTokenizerFast`
object, not {type(tokenizer)}""")
else:
self.tokenizer = tokenizer
def _featurize(self, datapoint: str, **kwargs) -> List[List[int]]:
"""
        Calculate encoding using HuggingFace's BertTokenizerFast
Parameters
----------
datapoint: str
Arbitrary string sequence to be tokenized.
Returns
-------
encoding: List
List containing three lists: the `input_ids`, 'token_type_ids', and `attention_mask`.
"""
# the encoding is natively a dictionary with keys 'input_ids', 'token_type_ids', and 'attention_mask'
encoding = list(self.tokenizer(datapoint, **kwargs).values())
return encoding
<file_sep>import unittest
from deepchem.feat.molecule_featurizers import SparseMatrixOneHotFeaturizer
class TestSparseMatrixOneHotFeaturizer(unittest.TestCase):
"""
Test SparseMatrixOneHotFeaturizer.
"""
def test_sparsemat_arbitrary_default_charset(self):
"""
Test simple one hot encoding
"""
featurizer = SparseMatrixOneHotFeaturizer()
sequence = "MMMQLA"
encodings = featurizer.featurize([sequence])
assert encodings[0].shape[0] == 6
assert encodings[0].shape[1] == 25
    def test_sparsemat_arbitrary_default_charset_untransform(self):
"""
Test simple one hot encoding
"""
featurizer = SparseMatrixOneHotFeaturizer()
sequence = "MMMQLA"
encodings = featurizer.featurize([sequence])
out = featurizer.untransform(encodings[0])
assert out == "MMMQLA"
def test_sparsemat_arbitrary_arbitrary_charset(self):
"""
Test simple one hot encoding
"""
charset = ["A", "B", "C"]
featurizer = SparseMatrixOneHotFeaturizer(charset)
sequence = "AAAB"
encodings = featurizer.featurize([sequence])
array = encodings[0].toarray()
assert encodings[0].shape[0] == 4
assert encodings[0].shape[1] == 3
assert array[0][0] == 1
assert array[0][1] == 0
    def test_sparsemat_arbitrary_unknown_val(self):
"""
Test simple one hot encoding
"""
charset = ["A", "B", "C"]
featurizer = SparseMatrixOneHotFeaturizer(charset)
sequence = "AAAD"
encodings = featurizer.featurize([sequence])
array = encodings[0].toarray()
assert encodings[0].shape[0] == 4
assert encodings[0].shape[1] == len(charset)
assert array[0][0] == 1
assert array[-1][-1] == 0
<file_sep>import os
import pytest
import numpy as np
import deepchem as dc
from deepchem.feat.molecule_featurizers import MolGraphConvFeaturizer
@pytest.mark.torch
def get_classification_dataset():
featurizer = MolGraphConvFeaturizer(use_edges=True)
dir = os.path.dirname(os.path.abspath(__file__))
input_file = os.path.join(dir, 'assets/example_classification.csv')
loader = dc.data.CSVLoader(tasks=["outcome"],
feature_field="smiles",
featurizer=featurizer)
dataset = loader.create_dataset(input_file)
metric = dc.metrics.Metric(dc.metrics.roc_auc_score,
np.mean,
mode="classification")
return dataset, metric
@pytest.mark.torch
def get_multitask_classification_dataset():
featurizer = MolGraphConvFeaturizer(use_edges=True)
dir = os.path.dirname(os.path.abspath(__file__))
input_file = os.path.join(dir, 'assets/multitask_example.csv')
loader = dc.data.CSVLoader(tasks=['task0', 'task1', 'task2'],
feature_field="smiles",
featurizer=featurizer)
dataset = loader.create_dataset(input_file)
metric = dc.metrics.Metric(dc.metrics.roc_auc_score,
np.mean,
mode="classification")
return dataset, metric
@pytest.mark.torch
def get_multitask_regression_dataset():
featurizer = MolGraphConvFeaturizer(use_edges=True)
dir = os.path.dirname(os.path.abspath(__file__))
input_file = os.path.join(dir, 'assets/multitask_regression.csv')
loader = dc.data.CSVLoader(tasks=['task0', 'task1', 'task2'],
feature_field="smiles",
featurizer=featurizer)
dataset = loader.create_dataset(input_file)
metric = dc.metrics.Metric(dc.metrics.mean_absolute_error,
mode="regression")
return dataset, metric
@pytest.mark.torch
def get_regression_dataset():
featurizer = MolGraphConvFeaturizer(use_edges=True)
dir = os.path.dirname(os.path.abspath(__file__))
input_file = os.path.join(dir, 'assets/example_regression.csv')
loader = dc.data.CSVLoader(tasks=["outcome"],
feature_field="smiles",
featurizer=featurizer)
dataset = loader.create_dataset(input_file)
metric = dc.metrics.Metric(dc.metrics.mean_absolute_error,
mode="regression")
return dataset, metric
@pytest.mark.torch
def test_infographencoder():
import numpy as np
import torch
from deepchem.models.torch_models.infograph import InfoGraphEncoder
from deepchem.feat.graph_data import GraphData, BatchGraphData
torch.manual_seed(123)
embedding_dim = 32
num_nodes = 10
num_graphs = 3
encoder = InfoGraphEncoder(num_features=25,
edge_features=10,
embedding_dim=embedding_dim)
data = []
for i in range(num_graphs):
node_features = np.random.randn(num_nodes, 25)
edge_index = np.array([[0, 1, 2], [1, 2, 3]])
edge_features = np.random.randn(3, 10)
data.append(
GraphData(node_features=node_features,
edge_index=edge_index,
edge_features=edge_features))
data = BatchGraphData(data).numpy_to_torch()
embedding, feature_map = encoder(data)
assert embedding.shape == torch.Size([num_graphs, 2 * embedding_dim])
assert feature_map.shape == torch.Size(
[num_nodes * num_graphs, embedding_dim])
@pytest.mark.torch
def test_GINEncoder():
import numpy as np
import torch
from deepchem.models.torch_models.infograph import GINEncoder
from deepchem.feat.graph_data import GraphData, BatchGraphData
torch.manual_seed(123)
num_gc_layers = 2
embedding_dim = 32
num_nodes = 10
num_graphs = 3
encoder = GINEncoder(num_features=25,
embedding_dim=embedding_dim,
num_gc_layers=num_gc_layers)
data = []
for i in range(num_graphs):
node_features = np.random.randn(num_nodes, 25)
edge_index = np.array([[0, 1, 2], [1, 2, 3]])
edge_features = np.random.randn(3, 10)
data.append(
GraphData(node_features=node_features,
edge_index=edge_index,
edge_features=edge_features))
data = BatchGraphData(data).numpy_to_torch()
embedding, intermediate_embeddings = encoder(data)
assert embedding.shape == torch.Size([num_graphs, embedding_dim])
assert intermediate_embeddings.shape == torch.Size(
[num_nodes * num_graphs, embedding_dim])
@pytest.mark.torch
def test_infographstar_regression_semisupervised():
from deepchem.models.torch_models.infograph import InfoGraphStarModel
import torch
torch.manual_seed(123)
dataset, metric = get_regression_dataset()
num_feat = 30
edge_dim = 11
dim = 128
model = InfoGraphStarModel(num_feat,
edge_dim,
dim,
num_gc_layers=2,
task='semisupervised')
model.fit(dataset, nb_epoch=100)
scores = model.evaluate(dataset, [metric])
assert scores['mean_absolute_error'] < 0.2
@pytest.mark.torch
def test_infographstar_classification_semisupervised():
from deepchem.models.torch_models.infograph import InfoGraphStarModel
import torch
torch.manual_seed(123)
dataset, metric = get_classification_dataset()
num_feat = 30
edge_dim = 11
dim = 64
model = InfoGraphStarModel(num_feat,
edge_dim,
dim,
num_gc_layers=3,
task='semisupervised')
model.fit(dataset, nb_epoch=100)
scores = model.evaluate(dataset, [metric])
assert scores['mean-roc_auc_score'] >= 0.9
@pytest.mark.torch
def test_infographstar_multitask_classification_supervised():
from deepchem.models.torch_models.infograph import InfoGraphStarModel
import torch
torch.manual_seed(123)
dataset, metric = get_multitask_classification_dataset()
num_feat = 30
edge_dim = 11
dim = 64
model = InfoGraphStarModel(num_feat,
edge_dim,
dim,
task='supervised',
mode='classification',
num_classes=2,
num_tasks=3)
model.fit(dataset, nb_epoch=200)
scores = model.evaluate(dataset, [metric])
# .8 to save resources for a difficult task
assert scores['mean-roc_auc_score'] >= 0.8
@pytest.mark.torch
def test_infographstar_multitask_regression_supervised():
from deepchem.models.torch_models.infograph import InfoGraphStarModel
import torch
torch.manual_seed(123)
dataset, metric = get_multitask_regression_dataset()
num_feat = 30
edge_dim = 11
dim = 64
model = InfoGraphStarModel(num_feat,
edge_dim,
dim,
num_gc_layers=3,
task='supervised',
mode='regression',
num_tasks=3)
model.fit(dataset, nb_epoch=200)
scores = model.evaluate(dataset, [metric])
# .2 to save resources for a difficult task
assert scores['mean_absolute_error'] < 0.2
@pytest.mark.torch
def test_infographstar_regression_supervised():
from deepchem.models.torch_models.infograph import InfoGraphStarModel
import torch
torch.manual_seed(123)
dataset, metric = get_regression_dataset()
num_feat = 30
edge_dim = 11
dim = 64
model = InfoGraphStarModel(num_feat,
edge_dim,
dim,
num_gc_layers=3,
task='supervised')
model.fit(dataset, nb_epoch=100)
scores = model.evaluate(dataset, [metric])
assert scores['mean_absolute_error'] < 0.1
@pytest.mark.torch
def test_infograph():
from deepchem.models.torch_models.infograph import InfoGraphModel
import torch
torch.manual_seed(123)
dataset, _ = get_regression_dataset()
num_feat = 30
edge_dim = 11
model = InfoGraphModel(num_feat, edge_dim)
# first iteration loss is around 50
loss = model.fit(dataset, nb_epoch=20)
assert loss < 25
@pytest.mark.torch
def test_infograph_pretrain_overfit():
"""This tests the intended use of InfoGraph and InfoGraphStar together, with InfoGraph serving as a pretraining step for InfoGraphStar."""
from deepchem.models.torch_models.infograph import InfoGraphModel, InfoGraphStarModel
import torch
torch.manual_seed(123)
np.random.seed(123)
dataset, _ = get_regression_dataset()
num_feat = 30
edge_dim = 11
dim = 32
infograph = InfoGraphModel(num_feat, edge_dim)
infographstar = InfoGraphStarModel(num_feat,
edge_dim,
dim,
num_gc_layers=2,
task='semisupervised')
loss1 = infographstar.fit(dataset, nb_epoch=10)
infograph.fit(dataset, nb_epoch=20)
infographstar.load_from_pretrained(infograph, ['unsup_encoder'])
loss2 = infographstar.fit(dataset, nb_epoch=10)
infographstar.fit(dataset, nb_epoch=200)
prediction = infographstar.predict_on_batch(dataset.X).reshape(-1, 1)
assert np.allclose(np.round(dataset.y), np.round(prediction))
assert loss1 > loss2
@pytest.mark.torch
def test_infographstar_fit_restore():
from deepchem.models.torch_models.infograph import InfoGraphStarModel
dataset, _ = get_classification_dataset()
num_feat = 30
edge_dim = 11
dim = 64
model = InfoGraphStarModel(num_feat, edge_dim, dim, task='supervised')
model.fit(dataset, nb_epoch=100)
model2 = InfoGraphStarModel(num_feat,
edge_dim,
dim,
                                task='supervised',
model_dir=model.model_dir)
model2.fit(dataset, nb_epoch=1, restore=True)
prediction = model2.predict_on_batch(dataset.X).reshape(-1, 1)
assert np.allclose(dataset.y, np.round(prediction))
@pytest.mark.torch
def test_infograph_pretrain_finetune(tmpdir):
from deepchem.models.torch_models.infograph import InfoGraphModel
import torch
torch.manual_seed(123)
np.random.seed(123)
dataset, _ = get_regression_dataset()
num_feat = 30
edge_dim = 11
pretrain_model = InfoGraphModel(num_feat,
edge_dim,
num_gc_layers=1,
model_dir=tmpdir,
device=torch.device('cpu'))
pretraining_loss = pretrain_model.fit(dataset, nb_epoch=1)
assert pretraining_loss
pretrain_model.save_checkpoint()
finetune_model = InfoGraphModel(num_feat,
edge_dim,
num_gc_layers=1,
task='regression',
n_tasks=1,
model_dir=tmpdir,
device=torch.device('cpu'))
finetune_model.restore(components=['encoder'])
finetuning_loss = finetune_model.fit(dataset, nb_epoch=1)
assert finetuning_loss
<file_sep>"""
qm8 dataset loader.
"""
import os
import deepchem as dc
from deepchem.molnet.load_function.molnet_loader import TransformerGenerator, _MolnetLoader
from deepchem.data import Dataset
from typing import List, Optional, Tuple, Union
GDB8_URL = "https://deepchemdata.s3-us-west-1.amazonaws.com/datasets/gdb8.tar.gz"
QM8_CSV_URL = "https://deepchemdata.s3-us-west-1.amazonaws.com/datasets/qm8.csv"
QM8_TASKS = [
"E1-CC2", "E2-CC2", "f1-CC2", "f2-CC2", "E1-PBE0", "E2-PBE0", "f1-PBE0",
"f2-PBE0", "E1-PBE0", "E2-PBE0", "f1-PBE0", "f2-PBE0", "E1-CAM", "E2-CAM",
"f1-CAM", "f2-CAM"
]
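# Note: the PBE0 tasks appear twice above because the source data reports them
# for two different basis sets (def2SVP and def2TZVP); see the docstring of
# load_qm8 below.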
class _QM8Loader(_MolnetLoader):
def create_dataset(self) -> Dataset:
dataset_file = os.path.join(self.data_dir, "qm8.sdf")
if not os.path.exists(dataset_file):
dc.utils.data_utils.download_url(url=GDB8_URL,
dest_dir=self.data_dir)
dc.utils.data_utils.untargz_file(
os.path.join(self.data_dir, "gdb8.tar.gz"), self.data_dir)
loader = dc.data.SDFLoader(tasks=self.tasks,
featurizer=self.featurizer,
sanitize=True)
return loader.create_dataset(dataset_file, shard_size=8192)
def load_qm8(
featurizer: Union[dc.feat.Featurizer, str] = dc.feat.CoulombMatrix(26),
splitter: Union[dc.splits.Splitter, str, None] = 'random',
transformers: List[Union[TransformerGenerator, str]] = ['normalization'],
reload: bool = True,
data_dir: Optional[str] = None,
save_dir: Optional[str] = None,
**kwargs
) -> Tuple[List[str], Tuple[Dataset, ...], List[dc.trans.Transformer]]:
"""Load QM8 dataset
QM8 is the dataset used in a study on modeling quantum
mechanical calculations of electronic spectra and excited
state energy of small molecules. Multiple methods, including
time-dependent density functional theories (TDDFT) and
second-order approximate coupled-cluster (CC2), are applied to
a collection of molecules that include up to eight heavy atoms
(also a subset of the GDB-17 database). In our collection,
there are four excited state properties calculated by four
different methods on 22 thousand samples:
S0 -> S1 transition energy E1 and the corresponding oscillator strength f1
S0 -> S2 transition energy E2 and the corresponding oscillator strength f2
E1, E2, f1, f2 are in atomic units. f1, f2 are in length representation
Random splitting is recommended for this dataset.
The source data contain:
- qm8.sdf: molecular structures
- qm8.sdf.csv: tables for molecular properties
- Column 1: Molecule ID (gdb9 index) mapping to the .sdf file
- Columns 2-5: RI-CC2/def2TZVP
- Columns 6-9: LR-TDPBE0/def2SVP
- Columns 10-13: LR-TDPBE0/def2TZVP
- Columns 14-17: LR-TDCAM-B3LYP/def2TZVP
Parameters
----------
featurizer: Featurizer or str
the featurizer to use for processing the data. Alternatively you can pass
one of the names from dc.molnet.featurizers as a shortcut.
splitter: Splitter or str
the splitter to use for splitting the data into training, validation, and
test sets. Alternatively you can pass one of the names from
dc.molnet.splitters as a shortcut. If this is None, all the data
will be included in a single dataset.
transformers: list of TransformerGenerators or strings
the Transformers to apply to the data. Each one is specified by a
TransformerGenerator or, as a shortcut, one of the names from
dc.molnet.transformers.
reload: bool
if True, the first call for a particular featurizer and splitter will cache
the datasets to disk, and subsequent calls will reload the cached datasets.
data_dir: str
a directory to save the raw data in
save_dir: str
a directory to save the dataset in
Note
----
DeepChem 2.4.0 has turned on sanitization for this dataset by
default. For the QM8 dataset, this means that calling this
function will return 21747 compounds instead of 21786 in the source
dataset file. This appears to be due to valence specification
mismatches in the dataset that weren't caught in earlier more lax
versions of RDKit. Note that this may subtly affect benchmarking
results on this dataset.
References
----------
.. [1] Blum, <NAME>., and <NAME>. "970 million druglike
small molecules for virtual screening in the chemical universe database
GDB-13." Journal of the American Chemical Society 131.25 (2009):
8732-8733.
.. [2] Ramakrishnan, Raghunathan, et al. "Electronic spectra from TDDFT
and machine learning in chemical space." The Journal of chemical physics
143.8 (2015): 084111.
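    Examples
    --------
    A minimal usage sketch, mirroring the other MoleculeNet loaders (the
    default featurizer and splitter described above are assumed):
    >>>
    >> import deepchem as dc
    >> tasks, datasets, transformers = dc.molnet.load_qm8()
    >> train_dataset, valid_dataset, test_dataset = datasets
    >> n_tasks = len(tasks)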
"""
loader = _QM8Loader(featurizer, splitter, transformers, QM8_TASKS, data_dir,
save_dir, **kwargs)
return loader.load_dataset('qm8', reload)
<file_sep>"""
Load datasets for Low Data processing.
"""
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
import os
import sys
import shutil
import tempfile
import numpy as np
import deepchem as dc
def to_numpy_dataset(dataset):
"""Converts dataset to numpy dataset."""
return dc.data.NumpyDataset(dataset.X, dataset.y, dataset.w, dataset.ids)
def load_tox21_ecfp(num_train=7200):
"""Load Tox21 datasets. Does not do train/test split"""
# Set some global variables up top
current_dir = os.path.dirname(os.path.realpath(__file__))
dataset_file = os.path.join(current_dir, "../../datasets/tox21.csv.gz")
# Featurize Tox21 dataset
print("About to featurize Tox21 dataset.")
featurizer = dc.feat.CircularFingerprint(size=1024)
tox21_tasks = [
'NR-AR', 'NR-AR-LBD', 'NR-AhR', 'NR-Aromatase', 'NR-ER', 'NR-ER-LBD',
'NR-PPAR-gamma', 'SR-ARE', 'SR-ATAD5', 'SR-HSE', 'SR-MMP', 'SR-p53'
]
loader = dc.data.CSVLoader(
tasks=tox21_tasks, smiles_field="smiles", featurizer=featurizer)
dataset = loader.featurize(dataset_file, shard_size=8192)
# Initialize transformers
transformers = [dc.trans.BalancingTransformer(dataset=dataset)]
print("About to transform data")
for transformer in transformers:
dataset = transformer.transform(dataset)
return tox21_tasks, dataset, transformers
def load_tox21_convmol(base_dir=None, num_train=7200):
"""Load Tox21 datasets. Does not do train/test split"""
# Set some global variables up top
current_dir = os.path.dirname(os.path.realpath(__file__))
dataset_file = os.path.join(current_dir, "../../datasets/tox21.csv.gz")
# Featurize Tox21 dataset
print("About to featurize Tox21 dataset.")
featurizer = dc.feat.ConvMolFeaturizer()
tox21_tasks = [
'NR-AR', 'NR-AR-LBD', 'NR-AhR', 'NR-Aromatase', 'NR-ER', 'NR-ER-LBD',
'NR-PPAR-gamma', 'SR-ARE', 'SR-ATAD5', 'SR-HSE', 'SR-MMP', 'SR-p53'
]
loader = dc.data.CSVLoader(
tasks=tox21_tasks, smiles_field="smiles", featurizer=featurizer)
dataset = loader.featurize(dataset_file, shard_size=8192)
# Initialize transformers
transformers = [dc.trans.BalancingTransformer(dataset=dataset)]
print("About to transform data")
for transformer in transformers:
dataset = transformer.transform(dataset)
return tox21_tasks, dataset, transformers
def load_muv_ecfp():
"""Load MUV datasets. Does not do train/test split"""
# Load MUV dataset
print("About to load MUV dataset.")
current_dir = os.path.dirname(os.path.realpath(__file__))
dataset_file = os.path.join(current_dir, "../../datasets/muv.csv.gz")
# Featurize MUV dataset
print("About to featurize MUV dataset.")
featurizer = dc.feat.CircularFingerprint(size=1024)
MUV_tasks = sorted([
'MUV-692', 'MUV-689', 'MUV-846', 'MUV-859', 'MUV-644', 'MUV-548',
'MUV-852', 'MUV-600', 'MUV-810', 'MUV-712', 'MUV-737', 'MUV-858',
'MUV-713', 'MUV-733', 'MUV-652', 'MUV-466', 'MUV-832'
])
loader = dc.data.CSVLoader(
tasks=MUV_tasks, smiles_field="smiles", featurizer=featurizer)
dataset = loader.featurize(dataset_file)
# Initialize transformers
transformers = [dc.trans.BalancingTransformer(dataset=dataset)]
print("About to transform data")
for transformer in transformers:
dataset = transformer.transform(dataset)
return MUV_tasks, dataset, transformers
def load_muv_convmol():
"""Load MUV datasets. Does not do train/test split"""
# Load MUV dataset
print("About to load MUV dataset.")
current_dir = os.path.dirname(os.path.realpath(__file__))
dataset_file = os.path.join(current_dir, "../../datasets/muv.csv.gz")
# Featurize MUV dataset
print("About to featurize MUV dataset.")
featurizer = dc.feat.ConvMolFeaturizer()
MUV_tasks = sorted([
'MUV-692', 'MUV-689', 'MUV-846', 'MUV-859', 'MUV-644', 'MUV-548',
'MUV-852', 'MUV-600', 'MUV-810', 'MUV-712', 'MUV-737', 'MUV-858',
'MUV-713', 'MUV-733', 'MUV-652', 'MUV-466', 'MUV-832'
])
loader = dc.data.CSVLoader(
tasks=MUV_tasks, smiles_field="smiles", featurizer=featurizer)
dataset = loader.featurize(dataset_file)
# Initialize transformers
transformers = [dc.trans.BalancingTransformer(dataset=dataset)]
print("About to transform data")
for transformer in transformers:
dataset = transformer.transform(dataset)
return MUV_tasks, dataset, transformers
def load_sider_ecfp():
"""Load SIDER datasets. Does not do train/test split"""
# Featurize SIDER dataset
print("About to featurize SIDER dataset.")
current_dir = os.path.dirname(os.path.realpath(__file__))
dataset_file = os.path.join(current_dir, "../sider/sider.csv.gz")
featurizer = dc.feat.CircularFingerprint(size=1024)
dataset = dc.utils.save.load_from_disk(dataset_file)
SIDER_tasks = dataset.columns.values[1:].tolist()
print("SIDER tasks: %s" % str(SIDER_tasks))
print("%d tasks in total" % len(SIDER_tasks))
loader = dc.data.CSVLoader(
tasks=SIDER_tasks, smiles_field="smiles", featurizer=featurizer)
dataset = loader.featurize(dataset_file)
print("%d datapoints in SIDER dataset" % len(dataset))
# Initialize transformers
transformers = [dc.trans.BalancingTransformer(dataset=dataset)]
print("About to transform data")
for transformer in transformers:
dataset = transformer.transform(dataset)
return SIDER_tasks, dataset, transformers
def load_sider_convmol():
"""Load SIDER datasets. Does not do train/test split"""
# Featurize SIDER dataset
print("About to featurize SIDER dataset.")
current_dir = os.path.dirname(os.path.realpath(__file__))
dataset_file = os.path.join(current_dir, "../sider/sider.csv.gz")
featurizer = dc.feat.ConvMolFeaturizer()
dataset = dc.utils.save.load_from_disk(dataset_file)
SIDER_tasks = dataset.columns.values[1:].tolist()
print("SIDER tasks: %s" % str(SIDER_tasks))
print("%d tasks in total" % len(SIDER_tasks))
loader = dc.data.CSVLoader(
tasks=SIDER_tasks, smiles_field="smiles", featurizer=featurizer)
dataset = loader.featurize(dataset_file)
print("%d datapoints in SIDER dataset" % len(dataset))
# Initialize transformers
transformers = [dc.trans.BalancingTransformer(dataset=dataset)]
print("About to transform data")
for transformer in transformers:
dataset = transformer.transform(dataset)
return SIDER_tasks, dataset, transformers
<file_sep>Infrastructures
===============
The DeepChem project maintains supporting infrastructure on a number of
different services. This infrastructure is maintained by the DeepChem
development team.
GitHub
------
The core DeepChem repositories are maintained in the `deepchem`_ GitHub organization.
And, we use GitHub Actions to build a continuous integration pipeline.
.. _`deepchem`: https://github.com/deepchem
DeepChem developers have write access to the repositories on this repo and
technical steering committee members have admin access.
Conda Forge
-----------
The DeepChem `feedstock`_ repo maintains the build recipe for conda-forge.
.. _`feedstock`: https://github.com/conda-forge/deepchem-feedstock
Docker Hub
----------
DeepChem hosts major releases and nightly docker build instances on `Docker Hub`_.
.. _`Docker Hub`: https://hub.docker.com/r/deepchemio/deepchem
PyPI
----
DeepChem hosts major releases and nightly builds on `PyPI`_.
.. _`PyPI`: https://pypi.org/project/deepchem/
Amazon Web Services
-------------------
DeepChem's website infrastructure is all managed on AWS through different AWS
services. All DeepChem developers have access to these services through the
deepchem-developers IAM role. (An IAM role controls access permissions.) At
present, @rbharath is the only developer with admin access to the IAM role, but
longer term we should migrate this so other folks have access to the roles.
S3
^^
Amazon's S3 allows for storage of data in "buckets" (think of buckets as folders).
There are two core deepchem S3 buckets:
- deepchemdata: This bucket hosts the MoleculeNet datasets, pre-featurized datasets,
and pretrained models.
- deepchemforum: This bucket hosts backups for the forums. The bucket is private for security reasons.
The forums themselves are hosted on a digital ocean instance that only @rbharath currently has access to.
Longer term, we should migrate the forums onto AWS so all DeepChem developers can access the forums.
The forums themselves run on Discourse and upload their backups to this S3 bucket once a day.
If the forums crash, they can be restored from the backups in this bucket.
Route 53
^^^^^^^^
DNS for the deepchem.io website is handled by Route 53. The "hosted zone"
deepchem.io holds all DNS information for the website.
Certificate Manager
^^^^^^^^^^^^^^^^^^^
The AWS certificate manager issues the SSL/TLS certificate for the
\*.deepchem.io and deepchem.io domains.
GitHub Pages
^^^^^^^^^^
We make use of GitHub Pages to serve our static website. GitHub Pages
connects to the certificate in Certificate Manager. We set CNAME for
www.deepchem.io, and an A-record for deepchem.io.
The GitHub Pages repository is `deepchem/deepchem.github.io <https://github.com/deepchem/deepchem.github.io>`_.
GoDaddy
-------
The deepchem.io domain is registered with GoDaddy. If you change the name
servers in AWS Route 53, you will need to update the GoDaddy record. At
present, only @rbharath has access to the GoDaddy account that owns the
deepchem.io domain name. We should explore how to provide access to the domain
name for other DeepChem developers.
Digital Ocean
-------------
The forums are hosted on a digital ocean instance. At present, only @rbharath
has access to this instance. We should migrate this instance onto AWS so other
DeepChem developers can help maintain the forums.
<file_sep>from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
import numpy as np
np.random.seed(123)
import tensorflow as tf
tf.random.set_seed(123)
import deepchem as dc
# Load QM8 dataset
tasks, datasets, transformers = dc.molnet.load_qm8()
train_dataset, valid_dataset, test_dataset = datasets
# Fit models
metric = [dc.metrics.Metric(dc.metrics.pearson_r2_score, mode="regression")]
# Batch size of models
batch_size = 50
n_embedding = 20
n_distance = 51
distance_min = -1.
distance_max = 9.2
n_hidden = 15
model = dc.models.DTNNModel(
len(tasks),
n_embedding=n_embedding,
n_hidden=n_hidden,
n_distance=n_distance,
distance_min=distance_min,
distance_max=distance_max,
output_activation=False,
batch_size=batch_size,
learning_rate=0.0001,
use_queue=False,
mode="regression")
# Fit trained model
model.fit(train_dataset, nb_epoch=50)
print("Evaluating model")
train_scores = model.evaluate(train_dataset, metric, transformers)
valid_scores = model.evaluate(valid_dataset, metric, transformers)
print("Train scores")
print(train_scores)
print("Validation scores")
print(valid_scores)
<file_sep>"""
Featurizer implementations used in ChemCeption models.
SmilesToImage featurizer for ChemCeption models taken from https://arxiv.org/abs/1710.02238
"""
import numpy as np
from deepchem.utils.typing import RDKitMol
from deepchem.feat.base_classes import MolecularFeaturizer
class SmilesToImage(MolecularFeaturizer):
"""Convert SMILES string to an image.
SmilesToImage Featurizer takes a SMILES string, and turns it into an image.
Details taken from [1]_.
    The default size for the image is 80 x 80. Two image modes are currently
supported - std & engd. std is the gray scale specification,
with atomic numbers as pixel values for atom positions and a constant value of
2 for bond positions. engd is a 4-channel specification, which uses atom
properties like hybridization, valency, charges in addition to atomic number.
Bond type is also used for the bonds.
The coordinates of all atoms are computed, and lines are drawn between atoms
to indicate bonds. For the respective channels, the atom and bond positions are
set to the property values as mentioned in the paper.
Examples
--------
>>> import deepchem as dc
>>> smiles = ['CC(=O)OC1=CC=CC=C1C(=O)O']
>>> featurizer = dc.feat.SmilesToImage(img_size=80, img_spec='std')
>>> images = featurizer.featurize(smiles)
>>> type (images[0])
<class 'numpy.ndarray'>
>>> images[0].shape # (img_size, img_size, 1)
(80, 80, 1)
References
----------
.. [1] Goh, <NAME>., et al. "Using rule-based labels for weak supervised
learning: a ChemNet for transferable chemical property prediction."
Proceedings of the 24th ACM SIGKDD International Conference on Knowledge
Discovery & Data Mining. 2018.
Note
----
This class requires RDKit to be installed.
"""
def __init__(self,
img_size: int = 80,
res: float = 0.5,
max_len: int = 250,
img_spec: str = "std"):
"""
Parameters
----------
img_size: int, default 80
Size of the image tensor
res: float, default 0.5
Displays the resolution of each pixel in Angstrom
max_len: int, default 250
Maximum allowed length of SMILES string
img_spec: str, default std
Indicates the channel organization of the image tensor
"""
if img_spec not in ["std", "engd"]:
raise ValueError(
"Image mode must be one of std or engd. {} is not supported".
format(img_spec))
self.img_size = img_size
self.max_len = max_len
self.res = res
self.img_spec = img_spec
self.embed = int(img_size * res / 2)
def _featurize(self, datapoint: RDKitMol, **kwargs) -> np.ndarray:
"""Featurizes a single SMILE into an image.
Parameters
----------
datapoint: rdkit.Chem.rdchem.Mol
RDKit Mol object
Returns
-------
np.ndarray
A 3D array of image, the shape is `(img_size, img_size, 1)`.
If the length of SMILES is longer than `max_len`, this value is an empty array.
"""
try:
from rdkit import Chem
from rdkit.Chem import AllChem
except ModuleNotFoundError:
raise ImportError("This class requires RDKit to be installed.")
if 'mol' in kwargs:
datapoint = kwargs.get("mol")
raise DeprecationWarning(
'Mol is being phased out as a parameter, please pass "datapoint" instead.'
)
smile = Chem.MolToSmiles(datapoint)
if len(smile) > self.max_len:
return np.array([])
cmol = Chem.Mol(datapoint.ToBinary())
cmol.ComputeGasteigerCharges()
AllChem.Compute2DCoords(cmol)
atom_coords = cmol.GetConformer(0).GetPositions()
if self.img_spec == "std":
# Setup image
img = np.zeros((self.img_size, self.img_size, 1))
# Compute bond properties
bond_props = np.array(
[[2.0, bond.GetBeginAtomIdx(),
bond.GetEndAtomIdx()] for bond in datapoint.GetBonds()])
# Compute atom properties
atom_props = np.array(
[[atom.GetAtomicNum()] for atom in cmol.GetAtoms()])
bond_props = bond_props.astype(np.float32)
atom_props = atom_props.astype(np.float32)
else:
# Setup image
img = np.zeros((self.img_size, self.img_size, 4))
# Compute bond properties
bond_props = np.array([[
bond.GetBondTypeAsDouble(),
bond.GetBeginAtomIdx(),
bond.GetEndAtomIdx()
] for bond in datapoint.GetBonds()])
# Compute atom properties
atom_props = np.array([[
atom.GetAtomicNum(),
atom.GetProp("_GasteigerCharge"),
atom.GetExplicitValence(),
atom.GetHybridization().real,
] for atom in cmol.GetAtoms()])
bond_props = bond_props.astype(np.float32)
atom_props = atom_props.astype(np.float32)
partial_charges = atom_props[:, 1]
if np.any(np.isnan(partial_charges)):
return np.array([])
frac = np.linspace(0, 1, int(1 / self.res * 2))
# Reshape done for proper broadcast
frac = frac.reshape(-1, 1, 1)
bond_begin_idxs = bond_props[:, 1].astype(int)
bond_end_idxs = bond_props[:, 2].astype(int)
# Reshapes, and axes manipulations to facilitate vector processing.
begin_coords = atom_coords[bond_begin_idxs]
begin_coords = np.expand_dims(begin_coords.T, axis=0)
end_coords = atom_coords[bond_end_idxs]
end_coords = np.expand_dims(end_coords.T, axis=0)
# Draw a line between the two atoms.
# The coordinates of this line, are indicated in line_coords
line_coords = frac * begin_coords + (1 - frac) * end_coords
# Turn the line coordinates into image positions
bond_line_idxs = np.ceil(
(line_coords[:, 0] + self.embed) / self.res).astype(int)
bond_line_idys = np.ceil(
(line_coords[:, 1] + self.embed) / self.res).astype(int)
# Turn atomic coordinates into image positions
atom_idxs = np.round(
(atom_coords[:, 0] + self.embed) / self.res).astype(int)
atom_idys = np.round(
(atom_coords[:, 1] + self.embed) / self.res).astype(int)
try:
# Set the bond line coordinates to the bond property used.
img[bond_line_idxs, bond_line_idys, 0] = bond_props[:, 0]
# Set the atom positions in image to different atomic properties in channels
img[atom_idxs, atom_idys, :] = atom_props
except IndexError:
# With fixed res and img_size some molecules (e.g. long chains) may not fit.
raise IndexError(
"The molecule does not fit into the image. Consider increasing img_size or res of the SmilesToImage featurizer."
)
return img
<file_sep>"""
Test for DFT Utilities
"""
try:
import dqc
from dqc.system.mol import Mol
from dqc.qccalc.ks import KS
from deepchem.utils.dftutils import KSCalc, hashstr
import torch
except ModuleNotFoundError:
pass
import pytest
@pytest.mark.dqc
def test_dftutils():
system = {
'type': 'mol',
'kwargs': {
'moldesc': 'H 0.86625 0 0; F -0.86625 0 0',
'basis': '6-311++G(3df,3pd)'
}
}
atomzs, atomposs = dqc.parse_moldesc(system["kwargs"]["moldesc"])
mol = Mol(**system["kwargs"])
qc = KS(mol, xc='lda_x').run()
qcs = KSCalc(qc)
a = qcs.energy()
b = torch.tensor(-99.1360, dtype=torch.float64)
assert torch.allclose(a, b)
@pytest.mark.dqc
def test_str():
s = "hydrogen fluoride"
s = hashstr(s)
s1 = "df4e3775493a2e784618edaf9e96b7ecb6ce2b4cd022e8619588d55009872bb2"
assert s == s1
<file_sep>FROM ubuntu:18.04
# Install some utilities
RUN apt-get update && \
apt-get install -y -q wget git libxrender1 libsm6 bzip2 && \
rm -rf /var/lib/apt/lists/* /var/cache/apt/archives/*
# Install miniconda
RUN MINICONDA="Miniconda3-latest-Linux-x86_64.sh" && \
wget --quiet https://repo.continuum.io/miniconda/$MINICONDA && \
bash $MINICONDA -b -p /miniconda && \
rm -f $MINICONDA && \
echo ". /miniconda/etc/profile.d/conda.sh" >> ~/.bashrc
ENV PATH /miniconda/bin:$PATH
SHELL ["/bin/bash", "-c"]
# install deepchem with master branch
RUN conda update -n base conda && \
git clone --depth 1 https://github.com/deepchem/deepchem.git && \
cd deepchem && \
source scripts/light/install_deepchem.sh 3.8 cpu tensorflow && \
conda activate deepchem && \
pip install -e . && \
conda clean -afy && \
rm -rf ~/.cache/pip
RUN echo "conda activate deepchem" >> ~/.bashrc
WORKDIR /root/mydir
<file_sep>import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
from torch.autograd import Variable
import torch.optim as optim
import random
import numpy as np
from sklearn.metrics import roc_auc_score
def symmetric_normalize_adj(adj):
"""
    Implements the symmetric graph normalization trick described here:
    https://tkipf.github.io/graph-convolutional-networks/
adj: NxN graph adjacency matrix (2d square numpy array)
"""
    zero_rows = np.where(np.max(adj, axis=1) == 0)[0]
    n_atoms = zero_rows[0] if len(zero_rows) > 0 else adj.shape[0]
if n_atoms == 0:
return(adj)
orig_shape = adj.shape
adj = adj[:n_atoms, :n_atoms]
degree = np.sum(adj, axis=1)
D = np.diag(degree)
D_sqrt = np.sqrt(D)
D_sqrt_inv = np.linalg.inv(D_sqrt)
sym_norm = D_sqrt_inv.dot(adj)
sym_norm = sym_norm.dot(D_sqrt_inv)
new_adj = np.zeros(orig_shape)
new_adj[:n_atoms, :n_atoms] = sym_norm
return(new_adj)
class GraphConvolution(nn.Module):
"""
Differentiable function that performs a graph convolution
given adjacency matrix G and feature matrix X
"""
def __init__(self, n_conv_layers=1,
max_n_atoms=200,
n_atom_types=75,
conv_layer_dims=[64,128,256],
n_fc_layers=2,
fc_layer_dims=[64, 10],
dropout=0.,
return_sigmoid=True):
"""
Defines the operations available in this module.
n_conv_layers: int, number of graph convolution layers
max_n_atoms: int, N, n_rows (n_cols) of adjacency matrix
n_atom_types: int, number of features describing each atom in
input
conv_layer_dims: list of ints, output n_features for each
graph conv layer
n_fc_layers: int, number of fully connected layers
fc_layer_dims: list of ints, output n_features for each
fully connected layer
dropout: float, probability of zeroing out a given output neuron
return_sigmoid: boolean, determines if forward pass
returns sigmoid activation on the final layer
"""
super(GraphConvolution, self).__init__()
self.n_conv_layers = n_conv_layers
self.max_n_atoms = max_n_atoms
self.n_atom_types = n_atom_types
self.fc_layer_dims = fc_layer_dims
self.n_fc_layers = n_fc_layers
self.return_sigmoid = return_sigmoid
self.conv_layer_dims = [n_atom_types] + conv_layer_dims
self.dropout = dropout
self.conv_ops = nn.ModuleList()
for layer_idx in range(self.n_conv_layers):
p_in = self.conv_layer_dims[layer_idx]
p_out = self.conv_layer_dims[layer_idx+1]
op = nn.Sequential(
nn.Linear(p_in, p_out),
nn.Dropout(p=self.dropout),
nn.ReLU(inplace=True),
nn.BatchNorm1d(p_out))
self.conv_ops.append(op)
self.fc_ops = nn.ModuleList()
self.fc_layer_dims = [self.conv_layer_dims[self.n_conv_layers]] + self.fc_layer_dims
for layer_idx in range(self.n_fc_layers):
p_in = self.fc_layer_dims[layer_idx]
p_out = self.fc_layer_dims[layer_idx+1]
op = nn.Sequential(
nn.Linear(p_in, p_out),
nn.Dropout(p=self.dropout),
nn.ReLU(inplace=True),
nn.BatchNorm1d(p_out))
self.fc_ops.append(op)
for m in self.modules():
if isinstance(m, nn.Conv3d):
n = m.kernel_size[0] * m.kernel_size[1] * m.kernel_size[2] * m.out_channels
m.weight.data.normal_(0, np.sqrt(2. / n))
elif m.__class__.__name__.find("BatchNorm") != -1:
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.Linear):
m.weight.data.normal_(0.0, 0.02)
def forward(self, G, x):
"""
Performs a series of graph convolutions
followed by a summation and
fully connected layers.
G: (batch_size, max_n_atoms, max_n_atoms) batch of adjacency matrices
x: (batch_size, max_n_atoms, p) batch of feature matrices for each
molecule
"""
h = x
for layer_idx in range(self.n_conv_layers):
h = torch.bmm(G, h)
h = h.view(-1, h.size()[-1])
op = self.conv_ops[layer_idx]
h = op(h)
h = h.view(-1, self.max_n_atoms, self.conv_layer_dims[layer_idx+1])
h = torch.squeeze(torch.sum(h, dim=1), dim=1)
for layer_idx in range(self.n_fc_layers):
op = self.fc_ops[layer_idx]
h = op(h)
if self.return_sigmoid:
h = nn.Sigmoid()(h)
return(h)
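# A minimal shape-check sketch for GraphConvolution (shapes and hyperparameters
# below are illustrative; the training helpers that follow assume CUDA):
#
#   net = GraphConvolution(n_conv_layers=1, max_n_atoms=200, n_atom_types=75,
#                          conv_layer_dims=[64], n_fc_layers=2,
#                          fc_layer_dims=[64, 1])
#   G = torch.rand(8, 200, 200)  # batch of adjacency matrices
#   x = torch.rand(8, 200, 75)   # batch of per-atom feature matrices
#   out = net(G, x)              # -> (8, 1) sigmoid activations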
class SingleTaskGraphConvolution(object):
"""
Convenience class for training a single task graph convolutional model.
"""
def __init__(self, net, lr, weight_decay):
"""
net: an instance of class GraphConvolution
lr: float, learning rate
weight_decay: float
"""
self.net = net
self.criterion = nn.CrossEntropyLoss()
self.input_x = torch.FloatTensor(-1, self.net.max_n_atoms, self.net.n_atom_types)
self.input_g = torch.FloatTensor(-1, self.net.max_n_atoms, self.net.max_n_atoms)
self.label = torch.FloatTensor(-1)
self.net.cuda()
self.criterion = nn.CrossEntropyLoss()
self.criterion.cuda()
self.input_x, self.input_g, self.label = self.input_x.cuda(), self.input_g.cuda(), self.label.cuda()
self.lr = lr
self.weight_decay = weight_decay
# setup optimizer
self.optimizer = optim.Adam(self.net.parameters(),
lr=self.lr,
weight_decay=self.weight_decay)
def train_epoch(self, train_features, y_train, batch_size=32,
shuffle_train_inds=True):
"""
train_features: list of dictionaries. each dictionary represents one sample feature.
key "x" maps to max_n_atoms x p feature matrix. key "g" maps to square adjacency matrix
y_train: numpy array of labels
"""
        train_inds = list(range(len(train_features)))
if shuffle_train_inds:
random.shuffle(train_inds)
        for b in range(0, len(train_inds) // batch_size):
batch_inds = [train_inds[idx] for idx in range(b*batch_size, (b+1)*batch_size)]
train_x_batch = np.concatenate([np.expand_dims(train_features[idx]["x"], 0) for idx in batch_inds], axis=0)
train_g_batch = np.concatenate([np.expand_dims(train_features[idx]["g"], 0) for idx in batch_inds], axis=0)
xb = torch.from_numpy(train_x_batch.astype(np.float32)).cuda()
gb = torch.from_numpy(train_g_batch.astype(np.float32)).cuda()
yb = torch.from_numpy(y_train[batch_inds].astype(np.float32)).cuda()
self.net.train()
self.net.zero_grad()
self.input_x.resize_as_(xb).copy_(xb)
self.input_g.resize_as_(gb).copy_(gb)
self.label.resize_as_(yb).copy_(yb)
input_xv = Variable(self.input_x)
input_gv = Variable(self.input_g)
label_v = Variable(self.label)
output = self.net(input_gv, input_xv)
err = self.criterion(output, label_v)
err.backward()
self.optimizer.step()
def evaluate(self, train_features,
test_features,
y_train,
y_test,
transformer,
batch_size=32):
self.net.eval()
print("TRAIN:")
o = []
l = []
train_inds = range(0, len(train_features))
        for b in range(0, len(train_features) // batch_size):
batch_inds = [train_inds[idx] for idx in range(b*batch_size, (b+1)*batch_size)]
train_x_batch = np.concatenate([np.expand_dims(train_features[idx]["x"], 0) for idx in batch_inds], axis=0)
train_g_batch = np.concatenate([np.expand_dims(train_features[idx]["g"], 0) for idx in batch_inds], axis=0)
xb = torch.from_numpy(train_x_batch.astype(np.float32)).cuda()
gb = torch.from_numpy(train_g_batch.astype(np.float32)).cuda()
self.input_x.resize_as_(xb).copy_(xb)
self.input_g.resize_as_(gb).copy_(gb)
input_xv = Variable(self.input_x)
input_gv = Variable(self.input_g)
output = self.net(input_gv, input_xv)
if transformer is not None:
o.append(transformer.inverse_transform(output.data.cpu().numpy().reshape((-1,1))).flatten())
l.append(transformer.inverse_transform(y_train[batch_inds].reshape((-1,1))).flatten())
else:
o.append(output.data.cpu().numpy().reshape((-1,1)).flatten())
l.append(y_train[batch_inds].reshape((-1,1)).flatten())
o = np.concatenate(o)
l = np.concatenate(l)
print("RMSE:")
print(np.sqrt(np.mean(np.square(l-o))))
print("ROC AUC:")
print(roc_auc_score(l, o))
o = []
l = []
print("TEST:")
test_inds = range(0, len(test_features))
        for b in range(0, len(test_features) // batch_size):
batch_inds = [test_inds[idx] for idx in range(b*batch_size, (b+1)*batch_size)]
test_x_batch = np.concatenate([np.expand_dims(test_features[idx]["x"], 0) for idx in batch_inds], axis=0)
test_g_batch = np.concatenate([np.expand_dims(test_features[idx]["g"], 0) for idx in batch_inds], axis=0)
xb = torch.from_numpy(test_x_batch.astype(np.float32)).cuda()
gb = torch.from_numpy(test_g_batch.astype(np.float32)).cuda()
self.input_x.resize_as_(xb).copy_(xb)
self.input_g.resize_as_(gb).copy_(gb)
input_xv = Variable(self.input_x)
input_gv = Variable(self.input_g)
output = self.net(input_gv, input_xv)
if transformer is not None:
o.append(transformer.inverse_transform(output.data.cpu().numpy().reshape((-1,1))).flatten())
l.append(transformer.inverse_transform(y_test[batch_inds].reshape((-1,1))).flatten())
else:
o.append(output.data.cpu().numpy().reshape((-1,1)).flatten())
l.append(y_test[batch_inds].reshape((-1,1)).flatten())
o = np.concatenate(o)
l = np.concatenate(l)
print("RMSE:")
print(np.sqrt(np.mean(np.square(l-o))))
print("ROC AUC:")
print(roc_auc_score(l, o))
class MultiTaskGraphConvolution(object):
"""
Convenience Class for training and evaluating multitask graph convolutional network
"""
def __init__(self, net, lr, weight_decay, n_tasks):
"""
net: an instance of class GraphConvolution
lr: float, learning rate
weight_decay: float
n_tasks: int, number of tasks
"""
self.net = net
self.criterion = nn.CrossEntropyLoss()
self.input_x = torch.FloatTensor(-1, self.net.max_n_atoms, self.net.n_atom_types)
self.input_g = torch.FloatTensor(-1, self.net.max_n_atoms, self.net.max_n_atoms)
self.label = torch.FloatTensor(-1)
self.net.cuda()
self.criterion = nn.CrossEntropyLoss()
self.criterion.cuda()
self.input_x, self.input_g, self.label = self.input_x.cuda(), self.input_g.cuda(), self.label.cuda()
self.lr = lr
self.weight_decay = weight_decay
# setup optimizer
self.optimizer = optim.Adam(self.net.parameters(),
lr=self.lr,
weight_decay=self.weight_decay)
self.n_tasks = n_tasks
def multitask_loss(self, output, label_v):
losses = []
for task in range(self.n_tasks):
#print("tasK: %d" %task)
scores = output[:,task].contiguous().view((-1,1))
#cores = torch.cat([scores, 1.-scores], dim=1)
#print("scores")
#print(scores.size())
task_label = label_v[:,task]#.long()
#print("task_label")
#print(task_label.size())
#task_loss = self.criterion(scores, task_label)
            # Binary cross-entropy per task. The original expression applied the
            # log to the label rather than to the score in the second term.
            task_loss = -(task_label * torch.log(scores) +
                          (1. - task_label) * torch.log(1. - scores))
task_loss = task_loss.mean()
losses.append(task_loss)
#print("task_loss")
#print(task_loss.size())
        # stack (rather than cat) also handles zero-dimensional per-task losses
        loss = torch.stack(losses).mean()
return(loss)
def train_epoch(self, train_features, y_train, batch_size=32,
shuffle_train_inds=True):
        # Use a list of indices (range objects cannot be shuffled in Python 3)
        # and integer division when computing the number of batches.
        train_inds = list(range(0, len(train_features)))
        if shuffle_train_inds:
            random.shuffle(train_inds)
        for b in range(0, len(train_inds) // batch_size):
batch_inds = [train_inds[idx] for idx in range(b*batch_size, (b+1)*batch_size)]
train_x_batch = np.concatenate([np.expand_dims(train_features[idx]["x"], 0) for idx in batch_inds], axis=0)
train_g_batch = np.concatenate([np.expand_dims(train_features[idx]["g"], 0) for idx in batch_inds], axis=0)
xb = torch.from_numpy(train_x_batch.astype(np.float32)).cuda()
gb = torch.from_numpy(train_g_batch.astype(np.float32)).cuda()
yb = torch.from_numpy(y_train[batch_inds].astype(np.float32)).cuda()
self.net.train()
self.net.zero_grad()
self.input_x.resize_as_(xb).copy_(xb)
self.input_g.resize_as_(gb).copy_(gb)
self.label.resize_as_(yb).copy_(yb)
input_xv = Variable(self.input_x)
input_gv = Variable(self.input_g)
label_v = Variable(self.label)
output = self.net(input_gv, input_xv)
err = self.multitask_loss(output, label_v)
err.backward()
self.optimizer.step()
def evaluate(self, train_features,
test_features,
y_train,
y_test,
transformer,
batch_size=32):
self.net.eval()
print("TRAIN:")
o = []
l = []
train_inds = range(0, len(train_features))
        for b in range(0, len(train_features) // batch_size):
batch_inds = [train_inds[idx] for idx in range(b*batch_size, (b+1)*batch_size)]
train_x_batch = np.concatenate([np.expand_dims(train_features[idx]["x"], 0) for idx in batch_inds], axis=0)
train_g_batch = np.concatenate([np.expand_dims(train_features[idx]["g"], 0) for idx in batch_inds], axis=0)
xb = torch.from_numpy(train_x_batch.astype(np.float32)).cuda()
gb = torch.from_numpy(train_g_batch.astype(np.float32)).cuda()
self.input_x.resize_as_(xb).copy_(xb)
self.input_g.resize_as_(gb).copy_(gb)
input_xv = Variable(self.input_x)
input_gv = Variable(self.input_g)
output = self.net(input_gv, input_xv)
if transformer is not None:
o.append(transformer.inverse_transform(output.data.cpu().numpy().reshape((-1,1))).flatten())
l.append(transformer.inverse_transform(y_train[batch_inds].reshape((-1,1))).flatten())
else:
o.append(output.data.cpu().numpy().reshape((-1,1)).flatten())
l.append(y_train[batch_inds].reshape((-1,1)).flatten())
o = np.concatenate(o)
l = np.concatenate(l)
print("RMSE:")
print(np.sqrt(np.mean(np.square(l-o))))
print("ROC AUC:")
print(roc_auc_score(l, o))
o = []
l = []
print("TEST:")
test_inds = range(0, len(test_features))
        for b in range(0, len(test_features) // batch_size):
batch_inds = [test_inds[idx] for idx in range(b*batch_size, (b+1)*batch_size)]
test_x_batch = np.concatenate([np.expand_dims(test_features[idx]["x"], 0) for idx in batch_inds], axis=0)
test_g_batch = np.concatenate([np.expand_dims(test_features[idx]["g"], 0) for idx in batch_inds], axis=0)
xb = torch.from_numpy(test_x_batch.astype(np.float32)).cuda()
gb = torch.from_numpy(test_g_batch.astype(np.float32)).cuda()
self.input_x.resize_as_(xb).copy_(xb)
self.input_g.resize_as_(gb).copy_(gb)
input_xv = Variable(self.input_x)
input_gv = Variable(self.input_g)
output = self.net(input_gv, input_xv)
if transformer is not None:
o.append(transformer.inverse_transform(output.data.cpu().numpy().reshape((-1,1))).flatten())
l.append(transformer.inverse_transform(y_test[batch_inds].reshape((-1,1))).flatten())
else:
o.append(output.data.cpu().numpy().reshape((-1,1)).flatten())
l.append(y_test[batch_inds].reshape((-1,1)).flatten())
o = np.concatenate(o)
l = np.concatenate(l)
print("RMSE:")
print(np.sqrt(np.mean(np.square(l-o))))
print("ROC AUC:")
print(roc_auc_score(l, o))
<file_sep>"""
Integration tests for singletask vector feature models.
"""
import os
import pytest
import deepchem as dc
import numpy as np
from sklearn.ensemble import RandomForestRegressor
try:
import torch # noqa: F401
has_pytorch = True
except ImportError:
    has_pytorch = False
def test_singletask_sklearn_rf_ECFP_regression_API():
"""Test of singletask RF ECFP regression API."""
X = np.random.rand(100, 5)
y = np.random.rand(100,)
dataset = dc.data.NumpyDataset(X, y)
splitter = dc.splits.RandomSplitter()
train_dataset, test_dataset = splitter.train_test_split(dataset)
transformer = dc.trans.NormalizationTransformer(transform_y=True,
dataset=train_dataset)
train_dataset = transformer.transform(train_dataset)
test_dataset = transformer.transform(test_dataset)
regression_metrics = [
dc.metrics.Metric(dc.metrics.r2_score),
dc.metrics.Metric(dc.metrics.mean_squared_error),
dc.metrics.Metric(dc.metrics.mean_absolute_error)
]
sklearn_model = RandomForestRegressor()
model = dc.models.SklearnModel(sklearn_model)
# Fit trained model
model.fit(train_dataset)
model.save()
# Eval model on train
_ = model.evaluate(train_dataset, regression_metrics, [transformer])
_ = model.evaluate(test_dataset, regression_metrics, [transformer])
def test_singletask_sklearn_rf_user_specified_regression_API():
"""Test of singletask RF USF regression API."""
featurizer = dc.feat.UserDefinedFeaturizer(
["user-specified1", "user-specified2"])
tasks = ["log-solubility"]
current_dir = os.path.dirname(os.path.abspath(__file__))
input_file = os.path.join(current_dir, "assets/user_specified_example.csv")
loader = dc.data.UserCSVLoader(tasks=tasks,
feature_field="smiles",
featurizer=featurizer)
dataset = loader.create_dataset(input_file)
splitter = dc.splits.RandomSplitter()
train_dataset, test_dataset = splitter.train_test_split(dataset)
transformers = [
dc.trans.NormalizationTransformer(transform_y=True,
dataset=train_dataset)
]
for dataset in [train_dataset, test_dataset]:
for transformer in transformers:
dataset = transformer.transform(dataset)
regression_metrics = [
dc.metrics.Metric(dc.metrics.r2_score),
dc.metrics.Metric(dc.metrics.mean_squared_error),
dc.metrics.Metric(dc.metrics.mean_absolute_error)
]
sklearn_model = RandomForestRegressor()
model = dc.models.SklearnModel(sklearn_model)
# Fit trained model
model.fit(train_dataset)
model.save()
# Eval model on train/test
_ = model.evaluate(train_dataset, regression_metrics, transformers)
_ = model.evaluate(test_dataset, regression_metrics, transformers)
def test_singletask_sklearn_rf_RDKIT_descriptor_regression_API():
"""Test of singletask RF RDKIT-descriptor regression API."""
featurizer = dc.feat.RDKitDescriptors()
tasks = ["log-solubility"]
current_dir = os.path.dirname(os.path.abspath(__file__))
input_file = os.path.join(current_dir, "assets/example.csv")
loader = dc.data.CSVLoader(tasks=tasks,
feature_field="smiles",
featurizer=featurizer)
dataset = loader.create_dataset(input_file)
splitter = dc.splits.ScaffoldSplitter()
train_dataset, test_dataset = splitter.train_test_split(dataset)
transformers = [
dc.trans.NormalizationTransformer(transform_X=True,
dataset=train_dataset),
dc.trans.ClippingTransformer(transform_X=True, dataset=train_dataset),
dc.trans.NormalizationTransformer(transform_y=True,
dataset=train_dataset)
]
for dataset in [train_dataset, test_dataset]:
for transformer in transformers:
dataset = transformer.transform(dataset)
regression_metrics = [
dc.metrics.Metric(dc.metrics.r2_score),
dc.metrics.Metric(dc.metrics.mean_squared_error),
dc.metrics.Metric(dc.metrics.mean_absolute_error)
]
sklearn_model = RandomForestRegressor()
model = dc.models.SklearnModel(sklearn_model)
# Fit trained model
model.fit(train_dataset)
model.save()
# Eval model on train/test
_ = model.evaluate(train_dataset, regression_metrics, transformers)
_ = model.evaluate(test_dataset, regression_metrics, transformers)
@pytest.mark.torch
def test_singletask_mlp_ECFP_classification_API():
"""Test of singletask MLP classification API."""
np.random.seed(123)
X = np.random.rand(100, 5)
y = np.random.randint(2, size=(100,))
dataset = dc.data.NumpyDataset(X, y)
splitter = dc.splits.RandomSplitter()
train_dataset, test_dataset = splitter.train_test_split(dataset)
transformers = []
classification_metrics = [
dc.metrics.Metric(dc.metrics.roc_auc_score),
dc.metrics.Metric(dc.metrics.prc_auc_score),
dc.metrics.Metric(dc.metrics.matthews_corrcoef),
dc.metrics.Metric(dc.metrics.recall_score),
dc.metrics.Metric(dc.metrics.accuracy_score),
dc.metrics.Metric(dc.metrics.balanced_accuracy_score),
dc.metrics.Metric(dc.metrics.jaccard_score),
dc.metrics.Metric(dc.metrics.f1_score),
dc.metrics.Metric(dc.metrics.pixel_error),
dc.metrics.Metric(dc.metrics.kappa_score),
dc.metrics.Metric(dc.metrics.bedroc_score),
]
model = dc.models.MultitaskClassifier(1, 5)
# Fit trained model
model.fit(train_dataset)
# Eval model on train/test
_ = model.evaluate(train_dataset, classification_metrics, transformers)
_ = model.evaluate(test_dataset, classification_metrics, transformers)
<file_sep>Coding Conventions
==================
Pre-Commit
-----------
.. _`Pre-Commit`: https://pre-commit.com/
We use `pre-commit`_ to ensure that we're always keeping up with the best
practices when it comes to linting, standard code conventions and type
annotations. Running all of these checks may seem time consuming at first, but
it helps identify simple issues before the code ever reaches review. We've
already specified a configuration file with a list of hooks that will get
executed before every commit.
First, you'll need to set up the git hook scripts by installing them.
.. code-block:: bash
pre-commit install
Now whenever you commit, pre-commit will run the necessary hooks on the modified
files.
Code Formatting
---------------
.. _`YAPF`: https://github.com/google/yapf
We use `YAPF`_ to format all of the code in DeepChem. Although it sometimes
produces slightly awkward formatting, it does have two major benefits. First,
it ensures complete consistency throughout the entire codebase. And second, it
avoids disagreements about how a piece of code should be formatted.
Whenever you modify a file, run :code:`yapf` on it to reformat it before
checking it in.
.. code-block:: bash
yapf -i <modified file>
YAPF is run on every pull request to make sure the formatting is correct, so if
you forget to do this the continuous integration system will remind you.
Because different versions of YAPF can produce different results, it is
essential to use the same version that is being run on CI. At present, that
is 0.32. We periodically update it to newer versions.
Linting
-------
.. _`Flake8`: https://github.com/pycqa/flake8
We use `Flake8`_ to check our code syntax. Lint tools provide two main benefits:

- Prevent things like syntax errors or typos
- Save review time (no need to manually check for unused code or typos)
Whenever you modify a file, run :code:`flake8` on it.
.. code-block:: bash
flake8 <modified file> --count
If the command returns 0, it means your code passes the Flake8 check.
Docstrings
----------
All classes and functions should include docstrings describing their purpose and
intended usage. When in doubt about how much information to include, always err
on the side of including more rather than less. Explain what problem a class is
intended to solve, what algorithms it uses, and how to use it correctly. When
appropriate, cite the relevant publications.
.. _`numpy`: https://numpydoc.readthedocs.io/en/latest/format.html#docstring-standard
All docstrings should follow the `numpy`_ docstring formatting conventions. To
ensure that the code examples in the docstrings are working as expected, run
.. code-block:: bash
python -m doctest <modified file>
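
A minimal sketch of the expected docstring style (the ``add`` function below is
made up purely for illustration):

.. code-block:: python

    def add(a: int, b: int) -> int:
        """Add two integers.

        Parameters
        ----------
        a: int
            First number to add.
        b: int
            Second number to add.

        Returns
        -------
        int
            The sum of `a` and `b`.

        Examples
        --------
        >>> add(1, 2)
        3
        """
        return a + b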
Unit Tests
----------
Having an extensive collection of test cases is essential to ensure the code
works correctly. If you haven't written tests for a feature, that means the
feature isn't finished yet. Untested code is code that probably doesn't work.
Complex numerical code is sometimes challenging to fully test. When an
algorithm produces a result, it sometimes is not obvious how to tell whether the
result is correct or not. As far as possible, try to find simple examples for
which the correct answer is exactly known. Sometimes we rely on stochastic
tests which will *probably* pass if the code is correct and *probably* fail if
the code is broken. This means these tests are expected to fail a small
fraction of the time. Such tests can be marked with the :code:`@flaky`
annotation. If they fail during continuous integration, they will be run a
second time and an error only reported if they fail again.
If possible, each test should run in no more than a few seconds. Occasionally
this is not possible. In that case, mark the test with the :code:`@pytest.mark.slow`
annotation. Slow tests are skipped during continuous integration, so changes
that break them may sometimes slip through and get merged into the repository.
We still try to run them regularly, so hopefully the problem will be discovered
fairly soon.
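
As a sketch, the two annotations are used like this (the test bodies and helper
functions are placeholders, not real DeepChem tests):

.. code-block:: python

    import pytest
    from flaky import flaky

    @flaky
    def test_stochastic_sampler():
        # Expected to pass with high probability; rerun automatically on failure.
        assert run_stochastic_check()  # placeholder helper

    @pytest.mark.slow
    def test_full_training_run():
        # Takes several minutes, so it is skipped during continuous integration.
        train_and_check_large_model()  # placeholder helper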
The full suite of slow tests can be run from the root directory of the source code as
.. code-block:: bash
pytest -v -m 'slow' deepchem
To test your code locally, you will have to set up a symbolic link to your
current development directory. To do this, simply run
.. code-block:: bash
python setup.py develop
while installing the package from source. This will let you see changes that you
make to the source code when you import the package and, in particular, it
allows you to import the new classes/methods for unit tests.
Ensure that the tests pass locally! Check this by running
.. code-block:: bash
python -m pytest <modified file>
Testing Machine Learning Models
-------------------------------
Testing the correctness of a machine learning model can be quite
tricky to do in practice. When adding a new machine learning model to
DeepChem, you should add at least a few basic types of unit tests:
- Overfitting test: Create a small synthetic dataset and test that
  your model can learn this dataset with high accuracy. For regression
  and classification tasks, this should correspond to low training error
  on the dataset. For generative tasks, this should correspond to low
  training loss on the dataset (see the sketch after this list).
- Reloading test: Check that a trained model can be saved to disk and
  reloaded correctly. This should involve checking that predictions from
  the saved and reloaded models match exactly.
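
For instance, an overfitting test for a regression model might look roughly
like the following sketch (``MyModel`` and its constructor arguments are
placeholders for the model you are adding, not an existing DeepChem class):

.. code-block:: python

    import numpy as np
    import deepchem as dc

    def test_my_model_overfit():
        np.random.seed(123)
        X = np.random.rand(10, 5)
        y = np.random.rand(10, 1)
        dataset = dc.data.NumpyDataset(X, y)
        model = MyModel(n_tasks=1, n_features=5)  # placeholder model class
        model.fit(dataset, nb_epoch=100)
        metric = dc.metrics.Metric(dc.metrics.mean_absolute_error)
        scores = model.evaluate(dataset, [metric])
        assert scores['mean_absolute_error'] < 0.1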
Note that unit tests are not sufficient to gauge the real performance
of a model. You should benchmark your model on larger datasets as well
and report your benchmarking tests in the PR comments.
For testing tensorflow models and pytorch models, we recommend testing in
different conda environments. Tensorflow 2.6 supports numpy 1.19 while
pytorch supports numpy 1.21. This version mismatch in the numpy dependency
sometimes causes trouble when installing the tensorflow and pytorch backends in
the same environment.
For testing tensorflow models of deepchem, we create a tensorflow test environment
and then run the test as follows:
.. code-block:: bash
conda create -n tf-test python=3.8
conda activate tf-test
pip install conda-merge
conda-merge requirements/tensorflow/env_tensorflow.yml requirements/env_test.yml > env.yml
conda env update --file env.yml --prune
pytest -v -m 'tensorflow' deepchem
For testing pytorch models of deepchem, first create a pytorch test environment
and then run the tests as follows:
.. code-block:: bash
conda create -n pytorch-test python=3.8
conda activate pytorch-test
pip install conda-merge
conda-merge requirements/torch/env_torch.yml requirements/torch/env_torch.cpu.yml requirements/env_test.yml > env.yml
conda env update --file env.yml --prune
pytest -v -m 'torch' deepchem
Type Annotations
----------------
Type annotations are an important tool for avoiding bugs. All new code should
provide type annotations for function arguments and return types. When you make
significant changes to existing code that does not have type annotations, please
consider adding them at the same time.
.. _`mypy`: http://mypy-lang.org/
We use the `mypy`_ static type checker to verify code correctness. It is
automatically run on every pull request. If you want to run it locally to make
sure you are using types correctly before checking in your code, :code:`cd` to
the top level directory of the repository and execute the command
.. code-block:: bash
mypy -p deepchem --ignore-missing-imports
Because Python is such a dynamic language, it sometimes is not obvious what type
to specify. A good rule of thumb is to be permissive about input types and
strict about output types. For example, many functions are documented as taking
a list as an argument, but actually work just as well with a tuple. In those
cases, it is best to specify the input type as :code:`Sequence` to accept either
one. But if a function returns a list, specify the type as :code:`List` because
we can guarantee the return value will always have that exact type.
Another important case is NumPy arrays. Many functions are documented as taking
an array, but actually can accept any array-like object: a list of numbers, a
list of lists of numbers, a list of arrays, etc. In that case, specify the type
as :code:`Sequence` to accept any of these. On the other hand, if the function
truly requires an array and will fail with any other input, specify it as
:code:`np.ndarray`.
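
For example (a sketch, not an actual DeepChem function):

.. code-block:: python

    from typing import List, Sequence

    def normalize(values: Sequence[float]) -> List[float]:
        """Accept any sequence of numbers, but always return a list."""
        total = sum(values)
        return [v / total for v in values]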
The :code:`deepchem.utils.typing` module contains definitions of some types that
appear frequently in the DeepChem API. You may find them useful when annotating
code.
<file_sep>echo "Pulling GDB7 dataset from deepchem"
wget http://deepchem.io.s3-website-us-west-1.amazonaws.com/datasets/gdb7.tar.gz
echo "Extracting gdb7 structures"
tar -zxvf gdb7.tar.gz
<file_sep>"""
Test for MEGNetModel
"""
import pytest
import tempfile
import numpy as np
import deepchem as dc
from deepchem.utils.fake_data_generator import FakeGraphGenerator as FGG
try:
from deepchem.models.torch_models import MEGNetModel
# When pytest runs without pytorch in the environment (ex: as in tensorflow workflow),
# the above import raises a ModuleNotFoundError. It is safe to ignore it
# since the below tests only run in an environment with pytorch installed.
except ModuleNotFoundError:
pass
@pytest.mark.torch
def test_megnet_overfit():
fgg = FGG(avg_n_nodes=10,
n_node_features=5,
avg_degree=4,
n_edge_features=3,
global_features=4,
num_classes=5,
task='graph')
graphs = fgg.sample(n_graphs=100)
model = MEGNetModel(n_node_features=5,
n_edge_features=3,
n_global_features=4,
n_blocks=3,
is_undirected=True,
residual_connection=True,
mode='classification',
n_classes=5,
batch_size=16)
metric = dc.metrics.Metric(dc.metrics.accuracy_score, mode="classification")
model.fit(graphs, nb_epoch=100)
scores = model.evaluate(graphs, [metric], n_classes=5)
assert scores['accuracy_score'] == 1.0
@pytest.mark.torch
def test_megnet_classification():
fgg = FGG(avg_n_nodes=10,
n_node_features=5,
avg_degree=4,
n_edge_features=3,
global_features=4,
num_classes=10)
graphs = fgg.sample(n_graphs=200)
model = MEGNetModel(n_node_features=5,
n_edge_features=3,
n_global_features=4,
n_blocks=3,
is_undirected=True,
residual_connection=True,
mode='classification',
n_classes=10,
batch_size=16)
metric = dc.metrics.Metric(dc.metrics.accuracy_score, mode="classification")
model.fit(graphs, nb_epoch=50)
scores = model.evaluate(graphs, [metric], n_classes=10)
assert scores['accuracy_score'] > 0.9
@pytest.mark.torch
def test_megnet_regression():
# TODO The test is skipped as FakeGraphGenerator has to be updated
# to generate regression labels
return
@pytest.mark.torch
def test_megnet_reload():
fgg = FGG(avg_n_nodes=10,
n_node_features=5,
avg_degree=4,
n_edge_features=3,
global_features=4,
num_classes=3)
graphs = fgg.sample(n_graphs=10)
test_graphs = fgg.sample(n_graphs=10)
model_dir = tempfile.mkdtemp()
model = MEGNetModel(n_node_features=5,
n_edge_features=3,
n_global_features=4,
n_blocks=3,
is_undirected=True,
residual_connection=True,
mode='classification',
n_classes=3,
batch_size=16,
model_dir=model_dir)
model.fit(graphs, nb_epoch=10)
reloaded_model = MEGNetModel(n_node_features=5,
n_edge_features=3,
n_global_features=4,
n_blocks=3,
is_undirected=True,
residual_connection=True,
mode='classification',
n_classes=3,
batch_size=16,
model_dir=model_dir)
reloaded_model.restore()
orig_predict = model.predict(test_graphs)
reloaded_predict = reloaded_model.predict(test_graphs)
assert np.all(orig_predict == reloaded_predict)
<file_sep>from deepchem.molnet.load_function.bace_datasets import load_bace_classification, load_bace_regression
from deepchem.molnet.load_function.bbbc_datasets import load_bbbc001, load_bbbc002
from deepchem.molnet.load_function.bbbp_datasets import load_bbbp
from deepchem.molnet.load_function.cell_counting_datasets import load_cell_counting
from deepchem.molnet.load_function.chembl_datasets import load_chembl
from deepchem.molnet.load_function.clearance_datasets import load_clearance
from deepchem.molnet.load_function.clintox_datasets import load_clintox
from deepchem.molnet.load_function.delaney_datasets import load_delaney
from deepchem.molnet.load_function.hiv_datasets import load_hiv
from deepchem.molnet.load_function.hopv_datasets import load_hopv
from deepchem.molnet.load_function.kaggle_datasets import load_kaggle
from deepchem.molnet.load_function.lipo_datasets import load_lipo
from deepchem.molnet.load_function.muv_datasets import load_muv
from deepchem.molnet.load_function.nci_datasets import load_nci
from deepchem.molnet.load_function.pcba_datasets import load_pcba
from deepchem.molnet.load_function.pdbbind_datasets import load_pdbbind
from deepchem.molnet.load_function.ppb_datasets import load_ppb
from deepchem.molnet.load_function.qm7_datasets import load_qm7
from deepchem.molnet.load_function.qm8_datasets import load_qm8
from deepchem.molnet.load_function.qm9_datasets import load_qm9
from deepchem.molnet.load_function.sampl_datasets import load_sampl
from deepchem.molnet.load_function.sider_datasets import load_sider
from deepchem.molnet.load_function.sweetlead_datasets import load_sweet
from deepchem.molnet.load_function.tox21_datasets import load_tox21
from deepchem.molnet.load_function.toxcast_datasets import load_toxcast
from deepchem.molnet.load_function.uspto_datasets import load_uspto
from deepchem.molnet.load_function.uv_datasets import load_uv
from deepchem.molnet.load_function.factors_datasets import load_factors
from deepchem.molnet.load_function.kinase_datasets import load_kinase
from deepchem.molnet.load_function.thermosol_datasets import load_thermosol
from deepchem.molnet.load_function.hppb_datasets import load_hppb
from deepchem.molnet.load_function.chembl25_datasets import load_chembl25
from deepchem.molnet.load_function.zinc15_datasets import load_zinc15
from deepchem.molnet.load_function.freesolv_dataset import load_freesolv
from deepchem.molnet.load_function.material_datasets.load_bandgap import load_bandgap
from deepchem.molnet.load_function.material_datasets.load_perovskite import load_perovskite
from deepchem.molnet.load_function.material_datasets.load_Pt_NO_surface_adsorbate_energy import load_Platinum_Adsorption
from deepchem.molnet.load_function.material_datasets.load_mp_formation_energy import load_mp_formation_energy
from deepchem.molnet.load_function.material_datasets.load_mp_metallicity import load_mp_metallicity
from deepchem.molnet.load_function.molnet_loader import featurizers, splitters, transformers, TransformerGenerator, _MolnetLoader
from deepchem.molnet.dnasim import simulate_motif_density_localization
from deepchem.molnet.dnasim import simulate_motif_counting
from deepchem.molnet.dnasim import simple_motif_embedding
from deepchem.molnet.dnasim import motif_density
from deepchem.molnet.dnasim import simulate_single_motif_detection
<file_sep>"""
Script that trains Weave models on Tox21 dataset.
"""
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
import numpy as np
np.random.seed(123)
import tensorflow as tf
tf.set_random_seed(123)
import deepchem as dc
# Load Tox21 dataset
tox21_tasks, tox21_datasets, transformers = dc.molnet.load_tox21(
featurizer='Weave')
train_dataset, valid_dataset, test_dataset = tox21_datasets
# Fit models
metric = dc.metrics.Metric(
dc.metrics.roc_auc_score, np.mean, mode="classification")
max_atoms_train = max([mol.get_num_atoms() for mol in train_dataset.X])
max_atoms_valid = max([mol.get_num_atoms() for mol in valid_dataset.X])
max_atoms_test = max([mol.get_num_atoms() for mol in test_dataset.X])
max_atoms = max([max_atoms_train, max_atoms_valid, max_atoms_test])
n_atom_feat = 75
n_pair_feat = 14
# Batch size of models
batch_size = 64
n_feat = 128
graph = dc.nn.AlternateSequentialWeaveGraph(
batch_size,
max_atoms=max_atoms,
n_atom_feat=n_atom_feat,
n_pair_feat=n_pair_feat)
graph.add(dc.nn.AlternateWeaveLayer(max_atoms, 75, 14))
#graph.add(dc.nn.AlternateWeaveLayer(max_atoms, 50, 50))
graph.add(dc.nn.Dense(n_feat, 50, activation='tanh'))
graph.add(dc.nn.BatchNormalization(epsilon=1e-5, mode=1))
graph.add(
dc.nn.AlternateWeaveGather(
batch_size, n_input=n_feat, gaussian_expand=True))
model = dc.models.MultitaskGraphClassifier(
graph,
len(tox21_tasks),
n_feat,
batch_size=batch_size,
learning_rate=1e-3,
learning_rate_decay_time=1000,
optimizer_type="adam",
beta1=.9,
beta2=.999)
# Fit trained model
model.fit(train_dataset, nb_epoch=20, log_every_N_batches=5)
print("Evaluating model")
train_scores = model.evaluate(train_dataset, [metric], transformers)
valid_scores = model.evaluate(valid_dataset, [metric], transformers)
print("Train scores")
print(train_scores)
print("Validation scores")
print(valid_scores)
<file_sep>"""
Compute various spatial fingerprints for macromolecular complexes.
"""
import itertools
import logging
import numpy as np
from deepchem.utils import rdkit_utils
from deepchem.feat import ComplexFeaturizer
from deepchem.utils.voxel_utils import voxelize
from deepchem.utils.voxel_utils import convert_atom_to_voxel
from deepchem.utils.voxel_utils import convert_atom_pair_to_voxel
from deepchem.utils.noncovalent_utils import compute_salt_bridges
from deepchem.utils.noncovalent_utils import compute_binding_pocket_cation_pi
from deepchem.utils.noncovalent_utils import compute_pi_stack
from deepchem.utils.noncovalent_utils import compute_hydrogen_bonds
from deepchem.utils.rdkit_utils import MoleculeLoadException
from deepchem.utils.rdkit_utils import compute_contact_centroid
from deepchem.utils.geometry_utils import compute_pairwise_distances
from deepchem.utils.geometry_utils import subtract_centroid
from deepchem.utils.fragment_utils import get_partial_charge
from deepchem.utils.fragment_utils import reduce_molecular_complex_to_contacts
from typing import List, Tuple, Optional
logger = logging.getLogger(__name__)
HBOND_DIST_BINS = [(2.2, 2.5), (2.5, 3.2), (3.2, 4.0)]
HBOND_ANGLE_CUTOFFS = [5., 50., 90.]
def compute_charge_dictionary(molecule):
"""Create a dictionary with partial charges for each atom in the molecule.
This function assumes that the charges for the molecule are
already computed (it can be done with
rdkit_util.compute_charges(molecule))
"""
charge_dictionary = {}
for i, atom in enumerate(molecule.GetAtoms()):
charge_dictionary[i] = get_partial_charge(atom)
return charge_dictionary
class ChargeVoxelizer(ComplexFeaturizer):
"""Localize partial charges of atoms in macromolecular complexes.
Given a macromolecular complex made up of multiple
    constituent molecules, compute the partial (Gasteiger)
    charge on each atom. For each atom, localize this
partial charge in the voxel in which it originated to create
a local charge array. Sum contributions to get an effective
charge at each voxel.
Let `voxels_per_edge = int(box_width/voxel_width)`. Creates a
tensor output of shape `(voxels_per_edge, voxels_per_edge,
voxels_per_edge, 1)` for each macromolecular complex that computes
the effective charge at each voxel.
"""
def __init__(self,
cutoff: float = 4.5,
box_width: float = 16.0,
voxel_width: float = 1.0,
reduce_to_contacts: bool = True):
"""
Parameters
----------
cutoff: float (default 4.5)
Distance cutoff in angstroms for molecules in complex.
box_width: float, optional (default 16.0)
Size of a box in which voxel features are calculated. Box
is centered on a ligand centroid.
voxel_width: float, optional (default 1.0)
Size of a 3D voxel in a grid.
reduce_to_contacts: bool, optional
If True, reduce the atoms in the complex to those near a contact
region.
"""
self.cutoff = cutoff
self.box_width = box_width
self.voxel_width = voxel_width
self.reduce_to_contacts = reduce_to_contacts
def _featurize(self, datapoint, **kwargs): # -> Optional[np.ndarray]:
"""
Compute featurization for a single mol/protein complex
Parameters
----------
datapoint: Tuple[str, str]
Filenames for molecule and protein.
"""
if 'complex' in kwargs:
datapoint = kwargs.get("complex")
raise DeprecationWarning(
'Complex is being phased out as a parameter, please pass "datapoint" instead.'
)
try:
fragments = rdkit_utils.load_complex(datapoint, add_hydrogens=False)
except MoleculeLoadException:
logger.warning(
"This molecule cannot be loaded by Rdkit. Returning None")
return None
pairwise_features = []
# We compute pairwise contact fingerprints
centroid = compute_contact_centroid(fragments, cutoff=self.cutoff)
if self.reduce_to_contacts:
fragments = reduce_molecular_complex_to_contacts(
fragments, self.cutoff)
# We compute pairwise contact fingerprints
for (frag1_ind,
frag2_ind) in itertools.combinations(range(len(fragments)), 2):
frag1, frag2 = fragments[frag1_ind], fragments[frag2_ind]
frag1_xyz = subtract_centroid(frag1[0], centroid)
frag2_xyz = subtract_centroid(frag2[0], centroid)
xyzs = [frag1_xyz, frag2_xyz]
rdks = [frag1[1], frag2[1]]
pairwise_features.append(
sum([
voxelize(convert_atom_to_voxel,
hash_function=None,
coordinates=xyz,
box_width=self.box_width,
voxel_width=self.voxel_width,
feature_dict=compute_charge_dictionary(mol),
nb_channel=1,
dtype="np.float16")
for xyz, mol in zip(xyzs, rdks)
]))
# Features are of shape (voxels_per_edge, voxels_per_edge, voxels_per_edge, 1) so we should concatenate on the last axis.
return np.concatenate(pairwise_features, axis=-1)
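
# Example usage sketch for ChargeVoxelizer. The file names below are
# placeholders, not files shipped with DeepChem:
#
#     featurizer = ChargeVoxelizer(box_width=16.0, voxel_width=1.0)
#     features = featurizer.featurize([("ligand.sdf", "protein.pdb")])
#     # each entry has shape (voxels_per_edge, voxels_per_edge, voxels_per_edge, 1)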
class SaltBridgeVoxelizer(ComplexFeaturizer):
"""Localize salt bridges between atoms in macromolecular complexes.
Given a macromolecular complex made up of multiple
    constituent molecules, compute salt bridges between atoms in
the macromolecular complex. For each atom, localize this salt
bridge in the voxel in which it originated to create a local
salt bridge array. Note that if atoms in two different voxels
interact in a salt-bridge, the interaction is double counted
in both voxels.
Let `voxels_per_edge = int(box_width/voxel_width)`. Creates a
tensor output of shape `(voxels_per_edge, voxels_per_edge,
    voxels_per_edge, 1)` for each macromolecular complex that counts
    the number of salt bridges at each voxel.
"""
def __init__(self,
cutoff: float = 5.0,
box_width: float = 16.0,
voxel_width: float = 1.0,
reduce_to_contacts: bool = True):
"""
Parameters
----------
cutoff: float, optional (default 5.0)
The distance in angstroms within which atoms must be to
be considered for a salt bridge between them.
box_width: float, optional (default 16.0)
Size of a box in which voxel features are calculated. Box
is centered on a ligand centroid.
voxel_width: float, optional (default 1.0)
Size of a 3D voxel in a grid.
reduce_to_contacts: bool, optional
If True, reduce the atoms in the complex to those near a contact
region.
"""
self.cutoff = cutoff
self.box_width = box_width
self.voxel_width = voxel_width
self.reduce_to_contacts = reduce_to_contacts
def _featurize(self, datapoint, **kwargs): # -> Optional[np.ndarray]:
"""
Compute featurization for a single mol/protein complex
Parameters
----------
datapoint: Tuple[str, str]
Filenames for molecule and protein.
"""
if 'complex' in kwargs:
datapoint = kwargs.get("complex")
raise DeprecationWarning(
'Complex is being phased out as a parameter, please pass "datapoint" instead.'
)
try:
fragments = rdkit_utils.load_complex(datapoint, add_hydrogens=False)
except MoleculeLoadException:
logger.warning(
"This molecule cannot be loaded by Rdkit. Returning None")
return None
pairwise_features = []
# We compute pairwise contact fingerprints
centroid = compute_contact_centroid(fragments, cutoff=self.cutoff)
if self.reduce_to_contacts:
fragments = reduce_molecular_complex_to_contacts(
fragments, self.cutoff)
for (frag1_ind,
frag2_ind) in itertools.combinations(range(len(fragments)), 2):
frag1, frag2 = fragments[frag1_ind], fragments[frag2_ind]
distances = compute_pairwise_distances(frag1[0], frag2[0])
frag1_xyz = subtract_centroid(frag1[0], centroid)
frag2_xyz = subtract_centroid(frag2[0], centroid)
xyzs = [frag1_xyz, frag2_xyz]
# rdks = [frag1[1], frag2[1]]
pairwise_features.append(
sum([
voxelize(convert_atom_pair_to_voxel,
hash_function=None,
coordinates=xyz,
box_width=self.box_width,
voxel_width=self.voxel_width,
feature_list=compute_salt_bridges(
frag1[1],
frag2[1],
distances,
cutoff=self.cutoff),
nb_channel=1) for xyz in xyzs
]))
# Features are of shape (voxels_per_edge, voxels_per_edge, voxels_per_edge, 1) so we should concatenate on the last axis.
return np.concatenate(pairwise_features, axis=-1)
class CationPiVoxelizer(ComplexFeaturizer):
"""Localize cation-Pi interactions between atoms in macromolecular complexes.
Given a macromolecular complex made up of multiple
    constituent molecules, compute cation-pi interactions between atoms in
    the macromolecular complex. For each atom, localize this
    interaction in the voxel in which it originated to create a local
    cation-pi array.
Let `voxels_per_edge = int(box_width/voxel_width)`. Creates a
tensor output of shape `(voxels_per_edge, voxels_per_edge,
voxels_per_edge, 1)` for each macromolecular complex that counts the
number of cation-pi interactions at each voxel.
"""
def __init__(self,
cutoff: float = 6.5,
angle_cutoff: float = 30.0,
box_width: float = 16.0,
voxel_width: float = 1.0):
"""
Parameters
----------
cutoff: float, optional (default 6.5)
The distance in angstroms within which atoms must be to
be considered for a cation-pi interaction between them.
angle_cutoff: float, optional (default 30.0)
Angle cutoff. Max allowed deviation from the ideal (0deg)
angle between ring normal and vector pointing from ring
center to cation (in degrees).
box_width: float, optional (default 16.0)
Size of a box in which voxel features are calculated. Box
is centered on a ligand centroid.
voxel_width: float, optional (default 1.0)
Size of a 3D voxel in a grid.
"""
self.cutoff = cutoff
self.angle_cutoff = angle_cutoff
self.box_width = box_width
self.voxel_width = voxel_width
def _featurize(self, datapoint, **kwargs): # -> Optional[np.ndarray]:
"""
Compute featurization for a single mol/protein complex
Parameters
----------
datapoint: Tuple[str, str]
Filenames for molecule and protein.
"""
if 'complex' in kwargs:
datapoint = kwargs.get("complex")
raise DeprecationWarning(
'Complex is being phased out as a parameter, please pass "datapoint" instead.'
)
try:
fragments = rdkit_utils.load_complex(datapoint, add_hydrogens=False)
except MoleculeLoadException:
logger.warning(
"This molecule cannot be loaded by Rdkit. Returning None")
return None
pairwise_features = []
# We compute pairwise contact fingerprints
centroid = compute_contact_centroid(fragments, cutoff=self.cutoff)
for (frag1_ind,
frag2_ind) in itertools.combinations(range(len(fragments)), 2):
frag1, frag2 = fragments[frag1_ind], fragments[frag2_ind]
# distances = compute_pairwise_distances(frag1[0], frag2[0])
frag1_xyz = subtract_centroid(frag1[0], centroid)
frag2_xyz = subtract_centroid(frag2[0], centroid)
xyzs = [frag1_xyz, frag2_xyz]
# rdks = [frag1[1], frag2[1]]
pairwise_features.append(
sum([
voxelize(convert_atom_to_voxel,
hash_function=None,
box_width=self.box_width,
voxel_width=self.voxel_width,
coordinates=xyz,
feature_dict=cation_pi_dict,
nb_channel=1) for xyz, cation_pi_dict in zip(
xyzs,
compute_binding_pocket_cation_pi(
frag1[1],
frag2[1],
dist_cutoff=self.cutoff,
angle_cutoff=self.angle_cutoff,
))
]))
# Features are of shape (voxels_per_edge, voxels_per_edge, voxels_per_edge, 1) so we should concatenate on the last axis.
return np.concatenate(pairwise_features, axis=-1)
class PiStackVoxelizer(ComplexFeaturizer):
"""Localize Pi stacking interactions between atoms in macromolecular complexes.
Given a macromolecular complex made up of multiple
    constituent molecules, compute pi-stacking interactions
    between atoms in the macromolecular complex. For each atom,
    localize this interaction in the voxel in which it originated
to create a local pi-stacking array.
Let `voxels_per_edge = int(box_width/voxel_width)`. Creates a
tensor output of shape `(voxels_per_edge, voxels_per_edge,
voxels_per_edge, 2)` for each macromolecular complex. Each voxel has
2 fields, with the first tracking the number of pi-pi parallel
interactions, and the second tracking the number of pi-T
interactions.
"""
def __init__(self,
cutoff: float = 4.4,
angle_cutoff: float = 30.0,
box_width: float = 16.0,
voxel_width: float = 1.0):
"""
Parameters
----------
cutoff: float, optional (default 4.4)
The distance in angstroms within which atoms must be to
be considered for a cation-pi interaction between them.
angle_cutoff: float, optional (default 30.0)
Angle cutoff. Max allowed deviation from the ideal (0 deg)
angle between ring normal and vector pointing from ring
center to other ring center (in degrees).
box_width: float, optional (default 16.0)
Size of a box in which voxel features are calculated. Box
is centered on a ligand centroid.
voxel_width: float, optional (default 1.0)
Size of a 3D voxel in a grid.
"""
self.cutoff = cutoff
self.angle_cutoff = angle_cutoff
self.box_width = box_width
self.voxel_width = voxel_width
def _featurize(self, datapoint, **kwargs): # -> Optional[np.ndarray]:
"""
Compute featurization for a single mol/protein complex
Parameters
----------
datapoint: Tuple[str, str]
Filenames for molecule and protein.
"""
if 'complex' in kwargs:
datapoint = kwargs.get("complex")
raise DeprecationWarning(
'Complex is being phased out as a parameter, please pass "datapoint" instead.'
)
try:
fragments = rdkit_utils.load_complex(datapoint, add_hydrogens=False)
except MoleculeLoadException:
logger.warning(
"This molecule cannot be loaded by Rdkit. Returning None")
return None
pairwise_features = []
# We compute pairwise contact fingerprints
centroid = compute_contact_centroid(fragments, cutoff=self.cutoff)
for (frag1_ind,
frag2_ind) in itertools.combinations(range(len(fragments)), 2):
frag1, frag2 = fragments[frag1_ind], fragments[frag2_ind]
distances = compute_pairwise_distances(frag1[0], frag2[0])
frag1_xyz = subtract_centroid(frag1[0], centroid)
frag2_xyz = subtract_centroid(frag2[0], centroid)
xyzs = [frag1_xyz, frag2_xyz]
# rdks = [frag1[1], frag2[1]]
protein_pi_t, protein_pi_parallel, ligand_pi_t, ligand_pi_parallel = (
compute_pi_stack(frag1[1],
frag2[1],
distances,
dist_cutoff=self.cutoff,
angle_cutoff=self.angle_cutoff))
pi_parallel_tensor = sum([
voxelize(convert_atom_to_voxel,
hash_function=None,
box_width=self.box_width,
voxel_width=self.voxel_width,
coordinates=xyz,
feature_dict=feature_dict,
nb_channel=1)
for (xyz, feature_dict
) in zip(xyzs, [ligand_pi_parallel, protein_pi_parallel])
])
pi_t_tensor = sum([
voxelize(convert_atom_to_voxel,
hash_function=None,
box_width=self.box_width,
voxel_width=self.voxel_width,
                         coordinates=xyz,
                         feature_dict=feature_dict,
nb_channel=1)
for (xyz,
feature_dict) in zip(xyzs, [ligand_pi_t, protein_pi_t])
])
pairwise_features.append(
np.concatenate([pi_parallel_tensor, pi_t_tensor], axis=-1))
# Features are of shape (voxels_per_edge, voxels_per_edge, voxels_per_edge, 2) so we should concatenate on the last axis.
return np.concatenate(pairwise_features, axis=-1)
class HydrogenBondCounter(ComplexFeaturizer):
"""Counts hydrogen bonds between atoms in macromolecular complexes.
Given a macromolecular complex made up of multiple
    constituent molecules, count the number of hydrogen bonds
between atoms in the macromolecular complex.
    Creates an output of shape `(3,)` (assuming the default value
    of `distance_bins`, which has 3 bins) for each macromolecular complex
    that counts the number of hydrogen bonds in each distance bin.
"""
def __init__(
self,
cutoff: float = 4.5,
reduce_to_contacts: bool = True,
distance_bins: Optional[List[Tuple[float, float]]] = None,
angle_cutoffs: Optional[List[float]] = None,
):
"""
Parameters
----------
cutoff: float (default 4.5)
Distance cutoff in angstroms for molecules in complex.
reduce_to_contacts: bool, optional
If True, reduce the atoms in the complex to those near a contact
region.
distance_bins: list[tuple]
            List of hydrogen bond distance bins. If not specified, defaults to
            `[(2.2, 2.5), (2.5, 3.2), (3.2, 4.0)]`.
angle_cutoffs: list[float]
List of hydrogen bond angle cutoffs. Max allowed
deviation from the ideal (180 deg) angle between
            hydrogen-atom1, hydrogen-atom2 vectors. If not specified,
            defaults to `[5, 50, 90]`.
"""
self.cutoff = cutoff
if distance_bins is None:
self.distance_bins = HBOND_DIST_BINS
else:
self.distance_bins = distance_bins
if angle_cutoffs is None:
self.angle_cutoffs = HBOND_ANGLE_CUTOFFS
else:
self.angle_cutoffs = angle_cutoffs
self.reduce_to_contacts = reduce_to_contacts
def _featurize(self, datapoint, **kwargs): # -> Optional[np.ndarray]:
"""
Compute featurization for a single mol/protein complex
Parameters
----------
datapoint: Tuple[str, str]
Filenames for molecule and protein.
"""
if 'complex' in kwargs:
datapoint = kwargs.get("complex")
raise DeprecationWarning(
'Complex is being phased out as a parameter, please pass "datapoint" instead.'
)
try:
fragments = rdkit_utils.load_complex(datapoint, add_hydrogens=False)
except MoleculeLoadException:
logger.warning(
"This molecule cannot be loaded by Rdkit. Returning None")
return None
pairwise_features = []
# We compute pairwise contact fingerprints
# centroid = compute_contact_centroid(fragments, cutoff=self.cutoff)
if self.reduce_to_contacts:
fragments = reduce_molecular_complex_to_contacts(
fragments, self.cutoff)
# We compute pairwise contact fingerprints
for (frag1_ind,
frag2_ind) in itertools.combinations(range(len(fragments)), 2):
frag1, frag2 = fragments[frag1_ind], fragments[frag2_ind]
distances = compute_pairwise_distances(frag1[0], frag2[0])
# frag1_xyz = subtract_centroid(frag1[0], centroid)
# frag2_xyz = subtract_centroid(frag2[0], centroid)
# xyzs = [frag1_xyz, frag2_xyz]
# rdks = [frag1[1], frag2[1]]
pairwise_features.append(
np.concatenate([
np.array([len(hbond_list)])
for hbond_list in compute_hydrogen_bonds(
frag1, frag2, distances, self.distance_bins,
self.angle_cutoffs)
],
axis=-1))
        # Features are of shape (3,) per fragment pair (one count per distance
        # bin), so we concatenate on the last axis.
return np.concatenate(pairwise_features, axis=-1)
class HydrogenBondVoxelizer(ComplexFeaturizer):
"""Localize hydrogen bonds between atoms in macromolecular complexes.
Given a macromolecular complex made up of multiple
    constituent molecules, compute hydrogen bonds between atoms
in the macromolecular complex. For each atom, localize this
hydrogen bond in the voxel in which it originated to create a
local hydrogen bond array. Note that if atoms in two
different voxels interact in a hydrogen bond, the interaction
is double counted in both voxels.
Let `voxels_per_edge = int(box_width/voxel_width)`. Creates a
tensor output of shape `(voxels_per_edge, voxels_per_edge,
voxels_per_edge, 3)` (assuming the default for `distance_bins` which
has 3 bins) for each macromolecular complex that counts the number
of hydrogen bonds at each voxel.
"""
def __init__(
self,
cutoff: float = 4.5,
box_width: float = 16.0,
voxel_width: float = 1.0,
reduce_to_contacts: bool = True,
distance_bins: Optional[List[Tuple[float, float]]] = None,
angle_cutoffs: Optional[List[float]] = None,
):
"""
Parameters
----------
cutoff: float (default 4.5)
Distance cutoff in angstroms for contact atoms in complex.
box_width: float, optional (default 16.0)
Size of a box in which voxel features are calculated. Box
is centered on a ligand centroid.
voxel_width: float, optional (default 1.0)
Size of a 3D voxel in a grid.
reduce_to_contacts: bool, optional
If True, reduce the atoms in the complex to those near a contact
region.
distance_bins: list[tuple]
            List of hydrogen bond distance bins. If not specified, defaults to
            `[(2.2, 2.5), (2.5, 3.2), (3.2, 4.0)]`.
angle_cutoffs: list[float]
List of hydrogen bond angle cutoffs. Max allowed
deviation from the ideal (180 deg) angle between
            hydrogen-atom1, hydrogen-atom2 vectors. If not specified,
            defaults to `[5, 50, 90]`.
"""
self.cutoff = cutoff
if distance_bins is None:
self.distance_bins = HBOND_DIST_BINS
else:
self.distance_bins = distance_bins
if angle_cutoffs is None:
self.angle_cutoffs = HBOND_ANGLE_CUTOFFS
else:
self.angle_cutoffs = angle_cutoffs
self.box_width = box_width
self.voxel_width = voxel_width
self.reduce_to_contacts = reduce_to_contacts
def _featurize(self, datapoint, **kwargs): # -> Optional[np.ndarray]:
"""
Compute featurization for a single mol/protein complex
Parameters
----------
datapoint: Tuple[str, str]
Filenames for molecule and protein.
"""
if 'complex' in kwargs:
datapoint = kwargs.get("complex")
raise DeprecationWarning(
'Complex is being phased out as a parameter, please pass "datapoint" instead.'
)
try:
fragments = rdkit_utils.load_complex(datapoint, add_hydrogens=False)
except MoleculeLoadException:
logger.warning(
"This molecule cannot be loaded by Rdkit. Returning None")
return None
pairwise_features = []
# We compute pairwise contact fingerprints
centroid = compute_contact_centroid(fragments, cutoff=self.cutoff)
if self.reduce_to_contacts:
fragments = reduce_molecular_complex_to_contacts(
fragments, self.cutoff)
for (frag1_ind,
frag2_ind) in itertools.combinations(range(len(fragments)), 2):
frag1, frag2 = fragments[frag1_ind], fragments[frag2_ind]
distances = compute_pairwise_distances(frag1[0], frag2[0])
frag1_xyz = subtract_centroid(frag1[0], centroid)
frag2_xyz = subtract_centroid(frag2[0], centroid)
xyzs = [frag1_xyz, frag2_xyz]
# rdks = [frag1[1], frag2[1]]
pairwise_features.append(
np.concatenate([
sum([
voxelize(convert_atom_pair_to_voxel,
hash_function=None,
box_width=self.box_width,
voxel_width=self.voxel_width,
coordinates=xyz,
feature_list=hbond_list,
nb_channel=1) for xyz in xyzs
]) for hbond_list in compute_hydrogen_bonds(
frag1, frag2, distances, self.distance_bins,
self.angle_cutoffs)
],
axis=-1))
# Features are of shape (voxels_per_edge, voxels_per_edge, voxels_per_edge, 1) so we should concatenate on the last axis.
return np.concatenate(pairwise_features, axis=-1)
<file_sep>#! /bin/bash
python mol2vec.py > data.txt
python -m gensim.scripts.word2vec_standalone -train data.txt -output vec.txt -size 200 -sample 1e-4 -binary 0 -iter 3
<file_sep>import numpy as np
import unittest
from deepchem.utils import voxel_utils
from deepchem.utils import hash_utils
class TestVoxelUtils(unittest.TestCase):
def test_convert_atom_to_voxel(self):
N = 5
coordinates = np.random.rand(N, 3)
atom_index = 2
box_width = 16
voxel_width = 1
indices = voxel_utils.convert_atom_to_voxel(coordinates, atom_index,
box_width, voxel_width)
assert indices.shape == (3,)
def test_convert_pair_atom_to_voxel(self):
N = 5
M = 6
coordinates1 = np.random.rand(N, 3)
coordinates2 = np.random.rand(M, 3)
atom_index_pair = (2, 3)
box_width = 16
voxel_width = 1
indices = voxel_utils.convert_atom_pair_to_voxel(
[coordinates1, coordinates2], atom_index_pair, box_width,
voxel_width)
assert indices.shape == (2, 3)
def test_voxelize_convert_atom(self):
N = 5
coordinates = np.random.rand(N, 3)
box_width = 16
voxel_width = 1
voxels_per_edge = int(box_width / voxel_width)
get_voxels = voxel_utils.convert_atom_to_voxel
hash_function = hash_utils.hash_ecfp
feature_dict = {1: "C", 2: "CC"}
nb_channel = 16
features = voxel_utils.voxelize(get_voxels,
coordinates,
box_width,
voxel_width,
hash_function,
feature_dict,
nb_channel=nb_channel)
assert features.shape == (voxels_per_edge, voxels_per_edge,
voxels_per_edge, nb_channel)
def test_voxelize_convert_atom_pair(self):
N = 5
M = 6
coordinates1 = np.random.rand(N, 3)
coordinates2 = np.random.rand(M, 3)
coordinates = [coordinates1, coordinates2]
box_width = 16
voxel_width = 1
voxels_per_edge = int(box_width / voxel_width)
get_voxels = voxel_utils.convert_atom_pair_to_voxel
hash_function = hash_utils.hash_ecfp_pair
feature_dict = {(1, 2): ("C", "O"), (2, 3): ("CC", "OH")}
nb_channel = 16
features = voxel_utils.voxelize(get_voxels,
coordinates,
box_width,
voxel_width,
hash_function,
feature_dict,
nb_channel=nb_channel)
assert features.shape == (voxels_per_edge, voxels_per_edge,
voxels_per_edge, nb_channel)
<file_sep>"""Generative Adversarial Networks."""
from deepchem.models import KerasModel, layers
from tensorflow.keras.layers import Input, Lambda, Layer, Softmax, Reshape, Multiply
import numpy as np
import tensorflow as tf
import time
class GAN(KerasModel):
"""Implements Generative Adversarial Networks.
A Generative Adversarial Network (GAN) is a type of generative model. It
consists of two parts called the "generator" and the "discriminator". The
generator takes random noise as input and transforms it into an output that
(hopefully) resembles the training data. The discriminator takes a set of
samples as input and tries to distinguish the real training samples from the
ones created by the generator. Both of them are trained together. The
discriminator tries to get better and better at telling real from false data,
while the generator tries to get better and better at fooling the discriminator.
In many cases there also are additional inputs to the generator and
discriminator. In that case it is known as a Conditional GAN (CGAN), since it
learns a distribution that is conditional on the values of those inputs. They
are referred to as "conditional inputs".
    Many variations on this idea have been proposed, and new varieties of GANs are
    constantly appearing. This class tries to make it very easy to implement
straightforward GANs of the most conventional types. At the same time, it
tries to be flexible enough that it can be used to implement many (but
certainly not all) variations on the concept.
To define a GAN, you must create a subclass that provides implementations of
the following methods:
get_noise_input_shape()
get_data_input_shapes()
create_generator()
create_discriminator()
If you want your GAN to have any conditional inputs you must also implement:
get_conditional_input_shapes()
The following methods have default implementations that are suitable for most
conventional GANs. You can override them if you want to customize their
behavior:
create_generator_loss()
create_discriminator_loss()
get_noise_batch()
This class allows a GAN to have multiple generators and discriminators, a model
known as MIX+GAN. It is described in Arora et al., "Generalization and
Equilibrium in Generative Adversarial Nets (GANs)" (https://arxiv.org/abs/1703.00573).
This can lead to better models, and is especially useful for reducing mode
collapse, since different generators can learn different parts of the
distribution. To use this technique, simply specify the number of generators
and discriminators when calling the constructor. You can then tell
predict_gan_generator() which generator to use for predicting samples.
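
    As a rough sketch, a subclass for one-dimensional data (with no conditional
    inputs) might look like the following; the layer sizes and shapes are
    arbitrary choices for illustration, not requirements of this class:

        class ExampleGAN(GAN):

            def get_noise_input_shape(self):
                return (10,)

            def get_data_input_shapes(self):
                return [(1,)]

            def create_generator(self):
                return tf.keras.Sequential(
                    [tf.keras.layers.Dense(1, input_shape=(10,))])

            def create_discriminator(self):
                return tf.keras.Sequential([
                    tf.keras.layers.Dense(10, activation='relu',
                                          input_shape=(1,)),
                    tf.keras.layers.Dense(1, activation='sigmoid')
                ])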
"""
def __init__(self, n_generators=1, n_discriminators=1, **kwargs):
"""Construct a GAN.
In addition to the parameters listed below, this class accepts all the
keyword arguments from KerasModel.
Parameters
----------
n_generators: int
the number of generators to include
n_discriminators: int
the number of discriminators to include
"""
self.n_generators = n_generators
self.n_discriminators = n_discriminators
# Create the inputs.
self.noise_input = Input(shape=self.get_noise_input_shape())
self.data_input_layers = []
for shape in self.get_data_input_shapes():
self.data_input_layers.append(Input(shape=shape))
self.data_inputs = [i.ref() for i in self.data_input_layers]
self.conditional_input_layers = []
for shape in self.get_conditional_input_shapes():
self.conditional_input_layers.append(Input(shape=shape))
self.conditional_inputs = [
i.ref() for i in self.conditional_input_layers
]
# Create the generators.
self.generators = []
self.gen_variables = []
generator_outputs = []
for i in range(n_generators):
generator = self.create_generator()
self.generators.append(generator)
generator_outputs.append(
generator(
_list_or_tensor([self.noise_input] +
self.conditional_input_layers)))
self.gen_variables += generator.trainable_variables
# Create the discriminators.
self.discriminators = []
self.discrim_variables = []
discrim_train_outputs = []
discrim_gen_outputs = []
for i in range(n_discriminators):
discriminator = self.create_discriminator()
self.discriminators.append(discriminator)
discrim_train_outputs.append(
self._call_discriminator(discriminator, self.data_input_layers,
True))
for gen_output in generator_outputs:
if tf.is_tensor(gen_output):
gen_output = [gen_output]
discrim_gen_outputs.append(
self._call_discriminator(discriminator, gen_output, False))
self.discrim_variables += discriminator.trainable_variables
# Compute the loss functions.
gen_losses = [
self.create_generator_loss(d) for d in discrim_gen_outputs
]
discrim_losses = []
for i in range(n_discriminators):
for j in range(n_generators):
discrim_losses.append(
self.create_discriminator_loss(
discrim_train_outputs[i],
discrim_gen_outputs[i * n_generators + j]))
if n_generators == 1 and n_discriminators == 1:
total_gen_loss = gen_losses[0]
total_discrim_loss = discrim_losses[0]
else:
# Create learnable weights for the generators and discriminators.
gen_alpha = layers.Variable(np.ones((1, n_generators)),
dtype=tf.float32)
# We pass an input to the Variable layer to work around a bug in TF 1.14.
gen_weights = Softmax()(gen_alpha([self.noise_input]))
discrim_alpha = layers.Variable(np.ones((1, n_discriminators)),
dtype=tf.float32)
discrim_weights = Softmax()(discrim_alpha([self.noise_input]))
# Compute the weighted errors
weight_products = Reshape(
(n_generators * n_discriminators,))(Multiply()([
Reshape((n_discriminators, 1))(discrim_weights),
Reshape((1, n_generators))(gen_weights)
]))
stacked_gen_loss = layers.Stack(axis=0)(gen_losses)
stacked_discrim_loss = layers.Stack(axis=0)(discrim_losses)
total_gen_loss = Lambda(lambda x: tf.reduce_sum(x[0] * x[1]))(
[stacked_gen_loss, weight_products])
total_discrim_loss = Lambda(lambda x: tf.reduce_sum(x[0] * x[1]))(
[stacked_discrim_loss, weight_products])
self.gen_variables += gen_alpha.trainable_variables
self.discrim_variables += gen_alpha.trainable_variables
self.discrim_variables += discrim_alpha.trainable_variables
# Add an entropy term to the loss.
entropy = Lambda(
lambda x: -(tf.reduce_sum(tf.math.log(x[0])) / n_generators + tf
.reduce_sum(tf.math.log(x[1])) / n_discriminators))(
[gen_weights, discrim_weights])
total_discrim_loss = Lambda(lambda x: x[0] + x[1])(
[total_discrim_loss, entropy])
# Create the Keras model.
inputs = [self.noise_input
] + self.data_input_layers + self.conditional_input_layers
outputs = [total_gen_loss, total_discrim_loss]
self.gen_loss_fn = lambda outputs, labels, weights: outputs[0]
self.discrim_loss_fn = lambda outputs, labels, weights: outputs[1]
model = tf.keras.Model(inputs=inputs, outputs=outputs)
super(GAN, self).__init__(model, self.gen_loss_fn, **kwargs)
def _call_discriminator(self, discriminator, inputs, train):
"""Invoke the discriminator on a set of inputs.
This is a separate method so WGAN can override it and also return the
gradient penalty.
"""
return discriminator(
_list_or_tensor(inputs + self.conditional_input_layers))
def get_noise_input_shape(self):
"""Get the shape of the generator's noise input layer.
Subclasses must override this to return a tuple giving the shape of the
noise input. The actual Input layer will be created automatically. The
dimension corresponding to the batch size should be omitted.
"""
raise NotImplementedError("Subclasses must implement this.")
def get_data_input_shapes(self):
"""Get the shapes of the inputs for training data.
Subclasses must override this to return a list of tuples, each giving the
shape of one of the inputs. The actual Input layers will be created
automatically. This list of shapes must also match the shapes of the
generator's outputs. The dimension corresponding to the batch size should
be omitted.
"""
raise NotImplementedError("Subclasses must implement this.")
def get_conditional_input_shapes(self):
"""Get the shapes of any conditional inputs.
Subclasses may override this to return a list of tuples, each giving the
shape of one of the conditional inputs. The actual Input layers will be
created automatically. The dimension corresponding to the batch size should
be omitted.
The default implementation returns an empty list, meaning there are no
conditional inputs.
"""
return []
def get_noise_batch(self, batch_size):
"""Get a batch of random noise to pass to the generator.
This should return a NumPy array whose shape matches the one returned by
get_noise_input_shape(). The default implementation returns normally
distributed values. Subclasses can override this to implement a different
distribution.
"""
size = list(self.get_noise_input_shape())
size = [batch_size] + size
return np.random.normal(size=size)
def create_generator(self):
"""Create and return a generator.
Subclasses must override this to construct the generator. The returned
value should be a tf.keras.Model whose inputs are a batch of noise, followed
by any conditional inputs. The number and shapes of its outputs must match
the return value from get_data_input_shapes(), since generated data must
have the same form as training data.
"""
raise NotImplementedError("Subclasses must implement this.")
def create_discriminator(self):
"""Create and return a discriminator.
Subclasses must override this to construct the discriminator. The returned
value should be a tf.keras.Model whose inputs are all data inputs, followed
by any conditional inputs. Its output should be a one dimensional tensor
containing the probability of each sample being a training sample.
"""
raise NotImplementedError("Subclasses must implement this.")
def create_generator_loss(self, discrim_output):
"""Create the loss function for the generator.
The default implementation is appropriate for most cases. Subclasses can
        override this if they need to customize it.
Parameters
----------
discrim_output: Tensor
the output from the discriminator on a batch of generated data. This is
its estimate of the probability that each sample is training data.
Returns
-------
A Tensor equal to the loss function to use for optimizing the generator.
"""
return Lambda(lambda x: -tf.reduce_mean(tf.math.log(x + 1e-10)))(
discrim_output)
def create_discriminator_loss(self, discrim_output_train,
discrim_output_gen):
"""Create the loss function for the discriminator.
The default implementation is appropriate for most cases. Subclasses can
        override this if they need to customize it.
Parameters
----------
discrim_output_train: Tensor
the output from the discriminator on a batch of training data. This is
its estimate of the probability that each sample is training data.
discrim_output_gen: Tensor
the output from the discriminator on a batch of generated data. This is
its estimate of the probability that each sample is training data.
Returns
-------
A Tensor equal to the loss function to use for optimizing the discriminator.
"""
return Lambda(lambda x: -tf.reduce_mean(
tf.math.log(x[0] + 1e-10) + tf.math.log(1 - x[1] + 1e-10)))(
[discrim_output_train, discrim_output_gen])
def fit_gan(self,
batches,
generator_steps=1.0,
max_checkpoints_to_keep=5,
checkpoint_interval=1000,
restore=False):
"""Train this model on data.
Parameters
----------
batches: iterable
batches of data to train the discriminator on, each represented as a dict
that maps Inputs to values. It should specify values for all members of
data_inputs and conditional_inputs.
generator_steps: float
the number of training steps to perform for the generator for each batch.
This can be used to adjust the ratio of training steps for the generator
and discriminator. For example, 2.0 will perform two training steps for
every batch, while 0.5 will only perform one training step for every two
batches.
max_checkpoints_to_keep: int
the maximum number of checkpoints to keep. Older checkpoints are discarded.
checkpoint_interval: int
the frequency at which to write checkpoints, measured in batches. Set
this to 0 to disable automatic checkpointing.
restore: bool
if True, restore the model from the most recent checkpoint before training
it.
"""
self._ensure_built()
gen_train_fraction = 0.0
discrim_error = 0.0
gen_error = 0.0
discrim_average_steps = 0
gen_average_steps = 0
time1 = time.time()
if checkpoint_interval > 0:
manager = tf.train.CheckpointManager(self._checkpoint,
self.model_dir,
max_checkpoints_to_keep)
for feed_dict in batches:
# Every call to fit_generator() will increment global_step, but we only
# want it to get incremented once for the entire batch, so record the
# value and keep resetting it.
global_step = self.get_global_step()
# Train the discriminator.
inputs = [self.get_noise_batch(self.batch_size)]
for input in self.data_input_layers:
inputs.append(feed_dict[input.ref()])
for input in self.conditional_input_layers:
inputs.append(feed_dict[input.ref()])
discrim_error += self.fit_generator(
[(inputs, [], [])],
variables=self.discrim_variables,
loss=self.discrim_loss_fn,
checkpoint_interval=0,
restore=restore)
restore = False
discrim_average_steps += 1
# Train the generator.
if generator_steps > 0.0:
gen_train_fraction += generator_steps
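                # Accumulate fractional generator steps: e.g. generator_steps=0.5
                # trains the generator once for every two discriminator batches,
                # while 2.0 trains it twice per batch.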
while gen_train_fraction >= 1.0:
inputs = [self.get_noise_batch(self.batch_size)
] + inputs[1:]
gen_error += self.fit_generator(
[(inputs, [], [])],
variables=self.gen_variables,
checkpoint_interval=0)
gen_average_steps += 1
gen_train_fraction -= 1.0
self._global_step.assign(global_step + 1)
# Write checkpoints and report progress.
if discrim_average_steps == checkpoint_interval:
manager.save()
discrim_loss = discrim_error / max(1, discrim_average_steps)
gen_loss = gen_error / max(1, gen_average_steps)
print(
'Ending global_step %d: generator average loss %g, discriminator average loss %g'
% (global_step, gen_loss, discrim_loss))
discrim_error = 0.0
gen_error = 0.0
discrim_average_steps = 0
gen_average_steps = 0
# Write out final results.
if checkpoint_interval > 0:
if discrim_average_steps > 0 and gen_average_steps > 0:
discrim_loss = discrim_error / discrim_average_steps
gen_loss = gen_error / gen_average_steps
print(
'Ending global_step %d: generator average loss %g, discriminator average loss %g'
% (global_step, gen_loss, discrim_loss))
manager.save()
time2 = time.time()
print("TIMING: model fitting took %0.3f s" % (time2 - time1))
def predict_gan_generator(self,
batch_size=1,
noise_input=None,
conditional_inputs=[],
generator_index=0):
"""Use the GAN to generate a batch of samples.
Parameters
----------
batch_size: int
the number of samples to generate. If either noise_input or
conditional_inputs is specified, this argument is ignored since the batch
size is then determined by the size of that argument.
noise_input: array
the value to use for the generator's noise input. If None (the default),
get_noise_batch() is called to generate a random input, so each call will
produce a new set of samples.
conditional_inputs: list of arrays
the values to use for all conditional inputs. This must be specified if
the GAN has any conditional inputs.
generator_index: int
the index of the generator (between 0 and n_generators-1) to use for
generating the samples.
Returns
-------
An array (if the generator has only one output) or list of arrays (if it has
multiple outputs) containing the generated samples.
"""
if noise_input is not None:
batch_size = len(noise_input)
elif len(conditional_inputs) > 0:
batch_size = len(conditional_inputs[0])
if noise_input is None:
noise_input = self.get_noise_batch(batch_size)
inputs = [noise_input]
inputs += conditional_inputs
inputs = [i.astype(np.float32) for i in inputs]
pred = self.generators[generator_index](_list_or_tensor(inputs),
training=False)
pred = pred.numpy()
return pred
def _list_or_tensor(inputs):
if len(inputs) == 1:
return inputs[0]
return inputs
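# --- Illustrative sketch (not part of the library API) ----------------------
# A minimal, hedged example of the subclassing pattern described in the GAN
# docstring above: a toy GAN with a 10-dimensional noise input and a single
# one-dimensional data input. The name `_ExampleGAN` and the layer sizes are
# arbitrary choices made for illustration only.
class _ExampleGAN(GAN):

    def get_noise_input_shape(self):
        return (10,)

    def get_data_input_shapes(self):
        return [(1,)]

    def create_generator(self):
        noise = Input(shape=self.get_noise_input_shape())
        output = tf.keras.layers.Dense(1)(noise)
        return tf.keras.Model(inputs=[noise], outputs=[output])

    def create_discriminator(self):
        data = Input(shape=self.get_data_input_shapes()[0])
        hidden = tf.keras.layers.Dense(10, activation='relu')(data)
        prob = tf.keras.layers.Dense(1, activation='sigmoid')(hidden)
        return tf.keras.Model(inputs=[data], outputs=[prob])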
class WGAN(GAN):
"""Implements Wasserstein Generative Adversarial Networks.
This class implements Wasserstein Generative Adversarial Networks (WGANs) as
described in Arjovsky et al., "Wasserstein GAN" (https://arxiv.org/abs/1701.07875).
A WGAN is conceptually rather different from a conventional GAN, but in
practical terms very similar. It reinterprets the discriminator (often called
the "critic" in this context) as learning an approximation to the Earth Mover
distance between the training and generated distributions. The generator is
then trained to minimize that distance. In practice, this just means using
slightly different loss functions for training the generator and discriminator.
WGANs have theoretical advantages over conventional GANs, and they often work
better in practice. In addition, the discriminator's loss function can be
directly interpreted as a measure of the quality of the model. That is an
advantage over conventional GANs, where the loss does not directly convey
information about the quality of the model.
The theory WGANs are based on requires the discriminator's gradient to be
bounded. The original paper achieved this by clipping its weights. This
class instead does it by adding a penalty term to the discriminator's loss, as
described in https://arxiv.org/abs/1704.00028. This is sometimes found to
produce better results.
There are a few other practical differences between GANs and WGANs. In a
conventional GAN, the discriminator's output must be between 0 and 1 so it can
be interpreted as a probability. In a WGAN, it should produce an unbounded
output that can be interpreted as a distance.
When training a WGAN, you also should usually use a smaller value for
generator_steps. Conventional GANs rely on keeping the generator and
discriminator "in balance" with each other. If the discriminator ever gets
too good, it becomes impossible for the generator to fool it and training
stalls. WGANs do not have this problem, and in fact the better the
discriminator is, the easier it is for the generator to improve. It therefore
usually works best to perform several training steps on the discriminator for
each training step on the generator.
"""
def __init__(self, gradient_penalty=10.0, **kwargs):
"""Construct a WGAN.
In addition to the following, this class accepts all the keyword arguments
from GAN and KerasModel.
Parameters
----------
gradient_penalty: float
the magnitude of the gradient penalty loss
"""
self.gradient_penalty = gradient_penalty
super(WGAN, self).__init__(**kwargs)
def _call_discriminator(self, discriminator, inputs, train):
if train:
penalty = GradientPenaltyLayer(self, discriminator)
return penalty(inputs, self.conditional_input_layers)
return discriminator(
_list_or_tensor(inputs + self.conditional_input_layers))
def create_generator_loss(self, discrim_output):
return Lambda(lambda x: tf.reduce_mean(x))(discrim_output)
def create_discriminator_loss(self, discrim_output_train,
discrim_output_gen):
return Lambda(lambda x: tf.reduce_mean(x[0] - x[1]))([
discrim_output_train[0], discrim_output_gen
]) + discrim_output_train[1]
class GradientPenaltyLayer(Layer):
"""Implements the gradient penalty loss term for WGANs."""
def __init__(self, gan, discriminator, **kwargs):
super(GradientPenaltyLayer, self).__init__(**kwargs)
self.gan = gan
self.discriminator = discriminator
def call(self, inputs, conditional_inputs):
with tf.GradientTape() as tape:
for layer in inputs:
tape.watch(layer)
output = self.discriminator(
_list_or_tensor(inputs + conditional_inputs))
gradients = tape.gradient(output, inputs)
gradients = [g for g in gradients if g is not None]
if len(gradients) > 0:
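            # Accumulate the squared per-sample gradient norm over all watched
            # inputs, then penalize its deviation from 1, following the
            # gradient-penalty formulation of https://arxiv.org/abs/1704.00028.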
norm2 = 0.0
for g in gradients:
g2 = tf.square(g)
dims = len(g.shape)
if dims > 1:
g2 = tf.reduce_sum(g2, axis=list(range(1, dims)))
norm2 += g2
penalty = tf.square(tf.sqrt(norm2) - 1.0)
penalty = self.gan.gradient_penalty * tf.reduce_mean(penalty)
else:
penalty = 0.0
return [output, penalty]
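# --- Illustrative usage (not part of the library API) -----------------------
# A hedged sketch of driving fit_gan() and predict_gan_generator(). Here
# `make_batch` is a hypothetical callable that returns a feed dict mapping
# every entry of gan.data_inputs (and gan.conditional_inputs, if any) to a
# numpy array whose first dimension equals gan.batch_size.
def _example_training_loop(gan, make_batch, n_batches=1000):

    def batches():
        for _ in range(n_batches):
            yield make_batch(gan.batch_size)

    # generator_steps=0.5 performs one generator step per two discriminator
    # batches; checkpoint_interval=0 disables automatic checkpointing.
    gan.fit_gan(batches(), generator_steps=0.5, checkpoint_interval=0)
    # Draw a small batch of samples from the first generator.
    return gan.predict_gan_generator(batch_size=16)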
<file_sep>"""
Copies Classes from keras to remove dependency.
Most of this code is copied over from Keras. Hoping to use as a staging
area while we remove our Keras dependency.
"""
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
import tensorflow as tf
<file_sep>echo "Pulling featurized and split ACNN datasets from deepchem"
wget http://deepchem.io.s3-website-us-west-1.amazonaws.com/featurized_datasets/acnn_refined.tar.gz
echo "Extracting ACNN datasets"
tar -zxvf acnn_refined.tar.gz
<file_sep>import numpy as np
from typing import List, Any
from numpy.typing import ArrayLike
from deepchem.feat.graph_data import BatchGraphData
try:
import torch
except ModuleNotFoundError:
pass
def _get_atom_scopes(graph_index: ArrayLike) -> List[List[int]]:
"""Atom scope is a list of tuples with a single entry for every
molecule in the batched graph. The entry indicates the beginning
node index for a molecule and the number of nodes in the molecule.
Parameters
----------
graph_index: np.array
An array containing a mapping between node index and the graph
in the batched graph.
Returns
-------
scopes: List[List[int]]
Node index scope for each molecule in the batched graph.
Example
-------
>>> import numpy as np
>>> graph_index = np.array([0, 0, 1, 1, 1])
>>> _get_atom_scopes(graph_index)
[[0, 2], [2, 3]]
"""
# graph_index indicates which atom belongs to which molecule
mols = np.unique(graph_index)
scopes = []
for mol in mols:
positions = np.where(graph_index == mol, 1, 0)
scopes.append(
[int(np.argmax(positions)),
int(np.count_nonzero(positions))])
return scopes
def _get_bond_scopes(edge_index: ArrayLike,
graph_index: ArrayLike) -> List[List[int]]:
"""Bond scope is a list of tuples with a single entry for every molecule
in the batched graph. The entry indicates the beginning bond index for a
molecule and the number of bonds in the molecule.
Parameters
----------
edge_index: np.array
Graph connectivity in COO format with shape [2, num_edges]
graph_index: np.array
An array containing a mapping between node index and the graph
in the batched graph.
Returns
-------
scopes: List[List[int]]
Bond index scope for each molecule in the batched graph.
Example
-------
>>> edge_index = np.array([[0, 1, 2, 4], [1, 0, 4, 2]]) # a molecule with 4 bonds
>>> graph_index = np.array([0, 0, 1, 1, 1])
>>> _get_bond_scopes(edge_index, graph_index)
[[0, 2], [2, 2]]
"""
mols = np.unique(graph_index)
bond_index = graph_index[edge_index[0]] # type: ignore
scopes = []
for mol in mols:
positions = np.where(bond_index == mol, 1, 0)
scopes.append(
[int(np.argmax(positions)),
int(np.count_nonzero(positions))])
return scopes
def _compute_b2revb(edge_index: np.ndarray) -> List[int]:
"""Every edge in a grover graph is a directed edge. Hence, a bond
    is represented by two edges of opposite directions. b2revb is a mapping
    which stores, for every edge, the index of its reverse edge.
Parameters
----------
edge_index: np.array
Graph connectivity in COO format with shape [2, num_edges]
Returns
-------
b2revb: List[int]
A mapping where an element at an index contains the index of the reverse bond.
Example
-------
>>> import numpy as np
>>> edge_index = np.array([[0, 1, 2, 4], [1, 0, 4, 2]])
>>> _compute_b2revb(edge_index)
[1, 0, 3, 2]
"""
b2revb = [0] * edge_index.shape[1]
for i, bond in enumerate(edge_index.T):
for j, (sa, da) in enumerate(edge_index.T):
if sa == bond[1] and da == bond[0]:
b2revb[i] = j
return b2revb
def _get_a2b(n_atoms: int, edge_index: np.ndarray) -> np.ndarray:
"""a2b is a mapping between atoms and their incoming bonds.
Parameters
----------
n_atoms: int
Number of atoms
edge_index: np.array
Graph connectivity in COO format with shape [2, num_edges]
Returns
-------
a2b: ArrayLike
A mapping between atoms and their incoming bonds
Example
-------
>>> import numpy as np
>>> edge_index = np.array([[0, 1], [1, 2]])
>>> n_atoms = 3
>>> _get_a2b(n_atoms, edge_index)
array([[0],
[0],
[1]])
"""
a2b: List[List[Any]] = [[] for atom in range(n_atoms)]
for i, bond in enumerate(edge_index.T):
dest_atom = bond[1]
a2b[dest_atom].append(i)
# padding
max_num_bonds = max(map(lambda x: len(x), a2b))
atom_bond_mapping = np.asarray(
[a2b[a] + [0] * (max_num_bonds - len(a2b[a])) for a in range(n_atoms)])
return atom_bond_mapping
def extract_grover_attributes(molgraph: BatchGraphData):
"""Utility to extract grover attributes for grover model
Parameters
----------
molgraph: BatchGraphData
A batched graph data representing a collection of molecules.
Returns
-------
graph_attributes: Tuple
A tuple containing atom features, bond features, atom to bond mapping, bond to atom mapping, bond to reverse bond mapping, atom to atom mapping, atom scope, bond scope, functional group labels and other additional features.
Example
-------
>>> import deepchem as dc
>>> from deepchem.feat.graph_data import BatchGraphData
>>> smiles = ['CC', 'CCC', 'CC(=O)C']
>>> featurizer = dc.feat.GroverFeaturizer(features_generator=dc.feat.CircularFingerprint())
>>> graphs = featurizer.featurize(smiles)
>>> molgraph = BatchGraphData(graphs)
>>> attributes = extract_grover_attributes(molgraph)
"""
fg_labels = getattr(molgraph, 'fg_labels')
additional_features = getattr(molgraph, 'additional_features')
f_atoms = molgraph.node_features
f_bonds = molgraph.edge_features
graph_index = molgraph.graph_index
edge_index = molgraph.edge_index
a_scope = _get_atom_scopes(graph_index)
b_scope = _get_bond_scopes(edge_index, graph_index)
b2revb = _compute_b2revb(edge_index)
# computing a2b
a2b = _get_a2b(molgraph.num_nodes, edge_index)
f_atoms_tensor = torch.FloatTensor(f_atoms)
f_bonds_tensor = torch.FloatTensor(f_bonds)
fg_labels_tensor = torch.FloatTensor(fg_labels)
additional_features_tensor = torch.FloatTensor(additional_features)
a2b_tensor = torch.LongTensor(a2b)
b2a_tensor = torch.LongTensor(molgraph.edge_index[0])
b2revb_tensor = torch.LongTensor(b2revb)
# only needed if using atom messages
a2a = b2a_tensor[a2b_tensor] # type: ignore
a_scope_tensor = torch.LongTensor(np.asarray(a_scope))
b_scope_tensor = torch.LongTensor(np.asarray(b_scope))
return f_atoms_tensor, f_bonds_tensor, a2b_tensor, b2a_tensor, b2revb_tensor, a2a, a_scope_tensor, b_scope_tensor, fg_labels_tensor, additional_features_tensor
<file_sep>from os import path
from typing import Optional
import numpy as np
from rdkit.Chem import AllChem
from deepchem.utils import download_url, get_data_dir, untargz_file
from deepchem.utils.typing import RDKitMol
from deepchem.feat.base_classes import MolecularFeaturizer
DEFAULT_PRETRAINED_MODEL_URL = 'https://deepchemdata.s3-us-west-1.amazonaws.com/trained_models/mol2vec_model_300dim.tar.gz'
def _mol2alt_sentence(mol, radius):
"""Same as mol2sentence() except it only returns the alternating sentence
    Calculates ECFP (Morgan fingerprint) and returns identifiers of substructures as a 'sentence' (string).
    (mol2sentence() returns a tuple with 1) a list with a sentence for each radius and 2) a sentence with
    identifiers from all radii combined; this function returns only the latter.)
NOTE: Words are ALWAYS reordered according to atom order in the input mol object.
    NOTE: Due to the way Morgan FPs are generated, the number of identifiers at each radius is smaller.
Parameters
----------
mol : rdkit.Chem.rdchem.Mol
radius : float
Fingerprint radius
Returns
-------
    list
        alternating sentence with identifiers from all radii combined
"""
# Copied from https://github.com/samoturk/mol2vec/blob/850d944d5f48a58e26ed0264332b5741f72555aa/mol2vec/features.py#L129-L168
radii = list(range(int(radius) + 1))
info = {}
_ = AllChem.GetMorganFingerprint(
mol, radius,
bitInfo=info) # info: dictionary identifier, atom_idx, radius
mol_atoms = [a.GetIdx() for a in mol.GetAtoms()]
dict_atoms = {x: {r: None for r in radii} for x in mol_atoms}
for element in info:
for atom_idx, radius_at in info[element]:
dict_atoms[atom_idx][
radius_at] = element # {atom number: {fp radius: identifier}}
# merge identifiers alternating radius to sentence: atom 0 radius0, atom 0 radius 1, etc.
identifiers_alt = []
for atom in dict_atoms: # iterate over atoms
for r in radii: # iterate over radii
identifiers_alt.append(dict_atoms[atom][r])
alternating_sentence = map(str, [x for x in identifiers_alt if x])
return list(alternating_sentence)
class Mol2VecFingerprint(MolecularFeaturizer):
"""Mol2Vec fingerprints.
This class convert molecules to vector representations by using Mol2Vec.
Mol2Vec is an unsupervised machine learning approach to learn vector representations
of molecular substructures and the algorithm is based on Word2Vec, which is
one of the most popular technique to learn word embeddings using neural network in NLP.
Please see the details from [1]_.
The Mol2Vec requires the pretrained model, so we use the model which is put on the mol2vec
github repository [2]_. The default model was trained on 20 million compounds downloaded
    from ZINC using the following parameters.
- radius 1
- UNK to replace all identifiers that appear less than 4 times
- skip-gram and window size of 10
- embeddings size 300
References
----------
.. [1] Jaeger, Sabrina, <NAME>, and <NAME>. "Mol2vec: unsupervised machine learning
approach with chemical intuition." Journal of chemical information and modeling 58.1 (2018): 27-35.
.. [2] https://github.com/samoturk/mol2vec/
Note
----
This class requires mol2vec to be installed.
Examples
--------
>>> import deepchem as dc
>>> from rdkit import Chem
>>> smiles = ['CCC']
>>> featurizer = dc.feat.Mol2VecFingerprint()
>>> features = featurizer.featurize(smiles)
>>> type(features)
<class 'numpy.ndarray'>
>>> features[0].shape
(300,)
"""
def __init__(self,
pretrain_model_path: Optional[str] = None,
radius: int = 1,
unseen: str = 'UNK'):
"""
Parameters
----------
        pretrain_model_path: str, optional
The path for pretrained model. If this value is None, we use the model which is put on
github repository (https://github.com/samoturk/mol2vec/tree/master/examples/models).
The model is trained on 20 million compounds downloaded from ZINC.
radius: int, optional (default 1)
The fingerprint radius. The default value was used to train the model which is put on
github repository.
unseen: str, optional (default 'UNK')
            The string used to replace uncommon words/identifiers while training.
"""
try:
from gensim.models import word2vec
except ModuleNotFoundError:
raise ImportError("This class requires mol2vec to be installed.")
self.radius = radius
self.unseen = unseen
self.mol2alt_sentence = _mol2alt_sentence
if pretrain_model_path is None:
data_dir = get_data_dir()
pretrain_model_path = path.join(data_dir,
'mol2vec_model_300dim.pkl')
if not path.exists(pretrain_model_path):
targz_file = path.join(data_dir, 'mol2vec_model_300dim.tar.gz')
if not path.exists(targz_file):
download_url(DEFAULT_PRETRAINED_MODEL_URL, data_dir)
untargz_file(path.join(data_dir, 'mol2vec_model_300dim.tar.gz'),
data_dir)
# load pretrained models
self.model = word2vec.Word2Vec.load(pretrain_model_path)
def sentences2vec(self, sentences: list, model, unseen=None) -> np.ndarray:
"""Generate vectors for each sentence (list) in a list of sentences. Vector is simply a
sum of vectors for individual words.
Parameters
----------
sentences : list, array
List with sentences
model : word2vec.Word2Vec
Gensim word2vec model
unseen : None, str
Keyword for unseen words. If None, those words are skipped.
https://stats.stackexchange.com/questions/163005/how-to-set-the-dictionary-for-text-analysis-using-neural-networks/163032#163032
Returns
-------
np.array
"""
keys = set(model.wv.key_to_index.keys())
vec = []
if unseen:
unseen_vec = model.wv.get_vector(unseen)
for sentence in sentences:
if unseen:
vec.append(
sum([
model.wv.get_vector(y) if y in set(sentence) &
keys else unseen_vec for y in sentence
]))
else:
vec.append(
sum([
model.wv.get_vector(y)
for y in sentence
if y in set(sentence) & keys
]))
return np.array(vec)
def _featurize(self, datapoint: RDKitMol, **kwargs) -> np.ndarray:
"""
        Calculate the Mol2Vec fingerprint.
Parameters
----------
datapoint: rdkit.Chem.rdchem.Mol
RDKit Mol object
Returns
-------
np.ndarray
1D array of mol2vec fingerprint. The default length is 300.
"""
if 'mol' in kwargs:
datapoint = kwargs.get("mol")
raise DeprecationWarning(
'Mol is being phased out as a parameter, please pass "datapoint" instead.'
)
sentence = self.mol2alt_sentence(datapoint, self.radius)
feature = self.sentences2vec([sentence], self.model,
unseen=self.unseen)[0]
return feature
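# --- Illustrative sketch (not part of the library API) ----------------------
# Shows how the pieces above fit together when used directly rather than via
# `featurize()`. Assumes RDKit is installed and the default pretrained model
# can be downloaded; the helper name `_demo_mol2vec` is hypothetical.
def _demo_mol2vec(smiles: str = "CCO") -> np.ndarray:
    from rdkit import Chem

    featurizer = Mol2VecFingerprint()
    mol = Chem.MolFromSmiles(smiles)
    # Decompose the molecule into Morgan-identifier "words"...
    sentence = featurizer.mol2alt_sentence(mol, featurizer.radius)
    # ...and sum the corresponding word2vec vectors into a single 300-dim vector.
    return featurizer.sentences2vec([sentence], featurizer.model,
                                    unseen=featurizer.unseen)[0]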
<file_sep>import unittest
from deepchem.utils.molecule_feature_utils import one_hot_encode
from deepchem.utils.molecule_feature_utils import get_atom_type_one_hot
from deepchem.utils.molecule_feature_utils import construct_hydrogen_bonding_info
from deepchem.utils.molecule_feature_utils import get_atom_hydrogen_bonding_one_hot
from deepchem.utils.molecule_feature_utils import get_atom_hybridization_one_hot
from deepchem.utils.molecule_feature_utils import get_atom_total_num_Hs_one_hot
from deepchem.utils.molecule_feature_utils import get_atom_is_in_aromatic_one_hot
from deepchem.utils.molecule_feature_utils import get_atom_chirality_one_hot
from deepchem.utils.molecule_feature_utils import get_atom_formal_charge
from deepchem.utils.molecule_feature_utils import get_atom_formal_charge_one_hot
from deepchem.utils.molecule_feature_utils import get_atom_partial_charge
from deepchem.utils.molecule_feature_utils import get_atom_total_degree_one_hot
from deepchem.utils.molecule_feature_utils import get_atom_implicit_valence_one_hot
from deepchem.utils.molecule_feature_utils import get_atom_explicit_valence_one_hot
from deepchem.utils.molecule_feature_utils import get_bond_type_one_hot
from deepchem.utils.molecule_feature_utils import get_bond_is_in_same_ring_one_hot
from deepchem.utils.molecule_feature_utils import get_bond_is_conjugated_one_hot
from deepchem.utils.molecule_feature_utils import get_bond_stereo_one_hot
from deepchem.utils.molecule_feature_utils import get_bond_graph_distance_one_hot
class TestGraphConvUtils(unittest.TestCase):
def setUp(self):
from rdkit import Chem
self.mol = Chem.MolFromSmiles("CN=C=O") # methyl isocyanate
self.mol_copper_sulfate = Chem.MolFromSmiles("[Cu+2].[O-]S(=O)(=O)[O-]")
self.mol_benzene = Chem.MolFromSmiles("c1ccccc1")
self.mol_s_alanine = Chem.MolFromSmiles("N[C@@H](C)C(=O)O")
def test_one_hot_encode(self):
# string set
assert one_hot_encode("a", ["a", "b", "c"]) == [1.0, 0.0, 0.0]
# integer set
assert one_hot_encode(2, [0.0, 1, 2]) == [0.0, 0.0, 1.0]
# include_unknown_set is False
assert one_hot_encode(3, [0.0, 1, 2]) == [0.0, 0.0, 0.0]
# include_unknown_set is True
assert one_hot_encode(3, [0.0, 1, 2], True) == [0.0, 0.0, 0.0, 1.0]
def test_get_atom_type_one_hot(self):
atoms = self.mol.GetAtoms()
assert atoms[0].GetSymbol() == "C"
one_hot = get_atom_type_one_hot(atoms[0])
assert one_hot == [1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
# check unknown atoms
atoms = self.mol_copper_sulfate.GetAtoms()
assert atoms[0].GetSymbol() == "Cu"
one_hot = get_atom_type_one_hot(atoms[0])
assert one_hot == [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0]
one_hot = get_atom_type_one_hot(atoms[0], include_unknown_set=False)
assert one_hot == [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
# check original set
atoms = self.mol.GetAtoms()
assert atoms[1].GetSymbol() == "N"
original_set = ["C", "O", "N"]
one_hot = get_atom_type_one_hot(atoms[1], allowable_set=original_set)
assert one_hot == [0.0, 0.0, 1.0, 0.0]
def test_construct_hydrogen_bonding_info(self):
info = construct_hydrogen_bonding_info(self.mol)
assert isinstance(info, list)
assert isinstance(info[0], tuple)
# Generally, =O behaves as an electron acceptor
assert info[0] == (3, "Acceptor")
def test_get_atom_hydrogen_bonding_one_hot(self):
info = construct_hydrogen_bonding_info(self.mol)
atoms = self.mol.GetAtoms()
assert atoms[0].GetSymbol() == "C"
one_hot = get_atom_hydrogen_bonding_one_hot(atoms[0], info)
assert one_hot == [0.0, 0.0]
assert atoms[3].GetSymbol() == "O"
one_hot = get_atom_hydrogen_bonding_one_hot(atoms[3], info)
assert one_hot == [0.0, 1.0]
def test_get_atom_is_in_aromatic_one_hot(self):
atoms = self.mol.GetAtoms()
assert atoms[0].GetSymbol() == "C"
one_hot = get_atom_is_in_aromatic_one_hot(atoms[0])
assert one_hot == [0.0]
atoms = self.mol_benzene.GetAtoms()
assert atoms[0].GetSymbol() == "C"
one_hot = get_atom_is_in_aromatic_one_hot(atoms[0])
assert one_hot == [1.0]
def test_get_atom_hybridization_one_hot(self):
atoms = self.mol.GetAtoms()
assert atoms[0].GetSymbol() == "C"
one_hot = get_atom_hybridization_one_hot(atoms[0])
assert one_hot == [0.0, 0.0, 1.0]
def test_get_atom_total_num_Hs_one_hot(self):
atoms = self.mol.GetAtoms()
assert atoms[0].GetSymbol() == "C"
one_hot = get_atom_total_num_Hs_one_hot(atoms[0])
assert one_hot == [0.0, 0.0, 0.0, 1.0, 0.0, 0.0]
assert atoms[3].GetSymbol() == "O"
one_hot = get_atom_total_num_Hs_one_hot(atoms[3])
assert one_hot == [1.0, 0.0, 0.0, 0.0, 0.0, 0.0]
def test_get_atom_chirality_one_hot(self):
atoms = self.mol_s_alanine.GetAtoms()
assert atoms[0].GetSymbol() == "N"
one_hot = get_atom_chirality_one_hot(atoms[0])
assert one_hot == [0.0, 0.0]
assert atoms[1].GetSymbol() == "C"
one_hot = get_atom_chirality_one_hot(atoms[1])
assert one_hot == [0.0, 1.0]
def test_get_atom_formal_charge(self):
atoms = self.mol.GetAtoms()
assert atoms[0].GetSymbol() == "C"
formal_charge = get_atom_formal_charge(atoms[0])
assert formal_charge == [0.0]
def test_get_atom_formal_charge_one_hot(self):
atoms = self.mol.GetAtoms()
assert atoms[0].GetSymbol() == "C"
formal_charge = get_atom_formal_charge_one_hot(atoms[0])
assert formal_charge == [0.0, 0.0, 1.0, 0.0, 0.0, 0.0]
def test_get_atom_partial_charge(self):
from rdkit.Chem import AllChem
atoms = self.mol.GetAtoms()
assert atoms[0].GetSymbol() == "C"
with self.assertRaises(KeyError):
get_atom_partial_charge(atoms[0])
# we must compute partial charges before using `get_atom_partial_charge`
AllChem.ComputeGasteigerCharges(self.mol)
partial_charge = get_atom_partial_charge(atoms[0])
assert len(partial_charge) == 1.0
assert isinstance(partial_charge[0], float)
def test_get_atom_total_degree_one_hot(self):
atoms = self.mol.GetAtoms()
assert atoms[0].GetSymbol() == "C"
one_hot = get_atom_total_degree_one_hot(atoms[0])
assert one_hot == [0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0]
assert atoms[3].GetSymbol() == "O"
one_hot = get_atom_total_degree_one_hot(atoms[3])
assert one_hot == [0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0]
def test_get_atom_implicit_valence_one_hot(self):
atoms = self.mol.GetAtoms()
assert atoms[0].GetSymbol() == "C"
one_hot = get_atom_implicit_valence_one_hot(atoms[0])
assert one_hot == [0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0]
assert atoms[3].GetSymbol() == "O"
one_hot = get_atom_implicit_valence_one_hot(atoms[3])
assert one_hot == [1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
def test_get_atom_explicit_valence_one_hot(self):
atoms = self.mol.GetAtoms()
assert atoms[0].GetSymbol() == "C"
one_hot = get_atom_explicit_valence_one_hot(atoms[0])
assert one_hot == [1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
assert atoms[3].GetSymbol() == "O"
one_hot = get_atom_explicit_valence_one_hot(atoms[3])
assert one_hot == [0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0]
def test_get_bond_type_one_hot(self):
bonds = self.mol.GetBonds()
one_hot = get_bond_type_one_hot(bonds[0])
# The C-N bond is a single bond
assert bonds[0].GetBeginAtomIdx() == 0.0
assert bonds[0].GetEndAtomIdx() == 1.0
assert one_hot == [1.0, 0.0, 0.0, 0.0]
def test_get_bond_is_in_same_ring_one_hot(self):
bonds = self.mol.GetBonds()
one_hot = get_bond_is_in_same_ring_one_hot(bonds[0])
assert one_hot == [0.0]
bonds = self.mol_benzene.GetBonds()
one_hot = get_bond_is_in_same_ring_one_hot(bonds[0])
assert one_hot == [1.0]
def test_get_bond_is_conjugated_one_hot(self):
bonds = self.mol.GetBonds()
one_hot = get_bond_is_conjugated_one_hot(bonds[0])
assert one_hot == [0.0]
bonds = self.mol_benzene.GetBonds()
one_hot = get_bond_is_conjugated_one_hot(bonds[0])
assert one_hot == [1.0]
def test_get_bond_stereo_one_hot(self):
bonds = self.mol.GetBonds()
one_hot = get_bond_stereo_one_hot(bonds[0])
assert one_hot == [1.0, 0.0, 0.0, 0.0, 0.0]
def test_get_bond_graph_distance_one_hot(self):
from rdkit import Chem
bonds = self.mol.GetBonds()
dist_matrix = Chem.GetDistanceMatrix(self.mol)
one_hot = get_bond_graph_distance_one_hot(bonds[0], dist_matrix)
assert one_hot == [1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
<file_sep>try:
from deepchem.data.data_loader import DFTYamlLoader
from deepchem.models.dft.scf import XCNNSCF
import torch
from deepchem.models.dft.nnxc import HybridXC
has_dqc = True
except ModuleNotFoundError:
has_dqc = False
import pytest
import numpy as np
@pytest.mark.dqc
def test_multiatom():
inputs = 'deepchem/models/tests/assets/test_beh2.yaml'
k = DFTYamlLoader()
data = k.create_dataset(inputs)
nnmodel = (torch.nn.Sequential(torch.nn.Linear(2, 10), torch.nn.Softplus(),
torch.nn.Linear(10, 1, bias=False))).to(
torch.double)
hybridxc = HybridXC("lda_x", nnmodel, aweight0=0.0)
entry = data.X[0]
evl = XCNNSCF(hybridxc, entry)
qcs = []
for system in entry.get_systems():
qcs.append(evl.run(system))
val = entry.get_val(qcs)
expected_val = np.array([0.19325158])
assert np.allclose(val, expected_val)
<file_sep>"""
This file contains deprecated utilities to work with autodock vina.
"""
from deepchem.utils.docking_utils import write_vina_conf, load_docked_ligands, prepare_inputs
import warnings
import functools
def deprecated(func):
"""This is a decorator which can be used to mark functions
as deprecated. It will result in a warning being emitted
when the function is used."""
@functools.wraps(func)
def new_func(*args, **kwargs):
warnings.simplefilter('always', DeprecationWarning) # turn off filter
warnings.warn(
"Call to deprecated function {}. Please use the corresponding function in deepchem.utils.docking_utils."
.format(func.__name__),
category=DeprecationWarning,
stacklevel=2)
warnings.simplefilter('default', DeprecationWarning) # reset filter
return func(*args, **kwargs)
return new_func
# These functions have moved to deepchem.utils_docking_utils
write_vina_conf = deprecated(write_vina_conf)
load_docked_ligands = deprecated(load_docked_ligands)
prepare_inputs = deprecated(prepare_inputs)
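# --- Illustrative sketch (not part of the library) ---------------------------
# A minimal, hypothetical example of the `deprecated` decorator defined above.
# `_old_helper` is not a real DeepChem function; it only demonstrates that the
# wrapper emits a DeprecationWarning and then calls through to the original.
@deprecated
def _old_helper(x):
    return x + 1

# Calling `_old_helper(1)` returns 2 and warns:
# "Call to deprecated function _old_helper. Please use the corresponding
#  function in deepchem.utils.docking_utils."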
<file_sep>"""
Script that trains Tensorflow singletask models on QM7 dataset.
"""
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
import os
import deepchem as dc
import numpy as np
from deepchem.models.optimizers import ExponentialDecay
np.random.seed(123)
qm7_tasks, datasets, transformers = dc.molnet.load_qm7(splitter='stratified')
train_dataset, valid_dataset, test_dataset = datasets
fit_transformers = [dc.trans.CoulombFitTransformer(train_dataset)]
metric = [
dc.metrics.Metric(dc.metrics.mean_absolute_error, mode="regression"),
dc.metrics.Metric(dc.metrics.pearson_r2_score, mode="regression")
]
rate = 0.001
model = dc.models.MultitaskFitTransformRegressor(
n_tasks=1,
n_features=[23, 23],
learning_rate=rate,
momentum=.8,
batch_size=25,
weight_init_stddevs=[1 / np.sqrt(400), 1 / np.sqrt(100), 1 / np.sqrt(100)],
bias_init_consts=[0., 0., 0.],
layer_sizes=[400, 100, 100],
dropouts=[0.01, 0.01, 0.01],
fit_transformers=fit_transformers,
seed=123)
# Fit trained model
model.fit(train_dataset, nb_epoch=50)
train_scores = model.evaluate(train_dataset, metric, transformers)
valid_scores = model.evaluate(valid_dataset, metric, transformers)
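# A minimal way to report the evaluation results computed above; the test-set
# evaluation mirrors the train/valid calls and is included for illustration.
test_scores = model.evaluate(test_dataset, metric, transformers)
print("Train scores: %s" % str(train_scores))
print("Validation scores: %s" % str(valid_scores))
print("Test scores: %s" % str(test_scores))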
<file_sep>"""This example implements RF experiments from https://pubs.acs.org/doi/abs/10.1021/acs.jcim.6b00290"""
import sys
import os
import deepchem
import deepchem as dc
import tempfile, shutil
from bace_datasets import load_bace
from deepchem.hyper import HyperparamOpt
from sklearn.ensemble import RandomForestRegressor
from sklearn.ensemble import RandomForestClassifier
from deepchem.models.sklearn_models import SklearnModel
from deepchem import metrics
from deepchem.metrics import Metric
from deepchem.utils.evaluate import Evaluator
def bace_rf_model(mode="classification", split="20-80"):
"""Train random forests on BACE dataset."""
(bace_tasks, (train, valid, test, crystal), transformers) = load_bace(
mode=mode, transform=False, split=split)
if mode == "regression":
r2_metric = Metric(metrics.r2_score)
rms_metric = Metric(metrics.rms_score)
mae_metric = Metric(metrics.mae_score)
all_metrics = [r2_metric, rms_metric, mae_metric]
metric = r2_metric
model_class = RandomForestRegressor
def rf_model_builder(model_params, model_dir):
sklearn_model = RandomForestRegressor(**model_params)
return SklearnModel(sklearn_model, model_dir)
elif mode == "classification":
roc_auc_metric = Metric(metrics.roc_auc_score)
accuracy_metric = Metric(metrics.accuracy_score)
mcc_metric = Metric(metrics.matthews_corrcoef)
# Note sensitivity = recall
recall_metric = Metric(metrics.recall_score)
model_class = RandomForestClassifier
all_metrics = [accuracy_metric, mcc_metric, recall_metric, roc_auc_metric]
metric = roc_auc_metric
def rf_model_builder(model_params, model_dir):
sklearn_model = RandomForestClassifier(**model_params)
return SklearnModel(sklearn_model, model_dir)
else:
raise ValueError("Invalid mode %s" % mode)
params_dict = {
"n_estimators": [10, 100],
"max_features": ["auto", "sqrt", "log2", None],
}
optimizer = HyperparamOpt(rf_model_builder)
best_rf, best_rf_hyperparams, all_rf_results = optimizer.hyperparam_search(
params_dict, train, valid, transformers, metric=metric)
if len(train) > 0:
rf_train_evaluator = Evaluator(best_rf, train, transformers)
csv_out = "rf_%s_%s_train.csv" % (mode, split)
stats_out = "rf_%s_%s_train_stats.txt" % (mode, split)
rf_train_score = rf_train_evaluator.compute_model_performance(
all_metrics, csv_out=csv_out, stats_out=stats_out)
print("RF Train set scores: %s" % (str(rf_train_score)))
if len(valid) > 0:
rf_valid_evaluator = Evaluator(best_rf, valid, transformers)
csv_out = "rf_%s_%s_valid.csv" % (mode, split)
stats_out = "rf_%s_%s_valid_stats.txt" % (mode, split)
rf_valid_score = rf_valid_evaluator.compute_model_performance(
all_metrics, csv_out=csv_out, stats_out=stats_out)
print("RF Valid set scores: %s" % (str(rf_valid_score)))
if len(test) > 0:
rf_test_evaluator = Evaluator(best_rf, test, transformers)
csv_out = "rf_%s_%s_test.csv" % (mode, split)
stats_out = "rf_%s_%s_test_stats.txt" % (mode, split)
rf_test_score = rf_test_evaluator.compute_model_performance(
all_metrics, csv_out=csv_out, stats_out=stats_out)
print("RF Test set: %s" % (str(rf_test_score)))
if len(crystal) > 0:
rf_crystal_evaluator = Evaluator(best_rf, crystal, transformers)
csv_out = "rf_%s_%s_crystal.csv" % (mode, split)
stats_out = "rf_%s_%s_crystal_stats.txt" % (mode, split)
rf_crystal_score = rf_crystal_evaluator.compute_model_performance(
all_metrics, csv_out=csv_out, stats_out=stats_out)
print("RF Crystal set: %s" % (str(rf_crystal_score)))
if __name__ == "__main__":
print("Classifier RF 20-80:")
print("--------------------------------")
bace_rf_model(mode="classification", split="20-80")
print("Classifier RF 80-20:")
print("--------------------------------")
bace_rf_model(mode="classification", split="80-20")
print("Regressor RF 20-80:")
print("--------------------------------")
bace_rf_model(mode="regression", split="20-80")
print("Regressor RF 80-20:")
print("--------------------------------")
bace_rf_model(mode="regression", split="80-20")
<file_sep>import logging
import time
from collections.abc import Sequence as SequenceCollection
from typing import (TYPE_CHECKING, Any, Callable, Iterable, List, Optional,
Tuple, Union)
import numpy as np
import torch
from deepchem.models.optimizers import LearningRateSchedule
from deepchem.models.torch_models import TorchModel
from deepchem.trans import Transformer, undo_transforms
from deepchem.utils.typing import LossFn, OneOrMany
from transformers.data.data_collator import DataCollatorForLanguageModeling
from transformers.models.auto import AutoModel, AutoModelForSequenceClassification, AutoModelForMaskedLM
logger = logging.getLogger(__name__)
if TYPE_CHECKING:
import transformers
from transformers.modeling_utils import PreTrainedModel
class HuggingFaceModel(TorchModel):
r"""Wrapper class that wraps HuggingFace models as DeepChem models
The class provides a wrapper for wrapping models from HuggingFace
    ecosystem in DeepChem and training them via DeepChem's API. One reason to do
    this is to make an apples-to-apples comparison between models from the
    HuggingFace `transformers` library and DeepChem models.
The `HuggingFaceModel` has a Has-A relationship by wrapping models from
`transformers` library. Once a model is wrapped, DeepChem's API are used
for training, prediction, evaluation and other downstream tasks.
A `HuggingFaceModel` wrapper also has a `tokenizer` which tokenizes raw
SMILES strings into tokens to be used by downstream models. The SMILES
strings are generally stored in the `X` attribute of deepchem.data.Dataset object'.
This differs from the DeepChem standard workflow as tokenization is done
on the fly here. The approach allows us to leverage `transformers` library's fast
tokenization algorithms and other utilities like data collation, random masking of tokens
for masked language model training etc.
Parameters
----------
model: transformers.modeling_utils.PreTrainedModel
The HuggingFace model to wrap.
task: str, (optional, default None)
The task defines the type of learning task in the model. The supported tasks are
- `mlm` - masked language modeling commonly used in pretraining
- `mtr` - multitask regression - a task used for both pretraining base models and finetuning
- `regression` - use it for regression tasks, like property prediction
- `classification` - use it for classification tasks
When the task is not specified or None, the wrapper returns raw output of the HuggingFaceModel.
In cases where the HuggingFaceModel is a model without a task specific head, this output will be
the last hidden states.
tokenizer: transformers.tokenization_utils.PreTrainedTokenizer
Tokenizer
Example
-------
>>> import os
>>> import tempfile
>>> tempdir = tempfile.mkdtemp()
>>> # preparing dataset
>>> smiles = ['CN(c1ccccc1)c1ccccc1C(=O)NCC1(O)CCOCC1', 'CC[NH+](CC)C1CCC([NH2+]C2CC2)(C(=O)[O-])C1', \
... 'COCC(CNC(=O)c1ccc2c(c1)NC(=O)C2)OC', 'OCCn1cc(CNc2cccc3c2CCCC3)nn1', \
... 'CCCCCCc1ccc(C#Cc2ccc(C#CC3=CC=C(CCC)CC3)c(C3CCCCC3)c2)c(F)c1', 'nO=C(NCc1ccc(F)cc1)N1CC=C(c2c[nH]c3ccccc23)CC1']
>>> filepath = os.path.join(tempdir, 'smiles.txt')
>>> f = open(filepath, 'w')
>>> f.write('\n'.join(smiles))
253
>>> f.close()
>>> # preparing tokenizer
>>> from tokenizers import ByteLevelBPETokenizer
>>> from transformers.models.roberta import RobertaTokenizerFast
>>> tokenizer = ByteLevelBPETokenizer()
>>> tokenizer.train(files=filepath, vocab_size=1_000, min_frequency=2, special_tokens=["<s>", "<pad>", "</s>", "<unk>", "<mask>"])
>>> tokenizer_path = os.path.join(tempdir, 'tokenizer')
>>> os.makedirs(tokenizer_path)
>>> result = tokenizer.save_model(tokenizer_path)
>>> tokenizer = RobertaTokenizerFast.from_pretrained(tokenizer_path)
>>> # preparing dataset
>>> import pandas as pd
>>> import deepchem as dc
>>> smiles = ["CCN(CCSC)C(=O)N[C@@](C)(CC)C(F)(F)F","CC1(C)CN(C(=O)Nc2cc3ccccc3nn2)C[C@@]2(CCOC2)O1"]
>>> labels = [3.112,2.432]
>>> df = pd.DataFrame(list(zip(smiles, labels)), columns=["smiles", "task1"])
>>> with dc.utils.UniversalNamedTemporaryFile(mode='w') as tmpfile:
... df.to_csv(tmpfile.name)
... loader = dc.data.CSVLoader(["task1"], feature_field="smiles", featurizer=dc.feat.DummyFeaturizer())
... dataset = loader.create_dataset(tmpfile.name)
>>> # pretraining
>>> from deepchem.models.torch_models.hf_models import HuggingFaceModel
>>> from transformers.models.roberta import RobertaForMaskedLM, RobertaModel, RobertaConfig
>>> config = RobertaConfig(vocab_size=tokenizer.vocab_size)
>>> model = RobertaForMaskedLM(config)
>>> hf_model = HuggingFaceModel(model=model, tokenizer=tokenizer, task='mlm', model_dir='model-dir')
>>> training_loss = hf_model.fit(dataset, nb_epoch=1)
>>> # finetuning a regression model
>>> from transformers.models.roberta import RobertaForSequenceClassification
>>> config = RobertaConfig(vocab_size=tokenizer.vocab_size, problem_type='regression', num_labels=1)
>>> model = RobertaForSequenceClassification(config)
>>> hf_model = HuggingFaceModel(model=model, tokenizer=tokenizer, task='regression', model_dir='model-dir')
>>> hf_model.load_from_pretrained()
>>> training_loss = hf_model.fit(dataset, nb_epoch=1)
>>> prediction = hf_model.predict(dataset) # prediction
>>> eval_results = hf_model.evaluate(dataset, metrics=dc.metrics.Metric(dc.metrics.mae_score))
>>> # finetune a classification model
>>> # making dataset suitable for classification
>>> import numpy as np
>>> y = np.random.choice([0, 1], size=dataset.y.shape)
>>> dataset = dc.data.NumpyDataset(X=dataset.X, y=y, w=dataset.w, ids=dataset.ids)
>>> from transformers import RobertaForSequenceClassification
>>> config = RobertaConfig(vocab_size=tokenizer.vocab_size)
>>> model = RobertaForSequenceClassification(config)
>>> hf_model = HuggingFaceModel(model=model, task='classification', tokenizer=tokenizer)
>>> training_loss = hf_model.fit(dataset, nb_epoch=1)
>>> predictions = hf_model.predict(dataset)
>>> eval_result = hf_model.evaluate(dataset, metrics=dc.metrics.Metric(dc.metrics.f1_score))
"""
def __init__(
self,
model: 'PreTrainedModel',
tokenizer: 'transformers.tokenization_utils.PreTrainedTokenizer',
task: Optional[str] = None,
**kwargs):
self.task = task
self.tokenizer = tokenizer
if self.task == 'mlm':
self.data_collator = DataCollatorForLanguageModeling(
tokenizer=tokenizer)
else:
self.data_collator = None # type: ignore
# Ignoring type. For TorchModel, loss is a required argument but HuggingFace computes
# loss during the forward iteration, removing the need for a loss function.
super(HuggingFaceModel, self).__init__(
model=model,
loss=None, # type: ignore
**kwargs)
def load_from_pretrained( # type: ignore
self,
model_dir: Optional[str] = None,
from_hf_checkpoint: bool = False):
"""Load HuggingFace model from a pretrained checkpoint.
The utility can be used for loading a model from a checkpoint.
Given `model_dir`, it checks for existing checkpoint in the directory.
If a checkpoint exists, the models state is loaded from the checkpoint.
If the option `from_hf_checkpoint` is set as True, then it loads a pretrained
model using HuggingFace models `from_pretrained` method. This option
interprets model_dir as a model id of a pretrained model hosted inside a model repo
on huggingface.co or path to directory containing model weights saved using `save_pretrained`
method of a HuggingFace model.
        Parameters
----------
model_dir: str
Directory containing model checkpoint
from_hf_checkpoint: bool, default False
Loads a pretrained model from HuggingFace checkpoint.
Example
-------
>>> from transformers import RobertaTokenizerFast
>>> tokenizer = RobertaTokenizerFast.from_pretrained("seyonec/PubChem10M_SMILES_BPE_60k")
>>> from deepchem.models.torch_models.hf_models import HuggingFaceModel
>>> from transformers.models.roberta import RobertaForMaskedLM, RobertaModel, RobertaConfig
>>> config = RobertaConfig(vocab_size=tokenizer.vocab_size)
>>> model = RobertaForMaskedLM(config)
>>> pretrain_model = HuggingFaceModel(model=model, tokenizer=tokenizer, task='mlm', model_dir='model-dir')
>>> pretrain_model.save_checkpoint()
>>> from transformers import RobertaForSequenceClassification
>>> config = RobertaConfig(vocab_size=tokenizer.vocab_size)
>>> model = RobertaForSequenceClassification(config)
>>> finetune_model = HuggingFaceModel(model=model, task='classification', tokenizer=tokenizer, model_dir='model-dir')
>>> finetune_model.load_from_pretrained()
"""
if model_dir is None:
model_dir = self.model_dir
if from_hf_checkpoint:
# FIXME Transformers library has an api like AutoModel.from_pretrained. It allows to
# initialise and create a model instance directly without requiring a class instance initialisation step.
# To use `load_from_pretrained` in DeepChem, we need to follow a two step process
# of initialising class instance and then loading weights via `load_from_pretrained`.
if self.task == 'mlm':
self.model = AutoModelForMaskedLM.from_pretrained(model_dir)
elif self.task in ['mtr', 'regression', 'classification']:
self.model = AutoModelForSequenceClassification.from_pretrained(
model_dir)
else:
self.model = AutoModel.from_pretrained(model_dir)
elif not from_hf_checkpoint:
checkpoints = sorted(self.get_checkpoints(model_dir))
if len(checkpoints) == 0:
raise ValueError('No checkpoint found')
else:
checkpoint = checkpoints[0]
data = torch.load(checkpoint, map_location=self.device)
self.model.load_state_dict(data['model_state_dict'],
strict=False)
def _prepare_batch(self, batch: Tuple[Any, Any, Any]):
smiles_batch, y, w = batch
tokens = self.tokenizer(smiles_batch[0].tolist(),
padding=True,
return_tensors="pt")
if self.task == 'mlm':
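            # For masked-language-model pretraining, the HuggingFace collator
            # randomly masks input tokens and builds the matching labels, so the
            # dataset's own y values are not used here.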
inputs, labels = self.data_collator.torch_mask_tokens(
tokens['input_ids'])
inputs = {
'input_ids': inputs.to(self.device),
'labels': labels.to(self.device)
}
return inputs, None, w
elif self.task in ['regression', 'classification', 'mtr']:
if y is not None:
# y is None during predict
y = torch.from_numpy(y[0])
if self.task == 'regression' or self.task == 'mtr':
y = y.float().to(self.device)
elif self.task == 'classification':
y = y.long().to(self.device)
for key, value in tokens.items():
tokens[key] = value.to(self.device)
inputs = {**tokens, 'labels': y}
return inputs, y, w
def fit_generator(self,
generator: Iterable[Tuple[Any, Any, Any]],
max_checkpoints_to_keep: int = 5,
checkpoint_interval: int = 1000,
restore: bool = False,
variables: Optional[List[torch.nn.Parameter]] = None,
loss: Optional[LossFn] = None,
callbacks: Union[Callable, List[Callable]] = [],
all_losses: Optional[List[float]] = None) -> float:
"""Train this model on data from a generator.
Parameters
----------
generator: generator
this should generate batches, each represented as a tuple of the form
(inputs, labels, weights).
max_checkpoints_to_keep: int
the maximum number of checkpoints to keep. Older checkpoints are discarded.
checkpoint_interval: int
the frequency at which to write checkpoints, measured in training steps.
Set this to 0 to disable automatic checkpointing.
restore: bool
if True, restore the model from the most recent checkpoint and continue training
from there. If False, retrain the model from scratch.
variables: list of torch.nn.Parameter
the variables to train. If None (the default), all trainable variables in
the model are used.
loss: function
a function of the form f(outputs, labels, weights) that computes the loss
for each batch. If None (the default), the model's standard loss function
is used.
callbacks: function or list of functions
one or more functions of the form f(model, step) that will be invoked after
every step. This can be used to perform validation, logging, etc.
all_losses: Optional[List[float]], optional (default None)
If specified, all logged losses are appended into this list. Note that
you can call `fit()` repeatedly with the same list and losses will
continue to be appended.
Returns
-------
The average loss over the most recent checkpoint interval
Note
----
        A HuggingFace model can return embeddings (last hidden state) and attentions.
        Support must be added to return the embeddings to the user, so that they can
        be used for other downstream applications.
"""
if not isinstance(callbacks, SequenceCollection):
callbacks = [callbacks]
self._ensure_built()
self.model.train()
avg_loss = 0.0
last_avg_loss = 0.0
averaged_batches = 0
if variables is None:
optimizer = self._pytorch_optimizer
lr_schedule = self._lr_schedule
else:
var_key = tuple(variables)
if var_key in self._optimizer_for_vars:
optimizer, lr_schedule = self._optimizer_for_vars[var_key]
else:
optimizer = self.optimizer._create_pytorch_optimizer(variables)
if isinstance(self.optimizer.learning_rate,
LearningRateSchedule):
lr_schedule = self.optimizer.learning_rate._create_pytorch_schedule(
optimizer)
else:
lr_schedule = None
self._optimizer_for_vars[var_key] = (optimizer, lr_schedule)
time1 = time.time()
# Main training loop.
for batch in generator:
if restore:
self.restore()
restore = False
inputs: OneOrMany[torch.Tensor]
inputs, labels, weights = self._prepare_batch(batch)
optimizer.zero_grad()
outputs = self.model(**inputs)
if self._loss_outputs is not None:
outputs = [outputs[i] for i in self._loss_outputs]
batch_loss = outputs.get("loss")
batch_loss.backward()
optimizer.step()
if lr_schedule is not None:
lr_schedule.step()
self._global_step += 1
current_step = self._global_step
avg_loss += batch_loss
# Report progress and write checkpoints.
averaged_batches += 1
should_log = (current_step % self.log_frequency == 0)
if should_log:
avg_loss = float(avg_loss) / averaged_batches
logger.info('Ending global_step %d: Average loss %g' %
(current_step, avg_loss))
if all_losses is not None:
all_losses.append(avg_loss)
# Capture the last avg_loss in case of return since we're resetting to 0 now
last_avg_loss = avg_loss
avg_loss = 0.0
averaged_batches = 0
if checkpoint_interval > 0 and current_step % checkpoint_interval == checkpoint_interval - 1:
self.save_checkpoint(max_checkpoints_to_keep)
for c in callbacks:
c(self, current_step)
if self.tensorboard and should_log:
self._log_scalar_to_tensorboard('loss', batch_loss,
current_step)
if (self.wandb_logger is not None) and should_log:
all_data = dict({'train/loss': batch_loss})
self.wandb_logger.log_data(all_data, step=current_step)
# Report final results.
if averaged_batches > 0:
avg_loss = float(avg_loss) / averaged_batches
logger.info('Ending global_step %d: Average loss %g' %
(current_step, avg_loss))
if all_losses is not None:
all_losses.append(avg_loss)
last_avg_loss = avg_loss
if checkpoint_interval > 0:
self.save_checkpoint(max_checkpoints_to_keep)
time2 = time.time()
logger.info("TIMING: model fitting took %0.3f s" % (time2 - time1))
return last_avg_loss
def _predict(self, generator: Iterable[Tuple[Any, Any, Any]],
transformers: List[Transformer], uncertainty: bool,
other_output_types: Optional[OneOrMany[str]]):
"""Predicts output for data provided by generator.
This is the private implementation of prediction. Do not
call it directly. Instead call one of the public prediction methods.
Parameters
----------
generator: generator
this should generate batches, each represented as a tuple of the form
(inputs, labels, weights).
transformers: list of dc.trans.Transformers
Transformers that the input data has been transformed by. The output
is passed through these transformers to undo the transformations.
uncertainty: bool
specifies whether this is being called as part of estimating uncertainty.
If True, it sets the training flag so that dropout will be enabled, and
returns the values of the uncertainty outputs.
other_output_types: list, optional
Provides a list of other output_types (strings) to predict from model.
Returns
-------
        a NumPy array if the model produces a single output, or a list of arrays
if it produces multiple outputs
Note
----
        A HuggingFace model does not output uncertainty; the argument exists only
        because it is also present in TorchModel. Similarly, other arguments like
        other_output_types are not used. Instead, a HuggingFace model outputs
        loss, logits, hidden states and attentions.
"""
results: Optional[List[List[np.ndarray]]] = None
variances: Optional[List[List[np.ndarray]]] = None
if uncertainty and (other_output_types is not None):
raise ValueError(
'This model cannot compute uncertainties and other output types simultaneously. Please invoke one at a time.'
)
if uncertainty:
if self._variance_outputs is None or len(
self._variance_outputs) == 0:
raise ValueError('This model cannot compute uncertainties')
if len(self._variance_outputs) != len(self._prediction_outputs):
raise ValueError(
'The number of variances must exactly match the number of outputs'
)
if other_output_types:
if self._other_outputs is None or len(self._other_outputs) == 0:
raise ValueError(
'This model cannot compute other outputs since no other output_types were specified.'
)
self._ensure_built()
self.model.eval()
for batch in generator:
inputs, labels, weights = batch
inputs, _, _ = self._prepare_batch((inputs, None, None))
# Invoke the model.
output_values = self.model(**inputs)
output_values = output_values.get('logits')
if isinstance(output_values, torch.Tensor):
output_values = [output_values]
output_values = [t.detach().cpu().numpy() for t in output_values]
            # Apply transformers and record results.
if uncertainty:
var = [output_values[i] for i in self._variance_outputs]
if variances is None:
variances = [var]
else:
for i, t in enumerate(var):
variances[i].append(t)
access_values = []
if other_output_types:
access_values += self._other_outputs
elif self._prediction_outputs is not None:
access_values += self._prediction_outputs
if len(access_values) > 0:
output_values = [output_values[i] for i in access_values]
if len(transformers) > 0:
if len(output_values) > 1:
raise ValueError(
"predict() does not support Transformers for models with multiple outputs."
)
elif len(output_values) == 1:
output_values = [
undo_transforms(output_values[0], transformers)
]
if results is None:
results = [[] for i in range(len(output_values))]
for i, t in enumerate(output_values):
results[i].append(t)
# Concatenate arrays to create the final results.
final_results = []
final_variances = []
if results is not None:
for r in results:
final_results.append(np.concatenate(r, axis=0))
if uncertainty and variances is not None:
for v in variances:
final_variances.append(np.concatenate(v, axis=0))
return zip(final_results, final_variances)
if len(final_results) == 1:
return final_results[0]
else:
return np.array(final_results)
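# Hedged usage sketch for the training and prediction loops above. The wrapper
# class that owns fit_generator()/_predict() is defined earlier in this module
# and is not shown in this excerpt, so `model` below stands for an already
# constructed instance of it (an assumption). The batch format follows the
# docstrings above: the generator yields (inputs, labels, weights) tuples,
# which _prepare_batch() tokenizes and moves to the model device.
#
#     def batch_generator(dataset, batch_size=8):
#         for X_b, y_b, w_b, ids_b in dataset.iterbatches(batch_size):
#             yield (X_b, y_b, w_b)
#
#     avg_loss = model.fit_generator(batch_generator(train_dataset))
#     predictions = model.predict(test_dataset)  # public wrapper around _predict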
<file_sep>"""
SIDER dataset loader.
"""
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
import os
import numpy as np
import shutil
import deepchem as dc
def load_sider(featurizer='ECFP', split='index'):
current_dir = os.path.dirname(os.path.realpath(__file__))
# Load SIDER dataset
print("About to load SIDER dataset.")
dataset_file = os.path.join(current_dir, "./sider.csv.gz")
dataset = dc.utils.save.load_from_disk(dataset_file)
print("Columns of dataset: %s" % str(dataset.columns.values))
print("Number of examples in dataset: %s" % str(dataset.shape[0]))
# Featurize SIDER dataset
print("About to featurize SIDER dataset.")
if featurizer == 'ECFP':
featurizer = dc.feat.CircularFingerprint(size=1024)
elif featurizer == 'GraphConv':
featurizer = dc.feat.ConvMolFeaturizer()
SIDER_tasks = dataset.columns.values[1:].tolist()
print("SIDER tasks: %s" % str(SIDER_tasks))
print("%d tasks in total" % len(SIDER_tasks))
loader = dc.data.CSVLoader(
tasks=SIDER_tasks, smiles_field="smiles", featurizer=featurizer)
dataset = loader.featurize(dataset_file)
print("%d datapoints in SIDER dataset" % len(dataset))
# Initialize transformers
transformers = [dc.trans.BalancingTransformer(dataset=dataset)]
print("About to transform data")
for transformer in transformers:
dataset = transformer.transform(dataset)
splitters = {
'index': dc.splits.IndexSplitter(),
'random': dc.splits.RandomSplitter(),
'scaffold': dc.splits.ScaffoldSplitter()
}
splitter = splitters[split]
train, valid, test = splitter.train_valid_test_split(dataset)
return SIDER_tasks, (train, valid, test), transformers
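# Minimal usage sketch for load_sider() above, assuming sider.csv.gz sits next
# to this module (as the loader expects) and a random split is wanted.
if __name__ == "__main__":
    sider_tasks, (train, valid, test), sider_transformers = load_sider(
        featurizer='ECFP', split='random')
    print("Loaded %d SIDER tasks" % len(sider_tasks))
    print("Split sizes: %d train / %d valid / %d test" %
          (len(train), len(valid), len(test)))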
<file_sep>from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
import os
import sys
import deepchem as dc
import numpy as np
import tensorflow as tf
from deepchem.models.atomic_conv import atomic_conv_model
sys.path.append("../../models")
from deepchem.models.tensorgraph.layers import Layer, Feature, Label, L2Loss, AtomicConvolution, Transpose, Dense
from deepchem.models import TensorGraph
import itertools
import time
seed = 123
np.random.seed(seed)
tf.set_random_seed(seed)
base_dir = os.getcwd()
data_dir = os.path.join(base_dir, "datasets")
train_dir = os.path.join(data_dir, "random_train")
test_dir = os.path.join(data_dir, "random_test")
model_dir = os.path.join(base_dir, "random_model")
train_dataset = dc.data.DiskDataset(train_dir)
test_dataset = dc.data.DiskDataset(test_dir)
pdbbind_tasks = ["-logKd/Ki"]
transformers = []
y_train = train_dataset.y
y_train *= -1 * 2.479 / 4.184
train_dataset = dc.data.DiskDataset.from_numpy(
train_dataset.X,
y_train,
train_dataset.w,
train_dataset.ids,
tasks=pdbbind_tasks)
y_test = test_dataset.y
y_test *= -1 * 2.479 / 4.184
test_dataset = dc.data.DiskDataset.from_numpy(
test_dataset.X,
y_test,
test_dataset.w,
test_dataset.ids,
tasks=pdbbind_tasks)
batch_size = 24
tg, feed_dict_generator, label = atomic_conv_model()
print("Fitting")
metric = [
dc.metrics.Metric(dc.metrics.mean_absolute_error, mode="regression"),
dc.metrics.Metric(dc.metrics.pearson_r2_score, mode="regression")
]
tg.fit_generator(feed_dict_generator(train_dataset, batch_size, epochs=10))
train_evaluator = dc.utils.evaluate.GeneratorEvaluator(
tg, feed_dict_generator(train_dataset, batch_size), transformers, [label])
train_scores = train_evaluator.compute_model_performance(metric)
print("Train scores")
print(train_scores)
test_evaluator = dc.utils.evaluate.GeneratorEvaluator(
tg, feed_dict_generator(test_dataset, batch_size), transformers, [label])
test_scores = test_evaluator.compute_model_performance(metric)
print("Test scores")
print(test_scores)
<file_sep>"""
Contains BACE data loading utilities.
"""
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
import sys
import os
import deepchem
import tempfile
import deepchem as dc
from deepchem.molnet.load_function.bace_features import bace_user_specified_features
def load_bace(mode="regression", transform=True, split="20-80"):
"""Load BACE-1 dataset as regression/classification problem."""
assert split in ["20-80", "80-20"]
assert mode in ["regression", "classification"]
current_dir = os.path.dirname(os.path.realpath(__file__))
if split == "20-80":
dataset_file = os.path.join(current_dir,
"../../datasets/desc_canvas_aug30.csv")
elif split == "80-20":
dataset_file = os.path.join(current_dir,
"../../datasets/rev8020split_desc.csv")
crystal_dataset_file = os.path.join(
current_dir, "../../datasets/crystal_desc_canvas_aug30.csv")
if mode == "regression":
bace_tasks = ["pIC50"]
elif mode == "classification":
bace_tasks = ["Class"]
featurizer = dc.feat.UserDefinedFeaturizer(bace_user_specified_features)
loader = dc.data.UserCSVLoader(
tasks=bace_tasks,
smiles_field="mol",
id_field="CID",
featurizer=featurizer)
dataset = loader.featurize(dataset_file)
crystal_dataset = loader.featurize(crystal_dataset_file)
splitter = dc.splits.SpecifiedSplitter(dataset_file, "Model")
train_dataset, valid_dataset, test_dataset = splitter.train_valid_test_split(
dataset)
#NOTE THE RENAMING:
if split == "20-80":
valid_dataset, test_dataset = test_dataset, valid_dataset
print("Number of compounds in train set")
print(len(train_dataset))
print("Number of compounds in validation set")
print(len(valid_dataset))
print("Number of compounds in test set")
print(len(test_dataset))
print("Number of compounds in crystal set")
print(len(crystal_dataset))
transformers = [
dc.trans.NormalizationTransformer(
transform_X=True, dataset=train_dataset),
dc.trans.ClippingTransformer(transform_X=True, dataset=train_dataset)
]
if mode == "regression":
transformers += [
dc.trans.NormalizationTransformer(
transform_y=True, dataset=train_dataset)
]
for dataset in [train_dataset, valid_dataset, test_dataset, crystal_dataset]:
if len(dataset) > 0:
for transformer in transformers:
dataset = transformer.transform(dataset)
return (bace_tasks, (train_dataset, valid_dataset, test_dataset,
crystal_dataset), transformers)
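# Minimal usage sketch for load_bace() above, assuming the CSV files referenced
# in the function are available under ../../datasets/ relative to this module.
if __name__ == "__main__":
    tasks, (train, valid, test, crystal), transformers = load_bace(
        mode="classification", transform=True, split="20-80")
    print("BACE tasks: %s" % str(tasks))
    print("%d train / %d valid / %d test / %d crystal compounds" %
          (len(train), len(valid), len(test), len(crystal)))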
<file_sep>#################################################################
# save.py is out of date. You should not import any functions from here.
#################################################################
# flake8: noqa
import logging
logger = logging.getLogger(__name__)
logger.warning("deepchem.utils.save has been deprecated.\n"
"The utilities in save.py are moved to deepchem.utils.data_utils"
" or deepchem.utils.genomics_utils.")
from deepchem.utils.data_utils import *
from deepchem.utils.genomics_utils import *
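# Hedged migration sketch: per the warning above, import utilities from their
# new homes instead of this module, e.g.
#
#     from deepchem.utils.data_utils import load_from_disk, save_to_disk
#
# (symbol names assumed from the deprecated deepchem.utils.save API used
# elsewhere in this codebase, e.g. dc.utils.save.load_from_disk).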
<file_sep>"""
Script that trains multitask models on Tox21 dataset.
"""
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
import os
import shutil
import numpy as np
import deepchem as dc
from deepchem.molnet import load_tox21
from sklearn.linear_model import LogisticRegression
# Only for debug!
np.random.seed(123)
# Load Tox21 dataset
n_features = 1024
tox21_tasks, tox21_datasets, transformers = load_tox21()
train_dataset, valid_dataset, test_dataset = tox21_datasets
# Fit models
metric = dc.metrics.Metric(dc.metrics.roc_auc_score, np.mean)
def model_builder(model_dir_logreg):
sklearn_model = LogisticRegression(
penalty="l2", C=1. / 0.05, class_weight="balanced", n_jobs=-1)
return dc.models.sklearn_models.SklearnModel(sklearn_model, model_dir_logreg)
model = dc.models.multitask.SingletaskToMultitask(tox21_tasks, model_builder)
# Fit trained model
model.fit(train_dataset)
model.save()
print("Evaluating model")
train_scores = model.evaluate(train_dataset, [metric], transformers)
valid_scores = model.evaluate(valid_dataset, [metric], transformers)
print("Train scores")
print(train_scores)
print("Validation scores")
print(valid_scores)
<file_sep>"""
Tests for FeaturizedSamples class
"""
import os
import tempfile
import shutil
import deepchem as dc
def test_unlabelled():
current_dir = os.path.dirname(os.path.abspath(__file__))
input_file = os.path.join(current_dir, "../../data/tests/no_labels.csv")
featurizer = dc.feat.CircularFingerprint(size=1024)
loader = dc.data.CSVLoader(tasks=[],
feature_field="smiles",
featurizer=featurizer)
dataset = loader.create_dataset(input_file)
assert len(dataset.X)
def test_scaffold_test_train_valid_test_split():
"""Test of singletask RF ECFP regression API."""
current_dir = os.path.dirname(os.path.abspath(__file__))
tasks = ["log-solubility"]
input_file = os.path.join(current_dir,
"../../models/tests/assets/example.csv")
featurizer = dc.feat.CircularFingerprint(size=1024)
input_file = os.path.join(current_dir, input_file)
loader = dc.data.CSVLoader(tasks=tasks,
feature_field="smiles",
featurizer=featurizer)
dataset = loader.create_dataset(input_file)
# Splits featurized samples into train/test
splitter = dc.splits.ScaffoldSplitter()
train_dataset, valid_dataset, test_dataset = splitter.train_valid_test_split(
dataset)
assert len(train_dataset) == 8
assert len(valid_dataset) == 1
assert len(test_dataset) == 1
def test_scaffold_test_train_test_split():
"""Test of singletask RF ECFP regression API."""
current_dir = os.path.dirname(os.path.abspath(__file__))
tasks = ["log-solubility"]
input_file = os.path.join(current_dir,
"../../models/tests/assets/example.csv")
featurizer = dc.feat.CircularFingerprint(size=1024)
input_file = os.path.join(current_dir, input_file)
loader = dc.data.CSVLoader(tasks=tasks,
feature_field="smiles",
featurizer=featurizer)
dataset = loader.create_dataset(input_file)
# Splits featurized samples into train/test
splitter = dc.splits.ScaffoldSplitter()
train_dataset, test_dataset = splitter.train_test_split(dataset)
assert len(train_dataset) == 8
assert len(test_dataset) == 2
def test_random_test_train_valid_test_split():
"""Test of singletask RF ECFP regression API."""
current_dir = os.path.dirname(os.path.abspath(__file__))
tasks = ["log-solubility"]
input_file = os.path.join(current_dir,
"../../models/tests/assets/example.csv")
featurizer = dc.feat.CircularFingerprint(size=1024)
input_file = os.path.join(current_dir, input_file)
loader = dc.data.CSVLoader(tasks=tasks,
feature_field="smiles",
featurizer=featurizer)
dataset = loader.create_dataset(input_file)
# Splits featurized samples into train/test
splitter = dc.splits.RandomSplitter()
train_dataset, valid_dataset, test_dataset = splitter.train_valid_test_split(
dataset)
assert len(train_dataset) == 8
assert len(valid_dataset) == 1
assert len(test_dataset) == 1
def test_random_test_train_test_split():
"""Test of singletask RF ECFP regression API."""
current_dir = os.path.dirname(os.path.abspath(__file__))
tasks = ["log-solubility"]
input_file = os.path.join(current_dir,
"../../models/tests/assets/example.csv")
featurizer = dc.feat.CircularFingerprint(size=1024)
loader = dc.data.CSVLoader(tasks=tasks,
feature_field="smiles",
featurizer=featurizer)
dataset = loader.create_dataset(input_file)
# Splits featurized samples into train/test
splitter = dc.splits.RandomSplitter()
train_dataset, test_dataset = splitter.train_test_split(dataset)
assert len(train_dataset) == 8
assert len(test_dataset) == 2
def test_log_solubility_dataset():
"""Test of loading for simple log-solubility dataset."""
current_dir = os.path.dirname(os.path.realpath(__file__))
input_file = "../../models/tests/assets/example.csv"
input_file = os.path.join(current_dir, input_file)
tasks = ["log-solubility"]
loader = dc.data.CSVLoader(
tasks=tasks,
feature_field="smiles",
featurizer=dc.feat.CircularFingerprint(size=1024))
dataset = loader.create_dataset(input_file)
assert len(dataset) == 10
def test_dataset_move():
"""Test that dataset can be moved and reloaded."""
current_dir = os.path.dirname(os.path.abspath(__file__))
base_dir = tempfile.mkdtemp()
data_dir = os.path.join(base_dir, "data")
moved_data_dir = os.path.join(base_dir, "moved_data")
dataset_file = os.path.join(current_dir,
"../../models/tests/assets/example.csv")
featurizer = dc.feat.CircularFingerprint(size=1024)
tasks = ["log-solubility"]
loader = dc.data.CSVLoader(tasks=tasks,
feature_field="smiles",
featurizer=featurizer)
featurized_dataset = loader.create_dataset(dataset_file, data_dir)
n_dataset = len(featurized_dataset)
# Now perform move
shutil.move(data_dir, moved_data_dir)
moved_featurized_dataset = dc.data.DiskDataset(moved_data_dir)
assert len(moved_featurized_dataset) == n_dataset
<file_sep>"""
Script that trains Sklearn multitask models on PCBA dataset.
"""
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
import os
import numpy as np
import shutil
from deepchem.molnet import load_pcba
from sklearn.ensemble import RandomForestClassifier
from deepchem.models.multitask import SingletaskToMultitask
from deepchem import metrics
from deepchem.metrics import Metric
from deepchem.models.sklearn_models import SklearnModel
from deepchem.utils.evaluate import Evaluator
np.random.seed(123)
# Set some global variables up top
reload = True
is_verbose = False
base_dir = "/tmp/pcba_sklearn"
model_dir = os.path.join(base_dir, "model")
if os.path.exists(base_dir):
shutil.rmtree(base_dir)
os.makedirs(base_dir)
pcba_tasks, pcba_datasets, transformers = load_pcba()
(train_dataset, valid_dataset, test_dataset) = pcba_datasets
classification_metric = Metric(
metrics.roc_auc_score, np.mean, verbose=is_verbose, mode="classification")
def model_builder(model_dir):
sklearn_model = RandomForestClassifier(
class_weight="balanced", n_estimators=500)
return SklearnModel(sklearn_model, model_dir)
model = SingletaskToMultitask(pcba_tasks, model_builder, model_dir)
# Fit trained model
model.fit(train_dataset)
model.save()
train_evaluator = Evaluator(
model, train_dataset, transformers, verbose=is_verbose)
train_scores = train_evaluator.compute_model_performance(
[classification_metric])
print("Train scores")
print(train_scores)
valid_evaluator = Evaluator(
model, valid_dataset, transformers, verbose=is_verbose)
valid_scores = valid_evaluator.compute_model_performance(
[classification_metric])
print("Validation scores")
print(valid_scores)
<file_sep>import deepchem as dc
CheckFeaturizer = {
('bace_c', 'logreg'): ['ECFP', 1024],
('bace_c', 'tf'): ['ECFP', 1024],
('bace_c', 'tf_robust'): ['ECFP', 1024],
('bace_c', 'rf'): ['ECFP', 1024],
('bace_c', 'kernelsvm'): ['ECFP', 1024],
('bace_c', 'irv'): ['ECFP', 1024],
('bace_c', 'xgb'): ['ECFP', 1024],
('bace_c', 'graphconv'): ['GraphConv', 75],
('bace_c', 'dag'): ['GraphConv', 75],
('bace_c', 'weave'): ['Weave', 75],
('bace_c', 'textcnn'): ['Raw', None],
('bace_c', 'mpnn'): ['Weave', [75, 14]],
('bbbp', 'logreg'): ['ECFP', 1024],
('bbbp', 'tf'): ['ECFP', 1024],
('bbbp', 'tf_robust'): ['ECFP', 1024],
('bbbp', 'rf'): ['ECFP', 1024],
('bbbp', 'kernelsvm'): ['ECFP', 1024],
('bbbp', 'irv'): ['ECFP', 1024],
('bbbp', 'xgb'): ['ECFP', 1024],
('bbbp', 'graphconv'): ['GraphConv', 75],
('bbbp', 'dag'): ['GraphConv', 75],
('bbbp', 'weave'): ['Weave', 75],
('bbbp', 'textcnn'): ['Raw', None],
('bbbp', 'mpnn'): ['Weave', [75, 14]],
('clintox', 'logreg'): ['ECFP', 1024],
('clintox', 'tf'): ['ECFP', 1024],
('clintox', 'tf_robust'): ['ECFP', 1024],
('clintox', 'rf'): ['ECFP', 1024],
('clintox', 'kernelsvm'): ['ECFP', 1024],
('clintox', 'irv'): ['ECFP', 1024],
('clintox', 'xgb'): ['ECFP', 1024],
('clintox', 'graphconv'): ['GraphConv', 75],
('clintox', 'dag'): ['GraphConv', 75],
('clintox', 'weave'): ['Weave', 75],
('clintox', 'textcnn'): ['Raw', None],
('clintox', 'mpnn'): ['Weave', [75, 14]],
('hiv', 'logreg'): ['ECFP', 1024],
('hiv', 'tf'): ['ECFP', 1024],
('hiv', 'tf_robust'): ['ECFP', 1024],
('hiv', 'rf'): ['ECFP', 1024],
('hiv', 'kernelsvm'): ['ECFP', 1024],
('hiv', 'irv'): ['ECFP', 1024],
('hiv', 'xgb'): ['ECFP', 1024],
('hiv', 'graphconv'): ['GraphConv', 75],
('hiv', 'dag'): ['GraphConv', 75],
('hiv', 'weave'): ['Weave', 75],
('hiv', 'textcnn'): ['Raw', None],
('hiv', 'mpnn'): ['Weave', [75, 14]],
('muv', 'logreg'): ['ECFP', 1024],
('muv', 'tf'): ['ECFP', 1024],
('muv', 'tf_robust'): ['ECFP', 1024],
('muv', 'rf'): ['ECFP', 1024],
('muv', 'kernelsvm'): ['ECFP', 1024],
('muv', 'irv'): ['ECFP', 1024],
('muv', 'xgb'): ['ECFP', 1024],
('muv', 'graphconv'): ['GraphConv', 75],
('muv', 'siamese'): ['GraphConv', 75],
('muv', 'attn'): ['GraphConv', 75],
('muv', 'res'): ['GraphConv', 75],
('muv', 'weave'): ['Weave', 75],
('muv', 'textcnn'): ['Raw', None],
('muv', 'mpnn'): ['Weave', [75, 14]],
('pcba', 'logreg'): ['ECFP', 1024],
('pcba', 'tf'): ['ECFP', 1024],
('pcba', 'tf_robust'): ['ECFP', 1024],
('pcba', 'irv'): ['ECFP', 1024],
('pcba', 'xgb'): ['ECFP', 1024],
('pcba', 'graphconv'): ['GraphConv', 75],
('pcba', 'weave'): ['Weave', 75],
('pcba', 'textcnn'): ['Raw', None],
('pcba_146', 'logreg'): ['ECFP', 1024],
('pcba_146', 'tf'): ['ECFP', 1024],
('pcba_146', 'tf_robust'): ['ECFP', 1024],
('pcba_146', 'irv'): ['ECFP', 1024],
('pcba_146', 'xgb'): ['ECFP', 1024],
('pcba_146', 'graphconv'): ['GraphConv', 75],
('pcba_146', 'weave'): ['Weave', 75],
('pcba_2475', 'logreg'): ['ECFP', 1024],
('pcba_2475', 'tf'): ['ECFP', 1024],
('pcba_2475', 'tf_robust'): ['ECFP', 1024],
('pcba_2475', 'irv'): ['ECFP', 1024],
('pcba_2475', 'xgb'): ['ECFP', 1024],
('pcba_2475', 'graphconv'): ['GraphConv', 75],
('pcba_2475', 'weave'): ['Weave', 75],
('sider', 'logreg'): ['ECFP', 1024],
('sider', 'tf'): ['ECFP', 1024],
('sider', 'tf_robust'): ['ECFP', 1024],
('sider', 'rf'): ['ECFP', 1024],
('sider', 'kernelsvm'): ['ECFP', 1024],
('sider', 'irv'): ['ECFP', 1024],
('sider', 'xgb'): ['ECFP', 1024],
('sider', 'graphconv'): ['GraphConv', 75],
('sider', 'dag'): ['GraphConv', 75],
('sider', 'weave'): ['Weave', 75],
('sider', 'siamese'): ['GraphConv', 75],
('sider', 'attn'): ['GraphConv', 75],
('sider', 'res'): ['GraphConv', 75],
('sider', 'textcnn'): ['Raw', None],
('sider', 'mpnn'): ['Weave', [75, 14]],
('tox21', 'logreg'): ['ECFP', 1024],
('tox21', 'tf'): ['ECFP', 1024],
('tox21', 'tf_robust'): ['ECFP', 1024],
('tox21', 'rf'): ['ECFP', 1024],
('tox21', 'kernelsvm'): ['ECFP', 1024],
('tox21', 'irv'): ['ECFP', 1024],
('tox21', 'xgb'): ['ECFP', 1024],
('tox21', 'graphconv'): ['GraphConv', 75],
('tox21', 'dag'): ['GraphConv', 75],
('tox21', 'weave'): ['Weave', 75],
('tox21', 'siamese'): ['GraphConv', 75],
('tox21', 'attn'): ['GraphConv', 75],
('tox21', 'res'): ['GraphConv', 75],
('tox21', 'textcnn'): ['Raw', None],
('tox21', 'mpnn'): ['Weave', [75, 14]],
('toxcast', 'logreg'): ['ECFP', 1024],
('toxcast', 'tf'): ['ECFP', 1024],
('toxcast', 'tf_robust'): ['ECFP', 1024],
('toxcast', 'rf'): ['ECFP', 1024],
('toxcast', 'kernelsvm'): ['ECFP', 1024],
('toxcast', 'irv'): ['ECFP', 1024],
('toxcast', 'xgb'): ['ECFP', 1024],
('toxcast', 'graphconv'): ['GraphConv', 75],
('toxcast', 'weave'): ['Weave', 75],
('toxcast', 'textcnn'): ['Raw', None],
('toxcast', 'mpnn'): ['Weave', [75, 14]],
('bace_r', 'tf_regression'): ['ECFP', 1024],
('bace_r', 'rf_regression'): ['ECFP', 1024],
('bace_r', 'krr'): ['ECFP', 1024],
('bace_r', 'xgb_regression'): ['ECFP', 1024],
('bace_r', 'graphconvreg'): ['GraphConv', 75],
('bace_r', 'dag_regression'): ['GraphConv', 75],
('bace_r', 'weave_regression'): ['Weave', 75],
('bace_r', 'textcnn_regression'): ['Raw', None],
('chembl', 'tf_regression'): ['ECFP', 1024],
('chembl', 'rf_regression'): ['ECFP', 1024],
('chembl', 'krr'): ['ECFP', 1024],
('chembl', 'xgb_regression'): ['ECFP', 1024],
('chembl', 'graphconvreg'): ['GraphConv', 75],
('chembl', 'weave_regression'): ['Weave', 75],
('clearance', 'tf_regression'): ['ECFP', 1024],
('clearance', 'rf_regression'): ['ECFP', 1024],
('clearance', 'krr'): ['ECFP', 1024],
('clearance', 'xgb_regression'): ['ECFP', 1024],
('clearance', 'graphconvreg'): ['GraphConv', 75],
('clearance', 'dag_regression'): ['GraphConv', 75],
('clearance', 'weave_regression'): ['Weave', 75],
('delaney', 'tf_regression'): ['ECFP', 1024],
('delaney', 'rf_regression'): ['ECFP', 1024],
('delaney', 'krr'): ['ECFP', 1024],
('delaney', 'xgb_regression'): ['ECFP', 1024],
('delaney', 'graphconvreg'): ['GraphConv', 75],
('delaney', 'dag_regression'): ['GraphConv', 75],
('delaney', 'weave_regression'): ['Weave', 75],
('delaney', 'mpnn'): ['Weave', [75, 14]],
('delaney', 'textcnn_regression'): ['Raw', None],
('hopv', 'tf_regression'): ['ECFP', 1024],
('hopv', 'rf_regression'): ['ECFP', 1024],
('hopv', 'krr'): ['ECFP', 1024],
('hopv', 'xgb_regression'): ['ECFP', 1024],
('hopv', 'graphconvreg'): ['GraphConv', 75],
('hopv', 'dag_regression'): ['GraphConv', 75],
('hopv', 'weave_regression'): ['Weave', 75],
('lipo', 'tf_regression'): ['ECFP', 1024],
('lipo', 'rf_regression'): ['ECFP', 1024],
('lipo', 'krr'): ['ECFP', 1024],
('lipo', 'xgb_regression'): ['ECFP', 1024],
('lipo', 'graphconvreg'): ['GraphConv', 75],
('lipo', 'dag_regression'): ['GraphConv', 75],
('lipo', 'weave_regression'): ['Weave', 75],
('lipo', 'mpnn'): ['Weave', [75, 14]],
('lipo', 'textcnn_regression'): ['Raw', None],
('nci', 'tf_regression'): ['ECFP', 1024],
('nci', 'rf_regression'): ['ECFP', 1024],
('nci', 'krr'): ['ECFP', 1024],
('nci', 'xgb_regression'): ['ECFP', 1024],
('nci', 'graphconvreg'): ['GraphConv', 75],
('nci', 'weave_regression'): ['Weave', 75],
('ppb', 'tf_regression'): ['ECFP', 1024],
('ppb', 'rf_regression'): ['ECFP', 1024],
('ppb', 'krr'): ['ECFP', 1024],
('ppb', 'xgb_regression'): ['ECFP', 1024],
('ppb', 'graphconvreg'): ['GraphConv', 75],
('ppb', 'dag_regression'): ['GraphConv', 75],
('ppb', 'weave_regression'): ['Weave', 75],
('sampl', 'tf_regression'): ['ECFP', 1024],
('sampl', 'rf_regression'): ['ECFP', 1024],
('sampl', 'krr'): ['ECFP', 1024],
('sampl', 'xgb_regression'): ['ECFP', 1024],
('sampl', 'graphconvreg'): ['GraphConv', 75],
('sampl', 'dag_regression'): ['GraphConv', 75],
('sampl', 'weave_regression'): ['Weave', 75],
('sampl', 'mpnn'): ['Weave', [75, 14]],
('sampl', 'textcnn_regression'): ['Raw', None],
('kaggle', 'tf_regression'): [None, 14293],
('kaggle', 'rf_regression'): [None, 14293],
('kaggle', 'krr'): [None, 14293],
('pdbbind', 'tf_regression'): ['grid', 2052],
('pdbbind', 'rf_regression'): ['grid', 2052],
('pdbbind', 'krr'): ['grid', 2052],
('pdbbind', 'graphconvreg'): ['GraphConv', 75],
('qm7', 'tf_regression'): ['ECFP', 1024],
('qm7', 'rf_regression'): ['ECFP', 1024],
('qm7', 'krr'): ['ECFP', 1024],
('qm7', 'krr_ft'): [dc.feat.CoulombMatrix(23), 1024],
('qm7', 'textcnn_regression'): ['Raw', None],
('qm7', 'graphconvreg'): ['GraphConv', 75],
('qm7', 'weave_regression'): ['Weave', 75],
('qm7', 'tf_regression_ft'): [dc.feat.CoulombMatrix(23), [23, 23]],
('qm7', 'dtnn'): [dc.feat.CoulombMatrix(23), [23, 23]],
('qm7', 'ani'): ['BPSymmetryFunctionInput', [23, 4]],
('qm8', 'tf_regression'): ['ECFP', 1024],
('qm8', 'rf_regression'): ['ECFP', 1024],
('qm8', 'krr'): ['ECFP', 1024],
('qm8', 'graphconvreg'): ['GraphConv', 75],
('qm8', 'tf_regression_ft'): [dc.feat.CoulombMatrix(26), [26, 26]],
('qm8', 'krr_ft'): [dc.feat.CoulombMatrix(26), 1024],
('qm8', 'dtnn'): [dc.feat.CoulombMatrix(26), [26, 26]],
('qm8', 'ani'): ['BPSymmetryFunctionInput', [26, 4]],
('qm8', 'mpnn'): ['MP', [70, 8]],
('qm8', 'weave_regression'): ['Weave', 75],
('qm8', 'textcnn_regression'): ['Raw', None],
('qm9', 'tf_regression'): ['ECFP', 1024],
('qm9', 'rf_regression'): ['ECFP', 1024],
('qm9', 'krr'): ['ECFP', 1024],
('qm9', 'graphconvreg'): ['GraphConv', 75],
('qm9', 'tf_regression_ft'): [dc.feat.CoulombMatrix(29), [29, 29]],
('qm9', 'krr_ft'): [dc.feat.CoulombMatrix(29), 1024],
('qm9', 'dtnn'): [dc.feat.CoulombMatrix(29), [29, 29]],
('qm9', 'ani'): ['BPSymmetryFunctionInput', [29, 4]],
('qm9', 'mpnn'): ['MP', [70, 8]],
('qm9', 'weave_regression'): ['Weave', 75],
('qm9', 'textcnn_regression'): ['Raw', None]
}
CheckSplit = {
'bace_c': ['random', 'scaffold'],
'bace_r': ['random', 'scaffold'],
'bbbp': ['random', 'scaffold'],
'chembl': ['index', 'random', 'scaffold', 'year'],
'clearance': ['index', 'random', 'scaffold'],
'clintox': ['index', 'random', 'scaffold'],
'delaney': ['index', 'random', 'scaffold'],
'hiv': ['index', 'random', 'scaffold', 'butina'],
'hopv': ['index', 'random', 'scaffold', 'butina'],
    'kaggle': ['index'],  # already split, no splitter required
'lipo': ['index', 'random', 'scaffold'],
'muv': ['index', 'random', 'scaffold', 'task'],
'nci': ['index', 'random', 'scaffold'],
'pcba': ['index', 'random', 'scaffold'],
'pcba_146': ['index', 'random', 'scaffold'],
'pcba_2475': ['index', 'random', 'scaffold'],
'pdbbind': ['index', 'random', 'time'],
'ppb': ['index', 'random', 'scaffold'],
'qm7': ['index', 'random', 'stratified'],
'qm8': ['index', 'random', 'stratified'],
'qm9': ['index', 'random', 'stratified'],
'sampl': ['index', 'random', 'scaffold'],
'sider': ['index', 'random', 'scaffold', 'task'],
'tox21': ['index', 'random', 'scaffold', 'butina', 'task'],
'toxcast': ['index', 'random', 'scaffold']
}
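# Illustrative lookup sketch: these tables appear to map (dataset, model) pairs
# to [featurizer, input size] and dataset names to their supported splits, so a
# benchmark driver would presumably consult them roughly like this.
if __name__ == "__main__":
    dataset_name, model_name = 'tox21', 'graphconv'
    featurizer, n_features = CheckFeaturizer[(dataset_name, model_name)]
    assert 'scaffold' in CheckSplit[dataset_name]
    print("Use featurizer %s (size %s) with, e.g., a scaffold split" %
          (featurizer, str(n_features)))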
<file_sep>import os
import unittest
from deepchem.data.data_loader import FASTQLoader
class TestFASTQLoader(unittest.TestCase):
"""
Test FASTQLoader
"""
def setUp(self):
super(TestFASTQLoader, self).setUp()
self.current_dir = os.path.dirname(os.path.abspath(__file__))
def test_fastq_one_hot(self):
input_file = os.path.join(self.current_dir, "sample1.fastq")
loader = FASTQLoader()
sequences = loader.create_dataset(input_file)
# Default file contains 4 sequences each of length 192 (excluding the end of line character '\n').
# The one-hot encoding turns base-pairs into vectors of length 5 (ATCGN).
# Expected shape is now (4, 192, 5)
assert sequences.X.shape == (4, 192, 5)
<file_sep>"""
Contains an abstract base class that supports chemically aware data splits.
"""
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
__author__ = "<NAME>, <NAME> "
__copyright__ = "Copyright 2016, Stanford University"
__license__ = "MIT"
import tempfile
import numpy as np
import itertools
from rdkit import Chem
from rdkit import DataStructs
from rdkit.Chem import AllChem
from rdkit.ML.Cluster import Butina
import deepchem as dc
from deepchem.data import DiskDataset
from deepchem.utils import ScaffoldGenerator
from deepchem.utils.save import log
from deepchem.data import NumpyDataset
from deepchem.utils.save import load_data
def generate_scaffold(smiles, include_chirality=False):
"""Compute the Bemis-Murcko scaffold for a SMILES string."""
mol = Chem.MolFromSmiles(smiles)
engine = ScaffoldGenerator(include_chirality=include_chirality)
scaffold = engine.get_scaffold(mol)
return scaffold
def randomize_arrays(array_list):
# assumes that every array is of the same dimension
num_rows = array_list[0].shape[0]
perm = np.random.permutation(num_rows)
permuted_arrays = []
for array in array_list:
permuted_arrays.append(array[perm])
return permuted_arrays
class Splitter(object):
"""
  Abstract base class for chemically aware splits.
"""
def __init__(self, verbose=False):
"""Creates splitter object."""
self.verbose = verbose
def k_fold_split(self, dataset, k, directories=None):
"""Does K-fold split of dataset."""
log("Computing K-fold split", self.verbose)
if directories is None:
directories = [tempfile.mkdtemp() for _ in range(k)]
else:
assert len(directories) == k
fold_datasets = []
# rem_dataset is remaining portion of dataset
rem_dataset = dataset
for fold in range(k):
# Note starts as 1/k since fold starts at 0. Ends at 1 since fold goes up
# to k-1.
frac_fold = 1. / (k - fold)
fold_dir = directories[fold]
fold_inds, rem_inds, _ = self.split(
rem_dataset,
frac_train=frac_fold,
frac_valid=1 - frac_fold,
frac_test=0)
fold_dataset = rem_dataset.select(fold_inds, fold_dir)
rem_dir = tempfile.mkdtemp()
rem_dataset = rem_dataset.select(rem_inds, rem_dir)
fold_datasets.append(fold_dataset)
return fold_datasets
def train_valid_test_split(self,
dataset,
train_dir=None,
valid_dir=None,
test_dir=None,
frac_train=.8,
frac_valid=.1,
frac_test=.1,
seed=None,
log_every_n=1000,
verbose=True):
"""
Splits self into train/validation/test sets.
Returns Dataset objects.
"""
if (isinstance(dataset, NumpyDataset)):
raise ValueError(
"Only possible with DiskDataset. NumpyDataset doesn't support .select"
)
log("Computing train/valid/test indices", self.verbose)
train_inds, valid_inds, test_inds = self.split(
dataset,
frac_train=frac_train,
frac_test=frac_test,
frac_valid=frac_valid,
log_every_n=log_every_n)
if train_dir is None:
train_dir = tempfile.mkdtemp()
if valid_dir is None:
valid_dir = tempfile.mkdtemp()
if test_dir is None:
test_dir = tempfile.mkdtemp()
train_dataset = dataset.select(train_inds, train_dir)
if frac_valid != 0:
valid_dataset = dataset.select(valid_inds, valid_dir)
else:
valid_dataset = None
test_dataset = dataset.select(test_inds, test_dir)
return train_dataset, valid_dataset, test_dataset
def train_valid_test_indices(self,
dataset,
train_dir=None,
valid_dir=None,
test_dir=None,
frac_train=.8,
frac_valid=.1,
frac_test=.1,
seed=None,
log_every_n=1000,
verbose=True):
"""
Splits self into train/validation/test sets.
Returns Dataset objects.
"""
log("Computing train/valid/test indices", self.verbose)
train_inds, valid_inds, test_inds = self.split(
dataset,
frac_train=frac_train,
frac_test=frac_test,
frac_valid=frac_valid,
log_every_n=log_every_n)
return train_inds, valid_inds, test_inds
def train_test_split(self,
dataset,
train_dir=None,
test_dir=None,
seed=None,
frac_train=.8,
verbose=True):
"""
Splits self into train/test sets.
Returns Dataset objects.
"""
valid_dir = tempfile.mkdtemp()
train_dataset, _, test_dataset = self.train_valid_test_split(
dataset,
train_dir,
valid_dir,
test_dir,
frac_train=frac_train,
frac_test=1 - frac_train,
frac_valid=0.,
verbose=verbose)
return train_dataset, test_dataset
def train_test_indices(self,
dataset,
train_dir=None,
test_dir=None,
seed=None,
frac_train=.8,
verbose=True):
"""
Splits self into train/test sets.
Returns Dataset objects.
"""
valid_dir = None
train_inds, _, test_inds = self.train_valid_test_indices(
dataset,
train_dir,
valid_dir,
test_dir,
frac_train=frac_train,
frac_test=1 - frac_train,
frac_valid=0.,
verbose=verbose)
return train_inds, test_inds
def split(self,
dataset,
frac_train=None,
frac_valid=None,
frac_test=None,
log_every_n=None,
verbose=False):
"""
Stub to be filled in by child classes.
"""
raise NotImplementedError
class RandomStratifiedSplitter(Splitter):
"""
RandomStratified Splitter class.
For sparse multitask datasets, a standard split offers no guarantees that the
  splits will have any active compounds. This class guarantees that each task
  will have a proportional share of its actives in each split. To do this, a
  ragged split is performed, with different numbers of compounds taken from each
  task. Thus, the length of the split arrays may exceed the requested fraction of
  the original array. That said, no datapoint is copied to more than one split, so
correctness is still ensured.
Note that this splitter is only valid for boolean label data.
TODO(rbharath): This splitter should be refactored to match style of other
splitter classes.
"""
def __generate_required_hits(self, w, frac_split):
    # Per-column count of non-zero weights, scaled to the requested split fraction.
    required_hits = (w != 0).sum(axis=0)
    required_hits = (frac_split * required_hits).astype(int)
return required_hits
def get_task_split_indices(self, y, w, frac_split):
"""Returns num datapoints needed per task to split properly."""
w_present = (w != 0)
y_present = y * w_present
# Compute number of actives needed per task.
task_actives = np.sum(y_present, axis=0)
task_split_actives = (frac_split * task_actives).astype(int)
# loop through each column and obtain index required to splice out for
# required fraction of hits
split_indices = []
n_tasks = np.shape(y)[1]
for task in range(n_tasks):
actives_count = task_split_actives[task]
cum_task_actives = np.cumsum(y_present[:, task])
# Find the first index where the cumulative number of actives equals
# the actives_count
split_index = np.amin(np.where(cum_task_actives >= actives_count)[0])
      # np.amin(np.where(...)) gives the first index at which the cumulative
      # active count reaches actives_count; add one so the slice up to the
      # split point includes that datapoint.
split_indices.append(split_index + 1)
return split_indices
# TODO(rbharath): Refactor this split method to match API of other splits (or
  # potentially refactor those to match this).
def split(self, dataset, frac_split, split_dirs=None):
"""
Method that does bulk of splitting dataset.
"""
if split_dirs is not None:
assert len(split_dirs) == 2
else:
split_dirs = [tempfile.mkdtemp(), tempfile.mkdtemp()]
# Handle edge case where frac_split is 1
if frac_split == 1:
dataset_1 = NumpyDataset(dataset.X, dataset.y, dataset.w, dataset.ids)
dataset_2 = None
return dataset_1, dataset_2
X, y, w, ids = randomize_arrays((dataset.X, dataset.y, dataset.w,
dataset.ids))
split_indices = self.get_task_split_indices(y, w, frac_split)
    # Create weight matrices for the two halves.
w_1, w_2 = np.zeros_like(w), np.zeros_like(w)
for task, split_index in enumerate(split_indices):
# copy over up to required index for weight first_split
w_1[:split_index, task] = w[:split_index, task]
w_2[split_index:, task] = w[split_index:, task]
    # Check whether any rows in w_1 or w_2 are all zeros and drop them.
rows_1 = w_1.any(axis=1)
X_1, y_1, w_1, ids_1 = X[rows_1], y[rows_1], w_1[rows_1], ids[rows_1]
dataset_1 = NumpyDataset(X_1, y_1, w_1, ids_1)
rows_2 = w_2.any(axis=1)
X_2, y_2, w_2, ids_2 = X[rows_2], y[rows_2], w_2[rows_2], ids[rows_2]
dataset_2 = NumpyDataset(X_2, y_2, w_2, ids_2)
return dataset_1, dataset_2
def train_valid_test_split(self,
dataset,
train_dir=None,
valid_dir=None,
test_dir=None,
frac_train=.8,
frac_valid=.1,
frac_test=.1,
seed=None,
log_every_n=1000):
"""Custom split due to raggedness in original split.
"""
if train_dir is None:
train_dir = tempfile.mkdtemp()
if valid_dir is None:
valid_dir = tempfile.mkdtemp()
if test_dir is None:
test_dir = tempfile.mkdtemp()
# Obtain original x, y, and w arrays and shuffle
X, y, w, ids = randomize_arrays((dataset.X, dataset.y, dataset.w,
dataset.ids))
rem_dir = tempfile.mkdtemp()
train_dataset, rem_dataset = self.split(dataset, frac_train,
[train_dir, rem_dir])
# calculate percent split for valid (out of test and valid)
if frac_valid + frac_test > 0:
valid_percentage = frac_valid / (frac_valid + frac_test)
else:
return train_dataset, None, None
# split test data into valid and test, treating sub test set also as sparse
    valid_dataset, test_dataset = self.split(rem_dataset, valid_percentage,
                                             [valid_dir, test_dir])
return train_dataset, valid_dataset, test_dataset
def k_fold_split(self, dataset, k, directories=None):
"""Needs custom implementation due to ragged splits for stratification."""
log("Computing K-fold split", self.verbose)
if directories is None:
directories = [tempfile.mkdtemp() for _ in range(k)]
else:
assert len(directories) == k
fold_datasets = []
# rem_dataset is remaining portion of dataset
rem_dataset = dataset
for fold in range(k):
# Note starts as 1/k since fold starts at 0. Ends at 1 since fold goes up
# to k-1.
frac_fold = 1. / (k - fold)
fold_dir = directories[fold]
rem_dir = tempfile.mkdtemp()
fold_dataset, rem_dataset = self.split(rem_dataset, frac_fold,
[fold_dir, rem_dir])
fold_datasets.append(fold_dataset)
return fold_datasets
class SingletaskStratifiedSplitter(Splitter):
"""
Class for doing data splits by stratification on a single task.
Example:
>>> n_samples = 100
>>> n_features = 10
>>> n_tasks = 10
>>> X = np.random.rand(n_samples, n_features)
>>> y = np.random.rand(n_samples, n_tasks)
>>> w = np.ones_like(y)
>>> dataset = DiskDataset.from_numpy(np.ones((100,n_tasks)), np.ones((100,n_tasks)), verbose=False)
>>> splitter = SingletaskStratifiedSplitter(task_number=5, verbose=False)
>>> train_dataset, test_dataset = splitter.train_test_split(dataset)
"""
def __init__(self, task_number=0, verbose=False):
"""
Creates splitter object.
Parameters
----------
task_number: int (Optional, Default 0)
Task number for stratification.
verbose: bool (Optional, Default False)
Controls logging frequency.
"""
self.task_number = task_number
self.verbose = verbose
def k_fold_split(self, dataset, k, seed=None, log_every_n=None):
"""
Splits compounds into k-folds using stratified sampling.
Overriding base class k_fold_split.
Parameters
----------
dataset: dc.data.Dataset object
Dataset.
k: int
Number of folds.
seed: int (Optional, Default None)
Random seed.
log_every_n: int (Optional, Default None)
Log every n examples (not currently used).
Returns
-------
fold_datasets: List
List containing dc.data.Dataset objects
"""
log("Computing K-fold split", self.verbose)
    # This overload takes no directories argument, so always create temporary fold dirs.
    directories = [tempfile.mkdtemp() for _ in range(k)]
y_s = dataset.y[:, self.task_number]
sortidx = np.argsort(y_s)
sortidx_list = np.array_split(sortidx, k)
fold_datasets = []
for fold in range(k):
fold_dir = directories[fold]
fold_ind = sortidx_list[fold]
fold_dataset = dataset.select(fold_ind, fold_dir)
fold_datasets.append(fold_dataset)
return fold_datasets
def split(self,
dataset,
seed=None,
frac_train=.8,
frac_valid=.1,
frac_test=.1,
log_every_n=None):
"""
Splits compounds into train/validation/test using stratified sampling.
Parameters
----------
dataset: dc.data.Dataset object
Dataset.
seed: int (Optional, Default None)
Random seed.
frac_train: float (Optional, Default .8)
Fraction of dataset put into training data.
frac_valid: float (Optional, Default .1)
Fraction of dataset put into validation data.
frac_test: float (Optional, Default .1)
Fraction of dataset put into test data.
log_every_n: int (Optional, Default None)
Log every n examples (not currently used).
Returns
-------
retval: Tuple
Tuple containing train indices, valid indices, and test indices
"""
# JSG Assert that split fractions can be written as proper fractions over 10.
    # This can be generalized in the future with some common denominator determination.
# This will work for 80/20 train/test or 80/10/10 train/valid/test (most use cases).
np.testing.assert_equal(frac_train + frac_valid + frac_test, 1.)
np.testing.assert_equal(10 * frac_train + 10 * frac_valid + 10 * frac_test,
10.)
if not seed is None:
np.random.seed(seed)
y_s = dataset.y[:, self.task_number]
sortidx = np.argsort(y_s)
split_cd = 10
train_cutoff = int(frac_train * split_cd)
valid_cutoff = int(frac_valid * split_cd) + train_cutoff
test_cutoff = int(frac_test * split_cd) + valid_cutoff
train_idx = np.array([])
valid_idx = np.array([])
test_idx = np.array([])
while sortidx.shape[0] >= split_cd:
sortidx_split, sortidx = np.split(sortidx, [split_cd])
shuffled = np.random.permutation(range(split_cd))
train_idx = np.hstack([train_idx, sortidx_split[shuffled[:train_cutoff]]])
valid_idx = np.hstack(
[valid_idx, sortidx_split[shuffled[train_cutoff:valid_cutoff]]])
test_idx = np.hstack([test_idx, sortidx_split[shuffled[valid_cutoff:]]])
# Append remaining examples to train
    if sortidx.shape[0] > 0: train_idx = np.hstack([train_idx, sortidx])
return (train_idx, valid_idx, test_idx)
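  # Worked example of the windowing above: with the default frac_train/frac_valid/
  # frac_test of .8/.1/.1, split_cd = 10 gives cutoffs of 8, 9 and 10. The indices
  # sorted by the chosen task's y-values are consumed in windows of ten; each
  # window is shuffled and contributes 8 indices to train, 1 to valid and 1 to
  # test, so every decile of the label distribution appears in every split.
  # Leftover indices (fewer than ten) are appended to train.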
class MolecularWeightSplitter(Splitter):
"""
Class for doing data splits by molecular weight.
"""
def split(self,
dataset,
seed=None,
frac_train=.8,
frac_valid=.1,
frac_test=.1,
log_every_n=None):
"""
Splits internal compounds into train/validation/test using the MW calculated
by SMILES string.
"""
np.testing.assert_almost_equal(frac_train + frac_valid + frac_test, 1.)
if not seed is None:
np.random.seed(seed)
mws = []
for smiles in dataset.ids:
mol = Chem.MolFromSmiles(smiles)
mw = Chem.rdMolDescriptors.CalcExactMolWt(mol)
mws.append(mw)
# Sort by increasing MW
mws = np.array(mws)
sortidx = np.argsort(mws)
    train_cutoff = int(frac_train * len(sortidx))
    valid_cutoff = int((frac_train + frac_valid) * len(sortidx))
return (sortidx[:train_cutoff], sortidx[train_cutoff:valid_cutoff],
sortidx[valid_cutoff:])
class RandomSplitter(Splitter):
"""
Class for doing random data splits.
"""
def split(self,
dataset,
seed=None,
frac_train=.8,
frac_valid=.1,
frac_test=.1,
log_every_n=None):
"""
Splits internal compounds randomly into train/validation/test.
"""
np.testing.assert_almost_equal(frac_train + frac_valid + frac_test, 1.)
if not seed is None:
np.random.seed(seed)
num_datapoints = len(dataset)
train_cutoff = int(frac_train * num_datapoints)
valid_cutoff = int((frac_train + frac_valid) * num_datapoints)
shuffled = np.random.permutation(range(num_datapoints))
return (shuffled[:train_cutoff], shuffled[train_cutoff:valid_cutoff],
shuffled[valid_cutoff:])
class IndexSplitter(Splitter):
"""
Class for simple order based splits.
"""
def split(self,
dataset,
seed=None,
frac_train=.8,
frac_valid=.1,
frac_test=.1,
log_every_n=None):
"""
Splits internal compounds into train/validation/test in provided order.
"""
np.testing.assert_almost_equal(frac_train + frac_valid + frac_test, 1.)
num_datapoints = len(dataset)
train_cutoff = int(frac_train * num_datapoints)
valid_cutoff = int((frac_train + frac_valid) * num_datapoints)
indices = range(num_datapoints)
return (indices[:train_cutoff], indices[train_cutoff:valid_cutoff],
indices[valid_cutoff:])
class IndiceSplitter(Splitter):
"""
Class for splits based on input order.
"""
def __init__(self, verbose=False, valid_indices=None, test_indices=None):
"""
Parameters
-----------
valid_indices: list of int
indices of samples in the valid set
test_indices: list of int
indices of samples in the test set
"""
self.verbose = verbose
self.valid_indices = valid_indices
self.test_indices = test_indices
def split(self,
dataset,
seed=None,
frac_train=.8,
frac_valid=.1,
frac_test=.1,
log_every_n=None):
"""
Splits internal compounds into train/validation/test in designated order.
"""
num_datapoints = len(dataset)
indices = np.arange(num_datapoints).tolist()
train_indices = []
if self.valid_indices is None:
self.valid_indices = []
if self.test_indices is None:
self.test_indices = []
    valid_test = list(self.valid_indices)
valid_test.extend(self.test_indices)
for indice in indices:
if not indice in valid_test:
train_indices.append(indice)
return (train_indices, self.valid_indices, self.test_indices)
def ClusterFps(fps, cutoff=0.2):
# (ytz): this is directly copypasta'd from <NAME>'s clustering example.
dists = []
nfps = len(fps)
for i in range(1, nfps):
sims = DataStructs.BulkTanimotoSimilarity(fps[i], fps[:i])
dists.extend([1 - x for x in sims])
cs = Butina.ClusterData(dists, nfps, cutoff, isDistData=True)
return cs
class ButinaSplitter(Splitter):
"""
Class for doing data splits based on the butina clustering of a bulk tanimoto
fingerprint matrix.
"""
def split(self,
dataset,
frac_train=None,
frac_valid=None,
frac_test=None,
log_every_n=1000,
cutoff=0.18):
"""
Splits internal compounds into train and validation based on the butina
clustering algorithm. This splitting algorithm has an O(N^2) run time, where N
is the number of elements in the dataset. The dataset is expected to be a classification
dataset.
This algorithm is designed to generate validation data that are novel chemotypes.
Note that this function entirely disregards the ratios for frac_train, frac_valid,
and frac_test. Furthermore, it does not generate a test set, only a train and valid set.
Setting a small cutoff value will generate smaller, finer clusters of high similarity,
whereas setting a large cutoff value will generate larger, coarser clusters of low similarity.
"""
print("Performing butina clustering with cutoff of", cutoff)
mols = []
for ind, smiles in enumerate(dataset.ids):
mols.append(Chem.MolFromSmiles(smiles))
n_mols = len(mols)
fps = [AllChem.GetMorganFingerprintAsBitVect(x, 2, 1024) for x in mols]
scaffold_sets = ClusterFps(fps, cutoff=cutoff)
scaffold_sets = sorted(scaffold_sets, key=lambda x: -len(x))
ys = dataset.y
valid_inds = []
for c_idx, cluster in enumerate(scaffold_sets):
# for m_idx in cluster:
valid_inds.extend(cluster)
# continue until we find an active in all the tasks, otherwise we can't
# compute a meaningful AUC
# TODO (ytz): really, we want at least one active and inactive in both scenarios.
# TODO (Ytz): for regression tasks we'd stop after only one cluster.
active_populations = np.sum(ys[valid_inds], axis=0)
if np.all(active_populations):
print("# of actives per task in valid:", active_populations)
print("Total # of validation points:", len(valid_inds))
break
train_inds = list(itertools.chain.from_iterable(scaffold_sets[c_idx + 1:]))
test_inds = []
return train_inds, valid_inds, []
class ScaffoldSplitter(Splitter):
"""
Class for doing data splits based on the scaffold of small molecules.
"""
def split(self,
dataset,
frac_train=.8,
frac_valid=.1,
frac_test=.1,
log_every_n=1000):
"""
Splits internal compounds into train/validation/test by scaffold.
"""
np.testing.assert_almost_equal(frac_train + frac_valid + frac_test, 1.)
scaffolds = {}
log("About to generate scaffolds", self.verbose)
data_len = len(dataset)
for ind, smiles in enumerate(dataset.ids):
if ind % log_every_n == 0:
log("Generating scaffold %d/%d" % (ind, data_len), self.verbose)
scaffold = generate_scaffold(smiles)
if scaffold not in scaffolds:
scaffolds[scaffold] = [ind]
else:
scaffolds[scaffold].append(ind)
# Sort from largest to smallest scaffold sets
scaffolds = {key: sorted(value) for key, value in scaffolds.items()}
scaffold_sets = [
scaffold_set
for (scaffold, scaffold_set) in sorted(
scaffolds.items(), key=lambda x: (len(x[1]), x[1][0]), reverse=True)
]
train_cutoff = frac_train * len(dataset)
valid_cutoff = (frac_train + frac_valid) * len(dataset)
train_inds, valid_inds, test_inds = [], [], []
log("About to sort in scaffold sets", self.verbose)
for scaffold_set in scaffold_sets:
if len(train_inds) + len(scaffold_set) > train_cutoff:
if len(train_inds) + len(valid_inds) + len(scaffold_set) > valid_cutoff:
test_inds += scaffold_set
else:
valid_inds += scaffold_set
else:
train_inds += scaffold_set
return train_inds, valid_inds, test_inds
class SpecifiedSplitter(Splitter):
"""
Class that splits data according to user specification.
"""
def __init__(self, input_file, split_field, verbose=False):
"""Provide input information for splits."""
raw_df = next(load_data([input_file], shard_size=None))
self.splits = raw_df[split_field].values
self.verbose = verbose
def split(self,
dataset,
frac_train=.8,
frac_valid=.1,
frac_test=.1,
log_every_n=1000):
"""
Splits internal compounds into train/validation/test by user-specification.
"""
train_inds, valid_inds, test_inds = [], [], []
for ind, split in enumerate(self.splits):
split = split.lower()
if split == "train":
train_inds.append(ind)
elif split in ["valid", "validation"]:
valid_inds.append(ind)
elif split == "test":
test_inds.append(ind)
else:
raise ValueError("Missing required split information.")
return train_inds, valid_inds, test_inds
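# Minimal usage sketch for the splitters above. RandomSplitter.split() only
# needs len(dataset), so a small in-memory NumpyDataset is enough to
# demonstrate it; the array sizes below are illustrative.
if __name__ == "__main__":
    X_demo = np.random.rand(20, 4)
    y_demo = np.random.rand(20, 1)
    demo_dataset = NumpyDataset(X_demo, y_demo)
    train_inds, valid_inds, test_inds = RandomSplitter().split(
        demo_dataset, seed=123, frac_train=.8, frac_valid=.1, frac_test=.1)
    print("Index counts: %d train / %d valid / %d test" %
          (len(train_inds), len(valid_inds), len(test_inds)))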
<file_sep>import unittest
import os
from deepchem.utils.data_utils import load_sdf_files
class TestFileLoading(unittest.TestCase):
def test_load_sdf_files(self):
current_dir = os.path.dirname(os.path.realpath(__file__))
file_path = [os.path.join(current_dir, 'assets', 'gdb9_small.sdf')]
for df in load_sdf_files(file_path):
break
df_shape = (2, 6)
self.assertEqual(df.shape, df_shape)
self.assertEqual(df['smiles'][0], '[H]C([H])([H])[H]')
n_atoms_mol1 = 5
self.assertEqual(df['mol'][0].GetNumAtoms(), n_atoms_mol1)
self.assertEqual(len(eval(df['pos_x'][0])), n_atoms_mol1)
self.assertEqual(len(eval(df['pos_y'][0])), n_atoms_mol1)
        self.assertEqual(len(eval(df['pos_z'][0])), n_atoms_mol1)
<file_sep>"""
KINASE dataset loader
"""
import os
import logging
import time
import deepchem
from deepchem.molnet.load_function.kaggle_features import merck_descriptors
from deepchem.utils import remove_missing_entries
TRAIN_URL = "https://deepchemdata.s3-us-west-1.amazonaws.com/datasets/KINASE_training_disguised_combined_full.csv.gz"
VALID_URL = "https://deepchemdata.s3-us-west-1.amazonaws.com/datasets/KINASE_test1_disguised_combined_full.csv.gz"
TEST_URL = "https://deepchemdata.s3-us-west-1.amazonaws.com/datasets/KINASE_test2_disguised_combined_full.csv.gz"
TRAIN_FILENAME = "KINASE_training_disguised_combined_full.csv.gz"
VALID_FILENAME = "KINASE_test1_disguised_combined_full.csv.gz"
TEST_FILENAME = "KINASE_test2_disguised_combined_full.csv.gz"
logger = logging.getLogger(__name__)
def get_transformers(train_dataset):
"""Gets transformers applied to the dataset"""
# TODO: Check for this
transformers = list()
return transformers
def gen_kinase(KINASE_tasks,
train_dir,
valid_dir,
test_dir,
data_dir,
shard_size=2000):
time1 = time.time()
train_files = os.path.join(data_dir, TRAIN_FILENAME)
valid_files = os.path.join(data_dir, VALID_FILENAME)
test_files = os.path.join(data_dir, TEST_FILENAME)
# Download files if they don't exist
if not os.path.exists(train_files):
logger.info("Downloading training file...")
deepchem.utils.data_utils.download_url(url=TRAIN_URL, dest_dir=data_dir)
logger.info("Training file download complete.")
logger.info("Downloading validation file...")
deepchem.utils.data_utils.download_url(url=VALID_URL, dest_dir=data_dir)
logger.info("Validation file download complete.")
logger.info("Downloading test file...")
deepchem.utils.data_utils.download_url(url=TEST_URL, dest_dir=data_dir)
logger.info("Test file download complete")
# Featurize the KINASE dataset
logger.info("About to featurize KINASE dataset.")
featurizer = deepchem.feat.UserDefinedFeaturizer(merck_descriptors)
loader = deepchem.data.UserCSVLoader(tasks=KINASE_tasks,
id_field="Molecule",
featurizer=featurizer)
logger.info("Featurizing train datasets...")
train_dataset = loader.featurize(input_files=train_files,
shard_size=shard_size)
logger.info("Featurizing validation datasets...")
valid_dataset = loader.featurize(input_files=valid_files,
shard_size=shard_size)
logger.info("Featurizing test datasets....")
test_dataset = loader.featurize(input_files=test_files,
shard_size=shard_size)
logger.info("Remove missing entries from dataset")
remove_missing_entries(train_dataset)
remove_missing_entries(valid_dataset)
remove_missing_entries(test_dataset)
# Shuffle the training data
logger.info("Shuffling the training dataset")
train_dataset.sparse_shuffle()
# Apply transformations
logger.info("Transformating datasets with transformers")
transformers = get_transformers(train_dataset)
for transformer in transformers:
logger.info("Performing transformations with {}".format(
transformer.__class__.__name__))
logger.info("Transforming the training dataset...")
train_dataset = transformer.transform(train_dataset)
logger.info("Transforming the validation dataset...")
valid_dataset = transformer.transform(valid_dataset)
logger.info("Transforming the test dataset...")
test_dataset = transformer.transform(test_dataset)
logger.info("Transformations complete.")
logger.info("Moving datasets to corresponding directories")
train_dataset.move(train_dir)
logger.info("Train dataset moved.")
valid_dataset.move(valid_dir)
logger.info("Validation dataset moved.")
test_dataset.move(test_dir)
logger.info("Test dataset moved.")
time2 = time.time()
# TIMING
logger.info("TIMING: KINASE fitting took %0.3f s" % (time2 - time1))
return train_dataset, valid_dataset, test_dataset
def load_kinase(shard_size=2000, featurizer=None, split=None, reload=True):
"""Loads Kinase datasets, does not do train/test split
The Kinase dataset is an in-house dataset from Merck that was first introduced in the following paper:
<NAME>, et al. "Is multitask deep learning practical for pharma?." Journal of chemical information and modeling 57.8 (2017): 2068-2076.
It contains 2500 Merck in-house compounds that were measured
for IC50 of inhibition on 99 protein kinases. Unlike most of
the other datasets featured in MoleculeNet, the Kinase
collection does not have structures for the compounds tested
since they were proprietary Merck compounds. However, the
collection does feature pre-computed descriptors for these
compounds.
Note that the original train/valid/test split from the source
data was preserved here, so this function doesn't allow for
alternate modes of splitting. Similarly, since the source data
came pre-featurized, it is not possible to apply alternative
featurizations.
Parameters
----------
shard_size: int, optional
Size of the DiskDataset shards to write on disk
featurizer: optional
Ignored since featurization pre-computed
split: optional
Ignored since split pre-computed
reload: bool, optional
Whether to automatically re-load from disk
"""
KINASE_tasks = [
'T_00013', 'T_00014', 'T_00015', 'T_00016', 'T_00017', 'T_00018',
'T_00019', 'T_00020', 'T_00021', 'T_00022', 'T_00023', 'T_00024',
'T_00025', 'T_00026', 'T_00027', 'T_00028', 'T_00029', 'T_00030',
'T_00031', 'T_00032', 'T_00033', 'T_00034', 'T_00035', 'T_00036',
'T_00037', 'T_00038', 'T_00039', 'T_00040', 'T_00041', 'T_00042',
'T_00043', 'T_00044', 'T_00045', 'T_00046', 'T_00047', 'T_00048',
'T_00049', 'T_00050', 'T_00051', 'T_00052', 'T_00053', 'T_00054',
'T_00055', 'T_00056', 'T_00057', 'T_00058', 'T_00059', 'T_00060',
'T_00061', 'T_00062', 'T_00063', 'T_00064', 'T_00065', 'T_00066',
'T_00067', 'T_00068', 'T_00069', 'T_00070', 'T_00071', 'T_00072',
'T_00073', 'T_00074', 'T_00075', 'T_00076', 'T_00077', 'T_00078',
'T_00079', 'T_00080', 'T_00081', 'T_00082', 'T_00083', 'T_00084',
'T_00085', 'T_00086', 'T_00087', 'T_00088', 'T_00089', 'T_00090',
'T_00091', 'T_00092', 'T_00093', 'T_00094', 'T_00095', 'T_00096',
'T_00097', 'T_00098', 'T_00099', 'T_00100', 'T_00101', 'T_00102',
'T_00103', 'T_00104', 'T_00105', 'T_00106', 'T_00107', 'T_00108',
'T_00109', 'T_00110', 'T_00111'
]
data_dir = deepchem.utils.data_utils.get_data_dir()
data_dir = os.path.join(data_dir, "kinase")
if not os.path.exists(data_dir):
os.mkdir(data_dir)
train_dir = os.path.join(data_dir, "train_dir")
valid_dir = os.path.join(data_dir, "valid_dir")
test_dir = os.path.join(data_dir, "test_dir")
if (os.path.exists(train_dir) and os.path.exists(valid_dir) and
os.path.exists(test_dir)):
logger.info("Reloading existing datasets")
train_dataset = deepchem.data.DiskDataset(train_dir)
valid_dataset = deepchem.data.DiskDataset(valid_dir)
test_dataset = deepchem.data.DiskDataset(test_dir)
else:
logger.info("Featurizing datasets")
train_dataset, valid_dataset, test_dataset = gen_kinase(
KINASE_tasks=KINASE_tasks,
train_dir=train_dir,
valid_dir=valid_dir,
test_dir=test_dir,
data_dir=data_dir,
shard_size=shard_size)
transformers = get_transformers(train_dataset)
return KINASE_tasks, (train_dataset, valid_dataset,
test_dataset), transformers
<file_sep>"""
Tests that deepchem models make deterministic predictions.
"""
__author__ = "<NAME>"
__copyright__ = "Copyright 2016, Stanford University"
__license__ = "MIT"
import os
import unittest
try:
import tensorflow as tf # noqa: F401
from tensorflow.python.framework import test_util # noqa: F401
from sklearn.ensemble import RandomForestClassifier # noqa: F401
from sklearn.ensemble import RandomForestRegressor # noqa: F401
has_tensorflow = True
except:
has_tensorflow = False
class TestPredict(unittest.TestCase):
"""
Test that models make deterministic predictions
These tests guard against failures like having dropout turned on at
test time.
"""
def setUp(self):
super(TestPredict, self).setUp()
self.current_dir = os.path.dirname(os.path.abspath(__file__))
'''
def test_tf_progressive_regression_predict(self):
"""Test tf progressive multitask makes deterministic predictions."""
np.random.seed(123)
n_tasks = 9
n_samples = 10
n_features = 3
n_classes = 2
# Generate dummy dataset
ids = np.arange(n_samples)
X = np.random.rand(n_samples, n_features)
y = np.zeros((n_samples, n_tasks))
w = np.ones((n_samples, n_tasks))
dataset = dc.data.NumpyDataset(X, y, w, ids)
regression_metric = dc.metrics.Metric(
dc.metrics.mean_squared_error, task_averager=np.mean)
model = dc.models.ProgressiveMultitaskRegressor(
n_tasks,
n_features,
layer_sizes=[50],
bypass_layer_sizes=[10],
dropouts=[.25],
learning_rate=0.003,
weight_init_stddevs=[.1],
alpha_init_stddevs=[.02],
batch_size=n_samples)
# Fit trained model
model.fit(dataset, nb_epoch=25)
model.save()
# Check same predictions are made.
y_pred_first = model.predict(dataset)
y_pred_second = model.predict(dataset)
np.testing.assert_allclose(y_pred_first, y_pred_second)
'''
<file_sep>Model Classes
=============
DeepChem maintains an extensive collection of models for scientific
applications. DeepChem's focus is on facilitating scientific applications, so
we support a broad range of different machine learning frameworks (currently
scikit-learn, xgboost, TensorFlow, and PyTorch) since different frameworks are
better suited for some scientific applications than others.
.. include:: model_cheatsheet.rst
Model
-----
.. autoclass:: deepchem.models.Model
:members:
Scikit-Learn Models
===================
Scikit-learn's models can be wrapped so that they can interact conveniently
with DeepChem. Oftentimes scikit-learn models are more robust and easier to
train and are a nice first model to train.
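For instance, a scikit-learn random forest regressor can be wrapped roughly as
follows (a minimal sketch; the random toy data and the hyperparameters are
placeholder choices)::

  import numpy as np
  import deepchem as dc
  from sklearn.ensemble import RandomForestRegressor

  # Toy dataset: 100 samples with 10 features each
  X = np.random.rand(100, 10)
  y = np.random.rand(100)
  dataset = dc.data.NumpyDataset(X, y)

  # Wrap the scikit-learn estimator so it exposes the DeepChem Model API
  sklearn_model = RandomForestRegressor(n_estimators=100)
  model = dc.models.SklearnModel(sklearn_model)
  model.fit(dataset)
  predictions = model.predict(dataset)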
SklearnModel
------------
.. autoclass:: deepchem.models.SklearnModel
:members:
Gradient Boosting Models
========================
Gradient Boosting Models (LightGBM and XGBoost) can be wrapped so they can interact with DeepChem.
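The wrapping pattern is analogous to :code:`SklearnModel` (a rough sketch,
assuming XGBoost is installed; the hyperparameters are placeholder choices)::

  import xgboost
  import deepchem as dc

  # Wrap an XGBoost regressor so it can be trained on DeepChem datasets
  xgb_model = xgboost.XGBRegressor(n_estimators=200, learning_rate=0.1)
  model = dc.models.GBDTModel(xgb_model)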
GBDTModel
------------
.. autoclass:: deepchem.models.GBDTModel
:members:
Deep Learning Infrastructure
============================
DeepChem maintains a lightweight layer of common deep learning model
infrastructure that can be used for models built with different underlying
frameworks. The losses and optimizers can be used for both TensorFlow and
PyTorch models.
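As a brief illustration, a loss and an optimizer can be instantiated directly
and handed to a model through its :code:`loss` and :code:`optimizer` arguments
(a minimal sketch; the learning rate value is an arbitrary choice)::

  from deepchem.models.losses import L2Loss
  from deepchem.models.optimizers import Adam

  # A squared-error loss usable by both TensorFlow and PyTorch models
  loss = L2Loss()
  # An Adam optimizer with a fixed learning rate
  optimizer = Adam(learning_rate=0.001)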
Losses
------
.. autoclass:: deepchem.models.losses.Loss
:members:
.. autoclass:: deepchem.models.losses.L1Loss
:members:
.. autoclass:: deepchem.models.losses.HuberLoss
:members:
.. autoclass:: deepchem.models.losses.L2Loss
:members:
.. autoclass:: deepchem.models.losses.HingeLoss
:members:
.. autoclass:: deepchem.models.losses.SquaredHingeLoss
:members:
.. autoclass:: deepchem.models.losses.PoissonLoss
:members:
.. autoclass:: deepchem.models.losses.BinaryCrossEntropy
:members:
.. autoclass:: deepchem.models.losses.CategoricalCrossEntropy
:members:
.. autoclass:: deepchem.models.losses.SigmoidCrossEntropy
:members:
.. autoclass:: deepchem.models.losses.SoftmaxCrossEntropy
:members:
.. autoclass:: deepchem.models.losses.SparseSoftmaxCrossEntropy
:members:
.. autoclass:: deepchem.models.losses.VAE_ELBO
:members:
.. autoclass:: deepchem.models.losses.VAE_KLDivergence
:members:
.. autoclass:: deepchem.models.losses.ShannonEntropy
:members:
.. autoclass:: deepchem.models.losses.GlobalMutualInformationLoss
:members:
.. autoclass:: deepchem.models.losses.LocalMutualInformationLoss
:members:
.. autoclass:: deepchem.models.losses.GroverPretrainLoss
:members:
.. autoclass:: deepchem.models.losses.EdgePredictionLoss
:members:
.. autoclass:: deepchem.models.losses.GraphNodeMaskingLoss
:members:
.. autoclass:: deepchem.models.losses.GraphEdgeMaskingLoss
:members:
.. autoclass:: deepchem.models.losses.DeepGraphInfomaxLoss
:members:
.. autoclass:: deepchem.models.losses.GraphContextPredLoss
:members:
.. autoclass:: deepchem.models.losses.DensityProfileLoss
:members:
.. autoclass:: deepchem.models.losses.NTXentMultiplePositives
:members:
Optimizers
----------
.. autoclass:: deepchem.models.optimizers.Optimizer
:members:
.. autoclass:: deepchem.models.optimizers.LearningRateSchedule
:members:
.. autoclass:: deepchem.models.optimizers.AdaGrad
:members:
.. autoclass:: deepchem.models.optimizers.Adam
:members:
.. autoclass:: deepchem.models.optimizers.AdamW
:members:
.. autoclass:: deepchem.models.optimizers.SparseAdam
:members:
.. autoclass:: deepchem.models.optimizers.RMSProp
:members:
.. autoclass:: deepchem.models.optimizers.GradientDescent
:members:
.. autoclass:: deepchem.models.optimizers.ExponentialDecay
:members:
.. autoclass:: deepchem.models.optimizers.PolynomialDecay
:members:
.. autoclass:: deepchem.models.optimizers.LinearCosineDecay
:members:
Keras Models
============
DeepChem extensively uses `Keras`_ to build deep learning models.
KerasModel
----------
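You can wrap an arbitrary :code:`tf.keras.Model` in a :code:`KerasModel` object
(a minimal sketch; the architecture and loss below are placeholder choices)::

  import tensorflow as tf
  import deepchem as dc
  from deepchem.models.losses import L2Loss

  # A plain Keras network producing a single regression output
  keras_model = tf.keras.Sequential([
      tf.keras.layers.Dense(50, activation='relu'),
      tf.keras.layers.Dense(1),
  ])
  # Wrap it so it can be trained on DeepChem datasets
  model = dc.models.KerasModel(keras_model, loss=L2Loss())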
Training loss and validation metrics can be automatically logged to `Weights & Biases`_ with the following commands::
# Install wandb in shell
pip install wandb
# Login in shell (required only once)
wandb login
# Login in notebook (required only once)
import wandb
wandb.login()
# Initialize a WandbLogger
logger = WandbLogger(…)
# Set `wandb_logger` when creating `KerasModel`
import deepchem as dc
# Log training loss to wandb
model = dc.models.KerasModel(…, wandb_logger=logger)
model.fit(…)
# Log validation metrics to wandb using ValidationCallback
import deepchem as dc
vc = dc.models.ValidationCallback(…)
model = KerasModel(…, wandb_logger=logger)
model.fit(…, callbacks=[vc])
logger.finish()
.. _`Keras`: https://keras.io/
.. _`Weights & Biases`: http://docs.wandb.com/
.. autoclass:: deepchem.models.KerasModel
:members:
TensorflowMultitaskIRVClassifier
--------------------------------
.. autoclass:: deepchem.models.TensorflowMultitaskIRVClassifier
:members:
RobustMultitaskClassifier
-------------------------
.. autoclass:: deepchem.models.RobustMultitaskClassifier
:members:
RobustMultitaskRegressor
------------------------
.. autoclass:: deepchem.models.RobustMultitaskRegressor
:members:
ProgressiveMultitaskClassifier
------------------------------
.. autoclass:: deepchem.models.ProgressiveMultitaskClassifier
:members:
ProgressiveMultitaskRegressor
-----------------------------
.. autoclass:: deepchem.models.ProgressiveMultitaskRegressor
:members:
WeaveModel
----------
.. autoclass:: deepchem.models.WeaveModel
:members:
DTNNModel
---------
.. autoclass:: deepchem.models.DTNNModel
:members:
DAGModel
--------
.. autoclass:: deepchem.models.DAGModel
:members:
GraphConvModel
--------------
.. autoclass:: deepchem.models.GraphConvModel
:members:
MPNNModel
---------
.. autoclass:: deepchem.models.MPNNModel
:members:
BasicMolGANModel
----------------
.. autoclass:: deepchem.models.BasicMolGANModel
:members:
ScScoreModel
------------
.. autoclass:: deepchem.models.ScScoreModel
:members:
SeqToSeq
--------
.. autoclass:: deepchem.models.SeqToSeq
:members:
GAN
---
.. autoclass:: deepchem.models.GAN
:members:
WGAN
^^^^
.. autoclass:: deepchem.models.WGAN
:members:
TextCNNModel
------------
.. autoclass:: deepchem.models.TextCNNModel
:members:
AtomicConvModel
---------------
.. autoclass:: deepchem.models.AtomicConvModel
:members:
Smiles2Vec
----------
.. autoclass:: deepchem.models.Smiles2Vec
:members:
ChemCeption
-----------
.. autoclass:: deepchem.models.ChemCeption
:members:
NormalizingFlowModel
--------------------
The purpose of a normalizing flow is to map a simple distribution (that is
easy to sample from and evaluate probability densities for) to a more
complex distribution that is learned from data. Normalizing flows combine the
advantages of autoregressive models (which provide likelihood estimation
but do not learn features) and variational autoencoders (which learn feature
representations but do not provide marginal likelihoods). They are effective
for any application requiring a probabilistic model with these capabilities, e.g. generative modeling, unsupervised learning, or probabilistic inference.
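Concretely, if :math:`f` maps base samples :math:`z \sim p_Z` to data samples
:math:`x = f(z)`, such models are typically trained by maximizing the exact
log-likelihood given by the change of variables formula:

.. math::

  \log p_X(x) = \log p_Z\left(f^{-1}(x)\right) + \log \left| \det \frac{\partial f^{-1}(x)}{\partial x} \right|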
.. autoclass:: deepchem.models.normalizing_flows.NormalizingFlowModel
:members:
PyTorch Models
==============
DeepChem supports the use of `PyTorch`_ to build deep learning models.
.. _`PyTorch`: https://pytorch.org/
TorchModel
----------
You can wrap an arbitrary :code:`torch.nn.Module` in a :code:`TorchModel` object.
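A minimal sketch of this pattern (the network architecture and loss below are
arbitrary placeholder choices)::

  import torch
  import deepchem as dc
  from deepchem.models.losses import L2Loss

  # A plain PyTorch network: 100 input features -> 1 regression output
  pytorch_model = torch.nn.Sequential(
      torch.nn.Linear(100, 50),
      torch.nn.ReLU(),
      torch.nn.Linear(50, 1),
  )
  # Wrap it so it can be trained on DeepChem datasets
  model = dc.models.TorchModel(pytorch_model, loss=L2Loss())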
.. autoclass:: deepchem.models.TorchModel
:members:
ModularTorchModel
-----------------
You can modify networks for different tasks by using a :code:`ModularTorchModel`.
.. autoclass:: deepchem.models.torch_models.modular.ModularTorchModel
:members:
CNN
---
.. autoclass:: deepchem.models.CNN
:members:
MultitaskRegressor
------------------
.. autoclass:: deepchem.models.MultitaskRegressor
:members:
MultitaskFitTransformRegressor
------------------------------
.. autoclass:: deepchem.models.MultitaskFitTransformRegressor
:members:
MultitaskClassifier
-------------------
.. autoclass:: deepchem.models.MultitaskClassifier
:members:
CGCNNModel
----------
.. autoclass:: deepchem.models.CGCNNModel
:members:
GATModel
--------
.. autoclass:: deepchem.models.GATModel
:members:
GCNModel
--------
.. autoclass:: deepchem.models.GCNModel
:members:
AttentiveFPModel
----------------
.. autoclass:: deepchem.models.AttentiveFPModel
:members:
PagtnModel
----------
.. autoclass:: deepchem.models.PagtnModel
:members:
MPNNModel
---------
Note that this is an alternative implementation for MPNN and currently you can only import it from
``deepchem.models.torch_models``.
.. autoclass:: deepchem.models.torch_models.MPNNModel
:members:
InfoGraphModel
--------------
.. autoclass:: deepchem.models.torch_models.InfoGraphModel
:members:
InfoGraphStarModel
------------------
.. autoclass:: deepchem.models.torch_models.InfoGraphStarModel
:members:
GNNModular
----------
.. autoclass:: deepchem.models.torch_models.gnn.GNNModular
:members:
InfoMax3DModular
----------------
.. autoclass:: deepchem.models.torch_models.gnn3d.InfoMax3DModular
:members:
LCNNModel
---------
.. autoclass:: deepchem.models.LCNNModel
:members:
MEGNetModel
-----------
.. autoclass:: deepchem.models.MEGNetModel
:members:
MATModel
--------
.. autoclass:: deepchem.models.torch_models.MATModel
:members:
NormalizingFlowModel
--------------------
.. autoclass:: deepchem.models.torch_models.NormalizingFlow
DMPNNModel
----------
.. autoclass:: deepchem.models.torch_models.DMPNNModel
:members:
GroverModel
-----------
.. autoclass:: deepchem.models.torch_models.GroverModel
:members:
DTNNModel
---------
.. autoclass:: deepchem.models.torch_models.DTNNModel
:members:
Density Functional Theory Model - XCModel
-----------------------------------------
.. autoclass:: deepchem.models.dft.dftxc.XCModel
:members:
PyTorch Lightning Models
========================
DeepChem supports the use of `PyTorch-Lightning`_ to build PyTorch models.
.. _`PyTorch-Lightning`: https://www.pytorchlightning.ai/
DCLightningModule
-----------------
You can wrap an arbitrary :code:`TorchModel` in a :code:`DCLightningModule` object.
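A rough sketch of the wrapping step, assuming the module is constructed
directly from an existing :code:`TorchModel` (see the class documentation
below for the full training setup with Lightning data modules)::

  import torch
  import deepchem as dc
  from deepchem.models import DCLightningModule
  from deepchem.models.losses import L2Loss

  pytorch_model = torch.nn.Linear(100, 1)
  torch_model = dc.models.TorchModel(pytorch_model, loss=L2Loss())
  # The Lightning module can then be driven by a pytorch_lightning.Trainer
  lightning_module = DCLightningModule(torch_model)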
.. autoclass:: deepchem.models.DCLightningModule
:members:
Jax Models
==========
DeepChem supports the use of `Jax`_ to build deep learning models.
.. _`Jax`: https://github.com/google/jax
JaxModel
--------
.. autoclass:: deepchem.models.JaxModel
:members:
PINNModel
---------
.. autoclass:: deepchem.models.PINNModel
:members:
Hugging Face Models
===================
HuggingFace models from the `transformers <https://huggingface.co/models>`_ library can be wrapped using the wrapper :code:`HuggingFaceModel`
.. autoclass:: deepchem.models.torch_models.hf_models.HuggingFaceModel
:members:
Chemberta
---------
.. autoclass:: deepchem.models.torch_models.chemberta.Chemberta
:members:
<file_sep>import itertools
import tempfile
import numpy as np
import deepchem as dc
def test_binary_1d():
"""Test balancing transformer on single-task dataset without explicit task dimension."""
n_samples = 20
n_features = 3
n_classes = 2
np.random.seed(123)
ids = np.arange(n_samples)
X = np.random.rand(n_samples, n_features)
y = np.random.randint(n_classes, size=(n_samples,))
w = np.ones((n_samples,))
dataset = dc.data.NumpyDataset(X, y, w)
balancing_transformer = dc.trans.BalancingTransformer(dataset=dataset)
dataset = balancing_transformer.transform(dataset)
X_t, y_t, w_t, ids_t = (dataset.X, dataset.y, dataset.w, dataset.ids)
# Check ids are unchanged.
for id_elt, id_t_elt in zip(ids, ids_t):
assert id_elt == id_t_elt
# Check X is unchanged since this is a w transformer
np.testing.assert_allclose(X, X_t)
# Check y is unchanged since this is a w transformer
np.testing.assert_allclose(y, y_t)
y_task = y_t
w_task = w_t
w_orig_task = w
# Assert that entries with zero weight retain zero weight
np.testing.assert_allclose(w_task[w_orig_task == 0],
np.zeros_like(w_task[w_orig_task == 0]))
# Check that sum of 0s equals sum of 1s in transformed for each task
assert np.isclose(np.sum(w_task[y_task == 0]), np.sum(w_task[y_task == 1]))
def test_binary_singletask():
"""Test balancing transformer on single-task dataset."""
n_samples = 20
n_features = 3
n_tasks = 1
n_classes = 2
np.random.seed(123)
ids = np.arange(n_samples)
X = np.random.rand(n_samples, n_features)
y = np.random.randint(n_classes, size=(n_samples, n_tasks))
w = np.ones((n_samples, n_tasks))
dataset = dc.data.NumpyDataset(X, y, w)
balancing_transformer = dc.trans.BalancingTransformer(dataset=dataset)
dataset = balancing_transformer.transform(dataset)
X_t, y_t, w_t, ids_t = (dataset.X, dataset.y, dataset.w, dataset.ids)
# Check ids are unchanged.
for id_elt, id_t_elt in zip(ids, ids_t):
assert id_elt == id_t_elt
# Check X is unchanged since this is a w transformer
np.testing.assert_allclose(X, X_t)
# Check y is unchanged since this is a w transformer
np.testing.assert_allclose(y, y_t)
for ind, task in enumerate(dataset.get_task_names()):
y_task = y_t[:, ind]
w_task = w_t[:, ind]
w_orig_task = w[:, ind]
# Assert that entries with zero weight retain zero weight
np.testing.assert_allclose(w_task[w_orig_task == 0],
np.zeros_like(w_task[w_orig_task == 0]))
# Check that sum of 0s equals sum of 1s in transformed for each task
assert np.isclose(np.sum(w_task[y_task == 0]),
np.sum(w_task[y_task == 1]))
def test_binary_multitask():
"""Test balancing transformer on multitask dataset."""
n_samples = 10
n_features = 3
n_tasks = 5
n_classes = 2
ids = np.arange(n_samples)
X = np.random.rand(n_samples, n_features)
y = np.random.randint(n_classes, size=(n_samples, n_tasks))
w = np.ones((n_samples, n_tasks))
multitask_dataset = dc.data.NumpyDataset(X, y, w)
balancing_transformer = dc.trans.BalancingTransformer(
dataset=multitask_dataset)
multitask_dataset = balancing_transformer.transform(multitask_dataset)
X_t, y_t, w_t, ids_t = (multitask_dataset.X, multitask_dataset.y,
multitask_dataset.w, multitask_dataset.ids)
# Check ids are unchanged.
for id_elt, id_t_elt in zip(ids, ids_t):
assert id_elt == id_t_elt
# Check X is unchanged since this is a w transformer
np.testing.assert_allclose(X, X_t)
# Check y is unchanged since this is a w transformer
np.testing.assert_allclose(y, y_t)
for ind, task in enumerate(multitask_dataset.get_task_names()):
y_task = y_t[:, ind]
w_task = w_t[:, ind]
w_orig_task = w[:, ind]
# Assert that entries with zero weight retain zero weight
np.testing.assert_allclose(w_task[w_orig_task == 0],
np.zeros_like(w_task[w_orig_task == 0]))
# Check that sum of 0s equals sum of 1s in transformed for each task
assert np.isclose(np.sum(w_task[y_task == 0]),
np.sum(w_task[y_task == 1]))
def test_multiclass_singletask():
"""Test balancing transformer on single-task dataset."""
n_samples = 50
n_features = 3
n_tasks = 1
n_classes = 5
ids = np.arange(n_samples)
X = np.random.rand(n_samples, n_features)
y = np.random.randint(n_classes, size=(n_samples, n_tasks))
w = np.ones((n_samples, n_tasks))
dataset = dc.data.NumpyDataset(X, y, w)
balancing_transformer = dc.trans.BalancingTransformer(dataset=dataset)
dataset = balancing_transformer.transform(dataset)
X_t, y_t, w_t, ids_t = (dataset.X, dataset.y, dataset.w, dataset.ids)
# Check ids are unchanged.
for id_elt, id_t_elt in zip(ids, ids_t):
assert id_elt == id_t_elt
# Check X is unchanged since this is a w transformer
np.testing.assert_allclose(X, X_t)
# Check y is unchanged since this is a w transformer
np.testing.assert_allclose(y, y_t)
for ind, task in enumerate(dataset.get_task_names()):
y_task = y_t[:, ind]
w_task = w_t[:, ind]
# Check that sum of 0s equals sum of 1s in transformed for each task
for i, j in itertools.product(range(n_classes), range(n_classes)):
if i == j:
continue
assert np.isclose(np.sum(w_task[y_task == i]),
np.sum(w_task[y_task == j]))
def test_transform_to_directory():
"""Test that output can be written to a directory."""
n_samples = 20
n_features = 3
n_classes = 2
np.random.seed(123)
ids = np.arange(n_samples)
X = np.random.rand(n_samples, n_features)
y = np.random.randint(n_classes, size=(n_samples,))
w = np.ones((n_samples,))
dataset = dc.data.NumpyDataset(X, y, w)
balancing_transformer = dc.trans.BalancingTransformer(dataset=dataset)
with tempfile.TemporaryDirectory() as tmpdirname:
dataset = balancing_transformer.transform(dataset, out_dir=tmpdirname)
balanced_dataset = dc.data.DiskDataset(tmpdirname)
X_t, y_t, w_t, ids_t = (balanced_dataset.X, balanced_dataset.y,
balanced_dataset.w, balanced_dataset.ids)
# Check ids are unchanged.
for id_elt, id_t_elt in zip(ids, ids_t):
assert id_elt == id_t_elt
# Check X is unchanged since this is a w transformer
np.testing.assert_allclose(X, X_t)
# Check y is unchanged since this is a w transformer
np.testing.assert_allclose(y, y_t)
y_task = y_t
w_task = w_t
w_orig_task = w
# Assert that entries with zero weight retain zero weight
np.testing.assert_allclose(w_task[w_orig_task == 0],
np.zeros_like(w_task[w_orig_task == 0]))
# Check that sum of 0s equals sum of 1s in transformed for each task
assert np.isclose(np.sum(w_task[y_task == 0]), np.sum(w_task[y_task == 1]))
def test_array_shapes():
"""Test BalancingTransformer when y and w have different shapes."""
n_samples = 20
X = np.random.rand(n_samples, 5)
y = np.random.randint(2, size=n_samples)
w = np.ones((n_samples, 1))
dataset = dc.data.NumpyDataset(X, y, w)
transformer = dc.trans.BalancingTransformer(dataset)
Xt, yt, wt, ids = transformer.transform_array(X, y, w, dataset.ids)
sum0 = np.sum(wt[np.where(y == 0)])
sum1 = np.sum(wt[np.where(y == 1)])
assert np.isclose(sum0, sum1)
<file_sep>"""
Perovskite crystal structures and formation energies.
"""
import os
import deepchem as dc
from deepchem.molnet.load_function.molnet_loader import TransformerGenerator, _MolnetLoader
from deepchem.data import Dataset
from typing import List, Optional, Tuple, Union
PEROVSKITE_URL = 'https://deepchemdata.s3-us-west-1.amazonaws.com/datasets/perovskite.tar.gz'
PEROVSKITE_TASKS = ['formation_energy']
class _PerovskiteLoader(_MolnetLoader):
def create_dataset(self) -> Dataset:
dataset_file = os.path.join(self.data_dir, 'perovskite.json')
targz_file = os.path.join(self.data_dir, 'perovskite.tar.gz')
if not os.path.exists(dataset_file):
if not os.path.exists(targz_file):
dc.utils.data_utils.download_url(url=PEROVSKITE_URL,
dest_dir=self.data_dir)
dc.utils.data_utils.untargz_file(targz_file, self.data_dir)
loader = dc.data.JsonLoader(tasks=self.tasks,
feature_field="structure",
label_field="formation_energy",
featurizer=self.featurizer)
return loader.create_dataset(dataset_file)
def load_perovskite(
featurizer: Union[dc.feat.Featurizer, str] = dc.feat.DummyFeaturizer(),
splitter: Union[dc.splits.Splitter, str, None] = 'random',
transformers: List[Union[TransformerGenerator, str]] = ['normalization'],
reload: bool = True,
data_dir: Optional[str] = None,
save_dir: Optional[str] = None,
**kwargs
) -> Tuple[List[str], Tuple[Dataset, ...], List[dc.trans.Transformer]]:
"""Load perovskite dataset.
Contains 18928 perovskite structures and their formation energies.
In benchmark studies, random forest models and crystal graph
neural networks achieved mean absolute errors of 0.23 and 0.05 eV/atom,
respectively, during five-fold nested cross validation on this
dataset.
For more details on the dataset see [1]_. For more details
on previous benchmarks for this dataset, see [2]_.
Parameters
----------
featurizer: Featurizer or str
the featurizer to use for processing the data. Alternatively you can pass
one of the names from dc.molnet.featurizers as a shortcut.
splitter: Splitter or str
the splitter to use for splitting the data into training, validation, and
test sets. Alternatively you can pass one of the names from
dc.molnet.splitters as a shortcut. If this is None, all the data
will be included in a single dataset.
transformers: list of TransformerGenerators or strings
the Transformers to apply to the data. Each one is specified by a
TransformerGenerator or, as a shortcut, one of the names from
dc.molnet.transformers.
reload: bool
if True, the first call for a particular featurizer and splitter will cache
the datasets to disk, and subsequent calls will reload the cached datasets.
data_dir: str
a directory to save the raw data in
save_dir: str
a directory to save the dataset in
Returns
-------
tasks, datasets, transformers : tuple
tasks : list
Column names corresponding to machine learning target variables.
datasets : tuple
train, validation, test splits of data as
``deepchem.data.datasets.Dataset`` instances.
transformers : list
``deepchem.trans.transformers.Transformer`` instances applied
to dataset.
References
----------
.. [1] <NAME>. et al. "New cubic perovskites for one- and two-photon water splitting
using the computational materials repository." Energy Environ. Sci., (2012), 5,
9034-9043 DOI: 10.1039/C2EE22341D.
.. [2] <NAME>. et al. "Benchmarking Materials Property Prediction Methods:
The Matbench Test Set and Automatminer Reference Algorithm." https://arxiv.org/abs/2005.00707 (2020)
Examples
--------
>>> import deepchem as dc
>>> tasks, datasets, transformers = dc.molnet.load_perovskite()
>>> train_dataset, val_dataset, test_dataset = datasets
>>> model = dc.models.CGCNNModel(mode='regression', batch_size=32, learning_rate=0.001)
"""
loader = _PerovskiteLoader(featurizer, splitter, transformers,
PEROVSKITE_TASKS, data_dir, save_dir, **kwargs)
return loader.load_dataset('perovskite', reload)
<file_sep>"""
Script that trains Tensorflow Multitask models on ChEMBL dataset.
"""
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
import os
import tempfile
import shutil
import numpy as np
import deepchem as dc
from deepchem.molnet import load_chembl
# Set numpy seed
np.random.seed(123)
###Load data###
shard_size = 2000
print("About to load ChEMBL data.")
chembl_tasks, datasets, transformers = load_chembl(
shard_size=shard_size, featurizer="ECFP", set="5thresh", split="random")
train_dataset, valid_dataset, test_dataset = datasets
print("ChEMBL_tasks")
print(len(chembl_tasks))
print("Number of compounds in train set")
print(len(train_dataset))
print("Number of compounds in validation set")
print(len(valid_dataset))
print("Number of compounds in test set")
print(len(test_dataset))
###Create model###
n_layers = 3
nb_epoch = 10
model = dc.models.MultitaskRegressor(
len(chembl_tasks),
train_dataset.get_data_shape()[0],
layer_sizes=[1000] * n_layers,
dropouts=[.25] * n_layers,
weight_init_stddevs=[.02] * n_layers,
bias_init_consts=[1.] * n_layers,
learning_rate=.0003,
weight_decay_penalty=.0001,
batch_size=100,
seed=123,
verbosity="high")
# Use Pearson R^2 regression metric
metric = dc.metrics.Metric(dc.metrics.pearson_r2_score, task_averager=np.mean)
print("Training model")
model.fit(train_dataset, nb_epoch=nb_epoch)
train_scores = model.evaluate(train_dataset, [metric], transformers)
valid_scores = model.evaluate(valid_dataset, [metric], transformers)
test_scores = model.evaluate(test_dataset, [metric], transformers)
print("Train scores")
print(train_scores)
print("Validation scores")
print(valid_scores)
print("Test scores")
print(test_scores)
<file_sep>import deepchem as dc
import rdkit.Chem as Chem
model = dc.models.ChemCeption(
img_spec="engd", n_tasks=1, model_dir="./model", mode="regression")
model.restore()
smiles = "CCCCC"
featurizer = dc.feat.SmilesToImage(img_spec="engd", img_size=80, res=0.5)
dataset = dc.data.NumpyDataset(
featurizer.featurize([Chem.MolFromSmiles(smiles)]))
prediction = model.predict(dataset)
print("smiles: %s" % smiles)
print("prediction: %s" % str(prediction))
<file_sep>"""
Script that trains Tensorflow multitask models on PCBA dataset.
"""
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
import os
import numpy as np
import shutil
from deepchem.molnet import load_pcba
from deepchem.utils.save import load_from_disk
from deepchem.data import Dataset
from deepchem import metrics
from deepchem.metrics import Metric
from deepchem.metrics import to_one_hot
from deepchem.utils.evaluate import Evaluator
from deepchem.models import MultitaskClassifier
from deepchem.models.optimizers import ExponentialDecay
np.random.seed(123)
pcba_tasks, pcba_datasets, transformers = load_pcba()
(train_dataset, valid_dataset, test_dataset) = pcba_datasets
metric = Metric(metrics.roc_auc_score, np.mean, mode="classification")
n_features = train_dataset.get_data_shape()[0]
rate = ExponentialDecay(0.001, 0.8, 1000)
model = MultitaskClassifier(
len(pcba_tasks),
n_features,
dropouts=[.25],
learning_rate=rate,
weight_init_stddevs=[.1],
batch_size=64)
# Fit trained model
model.fit(train_dataset)
train_evaluator = Evaluator(model, train_dataset, transformers)
train_scores = train_evaluator.compute_model_performance([metric])
print("Train scores")
print(train_scores)
valid_evaluator = Evaluator(model, valid_dataset, transformers)
valid_scores = valid_evaluator.compute_model_performance([metric])
print("Validation scores")
print(valid_scores)
<file_sep># DeepChem Step-by-Step Tutorial
In this tutorial series, you'll learn how to use DeepChem to solve interesting
and challenging problems in the life sciences. The tutorial acts as an introduction
to DeepChem as well as an application of DeepChem to a wide array of problems
across domains like molecular machine learning,
quantum chemistry, bioinformatics and material science. This tutorial series is
continually updated with new DeepChem features and models as they are implemented and is
designed to be accessible to beginners.
## Why do the DeepChem Tutorial?
**1) Career Advancement:** Applying AI in the life sciences is a booming
industry at present. There are a host of newly funded startups and initiatives
at large pharmaceutical and biotech companies centered around AI. Learning and
mastering DeepChem will bring you to the forefront of this field and will
prepare you to enter a career in this field.
**2) Humanitarian Considerations:** Disease is the oldest cause of human
suffering. From the dawn of human civilization, humans have suffered from pathogens,
cancers, and neurological conditions. One of the greatest achievements of
the last few centuries has been the development of effective treatments for
many diseases. By mastering the skills in this tutorial, you will be able to
stand on the shoulders of the giants of the past to help develop new
medicine.
**3) Lowering the Cost of Medicine:** The art of developing new medicine is
currently an elite skill that can only be practiced by a small core of expert
practitioners. By enabling the growth of open source tools for drug discovery,
you can help democratize these skills and open up drug discovery to more
competition. Increased competition can help drive down the cost of medicine.
The tutorial is organized as follows:
### Introduction to DeepChem
The following tutorials cover the core features of DeepChem. Working through these
tutorials will help you get started with DeepChem for machine learning. The later
tutorials discuss using DeepChem for specific applications.
* [1 The Basic Tools of the Deep Life Sciences](The_Basic_Tools_of_the_Deep_Life_Sciences.ipynb)
* [2 Working With Datasets](Working_With_Datasets.ipynb)
* [3 An Introduction to MoleculeNet](An_Introduction_To_MoleculeNet.ipynb)
* [4 Molecular Fingerprints](Molecular_Fingerprints.ipynb)
* [5 Creating Models with Tensorflow and Pytorch](Creating_Models_with_TensorFlow_and_PyTorch.ipynb)
* [6 Introduction to Graph Convolutions](Introduction_to_Graph_Convolutions.ipynb)
* [7 Going Deeper on Molecular Featurizations](Going_Deeper_on_Molecular_Featurizations.ipynb)
* [8 Working With Splitters](Working_With_Splitters.ipynb)
* [9 Advanced Model Training](Advanced_Model_Training.ipynb)
* [10 Creating a high fidelity model from experimental data](Creating_a_high_fidelity_model_from_experimental_data.ipynb)
* [11 Putting Multitask Learning to Work](Putting_Multitask_Learning_to_Work.ipynb)
* [12 Modeling Protein Ligand Interactions](Modeling_Protein_Ligand_Interactions.ipynb)
* [13 Modeling Protein Ligand Interactions with Atomic Convolutions](Modeling_Protein_Ligand_Interactions_With_Atomic_Convolutions.ipynb)
* [14 Conditional Generative Adversarial Networks](Conditional_Generative_Adversarial_Networks.ipynb)
* [15 Training a Generative Adversarial Network on MNIST](Training_a_Generative_Adversarial_Network_on_MNIST.ipynb)
* [16 Distributed Multi-GPU Training of DeepChem Models with LitMatter](https://github.com/ncfrey/litmatter/blob/main/LitDeepChem.ipynb)
* [17 Advanced model training using Hyperopt](Advanced_model_training_using_hyperopt.ipynb)
* [18 Introduction to Gaussian Processes](Introduction_to_Gaussian_Processes.ipynb)
* [19 Pytorch-Lightning Integration for DeepChem Models](PytorchLightning_Integration.ipynb)
### Molecular Machine Learning
* [1 Molecular Fingerprints](Molecular_Fingerprints.ipynb)
* [2 Going Deeper on Molecular Featurizations](Going_Deeper_on_Molecular_Featurizations.ipynb)
* [3 Learning Unsupervised Embeddings for Molecules](Learning_Unsupervised_Embeddings_for_Molecules.ipynb)
* [4 Synthetic Feasibility Scoring](Synthetic_Feasibility_Scoring.ipynb)
* [5 Atomic Contributions for Molecules](Atomic_Contributions_for_Molecules.ipynb)
* [6 Interactive Model Evaluation with Trident Chemwidgets](Interactive_Model_Evaluation_with_Trident_Chemwidgets.ipynb)
* [7 Transfer Learning with ChemBERTa Transformers](Transfer_Learning_With_ChemBERTa_Transformers.ipynb)
* [8 Training a Normalizing Flow on QM9](Training_a_Normalizing_Flow_on_QM9.ipynb)
* [9 Large Scale Chemical Screens](Large_Scale_Chemical_Screens.ipynb)
* [10 Introduction to Molecular Attention Transformer](Introduction_to_Molecular_Attention_Transformer.ipynb)
* [11 Generating Molecules with MolGAN](Generating_molecules_with_MolGAN.ipynb)
* [12 Introduction to GROVER](Introduction_to_GROVER.ipynb)
### Modeling Proteins
* [1 Protein Deep Learning](Protein_Deep_Learning.ipynb)
### Protein Ligand Modeling
* [1 Modeling Protein Ligand Interactions](Modeling_Protein_Ligand_Interactions.ipynb)
* [2 Modeling Protein Ligand Interactions with Atomic Convolutions](Modeling_Protein_Ligand_Interactions_With_Atomic_Convolutions.ipynb)
### Quantum Chemistry
* [1 Exploring Quantum Chemistry with GDB1k](Exploring_Quantum_Chemistry_with_GDB1k.ipynb)
* [2 Finding ground state energy of molecules with DeepQMC](DeepQMC_tutorial.ipynb)
### Bioinformatics
* [1 Introduction to BioInformatics](Introduction_to_Bioinformatics.ipynb)
* [2 Multisequence Alignments](Multisequence_Alignments.ipynb)
* [3 Scanpy](Scanpy.ipynb)
* [4 Deep probabilistic analysis of omics data](Deep_probabilistic_analysis_of_single-cell_omics_data.ipynb)
### Material Science
* [1 Introduction to Material Science](Introduction_To_Material_Science.ipynb)
### Machine Learning Methods
* [1 Using Reinforcement Learning to Play Pong](Using_Reinforcement_Learning_to_Play_Pong.ipynb)
* [2 Introduction to Model Interpretability](Introduction_to_Model_Interpretability.ipynb)
* [3 Uncertainty in Deep Learning](Uncertainty_In_Deep_Learning.ipynb)
### Deep Differential Equations
* [1 Physics Informed Neural Networks (Burgers Equation)](Physics_Informed_Neural_Networks.ipynb)
* [2 Introducing_JaxModel_and_PINNModel](Introducing_JaxModel_and_PINNModel.ipynb)
* [3 About_nODE_Using_Torchdiffeq_in_Deepchem](About_nODE_Using_Torchdiffeq_in_Deepchem.ipynb)
### Equivariance
* [1 Introduction to Equivariance](Introduction_to_Equivariance.ipynb)
<file_sep>"""
A fake data generator
"""
import random
import numpy as np
from deepchem.data import NumpyDataset
from deepchem.feat import GraphData
class FakeGraphGenerator:
"""Generates random graphs which can be used for testing or other purposes.
The generated graph supports both node-level and graph-level labels.
Example
-------
>>> from deepchem.utils.fake_data_generator import FakeGraphGenerator
>>> fgg = FakeGraphGenerator(min_nodes=8, max_nodes=10, n_node_features=5, avg_degree=8, n_edge_features=3, n_classes=2, task='graph', z=5)
>>> graphs = fgg.sample(n_graphs=10)
>>> type(graphs)
<class 'deepchem.data.datasets.NumpyDataset'>
>>> type(graphs.X[0])
<class 'deepchem.feat.graph_data.GraphData'>
>>> len(graphs) == 10 # num_graphs
True
Note
----
The FakeGraphGenerator class is based on torch_geometric.dataset.FakeDataset
class.
"""
def __init__(self,
min_nodes: int = 10,
max_nodes: int = 10,
n_node_features: int = 5,
avg_degree: int = 4,
n_edge_features: int = 3,
n_classes: int = 2,
task: str = 'graph',
**kwargs):
"""
Parameters
----------
min_nodes: int, default 10
Minimum number of permissible nodes in a graph
max_nodes: int, default 10
Maximum number of permissible nodes in a graph
n_node_features: int, default 5
Number of features per node in a graph
avg_degree: int, default 4
Average degree of the graph (avg_degree should be a positive integer, at least 1)
n_edge_features: int, default 3
Number of features per edge in a graph
n_classes: int, default 2
Number of classes used when generating graph-level or node-level labels
task: str, default 'graph'
Indicates node-level labels or graph-level labels
kwargs: optional
Additional graph attributes and their shapes , e.g. `global_features = 5`
"""
assert avg_degree >= 1, "Average degree should be greater than 0"
self.min_nodes = min_nodes
self.max_nodes = max_nodes
self.avg_degree = avg_degree
self.n_node_features = n_node_features
self.n_edge_features = n_edge_features
self.n_classes = n_classes
self.task = task
self.kwargs = kwargs
def sample(self, n_graphs: int = 100) -> NumpyDataset:
"""Samples graphs
Parameters
----------
n_graphs: int, default 100
Number of graphs to generate
Returns
-------
graphs: NumpyDataset
Generated Graphs
"""
graphs, labels = [], []
for i in range(n_graphs):
n_nodes = random.randint(self.min_nodes, self.max_nodes)
edge_index = generate_edge_index(n_nodes, self.avg_degree)
n_edges = edge_index.shape[1]
if self.task == 'graph':
graph_label = random.randint(0, self.n_classes - 1)
node_features = np.random.rand(
n_nodes, self.n_node_features) + graph_label
edge_features = np.random.rand(
n_edges, self.n_edge_features) + graph_label
kwargs = {}
for feature_name, feature_shape in self.kwargs.items():
kwargs[feature_name] = np.random.rand(
1, feature_shape) + graph_label
labels.append(graph_label)
elif self.task == 'node':
# np.random.randint excludes the upper bound, so pass n_classes to
# sample node labels from the full range [0, n_classes - 1]
node_label = np.random.randint(0, self.n_classes, n_nodes).astype(np.float64)
node_features = np.random.rand(
n_nodes, self.n_node_features) + node_label.reshape(-1, 1)
# For a node-prediction task, label is not added to edge features and other global features
# because label here is a node-level attribute and not a graph-level attribute
edge_features = np.random.rand(n_edges, self.n_edge_features)
kwargs = {}
for feature_name, feature_shape in self.kwargs.items():
kwargs[feature_name] = np.random.rand(1, feature_shape)
kwargs['y'] = node_label
graph = GraphData(node_features, edge_index, edge_features,
**kwargs)
graphs.append(graph)
if self.task == 'graph':
graph_dataset = NumpyDataset(X=np.array(graphs),
y=np.array(labels))
elif self.task == 'node':
# In this case, the 'y' attribute of GraphData will contain the
# node-level labels.
graph_dataset = NumpyDataset(X=np.array(graphs))
return graph_dataset
def generate_edge_index(n_nodes: int,
avg_degree: int,
remove_loops: bool = True) -> np.ndarray:
"""Returns source and destination nodes for `num_nodes * avg_degree` number of randomly
generated edges. If remove_loops is True, then self-loops from the edge_index pairs
are removed.
Parameters
----------
n_nodes: int
Number of nodes in the graph
avg_degree: int
Average degree per node in a graph
remove_loops: bool
Remove self-loops in a graph
"""
n_edges = n_nodes * avg_degree
edge_index = np.random.randint(low=0, high=n_nodes, size=(2, n_edges))
if remove_loops:
edge_index = remove_self_loops(edge_index)
return edge_index
def remove_self_loops(edge_index: np.ndarray) -> np.ndarray:
"""Removes self-loops from a given set of edges
Parameters
----------
edge_index: numpy.ndarray
An numpy array of shape (2, |num_edges|) representing edges in a graph
"""
mask = []
for i in range(edge_index.shape[1]):
if edge_index[0][i] != edge_index[1][i]:
# not a self-loop
mask.append(i)
return edge_index[:, mask]
<file_sep>import os
import unittest
import deepchem as dc
class TestContactFeaturizers(unittest.TestCase):
"""Test Contact Fingerprints and Voxelizers."""
def setUp(self):
# TODO test more formats for ligand
current_dir = os.path.dirname(os.path.realpath(__file__))
self.protein_file = os.path.join(current_dir, 'data',
'3ws9_protein_fixer_rdkit.pdb')
self.ligand_file = os.path.join(current_dir, 'data', '3ws9_ligand.sdf')
self.complex_files = [(self.ligand_file, self.protein_file)]
def test_contact_fingerprint_shape(self):
size = 8
featurizer = dc.feat.ContactCircularFingerprint(size=size)
features = featurizer.featurize(self.complex_files)
assert features.shape == (1, 2 * size)
def test_contact_voxels_shape(self):
box_width = 48
voxel_width = 2
voxels_per_edge = box_width / voxel_width
size = 8
voxelizer = dc.feat.ContactCircularVoxelizer(box_width=box_width,
voxel_width=voxel_width,
size=size)
features = voxelizer.featurize(self.complex_files)
assert features.shape == (1, voxels_per_edge, voxels_per_edge,
voxels_per_edge, size)
def test_contact_voxels_flattened(self):
box_width = 48
voxel_width = 2
voxels_per_edge = box_width / voxel_width
size = 8
voxelizer = dc.feat.ContactCircularVoxelizer(box_width=box_width,
voxel_width=voxel_width,
size=size,
flatten=True)
features = voxelizer.featurize(self.complex_files)
assert features.shape == (1, int(size * voxels_per_edge**3))
<file_sep>Data
====
DeepChem :code:`dc.data` provides APIs for handling your data.
If your data is stored in files like CSV and SDF, you can use the **Data Loaders**.
The Data Loaders read your data, convert it to features (for example, SMILES to ECFP) and save the features in a Dataset class.
If your data consists of Python objects like NumPy arrays or pandas DataFrames, you can use the **Datasets** directly.
.. contents:: Contents
:local:
Datasets
--------
DeepChem :code:`dc.data.Dataset` objects are one of the core building blocks of DeepChem programs.
:code:`Dataset` objects hold representations of data for machine learning and are widely used throughout DeepChem.
The goal of the :code:`Dataset` class is to be maximally interoperable
with other common representations of machine learning datasets.
For this reason we provide interconversion methods mapping from :code:`Dataset` objects
to pandas DataFrames, TensorFlow Datasets, and PyTorch datasets.
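For example, a small in-memory dataset can be built from NumPy arrays and then
converted to these other representations (a minimal sketch, assuming TensorFlow
and PyTorch are installed for the framework-specific conversions)::

  import numpy as np
  import deepchem as dc

  X = np.random.rand(10, 5)
  y = np.random.rand(10)
  dataset = dc.data.NumpyDataset(X, y)

  # Convert to a pandas DataFrame
  df = dataset.to_dataframe()
  # Convert to framework-specific datasets for custom training loops
  tf_dataset = dataset.make_tf_dataset(batch_size=5)
  torch_dataset = dataset.make_pytorch_dataset(batch_size=5)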
NumpyDataset
^^^^^^^^^^^^
The :code:`dc.data.NumpyDataset` class provides an in-memory implementation of the abstract :code:`Dataset`
which stores its data in :code:`numpy.ndarray` objects.
.. autoclass:: deepchem.data.NumpyDataset
:members:
:inherited-members:
DiskDataset
^^^^^^^^^^^
The :code:`dc.data.DiskDataset` class allows for the storage of larger
datasets on disk. Each :code:`DiskDataset` is associated with a
directory in which it writes its contents to disk. Note that a
:code:`DiskDataset` can be very large, so some of the utility methods
to access fields of a :code:`Dataset` can be prohibitively expensive.
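A :code:`DiskDataset` can also be created directly from in-memory arrays; the
shards are written to a temporary directory unless an explicit :code:`data_dir`
is given (a minimal sketch)::

  import numpy as np
  import deepchem as dc

  X = np.random.rand(100, 5)
  y = np.random.rand(100)
  # Writes the data to disk and returns a DiskDataset handle
  dataset = dc.data.DiskDataset.from_numpy(X, y)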
.. autoclass:: deepchem.data.DiskDataset
:members:
:inherited-members:
ImageDataset
^^^^^^^^^^^^
The :code:`dc.data.ImageDataset` class is optimized to allow
for convenient processing of image based datasets.
.. autoclass:: deepchem.data.ImageDataset
:members:
:inherited-members:
Data Loaders
------------
Processing large amounts of input data to construct a :code:`dc.data.Dataset` object can require some amount of hacking.
To simplify this process for you, you can use the :code:`dc.data.DataLoader` classes.
These classes provide utilities for you to load and process large amounts of data.
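For example, a CSV file of SMILES strings and labels can be featurized and
turned into a :code:`Dataset` in a few lines (a minimal sketch;
:code:`example.csv`, the :code:`smiles` column and the :code:`task1` label
column are placeholder names)::

  import deepchem as dc

  featurizer = dc.feat.CircularFingerprint(size=1024)
  loader = dc.data.CSVLoader(tasks=["task1"],
                             feature_field="smiles",
                             featurizer=featurizer)
  dataset = loader.create_dataset("example.csv")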
CSVLoader
^^^^^^^^^
.. autoclass:: deepchem.data.CSVLoader
:members: __init__, create_dataset
UserCSVLoader
^^^^^^^^^^^^^
.. autoclass:: deepchem.data.UserCSVLoader
:members: __init__, create_dataset
ImageLoader
^^^^^^^^^^^
.. autoclass:: deepchem.data.ImageLoader
:members: __init__, create_dataset
JsonLoader
^^^^^^^^^^
JSON is a flexible file format that is human-readable, lightweight,
and more compact than other open standard formats like XML. JSON files
are similar to python dictionaries of key-value pairs. All keys must
be strings, but values can be any of (string, number, object, array,
boolean, or null), so the format is more flexible than CSV. JSON is
used for describing structured data and to serialize objects. It is
conveniently used to read/write Pandas dataframes with the
`pandas.read_json` and `DataFrame.to_json` methods.
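A typical invocation mirrors the other loaders (a minimal sketch; the field
names follow the perovskite loader elsewhere in this codebase and
:code:`perovskite.json` is a placeholder path)::

  import deepchem as dc

  loader = dc.data.JsonLoader(tasks=["formation_energy"],
                              feature_field="structure",
                              label_field="formation_energy",
                              featurizer=dc.feat.DummyFeaturizer())
  dataset = loader.create_dataset("perovskite.json")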
.. autoclass:: deepchem.data.JsonLoader
:members: __init__, create_dataset
SDFLoader
^^^^^^^^^
.. autoclass:: deepchem.data.SDFLoader
:members: __init__, create_dataset
FASTALoader
^^^^^^^^^^^
.. autoclass:: deepchem.data.FASTALoader
:members: __init__, create_dataset
FASTQLoader
^^^^^^^^^^^
.. autoclass:: deepchem.data.FASTQLoader
:members: __init__, create_dataset
InMemoryLoader
^^^^^^^^^^^^^^
The :code:`dc.data.InMemoryLoader` is designed to facilitate the processing of large datasets
where you already hold the raw data in-memory (say in a pandas dataframe).
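A minimal sketch of the intended usage (the SMILES strings and labels below are
arbitrary examples)::

  import deepchem as dc

  smiles = ["C", "CC", "CCC", "CCCC"]
  labels = [1.5, 2.3, 0.2, -1.5]
  featurizer = dc.feat.CircularFingerprint(size=1024)
  loader = dc.data.InMemoryLoader(tasks=["task1"], featurizer=featurizer)
  # Each (input, label) record is featurized and stored in a DiskDataset
  dataset = loader.create_dataset(zip(smiles, labels), shard_size=2)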
.. autoclass:: deepchem.data.InMemoryLoader
:members: __init__, create_dataset
Density Functional Theory YAML Loader
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
.. autoclass:: deepchem.data.data_loader.DFTYamlLoader
:members: __init__, create_dataset
Data Classes
------------
DeepChem featurizers often transform input datapoints into "data classes". These are
classes that hold all the information needed to train a model on that data
point. Models then transform these into the tensors for training in their
:code:`default_generator` methods.
Graph Data
^^^^^^^^^^
These classes document the data classes for graph convolutions.
We plan to simplify these classes (:code:`ConvMol`, :code:`MultiConvMol`, :code:`WeaveMol`)
into a joint data representation (:code:`GraphData`) for all graph convolutions in a future version of DeepChem,
so these APIs may not remain stable.
The graph convolution models which inherit :code:`KerasModel` depend on :code:`ConvMol`, :code:`MultiConvMol`, or :code:`WeaveMol`.
On the other hand, the graph convolution models which inherit :code:`TorchModel` depend on :code:`GraphData`.
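A :code:`GraphData` object can be constructed directly from NumPy arrays (a
minimal sketch describing a toy three-node graph)::

  import numpy as np
  from deepchem.feat.graph_data import GraphData

  node_features = np.random.rand(3, 5)           # 3 nodes, 5 features each
  edge_index = np.array([[0, 1, 2], [1, 2, 0]])  # directed edges 0->1, 1->2, 2->0
  edge_features = np.random.rand(3, 2)           # one feature vector per edge
  graph = GraphData(node_features=node_features,
                    edge_index=edge_index,
                    edge_features=edge_features)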
.. autoclass:: deepchem.feat.mol_graphs.ConvMol
:members:
.. autoclass:: deepchem.feat.mol_graphs.MultiConvMol
:members:
:undoc-members:
.. autoclass:: deepchem.feat.mol_graphs.WeaveMol
:members:
:undoc-members:
.. autoclass:: deepchem.feat.graph_data.GraphData
:members:
Density Functional Theory Data
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
These Data classes are used to create entry objects for DFT calculations.
.. autoclass:: deepchem.feat.dft_data.DFTSystem
:members:
.. autoclass:: deepchem.feat.dft_data.DFTEntry
:members:
Base Classes (for develop)
--------------------------
Dataset
^^^^^^^
The :code:`dc.data.Dataset` class is the abstract parent class for all
datasets. This class should never be directly initialized, but
contains a number of useful method implementations.
.. autoclass:: deepchem.data.Dataset
:members:
DataLoader
^^^^^^^^^^
The :code:`dc.data.DataLoader` class is the abstract parent class for all
dataloaders. This class should never be directly initialized, but
contains a number of useful method implementations.
.. autoclass:: deepchem.data.DataLoader
:members:
<file_sep>"""
Train low-data models on random forests.
"""
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
import tempfile
import numpy as np
import deepchem as dc
from datasets import load_tox21_ecfp
from sklearn.ensemble import RandomForestClassifier
from deepchem.metrics import Metric
from deepchem.splits.task_splitter import merge_fold_datasets
from deepchem.splits.task_splitter import TaskSplitter
from deepchem.models.sklearn_models import SklearnModel
from deepchem.models.tf_keras_models.support_classifier import SupportGenerator
from deepchem.models.tf_keras_models.support_classifier import get_task_dataset_minus_support
model_dir = tempfile.mkdtemp()
# 4-fold splits
K = 4
# 10 positive/negative ligands
n_pos = 10
n_neg = 10
# 10 trials on test-set
n_trials = 10
# Sample supports without replacement (all pos/neg should be different)
replace = False
tox21_tasks, dataset, transformers = load_tox21_ecfp()
# Define metric
metric = Metric(dc.metrics.roc_auc_score, verbosity="high",
mode="classification")
task_splitter = TaskSplitter()
fold_datasets = task_splitter.k_fold_split(dataset, K)
all_scores = {}
for fold in range(K):
train_inds = list(set(range(K)) - set([fold]))
train_folds = [fold_datasets[ind] for ind in train_inds]
train_dataset = merge_fold_datasets(train_folds)
test_dataset = fold_datasets[fold]
fold_tasks = range(fold * len(test_dataset.get_task_names()),
(fold+1) * len(test_dataset.get_task_names()))
# Get supports on test-set
support_generator = SupportGenerator(
test_dataset, range(len(test_dataset.get_task_names())), n_pos, n_neg,
n_trials, replace)
# Compute accuracies
task_scores = {task: [] for task in range(len(test_dataset.get_task_names()))}
for (task, support) in support_generator:
# Train model on support
sklearn_model = RandomForestClassifier(
class_weight="balanced", n_estimators=50)
model = SklearnModel(sklearn_model, model_dir)
model.fit(support)
# Test model
task_dataset = get_task_dataset_minus_support(test_dataset, support, task)
y_pred = model.predict_proba(task_dataset)
score = metric.compute_metric(
task_dataset.y, y_pred, task_dataset.w)
#print("Score on task %s is %s" % (str(task), str(score)))
task_scores[task].append(score)
# Join information for all tasks.
mean_task_scores = {}
for task in range(len(test_dataset.get_task_names())):
mean_task_scores[task] = np.mean(np.array(task_scores[task]))
print("Fold %s" % str(fold))
print(mean_task_scores)
for (fold_task, task) in zip(fold_tasks, range(len(test_dataset.get_task_names()))):
all_scores[fold_task] = mean_task_scores[task]
print("All scores")
print(all_scores)
<file_sep>"""
Script that trains Sklearn multitask models on MUV dataset.
"""
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
import os
import shutil
import numpy as np
import deepchem as dc
from deepchem.molnet import load_muv
from sklearn.ensemble import RandomForestClassifier
np.random.seed(123)
# Load MUV dataset
muv_tasks, muv_datasets, transformers = load_muv()
(train_dataset, valid_dataset, test_dataset) = muv_datasets
# Fit models
metric = dc.metrics.Metric(
dc.metrics.roc_auc_score, np.mean, mode="classification")
def model_builder(model_dir):
sklearn_model = RandomForestClassifier(
class_weight="balanced", n_estimators=500)
return dc.models.SklearnModel(sklearn_model, model_dir)
model = dc.models.SingletaskToMultitask(muv_tasks, model_builder)
# Fit trained model
model.fit(train_dataset)
model.save()
# Evaluate train/test scores
train_scores = model.evaluate(train_dataset, [metric], transformers)
valid_scores = model.evaluate(valid_dataset, [metric], transformers)
print("Train scores")
print(train_scores)
print("Validation scores")
print(valid_scores)
<file_sep>import pytest
import tempfile
import numpy as np
import deepchem as dc
@pytest.mark.torch
def test_mat_regression():
# load datasets
task, datasets, trans = dc.molnet.load_freesolv()
train, valid, test = datasets
# initialize model
model = dc.models.torch_models.MATModel(n_encoders=2,
sa_hsize=128,
d_input=128,
d_hidden=128,
d_output=128,
encoder_hsize=128,
embed_input_hsize=36,
gen_attn_hidden=32)
# overfit test
model.fit(valid, nb_epoch=100)
metric = dc.metrics.Metric(dc.metrics.mean_absolute_error,
mode="regression")
scores = model.evaluate(valid, [metric], trans)
assert scores['mean_absolute_error'] < 1.0
@pytest.mark.torch
def test_mat_reload():
from deepchem.models.torch_models import MATModel
model_dir = tempfile.mkdtemp()
tasks, datasets, trans = dc.molnet.load_freesolv()
train, valid, test = datasets
model = MATModel(n_encoders=2,
sa_hsize=128,
d_input=128,
d_hidden=128,
d_output=128,
encoder_hsize=128,
embed_input_hsize=36,
gen_attn_hidden=32,
model_dir=model_dir)
model.fit(train, nb_epoch=1)
reloaded_model = MATModel(n_encoders=2,
sa_hsize=128,
d_input=128,
d_hidden=128,
d_output=128,
encoder_hsize=128,
embed_input_hsize=36,
gen_attn_hidden=32,
model_dir=model_dir)
reloaded_model.restore()
original_pred = model.predict(valid)
reload_pred = reloaded_model.predict(valid)
assert np.all(original_pred == reload_pred)
<file_sep>"""Normalizing flows for transforming probability distributions using PyTorch.
"""
import torch
from torch import nn
from typing import Sequence, Tuple
class NormalizingFlow(nn.Module):
"""Normalizing flows are widely used for generative modeling.
This algorithm has advantages over variational autoencoders (VAE) because
it allows easy sampling by applying invertible transformations
(Frey, Gadepally, & Ramsundar, 2022).
Example
--------
>>> import deepchem as dc
>>> from deepchem.models.torch_models.layers import Affine
>>> from deepchem.models.torch_models.normalizing_flows_pytorch import NormalizingFlow
>>> import torch
>>> from torch.distributions import MultivariateNormal
>>> # initialize the transformation layer's parameters
>>> dim = 2
>>> samples = 96
>>> transforms = [Affine(dim)]
>>> distribution = MultivariateNormal(torch.zeros(dim), torch.eye(dim))
>>> # initialize normalizing flow model
>>> model = NormalizingFlow(transforms, distribution, dim)
>>> # evaluate the log_prob when applying the transformation layers
>>> input = distribution.sample(torch.Size((samples, dim)))
>>> len(model.log_prob(input))
96
>>> # evaluate the sampling method and its log_prob
>>> len(model.sample(samples))
2
"""
def __init__(self, transform: Sequence, base_distribution,
dim: int) -> None:
"""This class considers a transformation, or a composition of transformation
functions (layers), between a base distribution and a target distribution.
Parameters
----------
transform: Sequence
Bijective transformation/transformations which are considered the layers
of a Normalizing Flow model.
base_distribution: torch.Tensor
Probability distribution to initialize the algorithm. The Multivariate Normal
distribution is mainly used for this parameter.
dim: int
Value of the Nth dimension of the dataset.
"""
super().__init__()
self.dim = dim
self.transforms = nn.ModuleList(transform)
self.base_distribution = base_distribution
def log_prob(self, inputs: torch.Tensor) -> torch.Tensor:
"""This method computes the probability of the inputs when
transformation/transformations are applied.
Parameters
----------
inputs: torch.Tensor
Tensor used to evaluate the log_prob computation of the learned
distribution.
shape: (samples, dim)
Returns
-------
log_prob: torch.Tensor
This tensor contains the value of the log probability computed.
shape: (samples)
"""
log_prob = torch.zeros(inputs.shape[0])
for biject in reversed(self.transforms):
inputs, inverse_log_det_jacobian = biject.inverse(inputs)
log_prob += inverse_log_det_jacobian
# Include the log-density of the fully inverse-mapped inputs under the
# base distribution (change of variables formula)
log_prob += self.base_distribution.log_prob(inputs)
return log_prob
def sample(self, n_samples: int) -> Tuple[torch.Tensor, torch.Tensor]:
"""Performs a sampling from the transformed distribution.
Besides the outputs (sampling), this method returns the logarithm of
probability to obtain the outputs at the base distribution.
Parameters
----------
n_samples: int
Number of samples to select from the transformed distribution
Returns
-------
sample: tuple
This tuple contains two torch.Tensor objects. The first represents
a sampling of the learned distribution when transformations have been
applied. The second torch.Tensor is the computation of log probabilities
of the transformed distribution.
shape: ((samples, dim), (samples))
"""
outputs = self.base_distribution.sample((n_samples,))
log_prob = self.base_distribution.log_prob(outputs)
for biject in self.transforms:
outputs, log_det_jacobian = biject.forward(outputs)
log_prob += log_det_jacobian
return outputs, log_prob
<file_sep>"""
Test basic molecular features.
"""
import numpy as np
import unittest
from deepchem.feat import RDKitDescriptors
class TestRDKitDescriptors(unittest.TestCase):
"""
Test RDKitDescriptors.
"""
def setUp(self):
"""
Set up tests.
"""
from rdkit.Chem import Descriptors
self.all_descriptors = Descriptors.descList
self.all_desc_count = len(self.all_descriptors)
self.smiles = 'CC(=O)OC1=CC=CC=C1C(=O)O'
def test_rdkit_descriptors(self):
"""
Test simple descriptors.
"""
featurizer = RDKitDescriptors()
descriptors = featurizer.featurize(self.smiles)
assert descriptors.shape == (1, len(featurizer.reqd_properties))
exact_mol_wt_index = list(
featurizer.reqd_properties).index('ExactMolWt')
assert np.allclose(descriptors[0][exact_mol_wt_index], 180, atol=0.1)
def test_rdkit_descriptors_with_use_fragment(self):
"""
Test with use_fragment
"""
featurizer = RDKitDescriptors(use_fragment=False)
descriptors = featurizer(self.smiles)
assert descriptors.shape == (1, len(featurizer.reqd_properties))
assert len(featurizer.reqd_properties) < self.all_desc_count
exact_mol_wt_index = list(
featurizer.reqd_properties).index('ExactMolWt')
assert np.allclose(descriptors[0, exact_mol_wt_index], 180, atol=0.1)
def test_rdkit_descriptors_with_use_bcut2d_false(self):
"""
Test with use_bcut2d
"""
featurizer = RDKitDescriptors(use_bcut2d=False)
descriptors = featurizer(self.smiles)
assert descriptors.shape == (1, len(featurizer.reqd_properties))
assert len(featurizer.reqd_properties) < self.all_desc_count
with self.assertRaises(KeyError):
featurizer.reqd_properties['BCUT2D_MWHI']
exact_mol_wt_index = list(
featurizer.reqd_properties).index('ExactMolWt')
assert np.allclose(descriptors[0, exact_mol_wt_index], 180, atol=0.1)
def test_rdkit_descriptors_normalized(self):
"""
Test with normalization
"""
featurizer = RDKitDescriptors(is_normalized=True)
assert featurizer.normalized_desc != {}
descriptors = featurizer(self.smiles)
assert descriptors.shape == (1, len(featurizer.reqd_properties))
# no normalized feature value should be greater than 1.0
assert len(np.where(descriptors > 1.0)[0]) == 0
exact_mol_wt_index = sorted(
featurizer.reqd_properties).index('ExactMolWt')
assert np.allclose(descriptors[0, exact_mol_wt_index], 0.0098, atol=0.1)
def test_with_custom_descriptors(self):
# these are the properties used in grover
grover_props = [
'fr_Al_COO', 'fr_Al_OH', 'fr_Al_OH_noTert', 'fr_ArN', 'fr_Ar_COO',
'fr_Ar_N', 'fr_Ar_NH', 'fr_Ar_OH', 'fr_COO', 'fr_COO2', 'fr_C_O',
'fr_C_O_noCOO', 'fr_C_S', 'fr_HOCCN', 'fr_Imine', 'fr_NH0',
'fr_NH1', 'fr_NH2', 'fr_N_O', 'fr_Ndealkylation1',
'fr_Ndealkylation2', 'fr_Nhpyrrole', 'fr_SH', 'fr_aldehyde',
'fr_alkyl_carbamate', 'fr_alkyl_halide', 'fr_allylic_oxid',
'fr_amide', 'fr_amidine', 'fr_aniline', 'fr_aryl_methyl',
'fr_azide', 'fr_azo', 'fr_barbitur', 'fr_benzene',
'fr_benzodiazepine', 'fr_bicyclic', 'fr_diazo',
'fr_dihydropyridine', 'fr_epoxide', 'fr_ester', 'fr_ether',
'fr_furan', 'fr_guanido', 'fr_halogen', 'fr_hdrzine', 'fr_hdrzone',
'fr_imidazole', 'fr_imide', 'fr_isocyan', 'fr_isothiocyan',
'fr_ketone', 'fr_ketone_Topliss', 'fr_lactam', 'fr_lactone',
'fr_methoxy', 'fr_morpholine', 'fr_nitrile', 'fr_nitro',
'fr_nitro_arom', 'fr_nitro_arom_nonortho', 'fr_nitroso',
'fr_oxazole', 'fr_oxime', 'fr_para_hydroxylation', 'fr_phenol',
'fr_phenol_noOrthoHbond', 'fr_phos_acid', 'fr_phos_ester',
'fr_piperdine', 'fr_piperzine', 'fr_priamide', 'fr_prisulfonamd',
'fr_pyridine', 'fr_quatN', 'fr_sulfide', 'fr_sulfonamd',
'fr_sulfone', 'fr_term_acetylene', 'fr_tetrazole', 'fr_thiazole',
'fr_thiocyan', 'fr_thiophene', 'fr_unbrch_alkane', 'fr_urea'
]
smiles = 'CCC(=O)'
featurizer = RDKitDescriptors(descriptors=grover_props,
labels_only=True)
features = featurizer.featurize(smiles)[0]
assert len(features) == len(grover_props)
assert sum(
features) == 3 # expected number of functional groups in CCC(=O)
assert (np.where(features == 1)[0] == (10, 11, 23)).all()
def test_with_labels_only(self):
descriptors = ['fr_Al_COO', 'fr_Al_OH', 'fr_allylic_oxid']
smiles = 'CC(C)=CCCC(C)=CC(=O)'
featurizer = RDKitDescriptors(descriptors=descriptors, labels_only=True)
features = featurizer.featurize(smiles)[0]
assert len(features) == len(descriptors)
assert (features == [0, 0, 1]).all()
<file_sep>"""
Diabetic Retinopathy Images loader.
"""
from __future__ import division
from __future__ import unicode_literals
import os
import logging
import deepchem
import numpy as np
import pandas as pd
logger = logging.getLogger(__name__)
def load_images_DR(split='random', seed=None):
""" Loader for DR images """
data_dir = deepchem.utils.get_data_dir()
images_path = os.path.join(data_dir, 'DR', 'train')
label_path = os.path.join(data_dir, 'DR', 'trainLabels.csv')
if not os.path.exists(images_path) or not os.path.exists(label_path):
logger.warn("Cannot locate data, \n\
all images(.png) should be stored in the folder: $DEEPCHEM_DATA_DIR/DR/train/,\n\
corresponding label file should be stored as $DEEPCHEM_DATA_DIR/DR/trainLabels.csv.\n\
Please refer to https://www.kaggle.com/c/diabetic-retinopathy-detection for data access"
)
image_names = os.listdir(images_path)
raw_images = []
for im in image_names:
if im.endswith('.jpeg') and not im.startswith(
'cut_') and not 'cut_' + im in image_names:
raw_images.append(im)
if len(raw_images) > 0:
cut_raw_images(raw_images, images_path)
image_names = [
p for p in os.listdir(images_path)
if p.startswith('cut_') and p.endswith('.png')
]
all_labels = dict(zip(*np.transpose(np.array(pd.read_csv(label_path)))))
print("Number of images: %d" % len(image_names))
labels = np.array(
[all_labels[os.path.splitext(n)[0][4:]] for n in image_names]).reshape(
(-1, 1))
image_full_paths = [os.path.join(images_path, n) for n in image_names]
classes, cts = np.unique(list(all_labels.values()), return_counts=True)
weight_ratio = dict(zip(classes, np.max(cts) / cts.astype(float)))
weights = np.array([weight_ratio[l[0]] for l in labels]).reshape((-1, 1))
loader = deepchem.data.ImageLoader()
dat = loader.featurize(
image_full_paths, labels=labels, weights=weights)
if split == None:
return dat
splitters = {
'index': deepchem.splits.IndexSplitter(),
'random': deepchem.splits.RandomSplitter()
}
if not seed is None:
np.random.seed(seed)
splitter = splitters[split]
train, valid, test = splitter.train_valid_test_split(dat)
all_dataset = (train, valid, test)
return all_dataset
def cut_raw_images(all_images, path):
"""Preprocess images:
(1) Crop the central square including retina
(2) Reduce resolution to 512 * 512
"""
print("Num of images to be processed: %d" % len(all_images))
try:
import cv2
except:
logger.warn("OpenCV required for image preprocessing")
return
for i, img_path in enumerate(all_images):
if i % 100 == 0:
print("on image %d" % i)
    if os.path.exists(os.path.join(path, 'cut_' + os.path.splitext(img_path)[0] + '.png')):
continue
img = cv2.imread(os.path.join(path, img_path))
edges = cv2.Canny(img, 10, 30)
    coords = list(zip(*np.where(edges > 0)))
n_p = len(coords)
coords.sort(key=lambda x: (x[0], x[1]))
center_0 = int(
(coords[int(0.01 * n_p)][0] + coords[int(0.99 * n_p)][0]) / 2)
coords.sort(key=lambda x: (x[1], x[0]))
center_1 = int(
(coords[int(0.01 * n_p)][1] + coords[int(0.99 * n_p)][1]) / 2)
edge_size = min(
[center_0, img.shape[0] - center_0, center_1, img.shape[1] - center_1])
img_cut = img[(center_0 - edge_size):(center_0 + edge_size), (
center_1 - edge_size):(center_1 + edge_size)]
img_cut = cv2.resize(img_cut, (512, 512))
cv2.imwrite(
os.path.join(path, 'cut_' + os.path.splitext(img_path)[0] + '.png'),
img_cut)
<file_sep>import os
import numpy as np
import deepchem as dc
seed = 123
np.random.seed(seed)
base_dir = os.getcwd()
data_dir = os.path.join(base_dir, "refined_atomconv")
train_dir = os.path.join(base_dir, "train")
test_dir = os.path.join(base_dir, "test")
d = dc.data.DiskDataset(data_dir)
splitter = dc.splits.SingletaskStratifiedSplitter()
train_dataset, test_dataset = splitter.train_test_split(d, train_dir, test_dir)
<file_sep>Docker Tutorial
================
Docker is software used for easily building, testing and deploying applications. Docker creates isolated workspaces called containers, which avoid dependency version clashes and make software development faster. Software can also be modularized into different containers, which allows it to be tested without impacting other components or the host computer. Containers ship with all required dependencies, so the user need not worry about installing packages.
**This makes it easy for users to access older versions of deepchem via docker and to develop with them.**
Docker works with the following layers:
- Images:
  *Images are the instructions for creating docker containers. An image specifies all the packages and their versions that must be installed for the application to run. Images for deepchem can be found on Docker Hub.*
- Containers:
  *Containers are live instances of images and are lightweight, isolated workspaces (they do not put much load on your PC), where you can run and develop on previous deepchem versions.*
- Docker engine:
  *It is the main application that builds, runs and manages containers and images. It also provides a means to interact with a docker container after it is built and while it is running.*
- Registries:
*It is a hub or place where docker images can be found. For deepchem, the default registry is the Docker Hub.*
**For docker installation, visit:** https://docs.docker.com/engine/install/
Using deepchem with docker:
---------------------------
To work with deepchem in docker, we first have to pull deepchem images from docker hub. It can be done in the following way.
If the latest deepchem version is needed, then:
.. code-block:: bash
#if latest:
docker pull deepchemio/deepchem:latest
If you want to work with an older version, then use the following method instead:
.. code-block:: bash
docker pull deepchemio/deepchem:x.x.x
#x.x.x refers to the version number
Now, wait until the image has finished downloading. Then create a container from the image and use it.
.. code-block:: bash
docker run --rm -it deepchemio/deepchem:x.x.x
#x.x.x refers to the version number
#replace "x.x.x" with "latest" if latest version is used
If you want GPU support:
.. code-block:: bash
# If nvidia-docker is installed
nvidia-docker run --rm -it deepchemio/deepchem:latest
docker run --runtime nvidia --rm -it deepchemio/deepchem:latest
# If nvidia-container-toolkit is installed
docker run --gpus all --rm -it deepchemio/deepchem:latest
Now, you have successfully entered the container's bash where you can execute your programs.
**To exit the container, press "Ctrl+D".** This stops the container and returns you to the host computer's bash.
To view all containers, open a new terminal/bash on the host computer, then:
.. code-block:: bash
docker ps -a
This gives a containers list like this:
.. code-block:: bash
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
Thus you can see the names and details of all created containers.
*Now you can develop code on your host computer (development environment) and test it in a container having a specific version of deepchem (testing environment).*
To test the program you have written, you should copy it into the container. Open a new terminal on the host computer:
.. code-block:: bash
docker cp host-file-path <container-id>:path-in-container
    #container ID should be checked in a separate terminal
Similarly if you want to copy files out from a container, then open a new host computer's terminal:
.. code-block:: bash
docker cp <container-id>:path-in-container host-file-path
    #container ID should be checked in a separate terminal
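Alternatively, a host directory can be mounted into the container when it is started, so that files are shared without repeated copies (the paths below are only an example):
.. code-block:: bash
    docker run --rm -it -v /path/on/host:/root/mydir deepchemio/deepchem:latest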
Hands-on tutorial
-----------------
Let's create a simple deepchem script and run it in the docker container for deepchem 2.4.0.
Let the script be named check_version.py (avoid naming it deepchem.py, since a script with that name would shadow the deepchem package on import) and place it at /home/ on the host computer.
*check_version.py contents:*
.. code-block:: python
import deepchem as dc
print(dc.__version__)
*Step 1:* Pull the deepchem 2.4.0 image and wait for it to be downloaded
.. code-block:: bash
$docker pull deepchemio/deepchem:2.4.0
*Step 2:* Create a container
.. code-block:: bash
$docker run --rm -it deepchemio/deepchem:2.4.0
(deepchem) root@51b1d2665016:~/mydir#
*Step 3:* Open a new terminal/bash and copy check_version.py
.. code-block:: bash
$docker ps -a
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
51b1d2665016 deepchemio/deepchem:2.4.0 "/bin/bash" 5 seconds ago Up 4 seconds friendly_lehmann
    $docker cp /home/check_version.py 51b1d2665016:/root/mydir/check_version.py
*Step 4:* Return to the previous terminal in which the container is running
.. code-block:: bash
    (deepchem) root@51b1d2665016:~/mydir# python3 check_version.py >> output.txt
2022-01-12 15:33:27.967170: I tensorflow/stream_executor/platform/default/dso_loader.cc:48] Successfully opened dynamic library libcudart.so.10.1
This should have created an output file in the container containing the deepchem version number. Then you should copy it back to the host computer.
*Step 5:* In a new terminal, execute the following commands.
.. code-block:: bash
$docker cp 51b1d2665016:/root/mydir/output.txt ~/output.txt
$cat ~/output.txt
2.4.0
Thus you have successfully executed the program with deepchem 2.4.0!
<file_sep>from deepchem.feat.base_classes import MolecularFeaturizer
from deepchem.feat import MolGraphConvFeaturizer
from rdkit import Chem
import unittest
import numpy as np
class DummyTestClass(MolecularFeaturizer):
"""
Dummy test class derived from MolecularFeaturizer where `use_original_atoms_order` parameter is not initialised
"""
def __init__(self):
pass
def _featurize(self, datapoint, **kwargs):
"""
Returns mapping of atomic number and atom ranks as feature vector (only for testing purposes)
"""
if isinstance(datapoint, Chem.rdchem.Mol):
atoms_order = []
for atom in datapoint.GetAtoms():
atoms_order.append((atom.GetAtomicNum(), atom.GetIdx()))
return atoms_order
class DummyTestClass2(MolecularFeaturizer):
"""
Dummy test class derived from MolecularFeaturizer where `use_original_atoms_order` parameter is initialised
"""
def __init__(self, use_original_atoms_order=False):
self.use_original_atoms_order = use_original_atoms_order
def _featurize(self, datapoint, **kwargs):
"""
Returns mapping of atomic number and atom ranks as feature vector (only for testing purposes)
"""
if isinstance(datapoint, Chem.rdchem.Mol):
atoms_order = []
for atom in datapoint.GetAtoms():
atoms_order.append((atom.GetAtomicNum(), atom.GetIdx()))
return atoms_order
def get_edge_index(mol):
# construct edge (bond) index
src, dest = [], []
for bond in mol.GetBonds():
# add edge list considering a directed graph
start, end = bond.GetBeginAtomIdx(), bond.GetEndAtomIdx()
src += [start, end]
dest += [end, start]
return np.asarray([src, dest], dtype=int)
class TestUpdatedMolecularFeaturizer(unittest.TestCase):
"""
Test for `use_original_atoms_order` boolean condition added to `MolecularFeaturizer` base class
"""
def setUp(self):
"""
Set up tests.
"""
from rdkit.Chem import rdmolfiles
from rdkit.Chem import rdmolops
self.smile = "C1=CC=CN=C1"
mol = Chem.MolFromSmiles(self.smile)
self.original_atoms_order = []
for atom in mol.GetAtoms():
self.original_atoms_order.append(
(atom.GetAtomicNum(), atom.GetIdx()
)) # mapping of atomic number and original atom ordering
new_order = rdmolfiles.CanonicalRankAtoms(mol)
canonical_mol = rdmolops.RenumberAtoms(mol, new_order)
self.canonical_atoms_order = []
for atom in canonical_mol.GetAtoms():
self.canonical_atoms_order.append(
(atom.GetAtomicNum(), atom.GetIdx()
)) # mapping of atomic number and canonical atom ordering
self.bond_index = get_edge_index(
mol) # bond index based on original atom order
self.canonical_bond_index = get_edge_index(
canonical_mol) # bond index based on canonical atom order
def test_without_init(self):
"""
Test without use_original_atoms_order being initialised
"""
featurizer = DummyTestClass()
datapoint_atoms_order = featurizer.featurize(
self.smile) # should be canonical mapping
assert (datapoint_atoms_order == np.asarray(
[self.canonical_atoms_order])).all()
def test_with_canonical_order(self):
"""
Test with use_original_atoms_order = False
"""
featurizer = DummyTestClass2(use_original_atoms_order=False)
datapoint_atoms_order = featurizer.featurize(
self.smile) # should be canonical mapping
assert (datapoint_atoms_order == np.asarray(
[self.canonical_atoms_order])).all()
def test_with_original_order(self):
"""
Test with use_original_atoms_order = True
"""
featurizer = DummyTestClass2(use_original_atoms_order=True)
datapoint_atoms_order = featurizer.featurize(
            self.smile)  # should be original mapping
assert (datapoint_atoms_order == np.asarray([self.original_atoms_order
])).all()
def test_on_derived_featurizers(self):
"""
Test for atom order in featurizer classes derived from 'MolecularFeaturizer' base class
"""
# test for 'MolGraphConvFeaturizer' class
featurizer = MolGraphConvFeaturizer()
graph_feat = featurizer.featurize(self.smile)
# for "C1=CC=CN=C1" original bond index is not equal to canonical bond index
assert not (self.bond_index == self.canonical_bond_index).all()
assert (graph_feat[0].edge_index == self.canonical_bond_index).all()
<file_sep>import numpy as np
import torch
from deepchem.data.datasets import NumpyDataset, DiskDataset, ImageDataset
from typing import Optional
class _TorchNumpyDataset(torch.utils.data.IterableDataset): # type: ignore
def __init__(self,
numpy_dataset: NumpyDataset,
epochs: int,
deterministic: bool,
batch_size: Optional[int] = None):
"""
Parameters
----------
numpy_dataset: NumpyDataset
The original NumpyDataset which you want to convert to PyTorch Dataset
epochs: int
the number of times to iterate over the Dataset
deterministic: bool
if True, the data is produced in order. If False, a different random
permutation of the data is used for each epoch.
batch_size: int
the number of samples to return in each batch. If None, each returned
value is a single sample.
"""
self.numpy_dataset = numpy_dataset
self.epochs = epochs
self.deterministic = deterministic
self.batch_size = batch_size
def __iter__(self):
n_samples = self.numpy_dataset._X.shape[0]
worker_info = torch.utils.data.get_worker_info()
if worker_info is None:
first_sample = 0
last_sample = n_samples
else:
first_sample = worker_info.id * n_samples // worker_info.num_workers
last_sample = (worker_info.id +
1) * n_samples // worker_info.num_workers
for epoch in range(self.epochs):
if self.deterministic:
order = first_sample + np.arange(last_sample - first_sample)
else:
# Ensure that every worker will pick the same random order for each epoch.
random = np.random.RandomState(epoch)
order = random.permutation(n_samples)[first_sample:last_sample]
if self.batch_size is None:
for i in order:
yield (self.numpy_dataset._X[i], self.numpy_dataset._y[i],
self.numpy_dataset._w[i], self.numpy_dataset._ids[i])
else:
for i in range(0, len(order), self.batch_size):
indices = order[i:i + self.batch_size]
yield (self.numpy_dataset._X[indices],
self.numpy_dataset._y[indices],
self.numpy_dataset._w[indices],
self.numpy_dataset._ids[indices])
class _TorchDiskDataset(torch.utils.data.IterableDataset): # type: ignore
def __init__(self,
disk_dataset: DiskDataset,
epochs: int,
deterministic: bool,
batch_size: Optional[int] = None):
"""
Parameters
----------
disk_dataset: DiskDataset
The original DiskDataset which you want to convert to PyTorch Dataset
epochs: int
the number of times to iterate over the Dataset
deterministic: bool
if True, the data is produced in order. If False, a different random
permutation of the data is used for each epoch.
batch_size: int
the number of samples to return in each batch. If None, each returned
value is a single sample.
"""
self.disk_dataset = disk_dataset
self.epochs = epochs
self.deterministic = deterministic
self.batch_size = batch_size
def __iter__(self):
worker_info = torch.utils.data.get_worker_info()
n_shards = self.disk_dataset.get_number_shards()
if worker_info is None:
first_shard = 0
last_shard = n_shards
else:
first_shard = worker_info.id * n_shards // worker_info.num_workers
last_shard = (worker_info.id +
1) * n_shards // worker_info.num_workers
if first_shard == last_shard:
return
shard_indices = list(range(first_shard, last_shard))
for X, y, w, ids in self.disk_dataset._iterbatches_from_shards(
shard_indices,
batch_size=self.batch_size,
epochs=self.epochs,
deterministic=self.deterministic):
if self.batch_size is None:
for i in range(X.shape[0]):
yield (X[i], y[i], w[i], ids[i])
else:
yield (X, y, w, ids)
class _TorchImageDataset(torch.utils.data.IterableDataset): # type: ignore
def __init__(self,
image_dataset: ImageDataset,
epochs: int,
deterministic: bool,
batch_size: Optional[int] = None):
"""
Parameters
----------
image_dataset: ImageDataset
The original ImageDataset which you want to convert to PyTorch Dataset
epochs: int
the number of times to iterate over the Dataset
deterministic: bool
if True, the data is produced in order. If False, a different random
permutation of the data is used for each epoch.
batch_size: int
the number of samples to return in each batch. If None, each returned
value is a single sample.
"""
self.image_dataset = image_dataset
self.epochs = epochs
self.deterministic = deterministic
self.batch_size = batch_size
def __iter__(self):
n_samples = self.image_dataset._X_shape[0]
worker_info = torch.utils.data.get_worker_info()
if worker_info is None:
first_sample = 0
last_sample = n_samples
else:
first_sample = worker_info.id * n_samples // worker_info.num_workers
last_sample = (worker_info.id +
1) * n_samples // worker_info.num_workers
for epoch in range(self.epochs):
if self.deterministic:
order = first_sample + np.arange(last_sample - first_sample)
else:
# Ensure that every worker will pick the same random order for each epoch.
random = np.random.RandomState(epoch)
order = random.permutation(n_samples)[first_sample:last_sample]
if self.batch_size is None:
for i in order:
yield (self.image_dataset._get_image(
self.image_dataset._X, i),
self.image_dataset._get_image(
self.image_dataset._y, i),
self.image_dataset._w[i], self.image_dataset._ids[i])
else:
for i in range(0, len(order), self.batch_size):
indices = order[i:i + self.batch_size]
yield (self.image_dataset._get_image(
self.image_dataset._X, indices),
self.image_dataset._get_image(
self.image_dataset._y,
indices), self.image_dataset._w[indices],
self.image_dataset._ids[indices])
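# A typical entry point to these wrapper classes is ``Dataset.make_pytorch_dataset`` on the
# corresponding DeepChem dataset. Minimal usage sketch (illustrative only; shapes are arbitrary):
#
#     import numpy as np
#     import torch
#     import deepchem as dc
#
#     ds = dc.data.NumpyDataset(X=np.random.rand(10, 3), y=np.random.rand(10, 1))
#     torch_ds = ds.make_pytorch_dataset(epochs=1, deterministic=True, batch_size=4)
#     loader = torch.utils.data.DataLoader(torch_ds, batch_size=None)
#     for X, y, w, ids in loader:
#         print(X.shape)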
<file_sep># flake8: noqa
from deepchem.hyper.base_classes import HyperparamOpt
from deepchem.hyper.grid_search import GridHyperparamOpt
from deepchem.hyper.gaussian_process import GaussianProcessHyperparamOpt
from deepchem.hyper.random_search import RandomHyperparamOpt
<file_sep>"""
freesolv dataset loader.
"""
import os
import deepchem as dc
from deepchem.molnet.load_function.molnet_loader import TransformerGenerator, _MolnetLoader
from deepchem.data import Dataset
from typing import List, Optional, Tuple, Union
FREESOLV_URL = 'https://deepchemdata.s3.us-west-1.amazonaws.com/datasets/freesolv.csv.gz'
FREESOLV_TASKS = ['y']
class _FreesolvLoader(_MolnetLoader):
def create_dataset(self) -> Dataset:
dataset_file = os.path.join(self.data_dir, 'freesolv.csv.gz')
if not os.path.exists(dataset_file):
dc.utils.data_utils.download_url(url=FREESOLV_URL,
dest_dir=self.data_dir)
loader = dc.data.CSVLoader(tasks=self.tasks,
feature_field='smiles',
featurizer=self.featurizer)
return loader.create_dataset(dataset_file)
def load_freesolv(
featurizer: Union[dc.feat.Featurizer, str] = dc.feat.MATFeaturizer(),
splitter: Union[dc.splits.Splitter, str, None] = 'random',
transformers: List[Union[TransformerGenerator, str]] = ['normalization'],
reload: bool = True,
data_dir: Optional[str] = None,
save_dir: Optional[str] = None,
**kwargs
) -> Tuple[List[str], Tuple[Dataset, ...], List[dc.trans.Transformer]]:
"""Load Freesolv dataset
    The FreeSolv dataset is a collection of experimental and calculated hydration
    free energies for small molecules in water. Here, we are using a modified version
    of the dataset with the molecule SMILES string and the corresponding experimental
    hydration free energy.
Random splitting is recommended for this dataset.
The raw data csv file contains columns below:
- "mol" - SMILES representation of the molecular structure
- "y" - Experimental hydration free energy
Parameters
----------
featurizer: Featurizer or str
the featurizer to use for processing the data. Alternatively you can pass
one of the names from dc.molnet.featurizers as a shortcut.
splitter: Splitter or str
the splitter to use for splitting the data into training, validation, and
test sets. Alternatively you can pass one of the names from
dc.molnet.splitters as a shortcut. If this is None, all the data
will be included in a single dataset.
transformers: list of TransformerGenerators or strings
the Transformers to apply to the data. Each one is specified by a
TransformerGenerator or, as a shortcut, one of the names from
dc.molnet.transformers.
reload: bool
if True, the first call for a particular featurizer and splitter will cache
the datasets to disk, and subsequent calls will reload the cached datasets.
data_dir: str
a directory to save the raw data in
save_dir: str
a directory to save the dataset in
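    Examples
    --------
    A minimal usage sketch (not run as a doctest, since it downloads the dataset)::
        import deepchem as dc
        tasks, datasets, transformers = dc.molnet.load_freesolv(splitter='random')
        train, valid, test = datasets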
References
----------
.. [1] <NAME>, et al. "Molecule Attention Transformer." NeurIPS 2019
arXiv:2002.08264v1 [cs.LG].
.. [2] <NAME>, <NAME>. FreeSolv:
a database of experimental and calculated hydration free energies, with input files.
J Comput Aided Mol Des. 2014;28(7):711-720. doi:10.1007/s10822-014-9747-x
"""
loader = _FreesolvLoader(featurizer, splitter, transformers, FREESOLV_TASKS,
data_dir, save_dir, **kwargs)
return loader.load_dataset('freesolv', reload)
<file_sep>"""
Test for `_MapperDMPNN` helper class for DMPNN model
"""
from deepchem.feat import DMPNNFeaturizer
import numpy as np
import pytest
try:
from deepchem.models.torch_models.dmpnn import _MapperDMPNN
has_torch = True
except:
has_torch = False
# Set up tests.
smiles_list = ["C", "CC", "CCC", "C1=CC=CC=C1", "[I-].[K+]"]
featurizer = DMPNNFeaturizer(use_original_atom_ranks=True,
features_generators=['morgan'])
graphs = featurizer.featurize(smiles_list)
benezene_atom_to_incoming_bonds: np.ndarray = np.asarray([[1, 10], [0,
3], [2, 5],
[4, 7], [6, 9],
[8, 11]])
benezene_mapping: np.ndarray = np.asarray([[-1, 10], [-1, 3], [0, -1], [-1, 5],
[2, -1], [-1, 7], [4, -1], [-1, 9],
[6, -1], [-1, 11], [8, -1], [1, -1],
[-1, -1]])
@pytest.mark.torch
def test_mapper_general_attributes():
"""
General tests for the mapper class
"""
for graph in graphs:
mapper = _MapperDMPNN(graph)
assert (mapper.atom_features == graph.node_features).all()
assert (mapper.bond_features == graph.edge_features).all()
assert (mapper.bond_index == graph.edge_index).all()
assert (mapper.global_features == graph.global_features).all()
concat_feature_dim = graph.num_node_features + graph.num_edge_features
assert mapper.f_ini_atoms_bonds.shape == (graph.num_edges + 1,
concat_feature_dim)
assert (
mapper.f_ini_atoms_bonds[-1] == np.zeros(concat_feature_dim)).all()
assert len(mapper.f_ini_atoms_bonds) == len(mapper.mapping)
assert (mapper.values[0] == mapper.atom_features).all()
assert (mapper.values[1] == mapper.f_ini_atoms_bonds).all()
assert (mapper.values[2] == mapper.atom_to_incoming_bonds).all()
assert (mapper.values[3] == mapper.mapping).all()
assert (mapper.values[4] == mapper.global_features).all()
@pytest.mark.torch
def test_mapper_no_bond():
"""
Test 'C' in _MapperDMPNN (no bond present)
"""
mapper = _MapperDMPNN(graphs[0])
assert (mapper.bond_to_ini_atom == np.empty(0)).all()
assert (mapper.atom_to_incoming_bonds == np.asarray([[-1]])).all()
assert (mapper.mapping == np.asarray([[-1]])).all()
@pytest.mark.torch
def test_mapper_two_directed_bonds_btw_two_atoms():
"""
Test 'CC' in _MapperDMPNN (1 bond present (2 directed))
"""
mapper = _MapperDMPNN(graphs[1])
assert (mapper.bond_to_ini_atom == np.asarray([0, 1])).all()
assert (mapper.atom_to_incoming_bonds == np.asarray([[1], [0]])).all()
assert (mapper.mapping == np.asarray([[-1], [-1], [-1]])).all()
@pytest.mark.torch
def test_mapper_two_adjacent_bonds():
"""
Test 'CCC' in _MapperDMPNN (2 adjacent bonds present (4 directed))
"""
mapper = _MapperDMPNN(graphs[2])
assert (mapper.bond_to_ini_atom == np.asarray([0, 1, 1, 2])).all()
print(mapper.atom_to_incoming_bonds)
assert (mapper.atom_to_incoming_bonds == np.asarray([[1, -1], [0, 3],
[2, -1]])).all()
assert (mapper.mapping == np.asarray([[-1, -1], [-1, 3], [0, -1], [-1, -1],
[-1, -1]])).all()
@pytest.mark.torch
def test_mapper_ring():
"""
    Test 'C1=CC=CC=C1' in _MapperDMPNN (benzene ring)
"""
mapper = _MapperDMPNN(graphs[3])
assert (mapper.bond_to_ini_atom == np.asarray(
[0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 0])).all()
assert (
mapper.atom_to_incoming_bonds == benezene_atom_to_incoming_bonds).all()
assert (mapper.mapping == benezene_mapping).all()
@pytest.mark.torch
def test_mapper_disconnected_compounds():
"""
Test '[I-].[K+]' in _MapperDMPNN (disconnected compounds)
"""
mapper = _MapperDMPNN(graphs[4])
assert (mapper.bond_to_ini_atom == np.empty(0)).all()
assert (mapper.atom_to_incoming_bonds == np.asarray([[-1], [-1]])).all()
assert (mapper.mapping == np.asarray([[-1]])).all()
<file_sep>pandas
scikit-learn
# Make sure that sphinx, sphinx_rtd_theme versions are pinned, otherwise
# readthedocs uses its default version of sphinx (1.8.5) which may lead to
# build fails.
sphinx>=4.0,<5.0
sphinx_rtd_theme>=1.0
tensorflow
transformers>=4.6
torch==1.13.1 --extra-index-url https://download.pytorch.org/whl/cpu
pytorch-lightning==1.6.5
jax
dm-haiku
optax
rdkit
torch_geometric
git+https://github.com/diffqc/dqc.git
PyYAML
yamlloader
dgl
sphinx-copybutton
<file_sep>import pytest
import tempfile
from os import path
import numpy as np
from deepchem.utils import load_dataset_from_disk, download_url, untargz_file
from deepchem.metrics import Metric, mae_score
try:
import dgl # noqa: F401
import torch # noqa: F401
from deepchem.models import LCNNModel
has_pytorch_and_dgl = True
except:
has_pytorch_and_dgl = False
URL = "https://deepchemdata.s3-us-west-1.amazonaws.com/featurized_datasets/lcnn_data_feature.tar.gz"
@pytest.mark.torch
def test_lcnn_regression():
current_dir = tempfile.mkdtemp()
download_url(url=URL, dest_dir=current_dir)
untargz_file(path.join(current_dir, 'lcnn_data_feature.tar.gz'),
current_dir)
_, datasets, transformers = load_dataset_from_disk(
path.join(current_dir, 'lcnn_data'))
train, valid, test = datasets
model = LCNNModel(mode='regression', batch_size=8, learning_rate=0.001)
model.fit(train, nb_epoch=10)
# check predict shape
valid_preds = model.predict_on_batch(valid.X)
assert valid_preds.shape == (65, 1)
test_preds = model.predict(test)
assert test_preds.shape == (65, 1)
# check overfit
regression_metric = Metric(mae_score)
scores = model.evaluate(test, [regression_metric], transformers)
assert scores[regression_metric.name] < 0.6
@pytest.mark.torch
def test_lcnn_reload():
# needs change
current_dir = tempfile.mkdtemp()
download_url(url=URL, dest_dir=current_dir)
untargz_file(path.join(current_dir, 'lcnn_data_feature.tar.gz'),
current_dir)
tasks, datasets, transformers = load_dataset_from_disk(
path.join(current_dir, 'lcnn_data'))
train, valid, test = datasets
model_dir = tempfile.mkdtemp()
model = LCNNModel(mode='regression',
batch_size=8,
learning_rate=0.001,
model_dir=model_dir)
model.fit(train, nb_epoch=10)
# check predict shape
valid_preds = model.predict_on_batch(valid.X)
assert valid_preds.shape == (65, 1)
test_preds = model.predict(test)
assert test_preds.shape == (65, 1)
# check overfit
regression_metric = Metric(mae_score)
scores = model.evaluate(test, [regression_metric], transformers)
assert scores[regression_metric.name] < 0.6
# reload
reloaded_model = LCNNModel(mode='regression',
batch_size=8,
learning_rate=0.001,
model_dir=model_dir)
reloaded_model.restore()
original_pred = model.predict(test)
reload_pred = reloaded_model.predict(test)
assert np.all(np.abs(original_pred - reload_pred) < 0.0000001)
<file_sep>Known Issues & Limitations
--------------------------
Broken features
^^^^^^^^^^^^^^^
A small number of Deepchem features are known to be broken. The Deepchem team
will either fix or deprecate these broken features. It is impossible to
know of every possible bug in a large project like Deepchem, but we hope to
save you some headache by listing features that we know are partially or completely
broken.
*Note: This list is likely to be non-exhaustive. If we missed something,
please let us know at https://github.com/deepchem/deepchem/issues/2376.*
+--------------------------------+-------------------+---------------------------------------------------+
| Feature | Deepchem response | Tracker and notes |
| | | |
+================================+===================+===================================================+
| ANIFeaturizer/ANIModel | Low Priority | The Deepchem team recommends using TorchANI |
| | Likely deprecate | instead. |
| | | |
+--------------------------------+-------------------+---------------------------------------------------+
Experimental features
^^^^^^^^^^^^^^^^^^^^^
Deepchem features usually undergo rigorous code review and testing to ensure that
they are ready for production environments. The following Deepchem features have not
been thoroughly tested to the level of other Deepchem modules, and could be
potentially problematic in production environments.
*Note: This list is likely to be non-exhaustive. If we missed something,
please let us know at https://github.com/deepchem/deepchem/issues/2376.*
+--------------------------------+---------------------------------------------------+
| Feature | Tracker and notes |
| | |
+================================+===================================================+
| Mol2 Loading | Needs more testing. |
| | |
| | |
+--------------------------------+---------------------------------------------------+
| Interaction Fingerprints | Needs more testing. |
| | |
| | |
+--------------------------------+---------------------------------------------------+
If you would like to help us address these known issues, please consider contributing to Deepchem!
<file_sep>"""
Script that trains Sklearn singletask models on GDB7 dataset.
"""
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
import numpy as np
np.random.seed(123)
import tensorflow as tf
tf.random.set_seed(123)
import deepchem as dc
from sklearn.kernel_ridge import KernelRidge
tasks, datasets, transformers = dc.molnet.load_qm7(
featurizer='CoulombMatrix', split='stratified', move_mean=False)
train, valid, test = datasets
regression_metric = dc.metrics.Metric(
dc.metrics.mean_absolute_error, mode="regression")
def model_builder(model_dir):
sklearn_model = KernelRidge(kernel="rbf", alpha=5e-4, gamma=0.008)
return dc.models.SklearnModel(sklearn_model, model_dir)
model = dc.models.SingletaskToMultitask(tasks, model_builder)
# Fit trained model
model.fit(train)
model.save()
train_evaluator = dc.utils.evaluate.Evaluator(model, train, transformers)
train_scores = train_evaluator.compute_model_performance([regression_metric])
print("Train scores [kcal/mol]")
print(train_scores)
test_evaluator = dc.utils.evaluate.Evaluator(model, test, transformers)
test_scores = test_evaluator.compute_model_performance([regression_metric])
print("Validation scores [kcal/mol]")
print(test_scores)
<file_sep>"""
DGL-based MPNN for graph property prediction.
"""
import torch.nn as nn
import torch.nn.functional as F
from deepchem.models.losses import Loss, L2Loss, SparseSoftmaxCrossEntropy
from deepchem.models.torch_models.torch_model import TorchModel
class MPNN(nn.Module):
"""Model for Graph Property Prediction.
This model proceeds as follows:
* Combine latest node representations and edge features in updating node representations,
which involves multiple rounds of message passing
* For each graph, compute its representation by combining the representations
of all nodes in it, which involves a Set2Set layer.
* Perform the final prediction using an MLP
Examples
--------
>>> import deepchem as dc
>>> import dgl
>>> from deepchem.models.torch_models import MPNN
>>> smiles = ["C1CCC1", "C1=CC=CN=C1"]
>>> featurizer = dc.feat.MolGraphConvFeaturizer(use_edges=True)
>>> graphs = featurizer.featurize(smiles)
>>> print(type(graphs[0]))
<class 'deepchem.feat.graph_data.GraphData'>
>>> dgl_graphs = [graphs[i].to_dgl_graph(self_loop=True) for i in range(len(graphs))]
>>> # Batch two graphs into a graph of two connected components
>>> batch_dgl_graph = dgl.batch(dgl_graphs)
>>> model = MPNN(n_tasks=1, mode='regression')
>>> preds = model(batch_dgl_graph)
>>> print(type(preds))
<class 'torch.Tensor'>
>>> preds.shape == (2, 1)
True
References
----------
.. [1] <NAME>, <NAME>, <NAME>, <NAME>, <NAME>.
"Neural Message Passing for Quantum Chemistry." ICML 2017.
Notes
-----
This class requires DGL (https://github.com/dmlc/dgl) and DGL-LifeSci
(https://github.com/awslabs/dgl-lifesci) to be installed.
"""
def __init__(self,
n_tasks: int,
node_out_feats: int = 64,
edge_hidden_feats: int = 128,
num_step_message_passing: int = 3,
num_step_set2set: int = 6,
num_layer_set2set: int = 3,
mode: str = 'regression',
number_atom_features: int = 30,
number_bond_features: int = 11,
n_classes: int = 2,
nfeat_name: str = 'x',
efeat_name: str = 'edge_attr'):
"""
Parameters
----------
n_tasks: int
Number of tasks.
node_out_feats: int
The length of the final node representation vectors. Default to 64.
edge_hidden_feats: int
The length of the hidden edge representation vectors. Default to 128.
num_step_message_passing: int
The number of rounds of message passing. Default to 3.
num_step_set2set: int
The number of set2set steps. Default to 6.
num_layer_set2set: int
The number of set2set layers. Default to 3.
mode: str
The model type, 'classification' or 'regression'. Default to 'regression'.
number_atom_features: int
The length of the initial atom feature vectors. Default to 30.
number_bond_features: int
The length of the initial bond feature vectors. Default to 11.
n_classes: int
The number of classes to predict per task
(only used when ``mode`` is 'classification'). Default to 2.
nfeat_name: str
For an input graph ``g``, the model assumes that it stores node features in
``g.ndata[nfeat_name]`` and will retrieve input node features from that.
Default to 'x'.
efeat_name: str
For an input graph ``g``, the model assumes that it stores edge features in
``g.edata[efeat_name]`` and will retrieve input edge features from that.
Default to 'edge_attr'.
"""
try:
import dgl # noqa: F401
except:
raise ImportError('This class requires dgl.')
try:
import dgllife # noqa: F401
except:
raise ImportError('This class requires dgllife.')
if mode not in ['classification', 'regression']:
raise ValueError(
"mode must be either 'classification' or 'regression'")
super(MPNN, self).__init__()
self.n_tasks = n_tasks
self.mode = mode
self.n_classes = n_classes
self.nfeat_name = nfeat_name
self.efeat_name = efeat_name
if mode == 'classification':
out_size = n_tasks * n_classes
else:
out_size = n_tasks
from dgllife.model import MPNNPredictor as DGLMPNNPredictor
self.model = DGLMPNNPredictor(
node_in_feats=number_atom_features,
edge_in_feats=number_bond_features,
node_out_feats=node_out_feats,
edge_hidden_feats=edge_hidden_feats,
n_tasks=out_size,
num_step_message_passing=num_step_message_passing,
num_step_set2set=num_step_set2set,
num_layer_set2set=num_layer_set2set)
def forward(self, g):
"""Predict graph labels
Parameters
----------
g: DGLGraph
A DGLGraph for a batch of graphs. It stores the node features in
``dgl_graph.ndata[self.nfeat_name]`` and edge features in
``dgl_graph.edata[self.efeat_name]``.
Returns
-------
torch.Tensor
The model output.
* When self.mode = 'regression',
its shape will be ``(dgl_graph.batch_size, self.n_tasks)``.
* When self.mode = 'classification', the output consists of probabilities
for classes. Its shape will be
``(dgl_graph.batch_size, self.n_tasks, self.n_classes)`` if self.n_tasks > 1;
its shape will be ``(dgl_graph.batch_size, self.n_classes)`` if self.n_tasks is 1.
torch.Tensor, optional
This is only returned when self.mode = 'classification', the output consists of the
logits for classes before softmax.
"""
node_feats = g.ndata[self.nfeat_name]
edge_feats = g.edata[self.efeat_name]
out = self.model(g, node_feats, edge_feats)
if self.mode == 'classification':
if self.n_tasks == 1:
logits = out.view(-1, self.n_classes)
softmax_dim = 1
else:
logits = out.view(-1, self.n_tasks, self.n_classes)
softmax_dim = 2
proba = F.softmax(logits, dim=softmax_dim)
return proba, logits
else:
return out
class MPNNModel(TorchModel):
"""Model for graph property prediction
This model proceeds as follows:
* Combine latest node representations and edge features in updating node representations,
which involves multiple rounds of message passing
* For each graph, compute its representation by combining the representations
of all nodes in it, which involves a Set2Set layer.
* Perform the final prediction using an MLP
Examples
--------
>>> import deepchem as dc
>>> from deepchem.models.torch_models import MPNNModel
>>> # preparing dataset
>>> smiles = ["C1CCC1", "CCC"]
>>> labels = [0., 1.]
>>> featurizer = dc.feat.MolGraphConvFeaturizer(use_edges=True)
>>> X = featurizer.featurize(smiles)
>>> dataset = dc.data.NumpyDataset(X=X, y=labels)
>>> # training model
>>> model = MPNNModel(mode='classification', n_tasks=1,
... batch_size=16, learning_rate=0.001)
>>> loss = model.fit(dataset, nb_epoch=5)
References
----------
.. [1] <NAME>, <NAME>, <NAME>, <NAME>, <NAME>.
"Neural Message Passing for Quantum Chemistry." ICML 2017.
Notes
-----
This class requires DGL (https://github.com/dmlc/dgl) and DGL-LifeSci
(https://github.com/awslabs/dgl-lifesci) to be installed.
The featurizer used with MPNNModel must produce a GraphData object which should have both 'edge' and 'node' features.
"""
def __init__(self,
n_tasks: int,
node_out_feats: int = 64,
edge_hidden_feats: int = 128,
num_step_message_passing: int = 3,
num_step_set2set: int = 6,
num_layer_set2set: int = 3,
mode: str = 'regression',
number_atom_features: int = 30,
number_bond_features: int = 11,
n_classes: int = 2,
self_loop: bool = False,
**kwargs):
"""
Parameters
----------
n_tasks: int
Number of tasks.
node_out_feats: int
The length of the final node representation vectors. Default to 64.
edge_hidden_feats: int
The length of the hidden edge representation vectors. Default to 128.
num_step_message_passing: int
The number of rounds of message passing. Default to 3.
num_step_set2set: int
The number of set2set steps. Default to 6.
num_layer_set2set: int
The number of set2set layers. Default to 3.
mode: str
The model type, 'classification' or 'regression'. Default to 'regression'.
number_atom_features: int
The length of the initial atom feature vectors. Default to 30.
number_bond_features: int
The length of the initial bond feature vectors. Default to 11.
n_classes: int
The number of classes to predict per task
(only used when ``mode`` is 'classification'). Default to 2.
self_loop: bool
Whether to add self loops for the nodes, i.e. edges from nodes to themselves.
Generally, an MPNNModel does not require self loops. Default to False.
kwargs
This can include any keyword argument of TorchModel.
"""
model = MPNN(n_tasks=n_tasks,
node_out_feats=node_out_feats,
edge_hidden_feats=edge_hidden_feats,
num_step_message_passing=num_step_message_passing,
num_step_set2set=num_step_set2set,
num_layer_set2set=num_layer_set2set,
mode=mode,
number_atom_features=number_atom_features,
number_bond_features=number_bond_features,
n_classes=n_classes)
if mode == 'regression':
loss: Loss = L2Loss()
output_types = ['prediction']
else:
loss = SparseSoftmaxCrossEntropy()
output_types = ['prediction', 'loss']
super(MPNNModel, self).__init__(model,
loss=loss,
output_types=output_types,
**kwargs)
self._self_loop = self_loop
def _prepare_batch(self, batch):
"""Create batch data for MPNN.
Parameters
----------
batch: tuple
The tuple is ``(inputs, labels, weights)``.
Returns
-------
inputs: DGLGraph
DGLGraph for a batch of graphs.
labels: list of torch.Tensor or None
The graph labels.
weights: list of torch.Tensor or None
The weights for each sample or sample/task pair converted to torch.Tensor.
"""
try:
import dgl
except:
raise ImportError('This class requires dgl.')
inputs, labels, weights = batch
dgl_graphs = [
graph.to_dgl_graph(self_loop=self._self_loop) for graph in inputs[0]
]
inputs = dgl.batch(dgl_graphs).to(self.device)
_, labels, weights = super(MPNNModel, self)._prepare_batch(
([], labels, weights))
return inputs, labels, weights
<file_sep>import os
import numpy as np
import deepchem as dc
import pytest
@pytest.mark.pytorch
def test_AtomEncoder():
import torch
from deepchem.feat.molecule_featurizers.conformer_featurizer import (
full_atom_feature_dims,)
from deepchem.models.torch_models.pna_gnn import AtomEncoder
atom_encoder = AtomEncoder(emb_dim=32)
num_samples = 10
# Create input tensor with values within full_atom_feature_dims
graph_features = torch.stack([
torch.randint(low=0, high=dim, size=(num_samples,))
for dim in full_atom_feature_dims
],
dim=1)
atom_embeddings = atom_encoder(graph_features)
assert atom_embeddings.shape == (num_samples, 32)
@pytest.mark.pytorch
def test_BondEncoder():
import torch
from deepchem.feat.molecule_featurizers.conformer_featurizer import (
full_bond_feature_dims,)
from deepchem.models.torch_models.pna_gnn import BondEncoder
bond_encoder = BondEncoder(emb_dim=32)
num_samples = 10
# Create input tensor with values within full_bond_feature_dims
graph_features = torch.stack([
torch.randint(low=0, high=dim, size=(num_samples,))
for dim in full_bond_feature_dims
],
dim=1)
bond_embeddings = bond_encoder(graph_features)
assert bond_embeddings.shape == (num_samples, 32)
@pytest.mark.pytorch
def test_pnalayer():
import dgl
import numpy as np
import torch
from deepchem.models.torch_models.pna_gnn import PNALayer
in_dim = 32
out_dim = 64
in_dim_edges = 16
aggregators = ["mean", "max"]
scalers = ["identity", "amplification", "attenuation"]
pna_layer = PNALayer(in_dim=in_dim,
out_dim=out_dim,
in_dim_edges=in_dim_edges,
aggregators=aggregators,
scalers=scalers)
num_nodes = 10
num_edges = 20
node_features = torch.randn(num_nodes, in_dim)
edge_features = torch.randn(num_edges, in_dim_edges)
g = dgl.graph((np.random.randint(0, num_nodes, num_edges),
np.random.randint(0, num_nodes, num_edges)))
g.ndata['feat'] = node_features
g.edata['feat'] = edge_features
g.ndata['feat'] = pna_layer(g)
assert g.ndata['feat'].shape == (num_nodes, out_dim)
def get_regression_dataset():
from deepchem.feat.molecule_featurizers.conformer_featurizer import (
RDKitConformerFeaturizer,)
np.random.seed(123)
featurizer = RDKitConformerFeaturizer(num_conformers=2)
dir = os.path.dirname(os.path.abspath(__file__))
input_file = os.path.join(dir, 'assets/example_regression.csv')
loader = dc.data.CSVLoader(tasks=["outcome"],
feature_field="smiles",
featurizer=featurizer)
dataset = loader.create_dataset(input_file)
metric = dc.metrics.Metric(dc.metrics.mean_absolute_error,
mode="regression")
return dataset, metric
@pytest.mark.pytorch
def test_PNAGNN():
import numpy as np
from deepchem.feat.graph_data import BatchGraphData
from deepchem.models.torch_models.pna_gnn import PNAGNN
data, _ = get_regression_dataset()
features = BatchGraphData(np.concatenate(data.X).ravel())
features = features.to_dgl_graph()
model = PNAGNN(hidden_dim=16,
aggregators=['mean', 'sum'],
scalers=['identity'])
output = model(features)
assert output.ndata['feat'].shape == (features.ndata['x'].shape[0], 16)
@pytest.mark.pytorch
def test_PNA():
from deepchem.feat.graph_data import BatchGraphData
from deepchem.models.torch_models.pna_gnn import PNA
data, _ = get_regression_dataset()
features = BatchGraphData(np.concatenate(data.X).ravel())
features = features.to_dgl_graph()
target_dim = 1
model = PNA(hidden_dim=16, target_dim=target_dim)
output = model(features)
assert output.shape[1] == target_dim
<file_sep># Pretraining Example
In this example we will walk you through the use of pretraining
to transfer learned weights from a trained model to a new model.
The code for transferring pretrained weights for a
fully-connected network is in `fcnet_pretraining.py`. To run this
example, execute the following command in your shell
```
python fcnet_pretraining.py
```
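Below is a minimal sketch of the idea behind the example (toy data and hypothetical
layer sizes; the actual script may differ), using DeepChem's `load_from_pretrained`
to copy weights from a trained source model into a new model while dropping the
task-specific top layer:
```
import numpy as np
import deepchem as dc
# Toy datasets standing in for the source (pretraining) and target (fine-tuning) tasks.
X = np.random.rand(100, 50)
y = np.random.rand(100, 1)
source_dataset = dc.data.NumpyDataset(X, y)
target_dataset = dc.data.NumpyDataset(X, y)
# Pretrain the source model.
source_model = dc.models.MultitaskRegressor(n_tasks=1, n_features=50, layer_sizes=[128, 64])
source_model.fit(source_dataset, nb_epoch=10)
# Build a target model with the same architecture, copy all but the final layer's
# weights from the source model, then fine-tune on the target task.
target_model = dc.models.MultitaskRegressor(n_tasks=1, n_features=50, layer_sizes=[128, 64])
target_model.load_from_pretrained(source_model, include_top=False)
target_model.fit(target_dataset, nb_epoch=10)
```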
<file_sep>#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Oct 18 15:53:27 2016
@author: <NAME>
Benchmark test:
Giving classification performances of:
Random forest(rf), MultitaskDNN(tf),
RobustMultitaskDNN(tf_robust),
Logistic regression(logreg), IRV(irv)
Graph convolution(graphconv), xgboost(xgb),
Directed acyclic graph(dag), Weave(weave)
on datasets: bace_c, bbbp, clintox, hiv, muv, pcba, sider, tox21, toxcast
Giving regression performances of:
MultitaskDNN(tf_regression),
Fit Transformer MultitaskDNN(tf_regression_ft),
Random forest(rf_regression),
Graph convolution regression(graphconvreg),
xgboost(xgb_regression), Deep tensor neural net(dtnn),
Directed acyclic graph(dag_regression),
Weave(weave_regression)
on datasets: bace_r, chembl, clearance, delaney(ESOL), hopv, kaggle, lipo,
nci, pdbbind, ppb, qm7, qm7b, qm8, qm9, sampl(FreeSolv)
Hyperparameters and all benchmark scripts for MoleculeNet are available at:
http://deepchem.io.s3-website-us-west-1.amazonaws.com/trained_models/Hyperparameter_MoleculeNetv3.tar.gz
"""
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
import os
import numpy as np
import deepchem as dc
import argparse
parser = argparse.ArgumentParser(
description='Deepchem benchmark: ' +
'giving performances of different learning models on datasets')
parser.add_argument(
'-s',
action='append',
dest='splitter_args',
default=[],
help='Choice of splitting function: index, random, scaffold, stratified')
parser.add_argument(
'-m',
action='append',
dest='model_args',
default=[],
help='Choice of model: tf, tf_robust, logreg, rf, irv, graphconv, xgb,' + \
' dag, weave, tf_regression, tf_regression_ft, rf_regression, ' + \
'graphconvreg, xgb_regression, dtnn, dag_regression, weave_regression')
parser.add_argument(
'-d',
action='append',
dest='dataset_args',
default=[],
help='Choice of dataset: bace_c, bace_r, bbbp, chembl, clearance, ' +
'clintox, delaney, hiv, hopv, kaggle, lipo, muv, nci, pcba, pcba_146, pcba_2475 '
+ 'pdbbind, ppb, qm7, qm7b, qm8, qm9, sampl, sider, tox21, toxcast')
parser.add_argument(
'-t',
action='store_true',
dest='test',
default=False,
    help='Evaluate performance on test set')
parser.add_argument(
'--seed',
action='append',
dest='seed_args',
default=[],
help='Choice of random seed')
args = parser.parse_args()
#Datasets and models used in the benchmark test
splitters = args.splitter_args
models = args.model_args
datasets = args.dataset_args
test = args.test
if len(args.seed_args) > 0:
seed = int(args.seed_args[0])
else:
seed = 123
if len(splitters) == 0:
splitters = ['index', 'random', 'scaffold']
if len(models) == 0:
models = [
'tf', 'tf_robust', 'logreg', 'graphconv', 'irv', 'tf_regression',
'tf_regression_ft', 'graphconvreg', 'weave', 'weave_regression', 'dtnn'
]
#irv, rf, rf_regression should be assigned manually
if len(datasets) == 0:
datasets = [
'bace_c', 'bace_r', 'bbbp', 'clearance', 'clintox', 'delaney', 'hiv',
'hopv', 'lipo', 'muv', 'pdbbind', 'ppb', 'qm7b', 'qm8', 'qm9', 'sampl',
'sider', 'tox21', 'toxcast'
]
for dataset in datasets:
for split in splitters:
for model in models:
np.random.seed(seed)
dc.molnet.run_benchmark(
[dataset], str(model), split=split, test=test, seed=seed)
<file_sep>"""
Implementation of KFAC, a second order optimizer, in PyTorch.
"""
import math
from typing import Optional, Callable, Dict, Tuple, List, Union
try:
import torch
import torch.optim as optim
has_pytorch = True
except ModuleNotFoundError:
has_pytorch = False
class KFACOptimizer(optim.Optimizer):
""""
This class implement the second order optimizer - KFAC, which uses Kronecker factor products of inputs and the gradients to
get the approximate inverse fisher matrix, which is used to update the model parameters. Presently this optimizer works only
on liner and 2D convolution layers. If you want to know more details about KFAC, please check the paper [1]_ and [2]_.
References:
-----------
[1] Martens, James, and <NAME>. Optimizing Neural Networks with Kronecker-Factored Approximate Curvature.
arXiv:1503.05671, arXiv, 7 June 2020. arXiv.org, http://arxiv.org/abs/1503.05671.
[2] Grosse, Roger, and <NAME>. A Kronecker-Factored Approximate Fisher Matrix for Convolution Layers.
arXiv:1602.01407, arXiv, 23 May 2016. arXiv.org, http://arxiv.org/abs/1602.01407.
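    Example:
    --------
    A minimal construction sketch (illustrative only; the optimizer is then driven by the
    usual ``zero_grad()`` / ``backward()`` / ``step()`` cycle of ``torch.optim`` optimizers)::
        model = torch.nn.Sequential(torch.nn.Linear(4, 8), torch.nn.ReLU(),
                                    torch.nn.Linear(8, 1))
        optimizer = KFACOptimizer(model, lr=0.001)
        for x, y in batches:  # ``batches`` is a hypothetical iterable of (input, target) pairs
            optimizer.zero_grad()
            loss = torch.nn.functional.mse_loss(model(x), y)
            loss.backward()
            optimizer.step()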
"""
def __init__(self,
model: torch.nn.Module,
lr: float = 0.001,
momentum: float = 0.9,
stat_decay: float = 0.95,
damping: float = 0.001,
kl_clip: float = 0.001,
weight_decay: float = 0,
TCov: int = 10,
TInv: int = 100,
batch_averaged: bool = True,
mean: bool = False):
"""
Parameters:
-----------
model: torch.nn.Module
The model to be optimized.
lr: float (default: 0.001)
Learning rate for the optimizer.
momentum: float (default: 0.9)
Momentum for the optimizer.
stat_decay: float (default: 0.95)
Decay rate for the update of covariance matrix with mean.
damping: float (default: 0.001)
damping factor for the update of covariance matrix.
kl_clip: float (default: 0.001)
Clipping value for the update of covariance matrix.
weight_decay: float (default: 0)
weight decay for the optimizer.
        TCov: int (default: 10)
            The number of steps after which the covariance matrices are updated.
        TInv: int (default: 100)
            The number of steps after which the inverse of the covariance matrices is recalculated.
batch_averaged: bool (default: True)
States whether to use batch averaged covariance matrix.
mean: bool (default: False)
States whether to use mean centered covariance matrix.
"""
if lr < 0.0:
raise ValueError("Invalid learning rate: {}".format(lr))
if momentum < 0.0:
raise ValueError("Invalid momentum value: {}".format(momentum))
if weight_decay < 0.0:
raise ValueError(
"Invalid weight_decay value: {}".format(weight_decay))
defaults = dict(lr=lr,
momentum=momentum,
damping=damping,
weight_decay=weight_decay)
super(KFACOptimizer, self).__init__(model.parameters(), defaults)
self.batch_averaged = batch_averaged
self.known_modules = {'Linear', 'Conv2d'}
self.modules: List[torch.nn.Module] = []
self.model = model
self._prepare_model()
self.steps = 0
self.m_aa: Dict[torch.nn.Module, torch.Tensor] = {}
self.m_gg: Dict[torch.nn.Module, torch.Tensor] = {}
self.Q_a: Dict[torch.nn.Module, torch.Tensor] = {}
self.Q_g: Dict[torch.nn.Module, torch.Tensor] = {}
self.d_a: Dict[torch.nn.Module, torch.Tensor] = {}
self.d_g: Dict[torch.nn.Module, torch.Tensor] = {}
self.stat_decay = stat_decay
self.kl_clip = kl_clip
self.TCov = TCov
self.TInv = TInv
self.mean = mean
def try_contiguous(self, x: torch.Tensor) -> torch.Tensor:
"""
Checks the memory layout of the input tensor and changes it to contiguous type.
Parameters:
-----------
x: torch.Tensor
The input tensor to be made contiguous in memory, if it is not so.
Return:
-------
torch.Tensor
Tensor with contiguous memory
"""
if not x.is_contiguous():
x = x.contiguous()
return x
def _extract_patches(
self, x: torch.Tensor, kernel_size: Tuple[int,
...], stride: Tuple[int,
...],
padding: Union[int, str, Tuple[int, ...]]) -> torch.Tensor:
"""
Extract patches of a given size from the input tensor given. Used in calculating
the matrices for the kronecker product in the case of 2d Convolutions.
Parameters:
-----------
x: torch.Tensor
The input feature maps. with the size of (batch_size, in_c, h, w)
kernel_size: Tuple[int, ...]
the kernel size of the conv filter.
stride: Tuple[int, ...]
the stride of conv operation.
padding: Union[int, str, Tuple[int, ...]]
number of paddings. be a tuple of two elements
Return:
-------
torch.Tensor:
Extracted patches with shape (batch_size, out_h, out_w, in_c*kh*kw)
"""
if isinstance(padding, tuple):
if padding[0] + padding[1] > 0:
x = torch.nn.functional.pad(
x, (padding[1], padding[1], padding[0],
padding[0])).data # Actually check dims
elif isinstance(padding, int):
if padding > 0:
x = torch.nn.functional.pad(
x, (padding, padding, padding, padding)).data
elif isinstance(padding, str):
if padding == 'VALID':
pad = int((kernel_size[0] - 1) / 2)
x = torch.nn.functional.pad(x, (pad, pad, pad, pad)).data
x = x.unfold(2, kernel_size[0], stride[0])
x = x.unfold(3, kernel_size[1], stride[1])
x = x.transpose_(1, 2).transpose_(2, 3).contiguous()
x = x.view(x.size(0), x.size(1), x.size(2),
x.size(3) * x.size(4) * x.size(5))
return x
def compute_cov_a(self, a: torch.Tensor,
layer: torch.nn.Module) -> torch.Tensor:
"""
Compute the covariance matrix of the A matrix (the output of each layer).
Parameters:
-----------
a: torch.Tensor
It is the output of the layer for which the covariance matrix should be calculated.
layer: torch.nn.Module
It specifies the type of layer from which the output of the layer is taken.
Returns:
--------
torch.Tensor
The covariance matrix of the A matrix.
"""
if isinstance(layer, torch.nn.Linear):
batch_size = a.size(0)
if layer.bias is not None:
a = torch.cat((a, a.new(a.size(0), 1).fill_(1)), 1)
elif isinstance(layer, torch.nn.Conv2d):
batch_size = a.size(0)
a = self._extract_patches(a, layer.kernel_size, layer.stride,
layer.padding)
spatial_size = a.size(1) * a.size(2)
a = a.view(-1, a.size(-1))
if layer.bias is not None:
a = torch.cat((a, a.new(a.size(0), 1).fill_(1)), 1)
a = a / spatial_size
return a.t() @ (a / batch_size)
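    # Illustrative sketch (editor addition, not part of the original class): for a
    # Linear layer with a bias and an activation batch of shape (batch, in_features),
    # the returned covariance is square with side in_features + 1, the extra row and
    # column accounting for the bias term. Assuming an ``optimizer`` instance:
    #
    #     >>> layer = torch.nn.Linear(4, 2)
    #     >>> optimizer.compute_cov_a(torch.randn(8, 4), layer).shape
    #     torch.Size([5, 5])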
def compute_cov_g(self, g: torch.Tensor,
layer: torch.nn.Module) -> torch.Tensor:
"""
Compute the covariance matrix of the G matrix (the gradient of the layer).
Parameters:
-----------
g: torch.Tensor
It is the gradient of the layer for which the covariance matrix should be calculated.
layer: torch.nn.Module
It specifies the type of layer from which the output of the layer is taken.
Returns:
--------
torch.Tensor
The covariance matrix of the G matrix.
"""
if isinstance(layer, torch.nn.Linear):
batch_size = g.size(0)
if self.batch_averaged:
cov_g = g.t() @ (g * batch_size)
else:
cov_g = g.t() @ (g / batch_size)
elif isinstance(layer, torch.nn.Conv2d):
spatial_size = g.size(2) * g.size(3)
batch_size = g.shape[0]
g = g.transpose(1, 2).transpose(2, 3)
g = self.try_contiguous(g)
g = g.view(-1, g.size(-1))
if self.batch_averaged:
g = g * batch_size
g = g * spatial_size
cov_g = g.t() @ (g / g.size(0))
return cov_g
def _save_input(self, module: torch.nn.Module, input: torch.Tensor):
"""
        Updates the running covariance of the layer input using an exponentially weighted average.
Parameters:
-----------
module: torch.nn.Module
specifies the layer for which the input should be taken
input: torch.Tensor
the input matrix which should get updated
"""
if self.steps % self.TCov == 0:
aa = self.compute_cov_a(input[0].data, module)
# Initialize buffers
if self.steps == 0:
self.m_aa[module] = torch.diag(aa.new(aa.size(0)).fill_(1))
            self.m_aa[module] = self.stat_decay * self.m_aa[module] + aa * (
                1 - self.stat_decay)
def _save_grad_output(self, module: torch.nn.Module,
grad_input: torch.Tensor, grad_output: torch.Tensor):
"""
        Updates the running covariance of the layer's output gradient using an exponentially weighted average.
Parameters:
-----------
module: torch.nn.Module
specifies the layer for which the gradient should be taken
        grad_input: torch.Tensor
            the gradient of the loss with respect to the layer input
        grad_output: torch.Tensor
            the gradient of the loss with respect to the layer output, used to update the running covariance
"""
# Accumulate statistics for Fisher matrices
if self.steps % self.TCov == 0:
gg = self.compute_cov_g(grad_output[0].data, module)
# Initialize buffers
if self.steps == 0:
self.m_gg[module] = torch.diag(gg.new(gg.size(0)).fill_(1))
            self.m_gg[module] = self.stat_decay * self.m_gg[module] + gg * (
                1 - self.stat_decay)
    def _prepare_model(self):
        """
        Attaches forward and backward hooks (``_save_input`` and ``_save_grad_output``) to the
        supported modules of the model so that the statistics needed by KFAC are collected at every step.
"""
count = 0
for module in self.model.modules():
classname = module.__class__.__name__
if classname in self.known_modules:
self.modules.append(module)
module.register_forward_pre_hook(self._save_input)
module.register_backward_hook(self._save_grad_output)
count += 1
def _update_inv(self, m: torch.nn.Module):
"""
        Does an eigen decomposition of the input (A) and gradient (G) covariance matrices for computing the inverse of the approximate Fisher matrix.
Parameter:
----------
m: torch.nn.Module
            The layer on which the eigen decomposition should be performed.
"""
eps = 1e-10 # for numerical stability
if self.mean:
self.d_a[m], self.Q_a[m] = torch.symeig(self.m_aa[m] -
torch.mean(self.m_aa[m]),
eigenvectors=True)
self.d_g[m], self.Q_g[m] = torch.symeig(self.m_gg[m] -
torch.mean(self.m_gg[m]),
eigenvectors=True)
else:
self.d_a[m], self.Q_a[m] = torch.symeig(self.m_aa[m],
eigenvectors=True)
self.d_g[m], self.Q_g[m] = torch.symeig(self.m_gg[m],
eigenvectors=True)
self.d_a[m].mul_((self.d_a[m] > eps).float())
self.d_g[m].mul_((self.d_g[m] > eps).float())
@staticmethod
def _get_matrix_form_grad(m: torch.nn.Module):
"""
Returns the gradient of the layer in a matrix form
Parameter:
----------
m: torch.nn.Module
the layer for which the gradient must be calculated
Return:
-------
torch.tensor
a matrix form of the gradient. it should be a [output_dim, input_dim] matrix.
"""
if isinstance(m, torch.nn.Conv2d):
p_grad_mat = m.weight.grad.data.view(
m.weight.grad.data.size(0), -1) # n_filters * (in_c * kw * kh)
elif isinstance(m, torch.nn.Linear):
p_grad_mat = m.weight.grad.data
else:
raise NotImplementedError(
"KFAC optimizer currently support only Linear and Conv2d layers"
)
if m.bias is not None:
if isinstance(m.bias.grad.data, torch.Tensor):
p_grad_mat = torch.cat(
[p_grad_mat, m.bias.grad.data.view(-1, 1)], 1)
else:
raise TypeError("bias.grad.data should be a Tensor")
return p_grad_mat
def _get_natural_grad(self, m: torch.nn.Module, p_grad_mat: torch.Tensor,
damping: float) -> List[torch.Tensor]:
"""
This function returns the product of inverse of the fisher matrix and the weights gradient.
Parameters:
-----------
m: torch.nn.Module
Specifies the layer for which the calculation must be done on.
p_grad_mat: torch.Tensor
            the gradient of the layer parameters in matrix form
damping: float
the damping factor for the calculation
Return:
-------
torch.Tensor
the product of inverse of the fisher matrix and the weights gradient.
"""
# p_grad_mat is of output_dim * input_dim
# inv((ss')) p_grad_mat inv(aa') = [ Q_g (1/R_g) Q_g^T ] @ p_grad_mat @ [Q_a (1/R_a) Q_a^T]
v1 = self.Q_g[m].t() @ p_grad_mat @ self.Q_a[m]
v2 = v1 / (self.d_g[m].unsqueeze(1) * self.d_a[m].unsqueeze(0) +
damping)
a = self.Q_g[m] @ v2 @ self.Q_a[m].t()
if m.bias is not None:
# we always put gradient w.r.t weight in [0]
# and w.r.t bias in [1]
if isinstance(m.weight.grad.data, torch.Tensor) and isinstance(
m.bias.grad.data, torch.Tensor):
v = [a[:, :-1], a[:, -1:]]
v[0] = v[0].view(m.weight.grad.data.size())
v[1] = v[1].view(m.bias.grad.data.size())
else:
raise TypeError(
"weight.grad.data and bias.grad.data should be a Tensor")
else:
v = [a.view(m.weight.grad.data.size())]
return v
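    # Editor's note (not part of the original class): in the eigenbasis the
    # Fisher-inverse/gradient product above reduces to an elementwise division,
    #
    #     F^{-1} grad = Q_g [ (Q_g^T grad Q_a) / (d_g d_a^T + damping) ] Q_a^T
    #
    # which is exactly what the three lines computing v1, v2 and a implement.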
def _kl_clip_and_update_grad(self, updates: Dict[torch.nn.Module,
List[torch.Tensor]],
lr: float):
"""
        Performs KL-based clipping on the update matrices if their magnitude is large, and then writes the final values into the parameter gradients.
Parameters:
-----------
updates: Dict[torch.nn.Module,List[torch.Tensor]]
            A dictionary containing the product of the gradient and the Fisher inverse for each layer.
lr: float
learning rate of the optimizer
"""
# do kl clip
vg_sum = 0.0
for m in self.modules:
v = updates[m]
vg_sum += (v[0] * m.weight.grad.data * lr**2).sum().item()
if m.bias is not None:
vg_sum += (v[1] * m.bias.grad.data * lr**2).sum().item()
nu = min(1.0, math.sqrt(self.kl_clip / vg_sum))
for m in self.modules:
v = updates[m]
if isinstance(m.weight.grad.data, torch.Tensor):
m.weight.grad.data.copy_(v[0])
m.weight.grad.data.mul_(nu)
else:
raise TypeError("weight.grad.data should be a Tensor")
if m.bias is not None:
if isinstance(m.bias.grad.data, torch.Tensor):
m.bias.grad.data.copy_(v[1])
m.bias.grad.data.mul_(nu)
else:
raise TypeError("bias.grad.data should be a Tensor")
def _step(self, closure: Optional[Callable] = None):
"""
Called in every step of the optimizer, updating the model parameters from the gradient by the KFAC equation.
Also, performs weight decay and adds momentum if any.
Parameters:
-----------
closure: Callable, optional(default: None)
            an optional customizable function which can be used to clear the gradients and compute the loss at every step.
"""
for group in self.param_groups:
weight_decay = group['weight_decay']
momentum = group['momentum']
lr = group['lr']
for p in group['params']:
if p.grad is None:
continue
d_p = p.grad.data
if weight_decay != 0 and self.steps >= 20 * self.TCov:
                    d_p.add_(p.data, alpha=weight_decay)
if momentum != 0:
param_state = self.state[p]
if 'momentum_buffer' not in param_state:
buf = param_state['momentum_buffer'] = torch.zeros_like(
p.data)
buf.mul_(momentum).add_(d_p)
else:
buf = param_state['momentum_buffer']
buf.mul_(momentum).add_(d_p)
d_p = buf
                p.data.add_(d_p, alpha=-lr)
def step(self, closure: Optional[Callable] = None):
"""
This is the function that gets called in each step of the optimizer to update the weights and biases of the model.
Parameters:
-----------
closure: Callable, optional(default: None)
            an optional customizable function which can be used to clear the gradients and compute the loss at every step.
"""
group = self.param_groups[0]
lr = group['lr']
damping = group['damping']
updates = {}
for m in self.modules:
if self.steps % self.TInv == 0:
self._update_inv(m)
p_grad_mat = self._get_matrix_form_grad(m)
v = self._get_natural_grad(m, p_grad_mat, damping)
updates[m] = v
self._kl_clip_and_update_grad(updates, lr)
self._step(closure)
self.steps += 1
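# ---------------------------------------------------------------------------
# Minimal usage sketch (editor addition, not part of the original module). It
# assumes a small feed-forward torch model and synthetic data; the keyword
# arguments shown are the constructor parameters handled above, with values
# picked arbitrarily for illustration.
#
#     import torch
#     model = torch.nn.Sequential(torch.nn.Linear(8, 16), torch.nn.ReLU(),
#                                 torch.nn.Linear(16, 2))
#     optimizer = KFACOptimizer(model, lr=0.001, momentum=0.9, TCov=10, TInv=100)
#     x, y = torch.randn(32, 8), torch.randint(0, 2, (32,))
#     for _ in range(5):
#         optimizer.zero_grad()
#         loss = torch.nn.functional.cross_entropy(model(x), y)
#         loss.backward()
#         optimizer.step()
# ---------------------------------------------------------------------------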
<file_sep>import unittest
import tempfile
import pytest
import deepchem as dc
import numpy as np
try:
import tensorflow as tf # noqa: F401
has_tensorflow = True
except:
has_tensorflow = False
class TestScScoreModel(unittest.TestCase):
@pytest.mark.tensorflow
def test_overfit_scscore(self):
"""Test fitting to a small dataset"""
n_samples = 10
n_features = 3
n_tasks = 1
# Create a dataset and an input function for processing it.
X = np.random.rand(n_samples, 2, n_features)
y = np.random.randint(2, size=(n_samples, n_tasks))
dataset = dc.data.NumpyDataset(X, y)
model = dc.models.ScScoreModel(n_features, dropouts=0)
model.fit(dataset, nb_epoch=100)
pred = model.predict(dataset)
assert np.array_equal(y, pred[0] > pred[1])
@pytest.mark.tensorflow
    def test_scscore_reload(self):
"""Test reloading of ScScoreModel"""
n_samples = 10
n_features = 3
n_tasks = 1
# Create a dataset and an input function for processing it.
X = np.random.rand(n_samples, 2, n_features)
y = np.random.randint(2, size=(n_samples, n_tasks))
dataset = dc.data.NumpyDataset(X, y)
model_dir = tempfile.mkdtemp()
model = dc.models.ScScoreModel(n_features, dropouts=0, model_dir=model_dir)
model.fit(dataset, nb_epoch=100)
pred = model.predict(dataset)
assert np.array_equal(y, pred[0] > pred[1])
reloaded_model = dc.models.ScScoreModel(n_features,
dropouts=0,
model_dir=model_dir)
reloaded_model.restore()
reloaded_pred = reloaded_model.predict(dataset)
assert len(pred) == len(reloaded_pred)
for p, r in zip(pred, reloaded_pred):
assert np.all(p == r)
<file_sep>import numpy as np
import tensorflow as tf
from deepchem.models import KerasModel, layers
from deepchem.models.losses import SigmoidCrossEntropy
from tensorflow.keras.layers import Input, Layer, Activation, Concatenate, Lambda
class IRVLayer(Layer):
""" Core layer of IRV classifier, architecture described in:
https://www.ncbi.nlm.nih.gov/pmc/articles/PMC2750043/
"""
def __init__(self, n_tasks, K, penalty, **kwargs):
"""
Parameters
----------
n_tasks: int
Number of tasks
K: int
Number of nearest neighbours used in classification
penalty: float
Amount of penalty (l2 or l1 applied)
"""
self.n_tasks = n_tasks
self.K = K
self.penalty = penalty
super(IRVLayer, self).__init__(**kwargs)
def build(self, input_shape):
self.V = tf.Variable(tf.constant([0.01, 1.]),
name="vote",
dtype=tf.float32)
self.W = tf.Variable(tf.constant([1., 1.]), name="w", dtype=tf.float32)
self.b = tf.Variable(tf.constant([0.01]), name="b", dtype=tf.float32)
self.b2 = tf.Variable(tf.constant([0.01]), name="b2", dtype=tf.float32)
def call(self, inputs):
K = self.K
outputs = []
for count in range(self.n_tasks):
# Similarity values
similarity = inputs[:, 2 * K * count:(2 * K * count + K)]
# Labels for all top K similar samples
ys = tf.cast(inputs[:, (2 * K * count + K):2 * K * (count + 1)],
tf.int32)
R = self.b + self.W[0] * similarity + self.W[1] * tf.constant(
np.arange(K) + 1, dtype=tf.float32)
R = tf.sigmoid(R)
z = tf.reduce_sum(R * tf.gather(self.V, ys), axis=1) + self.b2
outputs.append(tf.reshape(z, shape=[-1, 1]))
loss = (tf.nn.l2_loss(self.W) + tf.nn.l2_loss(self.V) +
tf.nn.l2_loss(self.b) + tf.nn.l2_loss(self.b2)) * self.penalty
self.add_loss(loss)
return tf.concat(outputs, axis=1)
class Slice(Layer):
""" Choose a slice of input on the last axis given order,
Suppose input x has two dimensions,
output f(x) = x[:, slice_num:slice_num+1]
"""
def __init__(self, slice_num, axis=1, **kwargs):
"""
Parameters
----------
slice_num: int
index of slice number
axis: int
axis id
"""
self.slice_num = slice_num
self.axis = axis
super(Slice, self).__init__(**kwargs)
def call(self, inputs):
slice_num = self.slice_num
axis = self.axis
return tf.slice(inputs, [0] * axis + [slice_num], [-1] * axis + [1])
class MultitaskIRVClassifier(KerasModel):
def __init__(self,
n_tasks,
K=10,
penalty=0.0,
mode="classification",
**kwargs):
"""Initialize MultitaskIRVClassifier
Parameters
----------
n_tasks: int
Number of tasks
K: int
Number of nearest neighbours used in classification
penalty: float
Amount of penalty (l2 or l1 applied)
"""
self.n_tasks = n_tasks
self.K = K
self.n_features = 2 * self.K * self.n_tasks
self.penalty = penalty
mol_features = Input(shape=(self.n_features,))
predictions = IRVLayer(self.n_tasks, self.K, self.penalty)(mol_features)
logits = []
outputs = []
for task in range(self.n_tasks):
task_output = Slice(task, 1)(predictions)
sigmoid = Activation(tf.sigmoid)(task_output)
logits.append(task_output)
outputs.append(sigmoid)
outputs = layers.Stack(axis=1)(outputs)
outputs2 = Lambda(lambda x: 1 - x)(outputs)
outputs = [
Concatenate(axis=2)([outputs2, outputs]),
logits[0] if len(logits) == 1 else Concatenate(axis=1)(logits)
]
model = tf.keras.Model(inputs=[mol_features], outputs=outputs)
super(MultitaskIRVClassifier,
self).__init__(model,
SigmoidCrossEntropy(),
output_types=['prediction', 'loss'],
**kwargs)
import warnings # noqa: E402
class TensorflowMultitaskIRVClassifier(MultitaskIRVClassifier):
def __init__(self, *args, **kwargs):
warnings.warn(
"TensorflowMultitaskIRVClassifier is deprecated and has been renamed to MultitaskIRVClassifier",
FutureWarning)
super(TensorflowMultitaskIRVClassifier, self).__init__(*args, **kwargs)
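# ---------------------------------------------------------------------------
# Minimal usage sketch (editor addition, not part of the original module). The
# classifier expects features of length 2 * K * n_tasks per sample (K similarity
# values followed by K nearest-neighbour labels for each task), normally produced
# by an IRV transformer; purely synthetic values are used here for illustration.
#
#     import numpy as np
#     import deepchem as dc
#     n_tasks, K, n_samples = 1, 5, 10
#     X = np.random.rand(n_samples, 2 * K * n_tasks)
#     y = np.random.randint(2, size=(n_samples, n_tasks))
#     dataset = dc.data.NumpyDataset(X, y)
#     model = MultitaskIRVClassifier(n_tasks, K=K, penalty=0.05)
#     model.fit(dataset, nb_epoch=1)
# ---------------------------------------------------------------------------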
<file_sep># flake8:noqa
from deepchem.models.lightning.dc_lightning_module import DCLightningModule
from deepchem.models.lightning.dc_lightning_dataset_module import DCLightningDatasetModule
<file_sep>"""
PCBA dataset loader.
"""
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
import os
import numpy as np
import shutil
import deepchem as dc
def load_pcba(featurizer='ECFP', split='random'):
"""Load PCBA datasets."""
current_dir = os.path.dirname(os.path.realpath(__file__))
print("About to load PCBA dataset.")
dataset_file = os.path.join(current_dir, "../../datasets/pcba.csv.gz")
# Featurize PCBA dataset
print("About to featurize PCBA dataset.")
if featurizer == 'ECFP':
featurizer = dc.feat.CircularFingerprint(size=1024)
elif featurizer == 'GraphConv':
featurizer = dc.feat.ConvMolFeaturizer()
PCBA_tasks = [
'PCBA-1030', 'PCBA-1379', 'PCBA-1452', 'PCBA-1454', 'PCBA-1457',
'PCBA-1458', 'PCBA-1460', 'PCBA-1461', 'PCBA-1468', 'PCBA-1469',
'PCBA-1471', 'PCBA-1479', 'PCBA-1631', 'PCBA-1634', 'PCBA-1688',
'PCBA-1721', 'PCBA-2100', 'PCBA-2101', 'PCBA-2147', 'PCBA-2242',
'PCBA-2326', 'PCBA-2451', 'PCBA-2517', 'PCBA-2528', 'PCBA-2546',
'PCBA-2549', 'PCBA-2551', 'PCBA-2662', 'PCBA-2675', 'PCBA-2676',
'PCBA-411', 'PCBA-463254', 'PCBA-485281', 'PCBA-485290', 'PCBA-485294',
'PCBA-485297', 'PCBA-485313', 'PCBA-485314', 'PCBA-485341', 'PCBA-485349',
'PCBA-485353', 'PCBA-485360', 'PCBA-485364', 'PCBA-485367', 'PCBA-492947',
'PCBA-493208', 'PCBA-504327', 'PCBA-504332', 'PCBA-504333', 'PCBA-504339',
'PCBA-504444', 'PCBA-504466', 'PCBA-504467', 'PCBA-504706', 'PCBA-504842',
'PCBA-504845', 'PCBA-504847', 'PCBA-504891', 'PCBA-540276', 'PCBA-540317',
'PCBA-588342', 'PCBA-588453', 'PCBA-588456', 'PCBA-588579', 'PCBA-588590',
'PCBA-588591', 'PCBA-588795', 'PCBA-588855', 'PCBA-602179', 'PCBA-602233',
'PCBA-602310', 'PCBA-602313', 'PCBA-602332', 'PCBA-624170', 'PCBA-624171',
'PCBA-624173', 'PCBA-624202', 'PCBA-624246', 'PCBA-624287', 'PCBA-624288',
'PCBA-624291', 'PCBA-624296', 'PCBA-624297', 'PCBA-624417', 'PCBA-651635',
'PCBA-651644', 'PCBA-651768', 'PCBA-651965', 'PCBA-652025', 'PCBA-652104',
'PCBA-652105', 'PCBA-652106', 'PCBA-686970', 'PCBA-686978', 'PCBA-686979',
'PCBA-720504', 'PCBA-720532', 'PCBA-720542', 'PCBA-720551', 'PCBA-720553',
'PCBA-720579', 'PCBA-720580', 'PCBA-720707', 'PCBA-720708', 'PCBA-720709',
'PCBA-720711', 'PCBA-743255', 'PCBA-743266', 'PCBA-875', 'PCBA-881',
'PCBA-883', 'PCBA-884', 'PCBA-885', 'PCBA-887', 'PCBA-891', 'PCBA-899',
'PCBA-902', 'PCBA-903', 'PCBA-904', 'PCBA-912', 'PCBA-914', 'PCBA-915',
'PCBA-924', 'PCBA-925', 'PCBA-926', 'PCBA-927', 'PCBA-938', 'PCBA-995'
]
loader = dc.data.CSVLoader(
tasks=PCBA_tasks, smiles_field="smiles", featurizer=featurizer)
dataset = loader.featurize(dataset_file)
# Initialize transformers
transformers = [dc.trans.BalancingTransformer(dataset=dataset)]
print("About to transform data")
for transformer in transformers:
dataset = transformer.transform(dataset)
splitters = {
'index': dc.splits.IndexSplitter(),
'random': dc.splits.RandomSplitter(),
'scaffold': dc.splits.ScaffoldSplitter()
}
splitter = splitters[split]
print("Performing new split.")
train, valid, test = splitter.train_valid_test_split(dataset)
return PCBA_tasks, (train, valid, test), transformers
<file_sep>Understanding DeepChem CI
===========================
Continuous Integration (CI) is used to continuously build and run tests
for the code in your repository to make sure that the changes introduced
by commits don't introduce errors. DeepChem runs a number of CI tests (jobs)
using workflows provided by GitHub Actions. When all CI tests in a workflow pass,
it implies that the changes introduced by a commit do not introduce any errors.
When creating a PR to the master branch or when pushing to the master branch, around 35 CI
tests are run from the following workflows.
#. Tests for DeepChem Core - The jobs are defined in the ``.github/workflows/main.yml`` file. The following jobs are performed in this workflow:
   * Building and installation of DeepChem on the latest Ubuntu OS with Python 3.7, followed by a check that ``import deepchem`` works.
   * These tests run on the latest Ubuntu version using Python 3.7-3.9 and on the latest Windows version using Python 3.7. The jobs check coding conventions using yapf, flake8 and mypy, and also include doctest and code-coverage tests.
   * Tests for pypi-build and docker-build are also included, but they are mostly skipped.
#. Tests for DeepChem Common - The jobs are defined in the ``.github/workflows/common_setup.yml`` file. The following tests are performed in this workflow:
* For build environments of Python 3.7, 3.8 and 3.9, DeepChem is built and import checking is performed.
   * The pytest suite is also run. All pytest tests which are not marked as jax, tensorflow or pytorch are run on the latest Ubuntu with Python 3.7, 3.8 and 3.9, and on the latest Windows with Python 3.7.
#. Tests for DeepChem Jax/Tensorflow/PyTorch
* Jax - DeepChem with jax backend is installed and import check is performed for deepchem and jax. The tests for pytests with jax markers are run on ubuntu latest with Python 3.7, 3.8 and 3.9.
* Tensorflow - DeepChem with tensorflow backend is installed and import check is performed for DeepChem and tensorflow. The tests for pytests with tensorflow markers are run on ubuntu latest with Python 3.7-3.9 and on windows latest, it is run with Python 3.7.
* PyTorch - DeepChem with pytorch backend is installed and import check is performed for DeepChem and torch. The tests for pytests with pytorch markers are run on ubuntu latest with Python 3.7-3.9 and on windows latest, it is run with Python 3.7.
#. Tests for documents
* These tests are used for checking docs build. It is run on ubuntu latest with Python 3.7.
#. Tests for Release
* These tests are run only when pushing a tag. It is run on ubuntu latest with Python 3.7.
General recommendations
------------------------
#. Handling additional or external files in unittest
	When a new feature is added to DeepChem, the respective unittest should be included too.
	Sometimes, these test functions use an external or additional file. To avoid problems in the CI,
	the absolute path of the file has to be included. For example, for the use of a file called
	“Test_data_feature.csv”, the unittest function should manage the absolute path as:
::
import os
current_dir = os.path.dirname(os.path.abspath(__file__))
data_dir = os.path.join(current_dir, "Test_data_feature.csv")
result = newFeature(data_dir)
Notes on Requirement Files
--------------------------
DeepChem's CI as well as installation procedures use requirement files defined in
``requirements`` directory. Currently, there are a number of requirement files. Their
purposes are listed here.
+ `env_common.yml` - this file lists the scientific dependencies used by DeepChem like rdkit.
+ `env_ubuntu.yml` and `env_mac.yml` - these contain scientific dependencies which have OS-specific support (currently, vina).
+ `env_test.yml` - it is mostly used for testing during development and contains the test dependencies.
+ The installation files in the `tensorflow`, `torch` and `jax` directories contain the installation commands for the backend deep learning frameworks. For torch and jax, the installation command differs between CPU and GPU, so we use separate installation files for CPU and GPU respectively.
<file_sep>"""
Original code by @philopon
https://gist.github.com/philopon/a75a33919d9ae41dbed5bc6a39f5ede2
"""
import sys
import os
import requests
import subprocess
import shutil
from logging import getLogger, StreamHandler, INFO
logger = getLogger(__name__)
logger.addHandler(StreamHandler())
logger.setLevel(INFO)
default_channels = [
"conda-forge",
]
default_packages = [
"openmm",
"pdbfixer",
]
def install(
chunk_size=4096,
file_name="Miniconda3-latest-Linux-x86_64.sh",
url_base="https://repo.continuum.io/miniconda/",
conda_path=os.path.expanduser(os.path.join("~", "miniconda")),
add_python_path=True,
    # default channel is "conda-forge"
additional_channels=[],
    # default packages are "openmm" and "pdbfixer"
additional_packages=[],
):
"""Install conda packages on Google Colab
For GPU/CPU notebook
```
import conda_installer
conda_installer.install()
```
    If you want to add other packages, you can use the additional_channels and
    additional_packages arguments. Please see the example.
    ```
    import conda_installer
    conda_installer.install(
      additional_channels=[],
      additional_packages=["mdtraj", "networkx"]
    )
    // add channel
    import conda_installer
    conda_installer.install(
      additional_channels=["dglteam"],
      additional_packages=["dgl-cuda10.1"]
    )
```
"""
python_path = os.path.join(
conda_path,
"lib",
"python{0}.{1}".format(*sys.version_info),
"site-packages",
)
if add_python_path and python_path not in sys.path:
logger.info("add {} to PYTHONPATH".format(python_path))
sys.path.append(python_path)
is_installed = []
packages = list(set(default_packages + additional_packages))
for package in packages:
package = "simtk" if package == "openmm" else package
is_installed.append(os.path.isdir(os.path.join(python_path, package)))
if all(is_installed):
logger.info("all packages are already installed")
return
url = url_base + file_name
python_version = "{0}.{1}.{2}".format(*sys.version_info)
logger.info("python version: {}".format(python_version))
if os.path.isdir(conda_path):
logger.warning("remove current miniconda")
shutil.rmtree(conda_path)
elif os.path.isfile(conda_path):
logger.warning("remove {}".format(conda_path))
os.remove(conda_path)
logger.info('fetching installer from {}'.format(url))
res = requests.get(url, stream=True)
res.raise_for_status()
with open(file_name, 'wb') as f:
for chunk in res.iter_content(chunk_size):
f.write(chunk)
logger.info('done')
logger.info('installing miniconda to {}'.format(conda_path))
subprocess.check_call(["bash", file_name, "-b", "-p", conda_path])
logger.info('done')
logger.info("installing openmm, pdbfixer")
channels = list(set(default_channels + additional_channels))
for channel in channels:
subprocess.check_call([
os.path.join(conda_path, "bin", "conda"), "config", "--append",
"channels", channel
])
logger.info("added {} to channels".format(channel))
subprocess.check_call([
os.path.join(conda_path, "bin", "conda"),
"install",
"--yes",
"python=={}".format(python_version),
*packages,
])
logger.info("done")
logger.info("conda packages installation finished!")
if __name__ == "__main__":
install()
<file_sep># flake8: noqa
from deepchem.feat.molecule_featurizers.atomic_coordinates import AtomicCoordinates
from deepchem.feat.molecule_featurizers.bp_symmetry_function_input import BPSymmetryFunctionInput
from deepchem.feat.molecule_featurizers.circular_fingerprint import CircularFingerprint
from deepchem.feat.molecule_featurizers.coulomb_matrices import CoulombMatrix
from deepchem.feat.molecule_featurizers.coulomb_matrices import CoulombMatrixEig
from deepchem.feat.molecule_featurizers.molgan_featurizer import GraphMatrix
from deepchem.feat.molecule_featurizers.maccs_keys_fingerprint import MACCSKeysFingerprint
from deepchem.feat.molecule_featurizers.mordred_descriptors import MordredDescriptors
from deepchem.feat.molecule_featurizers.mol2vec_fingerprint import Mol2VecFingerprint
from deepchem.feat.molecule_featurizers.one_hot_featurizer import OneHotFeaturizer
from deepchem.feat.molecule_featurizers.sparse_matrix_one_hot_featurizer import SparseMatrixOneHotFeaturizer
from deepchem.feat.molecule_featurizers.pubchem_fingerprint import PubChemFingerprint
from deepchem.feat.molecule_featurizers.raw_featurizer import RawFeaturizer
from deepchem.feat.molecule_featurizers.rdkit_descriptors import RDKitDescriptors
from deepchem.feat.molecule_featurizers.smiles_to_image import SmilesToImage
from deepchem.feat.molecule_featurizers.smiles_to_seq import SmilesToSeq, create_char_to_idx
from deepchem.feat.molecule_featurizers.mol_graph_conv_featurizer import MolGraphConvFeaturizer
from deepchem.feat.molecule_featurizers.mol_graph_conv_featurizer import PagtnMolGraphFeaturizer
from deepchem.feat.molecule_featurizers.molgan_featurizer import MolGanFeaturizer
from deepchem.feat.molecule_featurizers.mat_featurizer import MATFeaturizer
from deepchem.feat.molecule_featurizers.dmpnn_featurizer import DMPNNFeaturizer
from deepchem.feat.molecule_featurizers.grover_featurizer import GroverFeaturizer
from deepchem.feat.molecule_featurizers.snap_featurizer import SNAPFeaturizer
from deepchem.feat.molecule_featurizers.conformer_featurizer import RDKitConformerFeaturizer
from deepchem.feat.molecule_featurizers.mxmnet_featurizer import MXMNetFeaturizer
<file_sep>---
name: "❓Questions & Help"
about: Start a general discussion related to DeepChem
---
## ❓ Questions & Help
<!-- A clear and concise description of the question. -->
<file_sep>"""
Script that trains multitask models on clintox dataset.
@author <NAME>
"""
import numpy as np
import deepchem as dc
from deepchem.molnet import load_clintox
# Only for debug!
np.random.seed(123)
# Load clintox dataset
n_features = 1024
clintox_tasks, clintox_datasets, transformers = load_clintox(split='random')
train_dataset, valid_dataset, test_dataset = clintox_datasets
# Fit models
metric = dc.metrics.Metric(dc.metrics.roc_auc_score, np.mean)
model = dc.models.MultitaskClassifier(
len(clintox_tasks),
n_features,
layer_sizes=[1000],
dropouts=[.25],
learning_rate=0.001,
batch_size=50)
# Fit trained model
model.fit(train_dataset)
print("Evaluating model")
train_scores = model.evaluate(train_dataset, [metric], transformers)
valid_scores = model.evaluate(valid_dataset, [metric], transformers)
print("Train scores")
print(train_scores)
print("Validation scores")
print(valid_scores)
<file_sep>import unittest
import numpy as np
from deepchem.feat.sequence_featurizers.position_frequency_matrix_featurizer import PFMFeaturizer, CHARSET, PFM_to_PPM
class TestPFMFeaturizer(unittest.TestCase):
"""
Test PFMFeaturizer.
"""
def setUp(self):
"""
Set up test.
"""
self.msa = np.array([['ABC', 'BCD'], ['AAA', 'AAB']])
self.featurizer = PFMFeaturizer()
self.max_length = 100
def test_PFMFeaturizer_arbitrary(self):
"""
Test PFM featurizer for simple MSA.
"""
pfm = self.featurizer.featurize(self.msa)
assert pfm.shape == (2, len(CHARSET) + 1, self.max_length)
assert pfm[0][0][0] == 1
def test_PFM_to_PPM(self):
"""
Test PFM_to_PPM.
"""
pfm = self.featurizer.featurize(self.msa)
ppm = PFM_to_PPM(pfm[0])
assert ppm.shape == (len(CHARSET) + 1, self.max_length)
assert ppm[0][0] == .5
<file_sep># flake8: noqa
# metric class
from deepchem.metrics.metric import Metric
# metrics utils
from deepchem.metrics.metric import threshold_predictions
from deepchem.metrics.metric import normalize_weight_shape
from deepchem.metrics.metric import normalize_labels_shape
from deepchem.metrics.metric import normalize_prediction_shape
from deepchem.metrics.metric import handle_classification_mode
from deepchem.metrics.metric import to_one_hot
from deepchem.metrics.metric import from_one_hot
# sklearn & scipy score function
from deepchem.metrics.score_function import matthews_corrcoef
from deepchem.metrics.score_function import recall_score
from deepchem.metrics.score_function import kappa_score
from deepchem.metrics.score_function import cohen_kappa_score
from deepchem.metrics.score_function import r2_score
from deepchem.metrics.score_function import mean_squared_error
from deepchem.metrics.score_function import mean_absolute_error
from deepchem.metrics.score_function import precision_score
from deepchem.metrics.score_function import precision_recall_curve
from deepchem.metrics.score_function import auc
from deepchem.metrics.score_function import jaccard_score
from deepchem.metrics.score_function import f1_score
from deepchem.metrics.score_function import roc_auc_score
from deepchem.metrics.score_function import accuracy_score
from deepchem.metrics.score_function import balanced_accuracy_score
from deepchem.metrics.score_function import top_k_accuracy_score
from deepchem.metrics.score_function import pearsonr
# original score function
from deepchem.metrics.score_function import pearson_r2_score
from deepchem.metrics.score_function import jaccard_index
from deepchem.metrics.score_function import pixel_error
from deepchem.metrics.score_function import prc_auc_score
from deepchem.metrics.score_function import rms_score
from deepchem.metrics.score_function import mae_score
from deepchem.metrics.score_function import bedroc_score
from deepchem.metrics.score_function import concordance_index
<file_sep>from typing import List, Optional, Sequence
class Loss:
"""A loss function for use in training models."""
def _compute_tf_loss(self, output, labels):
"""Compute the loss function for TensorFlow tensors.
The inputs are tensors containing the model's outputs and the labels for a
batch. The return value should be a tensor of shape (batch_size) or
(batch_size, tasks) containing the value of the loss function on each
sample or sample/task.
Parameters
----------
output: tensor
the output of the model
labels: tensor
the expected output
Returns
-------
The value of the loss function on each sample or sample/task pair
"""
raise NotImplementedError("Subclasses must implement this")
def _create_pytorch_loss(self):
"""Create a PyTorch loss function."""
raise NotImplementedError("Subclasses must implement this")
class L1Loss(Loss):
"""The absolute difference between the true and predicted values."""
def _compute_tf_loss(self, output, labels):
import tensorflow as tf
output, labels = _make_tf_shapes_consistent(output, labels)
output, labels = _ensure_float(output, labels)
return tf.abs(output - labels)
def _create_pytorch_loss(self):
import torch
def loss(output, labels):
output, labels = _make_pytorch_shapes_consistent(output, labels)
return torch.nn.functional.l1_loss(output, labels, reduction='none')
return loss
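# Editor's illustrative sketch (not part of the original module): the PyTorch
# variant of L1Loss returns the unreduced elementwise absolute error, so for
# inputs of matching shape the result has that same shape.
#
#     >>> import torch
#     >>> loss_fn = L1Loss()._create_pytorch_loss()
#     >>> loss_fn(torch.tensor([1.0, 2.0]), torch.tensor([1.5, 1.0]))
#     tensor([0.5000, 1.0000])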
class HuberLoss(Loss):
"""Modified version of L1 Loss, also known as Smooth L1 loss.
Less sensitive to small errors, linear for larger errors.
    Huber loss is generally better than L1 loss for cases where there are both large outliers and small errors.
By default, Delta = 1.0 and reduction = 'none'.
"""
def _compute_tf_loss(self, output, labels):
import tensorflow as tf
output, labels = _make_tf_shapes_consistent(output, labels)
return tf.keras.losses.Huber(reduction='none')(output, labels)
def _create_pytorch_loss(self):
import torch
def loss(output, labels):
output, labels = _make_pytorch_shapes_consistent(output, labels)
return torch.nn.functional.smooth_l1_loss(output,
labels,
reduction='none')
return loss
class L2Loss(Loss):
"""The squared difference between the true and predicted values."""
def _compute_tf_loss(self, output, labels):
import tensorflow as tf
output, labels = _make_tf_shapes_consistent(output, labels)
output, labels = _ensure_float(output, labels)
return tf.square(output - labels)
def _create_pytorch_loss(self):
import torch
def loss(output, labels):
output, labels = _make_pytorch_shapes_consistent(output, labels)
return torch.nn.functional.mse_loss(output,
labels,
reduction='none')
return loss
class HingeLoss(Loss):
"""The hinge loss function.
The 'output' argument should contain logits, and all elements of 'labels'
should equal 0 or 1.
"""
def _compute_tf_loss(self, output, labels):
import tensorflow as tf
output, labels = _make_tf_shapes_consistent(output, labels)
return tf.keras.losses.hinge(labels, output)
def _create_pytorch_loss(self):
import torch
def loss(output, labels):
output, labels = _make_pytorch_shapes_consistent(output, labels)
return torch.mean(torch.clamp(1 - labels * output, min=0), dim=-1)
return loss
class SquaredHingeLoss(Loss):
"""The Squared Hinge loss function.
Defined as the square of the hinge loss between y_true and y_pred. The Squared Hinge Loss is differentiable.
"""
def _compute_tf_loss(self, output, labels):
import tensorflow as tf
output, labels = _make_tf_shapes_consistent(output, labels)
return tf.keras.losses.SquaredHinge(reduction='none')(labels, output)
def _create_pytorch_loss(self):
import torch
def loss(output, labels):
output, labels = _make_pytorch_shapes_consistent(output, labels)
return torch.mean(torch.pow(
torch.max(1 - torch.mul(labels, output), torch.tensor(0.0)), 2),
dim=-1)
return loss
class PoissonLoss(Loss):
    """The Poisson loss function is defined as the mean of the elements of y_pred - y_true * log(y_pred) for an input of (y_true, y_pred).
    Poisson loss is generally used for regression tasks where the data follows a Poisson distribution.
"""
def _compute_tf_loss(self, output, labels):
import tensorflow as tf
output, labels = _make_tf_shapes_consistent(output, labels)
loss = tf.keras.losses.Poisson(reduction='auto')
return loss(labels, output)
def _create_pytorch_loss(self):
import torch
def loss(output, labels):
output, labels = _make_pytorch_shapes_consistent(output, labels)
return torch.mean(output - labels * torch.log(output))
return loss
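# Editor's illustrative sketch (not part of the original module): unlike most
# losses in this file, the PyTorch Poisson loss above reduces over all elements
# and returns the scalar mean of output - labels * log(output).
#
#     >>> import torch
#     >>> loss_fn = PoissonLoss()._create_pytorch_loss()
#     >>> loss_fn(torch.tensor([[1.0, 1.0]]), torch.tensor([[0.0, 0.0]]))
#     tensor(1.)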
class BinaryCrossEntropy(Loss):
"""The cross entropy between pairs of probabilities.
The arguments should each have shape (batch_size) or (batch_size, tasks) and
contain probabilities.
"""
def _compute_tf_loss(self, output, labels):
import tensorflow as tf
output, labels = _make_tf_shapes_consistent(output, labels)
output, labels = _ensure_float(output, labels)
return tf.keras.losses.binary_crossentropy(labels, output)
def _create_pytorch_loss(self):
import torch
bce = torch.nn.BCELoss(reduction='none')
def loss(output, labels):
output, labels = _make_pytorch_shapes_consistent(output, labels)
return torch.mean(bce(output, labels), dim=-1)
return loss
class CategoricalCrossEntropy(Loss):
"""The cross entropy between two probability distributions.
The arguments should each have shape (batch_size, classes) or
(batch_size, tasks, classes), and represent a probability distribution over
classes.
"""
def _compute_tf_loss(self, output, labels):
import tensorflow as tf
output, labels = _make_tf_shapes_consistent(output, labels)
output, labels = _ensure_float(output, labels)
return tf.keras.losses.categorical_crossentropy(labels, output)
def _create_pytorch_loss(self):
import torch
def loss(output, labels):
output, labels = _make_pytorch_shapes_consistent(output, labels)
return -torch.sum(labels * torch.log(output), dim=-1)
return loss
class SigmoidCrossEntropy(Loss):
"""The cross entropy between pairs of probabilities.
The arguments should each have shape (batch_size) or (batch_size, tasks). The
labels should be probabilities, while the outputs should be logits that are
converted to probabilities using a sigmoid function.
"""
def _compute_tf_loss(self, output, labels):
import tensorflow as tf
output, labels = _make_tf_shapes_consistent(output, labels)
output, labels = _ensure_float(output, labels)
return tf.nn.sigmoid_cross_entropy_with_logits(labels, output)
def _create_pytorch_loss(self):
import torch
bce = torch.nn.BCEWithLogitsLoss(reduction='none')
def loss(output, labels):
output, labels = _make_pytorch_shapes_consistent(output, labels)
return bce(output, labels)
return loss
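# Editor's illustrative sketch (not part of the original module): the outputs
# are logits, so a logit of 0.0 corresponds to probability 0.5 and yields a
# cross entropy of -log(0.5) ~= 0.6931 against either label.
#
#     >>> import torch
#     >>> loss_fn = SigmoidCrossEntropy()._create_pytorch_loss()
#     >>> loss_fn(torch.tensor([0.0, 0.0]), torch.tensor([1.0, 0.0]))
#     tensor([0.6931, 0.6931])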
class SoftmaxCrossEntropy(Loss):
"""The cross entropy between two probability distributions.
The arguments should each have shape (batch_size, classes) or
(batch_size, tasks, classes). The labels should be probabilities, while the
outputs should be logits that are converted to probabilities using a softmax
function.
"""
def _compute_tf_loss(self, output, labels):
import tensorflow as tf
output, labels = _make_tf_shapes_consistent(output, labels)
output, labels = _ensure_float(output, labels)
return tf.nn.softmax_cross_entropy_with_logits(labels, output)
def _create_pytorch_loss(self):
import torch
ls = torch.nn.LogSoftmax(dim=-1)
def loss(output, labels):
output, labels = _make_pytorch_shapes_consistent(output, labels)
return -torch.sum(labels * ls(output), dim=-1)
return loss
class SparseSoftmaxCrossEntropy(Loss):
"""The cross entropy between two probability distributions.
The labels should have shape (batch_size) or (batch_size, tasks), and be
integer class labels. The outputs have shape (batch_size, classes) or
(batch_size, tasks, classes) and be logits that are converted to probabilities
using a softmax function.
"""
def _compute_tf_loss(self, output, labels):
import tensorflow as tf
if len(labels.shape) == len(output.shape):
labels = tf.squeeze(labels, axis=-1)
labels = tf.cast(labels, tf.int32)
return tf.nn.sparse_softmax_cross_entropy_with_logits(labels, output)
def _create_pytorch_loss(self):
import torch
ce_loss = torch.nn.CrossEntropyLoss(reduction='none')
def loss(output, labels):
# Convert (batch_size, tasks, classes) to (batch_size, classes, tasks)
# CrossEntropyLoss only supports (batch_size, classes, tasks)
# This is for API consistency
if len(output.shape) == 3:
output = output.permute(0, 2, 1)
if len(labels.shape) == len(output.shape):
labels = labels.squeeze(-1)
return ce_loss(output, labels.long())
return loss
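# Editor's illustrative sketch (not part of the original module): labels are
# integer class indices while outputs are unnormalized logits, so equal logits
# over two classes give a loss of -log(0.5) for every sample.
#
#     >>> import torch
#     >>> loss_fn = SparseSoftmaxCrossEntropy()._create_pytorch_loss()
#     >>> loss_fn(torch.zeros(2, 2), torch.tensor([0, 1]))
#     tensor([0.6931, 0.6931])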
class VAE_ELBO(Loss):
    """The Variational AutoEncoder loss: KL divergence regularization + marginal log-likelihood.
    This loss is based on [1]_.
    ELBO (evidence lower bound) is another name for the variational lower bound.
    BCE denotes the marginal log-likelihood term, and KLD denotes the KL divergence against a normal distribution.
    The hyperparameter 'kl_scale' weights the KLD term.
The logvar and mu should have shape (batch_size, hidden_space).
The x and reconstruction_x should have (batch_size, attribute).
The kl_scale should be float.
Examples
--------
Examples for calculating loss using constant tensor.
batch_size = 2,
hidden_space = 2,
num of original attribute = 3
>>> import numpy as np
>>> import torch
>>> import tensorflow as tf
>>> logvar = np.array([[1.0,1.3],[0.6,1.2]])
>>> mu = np.array([[0.2,0.7],[1.2,0.4]])
>>> x = np.array([[0.9,0.4,0.8],[0.3,0,1]])
>>> reconstruction_x = np.array([[0.8,0.3,0.7],[0.2,0,0.9]])
Case tensorflow
>>> VAE_ELBO()._compute_tf_loss(tf.constant(logvar), tf.constant(mu), tf.constant(x), tf.constant(reconstruction_x))
<tf.Tensor: shape=(2,), dtype=float64, numpy=array([0.70165154, 0.76238271])>
Case pytorch
>>> (VAE_ELBO()._create_pytorch_loss())(torch.tensor(logvar), torch.tensor(mu), torch.tensor(x), torch.tensor(reconstruction_x))
tensor([0.7017, 0.7624], dtype=torch.float64)
References
----------
.. [1] Kingma, <NAME>., and <NAME>. "Auto-encoding variational bayes." arXiv preprint arXiv:1312.6114 (2013).
"""
def _compute_tf_loss(self, logvar, mu, x, reconstruction_x, kl_scale=1):
import tensorflow as tf
x, reconstruction_x = _make_tf_shapes_consistent(x, reconstruction_x)
x, reconstruction_x = _ensure_float(x, reconstruction_x)
BCE = tf.keras.losses.binary_crossentropy(x, reconstruction_x)
KLD = VAE_KLDivergence()._compute_tf_loss(logvar, mu)
return BCE + kl_scale * KLD
def _create_pytorch_loss(self):
import torch
bce = torch.nn.BCELoss(reduction='none')
def loss(logvar, mu, x, reconstruction_x, kl_scale=1):
x, reconstruction_x = _make_pytorch_shapes_consistent(
x, reconstruction_x)
BCE = torch.mean(bce(reconstruction_x, x), dim=-1)
KLD = (VAE_KLDivergence()._create_pytorch_loss())(logvar, mu)
return BCE + kl_scale * KLD
return loss
class VAE_KLDivergence(Loss):
"""The KL_divergence between hidden distribution and normal distribution.
    This loss represents the KL divergence between the hidden distribution (using the parameters of that
    distribution) and a standard normal distribution, based on [1]_.
    The logvar should have shape (batch_size, hidden_space) and each term represents the
    standard deviation of the hidden distribution. The mu should have shape
    (batch_size, hidden_space) and each term represents the mean of the hidden distribution.
Examples
--------
Examples for calculating loss using constant tensor.
batch_size = 2,
hidden_space = 2,
>>> import numpy as np
>>> import torch
>>> import tensorflow as tf
>>> logvar = np.array([[1.0,1.3],[0.6,1.2]])
>>> mu = np.array([[0.2,0.7],[1.2,0.4]])
Case tensorflow
>>> VAE_KLDivergence()._compute_tf_loss(tf.constant(logvar), tf.constant(mu))
<tf.Tensor: shape=(2,), dtype=float64, numpy=array([0.17381787, 0.51425203])>
Case pytorch
>>> (VAE_KLDivergence()._create_pytorch_loss())(torch.tensor(logvar), torch.tensor(mu))
tensor([0.1738, 0.5143], dtype=torch.float64)
References
----------
.. [1] Kingma, <NAME>., and <NAME>. "Auto-encoding variational bayes." arXiv preprint arXiv:1312.6114 (2013).
"""
def _compute_tf_loss(self, logvar, mu):
import tensorflow as tf
logvar, mu = _make_tf_shapes_consistent(logvar, mu)
logvar, mu = _ensure_float(logvar, mu)
return 0.5 * tf.reduce_mean(
tf.square(mu) + tf.square(logvar) -
tf.math.log(1e-20 + tf.square(logvar)) - 1, -1)
def _create_pytorch_loss(self):
import torch
def loss(logvar, mu):
logvar, mu = _make_pytorch_shapes_consistent(logvar, mu)
return 0.5 * torch.mean(
torch.square(mu) + torch.square(logvar) -
torch.log(1e-20 + torch.square(logvar)) - 1, -1)
return loss
class ShannonEntropy(Loss):
"""The ShannonEntropy of discrete-distribution.
    This loss represents the Shannon entropy based on [1]_.
    The inputs should have shape (batch_size, num_variables) and represent
    probability distributions.
Examples
--------
Examples for calculating loss using constant tensor.
batch_size = 2,
num_of variable = variable,
>>> import numpy as np
>>> import torch
>>> import tensorflow as tf
>>> inputs = np.array([[0.7,0.3],[0.9,0.1]])
Case tensorflow
>>> ShannonEntropy()._compute_tf_loss(tf.constant(inputs))
<tf.Tensor: shape=(2,), dtype=float64, numpy=array([0.30543215, 0.16254149])>
Case pytorch
>>> (ShannonEntropy()._create_pytorch_loss())(torch.tensor(inputs))
tensor([0.3054, 0.1625], dtype=torch.float64)
References
----------
.. [1] Chen, <NAME>. "A Brief Introduction to Shannon’s Information Theory." arXiv preprint arXiv:1612.09316 (2016).
"""
def _compute_tf_loss(self, inputs):
import tensorflow as tf
        # extend a single probability column to a binary distribution
if inputs.shape[-1] == 1:
inputs = tf.concat([inputs, 1 - inputs], axis=-1)
return tf.reduce_mean(-inputs * tf.math.log(1e-20 + inputs), -1)
def _create_pytorch_loss(self):
import torch
def loss(inputs):
            # extend a single probability column to a binary distribution
if inputs.shape[-1] == 1:
inputs = torch.cat((inputs, 1 - inputs), dim=-1)
return torch.mean(-inputs * torch.log(1e-20 + inputs), -1)
return loss
class GlobalMutualInformationLoss(Loss):
"""
Global-global encoding loss (comparing two full graphs).
Compares the encodings of two molecular graphs and returns the loss between them based on the measure specified.
The encodings are generated by two separate encoders in order to maximize the mutual information between the two encodings.
Parameters:
----------
global_enc: torch.Tensor
Features from a graph convolutional encoder.
global_enc2: torch.Tensor
Another set of features from a graph convolutional encoder.
measure: str
The divergence measure to use for the unsupervised loss. Options are 'GAN', 'JSD', 'KL', 'RKL', 'X2', 'DV', 'H2', or 'W1'.
average_loss: bool
Whether to average the loss over the batch
Returns:
-------
loss: torch.Tensor
Measure of mutual information between the encodings of the two graphs.
References
----------
    .. [1] <NAME>, <NAME>, <NAME>, and <NAME>, “InfoGraph: Unsupervised and Semi-supervised Graph-Level Representation Learning via Mutual Information Maximization.” arXiv, Jan. 17, 2020. http://arxiv.org/abs/1908.01000
Examples
--------
>>> import numpy as np
>>> import deepchem.models.losses as losses
>>> from deepchem.feat.graph_data import BatchGraphData, GraphData
>>> from deepchem.models.torch_models.infograph import InfoGraphEncoder
>>> from deepchem.models.torch_models.layers import MultilayerPerceptron
>>> graph_list = []
>>> for i in range(3):
... node_features = np.random.rand(5, 10)
... edge_index = np.array([[0, 1, 2, 3, 4], [1, 2, 3, 4, 0]], dtype=np.int64)
... edge_features = np.random.rand(5, 5)
... graph_list.append(GraphData(node_features, edge_index, edge_features))
>>> batch = BatchGraphData(graph_list).numpy_to_torch()
>>> num_feat = 10
>>> edge_dim = 5
>>> dim = 4
>>> encoder = InfoGraphEncoder(num_feat, edge_dim, dim)
>>> encoding, feature_map = encoder(batch)
>>> g_enc = MultilayerPerceptron(2 * dim, dim)(encoding)
>>> g_enc2 = MultilayerPerceptron(2 * dim, dim)(encoding)
>>> globalloss = losses.GlobalMutualInformationLoss()
>>> loss = globalloss._create_pytorch_loss()(g_enc, g_enc2).detach().numpy()
"""
def _create_pytorch_loss(self, measure='JSD', average_loss=True):
import torch
def loss(global_enc, global_enc2):
device = global_enc.device
num_graphs = global_enc.shape[0]
pos_mask = torch.eye(num_graphs).to(device)
neg_mask = 1 - pos_mask
res = torch.mm(global_enc, global_enc2.t())
E_pos = get_positive_expectation(res * pos_mask, measure,
average_loss)
E_pos = (E_pos * pos_mask).sum() / pos_mask.sum()
E_neg = get_negative_expectation(res * neg_mask, measure,
average_loss)
E_neg = (E_neg * neg_mask).sum() / neg_mask.sum()
return E_neg - E_pos
return loss
class LocalMutualInformationLoss(Loss):
"""
Local-global encoding loss (comparing a subgraph to the full graph).
Compares the encodings of two molecular graphs and returns the loss between them based on the measure specified.
The encodings are generated by two separate encoders in order to maximize the mutual information between the two encodings.
Parameters:
----------
local_enc: torch.Tensor
Features from a graph convolutional encoder.
global_enc: torch.Tensor
Another set of features from a graph convolutional encoder.
batch_graph_index: graph_index: np.ndarray or torch.tensor, dtype int
This vector indicates which graph the node belongs with shape [num_nodes,]. Only present in BatchGraphData, not in GraphData objects.
measure: str
The divergence measure to use for the unsupervised loss. Options are 'GAN', 'JSD', 'KL', 'RKL', 'X2', 'DV', 'H2', or 'W1'.
average_loss: bool
Whether to average the loss over the batch
Returns:
-------
loss: torch.Tensor
Measure of mutual information between the encodings of the two graphs.
References
----------
    .. [1] <NAME>, <NAME>, <NAME>, and <NAME>, “InfoGraph: Unsupervised and Semi-supervised Graph-Level Representation Learning via Mutual Information Maximization.” arXiv, Jan. 17, 2020. http://arxiv.org/abs/1908.01000
Example
-------
>>> import numpy as np
>>> import deepchem.models.losses as losses
>>> from deepchem.feat.graph_data import BatchGraphData, GraphData
>>> from deepchem.models.torch_models.infograph import InfoGraphEncoder
>>> from deepchem.models.torch_models.layers import MultilayerPerceptron
>>> graph_list = []
>>> for i in range(3):
... node_features = np.random.rand(5, 10)
... edge_index = np.array([[0, 1, 2, 3, 4], [1, 2, 3, 4, 0]], dtype=np.int64)
... edge_features = np.random.rand(5, 5)
... graph_list.append(GraphData(node_features, edge_index, edge_features))
>>> batch = BatchGraphData(graph_list).numpy_to_torch()
>>> num_feat = 10
>>> edge_dim = 5
>>> dim = 4
>>> encoder = InfoGraphEncoder(num_feat, edge_dim, dim)
>>> encoding, feature_map = encoder(batch)
>>> g_enc = MultilayerPerceptron(2 * dim, dim)(encoding)
>>> l_enc = MultilayerPerceptron(dim, dim)(feature_map)
>>> localloss = losses.LocalMutualInformationLoss()
>>> loss = localloss._create_pytorch_loss()(l_enc, g_enc, batch.graph_index).detach().numpy()
"""
def _create_pytorch_loss(self, measure='JSD', average_loss=True):
import torch
def loss(local_enc, global_enc, batch_graph_index):
device = local_enc.device
num_graphs = global_enc.shape[0]
num_nodes = local_enc.shape[0]
pos_mask = torch.zeros((num_nodes, num_graphs)).to(device)
neg_mask = torch.ones((num_nodes, num_graphs)).to(device)
for nodeidx, graphidx in enumerate(batch_graph_index):
pos_mask[nodeidx][graphidx] = 1.
neg_mask[nodeidx][graphidx] = 0.
res = torch.mm(local_enc, global_enc.t())
E_pos = get_positive_expectation(res * pos_mask, measure,
average_loss)
E_pos = (E_pos * pos_mask).sum() / pos_mask.sum()
E_neg = get_negative_expectation(res * neg_mask, measure,
average_loss)
E_neg = (E_neg * neg_mask).sum() / neg_mask.sum()
return E_neg - E_pos
return loss
def get_positive_expectation(p_samples, measure='JSD', average_loss=True):
"""Computes the positive part of a divergence / difference.
Parameters:
----------
p_samples: torch.Tensor
Positive samples.
measure: str
The divergence measure to use for the unsupervised loss. Options are 'GAN', 'JSD', 'KL', 'RKL', 'X2', 'DV', 'H2', or 'W1'.
    average_loss: bool
Average the result over samples.
Returns:
-------
Ep: torch.Tensor
Positive part of the divergence / difference.
Example
-------
>>> import numpy as np
>>> import torch
>>> from deepchem.models.losses import get_positive_expectation
>>> p_samples = torch.tensor([0.5, 1.0, -0.5, -1.0])
>>> measure = 'JSD'
>>> result = get_positive_expectation(p_samples, measure)
"""
import math
import torch
log_2 = math.log(2.)
if measure == 'GAN':
Ep = -torch.nn.functional.softplus(-p_samples)
elif measure == 'JSD':
Ep = log_2 - torch.nn.functional.softplus(-p_samples)
elif measure == 'X2':
Ep = p_samples**2
elif measure == 'KL':
Ep = p_samples + 1.
elif measure == 'RKL':
Ep = -torch.exp(-p_samples)
elif measure == 'DV':
Ep = p_samples
elif measure == 'H2':
Ep = 1. - torch.exp(-p_samples)
elif measure == 'W1':
Ep = p_samples
else:
raise ValueError('Unknown measure: {}'.format(measure))
if average_loss:
return Ep.mean()
else:
return Ep
def get_negative_expectation(q_samples, measure='JSD', average_loss=True):
"""Computes the negative part of a divergence / difference.
Parameters:
----------
q_samples: torch.Tensor
Negative samples.
    measure: str
        The divergence measure to use for the unsupervised loss. Options are 'GAN', 'JSD', 'KL', 'RKL', 'X2', 'DV', 'H2', or 'W1'.
    average_loss: bool
        Average the result over samples.
    Returns:
    -------
    Eq: torch.Tensor
        Negative part of the divergence / difference.
Example
-------
>>> import numpy as np
>>> import torch
>>> from deepchem.models.losses import get_negative_expectation
>>> q_samples = torch.tensor([0.5, 1.0, -0.5, -1.0])
>>> measure = 'JSD'
>>> result = get_negative_expectation(q_samples, measure)
"""
import math
import torch
log_2 = math.log(2.)
if measure == 'GAN':
Eq = torch.nn.functional.softplus(-q_samples) + q_samples
elif measure == 'JSD':
Eq = torch.nn.functional.softplus(-q_samples) + q_samples - log_2
elif measure == 'X2':
Eq = -0.5 * ((torch.sqrt(q_samples**2) + 1.)**2)
elif measure == 'KL':
Eq = torch.exp(q_samples)
elif measure == 'RKL':
Eq = q_samples - 1.
elif measure == 'DV':
Eq = log_sum_exp(q_samples, 0) - math.log(q_samples.size(0))
elif measure == 'H2':
Eq = torch.exp(q_samples) - 1.
elif measure == 'W1':
Eq = q_samples
else:
raise ValueError('Unknown measure: {}'.format(measure))
if average_loss:
return Eq.mean()
else:
return Eq
def log_sum_exp(x, axis=None):
"""Log sum exp function.
Parameters
----------
x: torch.Tensor
Input tensor
axis: int
Axis to perform sum over
Returns
-------
y: torch.Tensor
Log sum exp of x
"""
import torch
x_max = torch.max(x, axis)[0]
y = torch.log((torch.exp(x - x_max)).sum(axis)) + x_max
return y
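# Editor's illustrative sketch (not part of the original module): log_sum_exp is
# a numerically stable log(sum(exp(x))) along the given axis, used above by the
# 'DV' measure in get_negative_expectation.
#
#     >>> import torch
#     >>> log_sum_exp(torch.tensor([0.0, 0.0]), 0)
#     tensor(0.6931)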
class GroverPretrainLoss(Loss):
"""GroverPretrainLoss
    The Grover pretraining consists of learning atom embeddings and bond embeddings for
a molecule. To this end, the learning consists of three tasks:
1. Learning of atom vocabulary from atom embeddings and bond embeddings
2. Learning of bond vocabulary from atom embeddings and bond embeddings
    3. Learning to predict functional groups from the atom embeddings readout and bond embeddings readout
The loss function accepts atom vocabulary labels, bond vocabulary labels and functional group
predictions produced by Grover model during pretraining as a dictionary and applies negative
log-likelihood loss for atom vocabulary and bond vocabulary predictions and Binary Cross Entropy
loss for functional group prediction and sums these to get overall loss.
Example
-------
>>> import torch
>>> from deepchem.models.losses import GroverPretrainLoss
>>> loss = GroverPretrainLoss()
>>> loss_fn = loss._create_pytorch_loss()
>>> batch_size = 3
>>> output_dim = 10
>>> fg_size = 8
>>> atom_vocab_task_target = torch.ones(batch_size).type(torch.int64)
>>> bond_vocab_task_target = torch.ones(batch_size).type(torch.int64)
>>> fg_task_target = torch.ones(batch_size, fg_size)
>>> atom_vocab_task_atom_pred = torch.zeros(batch_size, output_dim)
>>> bond_vocab_task_atom_pred = torch.zeros(batch_size, output_dim)
>>> atom_vocab_task_bond_pred = torch.zeros(batch_size, output_dim)
>>> bond_vocab_task_bond_pred = torch.zeros(batch_size, output_dim)
>>> fg_task_atom_from_atom = torch.zeros(batch_size, fg_size)
>>> fg_task_atom_from_bond = torch.zeros(batch_size, fg_size)
>>> fg_task_bond_from_atom = torch.zeros(batch_size, fg_size)
>>> fg_task_bond_from_bond = torch.zeros(batch_size, fg_size)
>>> result = loss_fn(atom_vocab_task_atom_pred, atom_vocab_task_bond_pred,
... bond_vocab_task_atom_pred, bond_vocab_task_bond_pred, fg_task_atom_from_atom,
... fg_task_atom_from_bond, fg_task_bond_from_atom, fg_task_bond_from_bond,
... atom_vocab_task_target, bond_vocab_task_target, fg_task_target)
Reference
---------
.. <NAME>, et al. "Self-supervised graph transformer on large-scale molecular data." Advances in Neural Information Processing Systems 33 (2020): 12559-12571.
"""
def _create_pytorch_loss(self):
import torch
import torch.nn as nn
def loss(atom_vocab_task_atom_pred: torch.Tensor,
atom_vocab_task_bond_pred: torch.Tensor,
bond_vocab_task_atom_pred: torch.Tensor,
bond_vocab_task_bond_pred: torch.Tensor,
fg_task_atom_from_atom: torch.Tensor,
fg_task_atom_from_bond: torch.Tensor,
fg_task_bond_from_atom: torch.Tensor,
fg_task_bond_from_bond: torch.Tensor,
atom_vocab_task_target: torch.Tensor,
bond_vocab_task_target: torch.Tensor,
fg_task_target: torch.Tensor,
weights: Optional[List[Sequence]] = None,
dist_coff=0.1):
"""
Parameters
----------
atom_vocab_task_atom_pred: torch.Tensor
Atom vocabulary prediction from atom embedding
atom_vocab_task_bond_pred: torch.Tensor
Atom vocabulary prediction from bond embedding
bond_vocab_task_atom_pred: torch.Tensor
Bond vocabulary prediction from atom embedding
bond_vocab_task_bond_pred: torch.Tensor
Bond vocabulary prediction from bond embedding
fg_task_atom_from_atom: torch.Tensor
Functional group prediction from atom embedding readout generated from atom embedding
fg_task_atom_from_bond: torch.Tensor
Functional group prediction from atom embedding readout generated from bond embedding
fg_task_bond_from_atom: torch.Tensor
Functional group prediction from bond embedding readout generated from atom embedding
fg_task_bond_from_bond: torch.Tensor
Functional group prediction from bond embedding readout generated from bond embedding
atom_vocab_task_target: torch.Tensor
Targets for atom vocabulary prediction
bond_vocab_task_target: torch.Tensor
Targets for bond vocabulary prediction
fg_task_target: torch.Tensor
Targets for functional groups
dist_coff: float, default 0.1
Loss term weight for weighting closeness between embedding generated from atom hidden state and bond hidden state in atom vocabulary and bond vocabulary prediction tasks.
Returns
-------
loss: torch.Tensor
loss value
"""
av_task_loss = nn.NLLLoss(reduction="mean") # same for av and bv
fg_task_loss = nn.BCEWithLogitsLoss(reduction="mean")
av_task_dist_loss = nn.MSELoss(reduction="mean")
fg_task_dist_loss = nn.MSELoss(reduction="mean")
sigmoid = nn.Sigmoid()
av_atom_loss = av_task_loss(atom_vocab_task_atom_pred,
atom_vocab_task_target)
av_bond_loss = av_task_loss(atom_vocab_task_bond_pred,
atom_vocab_task_target)
bv_atom_loss = av_task_loss(bond_vocab_task_atom_pred,
bond_vocab_task_target)
bv_bond_loss = av_task_loss(bond_vocab_task_bond_pred,
bond_vocab_task_target)
fg_atom_from_atom_loss = fg_task_loss(fg_task_atom_from_atom,
fg_task_target)
fg_atom_from_bond_loss = fg_task_loss(fg_task_atom_from_bond,
fg_task_target)
fg_bond_from_atom_loss = fg_task_loss(fg_task_bond_from_atom,
fg_task_target)
fg_bond_from_bond_loss = fg_task_loss(fg_task_bond_from_bond,
fg_task_target)
av_dist_loss = av_task_dist_loss(atom_vocab_task_atom_pred,
atom_vocab_task_bond_pred)
bv_dist_loss = av_task_dist_loss(bond_vocab_task_atom_pred,
bond_vocab_task_bond_pred)
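# The functional-group heads are trained with BCEWithLogitsLoss, i.e. they emit
# logits, so the MSE distance terms below compare sigmoid-activated
# probabilities rather than raw logits.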
fg_atom_dist_loss = fg_task_dist_loss(
sigmoid(fg_task_atom_from_atom),
sigmoid(fg_task_atom_from_bond))
fg_bond_dist_loss = fg_task_dist_loss(
sigmoid(fg_task_bond_from_atom),
sigmoid(fg_task_bond_from_bond))
av_bv_loss = av_atom_loss + av_bond_loss + bv_atom_loss + bv_bond_loss
fg_loss = fg_atom_from_atom_loss + fg_atom_from_bond_loss + fg_bond_from_atom_loss + fg_bond_from_bond_loss
fg_dist_loss = fg_atom_dist_loss + fg_bond_dist_loss
# NOTE The below comment is from original source code
# dist_loss = av_dist_loss + bv_dist_loss + fg_dist_loss
# return av_loss + fg_loss + dist_coff * dist_loss
overall_loss = av_bv_loss + fg_loss + dist_coff * (
av_dist_loss + bv_dist_loss + fg_dist_loss)
# return overall_loss, av_loss, bv_loss, fg_loss, av_dist_loss, bv_dist_loss, fg_dist_loss
# We just return overall_loss since TorchModel can handle only a single loss
return overall_loss
return loss
class EdgePredictionLoss(Loss):
"""
EdgePredictionLoss is an unsupervised graph edge prediction loss function that calculates the loss based on the similarity between node embeddings for positive and negative edge pairs. This loss function is designed for graph neural networks and is particularly useful for pre-training tasks.
This loss function encourages the model to learn node embeddings that can effectively distinguish between true edges (positive samples) and false edges (negative samples) in the graph.
The loss is computed by comparing the similarity scores (dot product) of node embeddings for positive and negative edge pairs. The goal is to maximize the similarity for positive pairs and minimize it for negative pairs.
To use this loss function, the input must be a BatchGraphData object transformed by the negative_edge_sampler. The loss function takes the node embeddings and the input graph data (with positive and negative edge pairs) as inputs and returns the edge prediction loss.
Examples
--------
>>> from deepchem.models.losses import EdgePredictionLoss
>>> from deepchem.feat.graph_data import BatchGraphData, GraphData
>>> from deepchem.models.torch_models.gnn import negative_edge_sampler
>>> import torch
>>> import numpy as np
>>> emb_dim = 8
>>> num_nodes_list, num_edge_list = [3, 4, 5], [2, 4, 5]
>>> num_node_features, num_edge_features = 32, 32
>>> edge_index_list = [
... np.array([[0, 1], [1, 2]]),
... np.array([[0, 1, 2, 3], [1, 2, 0, 2]]),
... np.array([[0, 1, 2, 3, 4], [1, 2, 3, 4, 0]]),
... ]
>>> graph_list = [
... GraphData(node_features=np.random.random_sample(
... (num_nodes_list[i], num_node_features)),
... edge_index=edge_index_list[i],
... edge_features=np.random.random_sample(
... (num_edge_list[i], num_edge_features)),
... node_pos_features=None) for i in range(len(num_edge_list))
... ]
>>> batched_graph = BatchGraphData(graph_list)
>>> batched_graph = batched_graph.numpy_to_torch()
>>> neg_sampled = negative_edge_sampler(batched_graph)
>>> embedding = np.random.random((sum(num_nodes_list), emb_dim))
>>> embedding = torch.from_numpy(embedding)
>>> loss_func = EdgePredictionLoss()._create_pytorch_loss()
>>> loss = loss_func(embedding, neg_sampled)
References
----------
.. [1] <NAME>. et al. Strategies for Pre-training Graph Neural Networks. Preprint at https://doi.org/10.48550/arXiv.1905.12265 (2020).
"""
def _create_pytorch_loss(self):
import torch
self.criterion = torch.nn.BCEWithLogitsLoss()
def loss(node_emb, inputs):
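# Positive pairs are scored from true edges; the stride of 2 assumes the batched
# edge_index stores each undirected edge in both directions, so only one copy of
# each edge contributes a positive pair.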
positive_score = torch.sum(node_emb[inputs.edge_index[0, ::2]] *
node_emb[inputs.edge_index[1, ::2]],
dim=1)
negative_score = torch.sum(node_emb[inputs.negative_edge_index[0]] *
node_emb[inputs.negative_edge_index[1]],
dim=1)
edge_pred_loss = self.criterion(
positive_score,
torch.ones_like(positive_score)) + self.criterion(
negative_score, torch.zeros_like(negative_score))
return edge_pred_loss
return loss
class GraphNodeMaskingLoss(Loss):
"""
GraphNodeMaskingLoss is an unsupervised graph node masking loss function that calculates the loss based on the predicted node labels and true node labels. This loss function is designed for graph neural networks and is particularly useful for pre-training tasks.
This loss function encourages the model to learn node embeddings that can effectively predict the masked node labels in the graph.
The loss is computed using the CrossEntropyLoss between the predicted node labels and the true node labels.
To use this loss function, the input must be a BatchGraphData object transformed by the mask_nodes function. The loss function takes the predicted node labels, predicted edge labels, and the input graph data (with masked node labels) as inputs and returns the node masking loss.
Parameters
----------
pred_node: torch.Tensor
Predicted node labels
pred_edge: Optional(torch.Tensor)
Predicted edge labels
inputs: BatchGraphData
Input graph data with masked node and edge labels
Examples
--------
>>> from deepchem.models.losses import GraphNodeMaskingLoss
>>> from deepchem.feat.graph_data import BatchGraphData, GraphData
>>> from deepchem.models.torch_models.gnn import mask_nodes
>>> import torch
>>> import numpy as np
>>> num_nodes_list, num_edge_list = [3, 4, 5], [2, 4, 5]
>>> num_node_features, num_edge_features = 32, 32
>>> edge_index_list = [
... np.array([[0, 1], [1, 2]]),
... np.array([[0, 1, 2, 3], [1, 2, 0, 2]]),
... np.array([[0, 1, 2, 3, 4], [1, 2, 3, 4, 0]]),
... ]
>>> graph_list = [
... GraphData(node_features=np.random.random_sample(
... (num_nodes_list[i], num_node_features)),
... edge_index=edge_index_list[i],
... edge_features=np.random.random_sample(
... (num_edge_list[i], num_edge_features)),
... node_pos_features=None) for i in range(len(num_edge_list))
... ]
>>> batched_graph = BatchGraphData(graph_list)
>>> batched_graph = batched_graph.numpy_to_torch()
>>> masked_graph = mask_nodes(batched_graph, 0.1)
>>> pred_node = torch.randn((sum(num_nodes_list), num_node_features))
>>> pred_edge = torch.randn((sum(num_edge_list), num_edge_features))
>>> loss_func = GraphNodeMaskingLoss()._create_pytorch_loss()
>>> loss = loss_func(pred_node[masked_graph.masked_node_indices],
... pred_edge[masked_graph.connected_edge_indices], masked_graph)
References
----------
.. [1] <NAME>. et al. Strategies for Pre-training Graph Neural Networks. Preprint at https://doi.org/10.48550/arXiv.1905.12265 (2020).
"""
def _create_pytorch_loss(self, mask_edge=True):
import torch
self.mask_edge = mask_edge
self.criterion = torch.nn.CrossEntropyLoss()
def loss(pred_node, pred_edge, inputs):
# loss for nodes
loss = self.criterion(pred_node, inputs.mask_node_label)
if self.mask_edge:
loss += self.criterion(pred_edge, inputs.mask_edge_label)
return loss
return loss
class GraphEdgeMaskingLoss(Loss):
"""
GraphEdgeMaskingLoss is an unsupervised graph edge masking loss function that calculates the loss based on the predicted edge labels and true edge labels. This loss function is designed for graph neural networks and is particularly useful for pre-training tasks.
This loss function encourages the model to learn node embeddings that can effectively predict the masked edge labels in the graph.
The loss is computed using the CrossEntropyLoss between the predicted edge labels and the true edge labels.
To use this loss function, the input must be a BatchGraphData object transformed by the mask_edges function. The loss function takes the predicted edge labels and the true edge labels as inputs and returns the edge masking loss.
Parameters
----------
pred_edge: torch.Tensor
Predicted edge labels.
inputs: BatchGraphData
Input graph data (with masked edge labels).
Examples
--------
>>> from deepchem.models.losses import GraphEdgeMaskingLoss
>>> from deepchem.feat.graph_data import BatchGraphData, GraphData
>>> from deepchem.models.torch_models.gnn import mask_edges
>>> import torch
>>> import numpy as np
>>> num_nodes_list, num_edge_list = [3, 4, 5], [2, 4, 5]
>>> num_node_features, num_edge_features = 32, 32
>>> edge_index_list = [
... np.array([[0, 1], [1, 2]]),
... np.array([[0, 1, 2, 3], [1, 2, 0, 2]]),
... np.array([[0, 1, 2, 3, 4], [1, 2, 3, 4, 0]]),
... ]
>>> graph_list = [
... GraphData(node_features=np.random.random_sample(
... (num_nodes_list[i], num_node_features)),
... edge_index=edge_index_list[i],
... edge_features=np.random.random_sample(
... (num_edge_list[i], num_edge_features)),
... node_pos_features=None) for i in range(len(num_edge_list))
... ]
>>> batched_graph = BatchGraphData(graph_list)
>>> batched_graph = batched_graph.numpy_to_torch()
>>> masked_graph = mask_edges(batched_graph, .1)
>>> pred_edge = torch.randn((sum(num_edge_list), num_edge_features))
>>> loss_func = GraphEdgeMaskingLoss()._create_pytorch_loss()
>>> loss = loss_func(pred_edge[masked_graph.masked_edge_idx], masked_graph)
References
----------
.. [1] <NAME>. et al. Strategies for Pre-training Graph Neural Networks. Preprint at https://doi.org/10.48550/arXiv.1905.12265 (2020).
"""
def _create_pytorch_loss(self):
import torch
self.criterion = torch.nn.CrossEntropyLoss()
def loss(pred_edge, inputs):
# convert the edge label vectors to class indices (argmax) as expected by CrossEntropyLoss
labels = torch.argmax(inputs.mask_edge_label, dim=1)
loss = self.criterion(pred_edge, labels)
return loss
return loss
class DeepGraphInfomaxLoss(Loss):
"""
Loss that maximizes mutual information between local node representations and a pooled global graph representation. This is to encourage nearby nodes to have similar embeddings.
Parameters
----------
positive_score: torch.Tensor
Positive score. This score measures the similarity between the local node embeddings (`node_emb`) and the global graph representation (`positive_expanded_summary_emb`) derived from the same graph.
The goal is to maximize this score, as it indicates that the local node embeddings and the global graph representation are highly correlated, capturing the mutual information between them.
negative_score: torch.Tensor
Negative score. This score measures the similarity between the local node embeddings (`node_emb`) and the global graph representation (`negative_expanded_summary_emb`) derived from a different graph (shifted by one position in this case).
The goal is to minimize this score, as it indicates that the local node embeddings and the global graph representation from different graphs are not correlated, ensuring that the model learns meaningful representations that are specific to each graph.
Examples
--------
>>> import torch
>>> import numpy as np
>>> from deepchem.feat.graph_data import GraphData
>>> from torch_geometric.nn import global_mean_pool
>>> from deepchem.models.losses import DeepGraphInfomaxLoss
>>> x = np.array([[1, 0], [0, 1], [1, 1], [0, 0]])
>>> edge_index = np.array([[0, 1, 2, 0, 3], [1, 0, 1, 3, 2]])
>>> graph_index = np.array([0, 0, 1, 1])
>>> data = GraphData(node_features=x, edge_index=edge_index, graph_index=graph_index).numpy_to_torch()
>>> graph_infomax_loss = DeepGraphInfomaxLoss()._create_pytorch_loss()
>>> # Initialize node_emb randomly
>>> num_nodes = data.num_nodes
>>> embedding_dim = 8
>>> node_emb = torch.randn(num_nodes, embedding_dim)
>>> # Compute the global graph representation
>>> summary_emb = global_mean_pool(node_emb, data.graph_index)
>>> # Compute positive and negative scores
>>> positive_score = torch.matmul(node_emb, summary_emb.t())
>>> negative_score = torch.matmul(node_emb, summary_emb.roll(1, dims=0).t())
>>> loss = graph_infomax_loss(positive_score, negative_score)
References
----------
.. [1] <NAME> al. Deep Graph Infomax. Preprint at https://doi.org/10.48550/arXiv.1809.10341 (2018).
"""
def _create_pytorch_loss(self):
import torch
self.criterion = torch.nn.BCEWithLogitsLoss()
def loss(positive_score, negative_score):
return self.criterion(
positive_score,
torch.ones_like(positive_score)) + self.criterion(
negative_score, torch.zeros_like(negative_score))
return loss
class GraphContextPredLoss(Loss):
"""
GraphContextPredLoss is a loss function designed for graph neural networks that aims to predict the context of a node given its substructure. The context of a node is essentially the ring of nodes around it outside of an inner k1-hop diameter and inside an outer k2-hop diameter.
This loss compares the representation of a node's neighborhood with the representation of the node's context. It then uses negative sampling to compare the representation of the node's neighborhood with the representation of a random node's context.
Parameters
----------
mode: str
The mode of the model. It can be either "cbow" (continuous bag of words) or "skipgram".
neg_samples: int
The number of negative samples to use for negative sampling.
Examples
--------
>>> import torch
>>> from deepchem.models.losses import GraphContextPredLoss
>>> substruct_rep = torch.randn(4, 8)
>>> overlapped_node_rep = torch.randn(8, 8)
>>> context_rep = torch.randn(4, 8)
>>> neg_context_rep = torch.randn(2 * 4, 8)
>>> overlapped_context_size = torch.tensor([2, 2, 2, 2])
>>> mode = "cbow"
>>> neg_samples = 2
>>> graph_context_pred_loss = GraphContextPredLoss()._create_pytorch_loss(mode, neg_samples)
>>> loss = graph_context_pred_loss(substruct_rep, overlapped_node_rep, context_rep, neg_context_rep, overlapped_context_size)
"""
def _create_pytorch_loss(self, mode, neg_samples):
import torch
from deepchem.models.torch_models.gnn import cycle_index
self.mode = mode
self.neg_samples = neg_samples
self.criterion = torch.nn.BCEWithLogitsLoss()
def loss(substruct_rep, overlapped_node_rep, context_rep,
neg_context_rep, overlap_size):
if self.mode == "cbow":
# positive context prediction is the dot product of substructure representation and true context representation
pred_pos = torch.sum(substruct_rep * context_rep, dim=1)
# negative context prediction is the dot product of substructure representation and negative (random) context representation.
pred_neg = torch.sum(substruct_rep.repeat(
(self.neg_samples, 1)) * neg_context_rep,
dim=1)
elif self.mode == "skipgram":
expanded_substruct_rep = torch.cat(
[substruct_rep[i].repeat((i, 1)) for i in overlap_size],
dim=0)
# positive substructure prediction is the dot product of expanded substructure representation and true overlapped node representation.
pred_pos = torch.sum(expanded_substruct_rep *
overlapped_node_rep,
dim=1)
# shift indices of substructures to create negative examples
shifted_expanded_substruct_rep = []
for j in range(self.neg_samples):
shifted_substruct_rep = substruct_rep[cycle_index(
len(substruct_rep), j + 1)]
shifted_expanded_substruct_rep.append(
torch.cat([
shifted_substruct_rep[i].repeat((i, 1))
for i in overlap_size
],
dim=0))
shifted_expanded_substruct_rep = torch.cat(
shifted_expanded_substruct_rep, dim=0)
# negative substructure prediction is the dot product of shifted expanded substructure representation and true overlapped node representation.
pred_neg = torch.sum(shifted_expanded_substruct_rep *
overlapped_node_rep.repeat(
(self.neg_samples, 1)),
dim=1)
else:
raise ValueError(
"Invalid mode. Must be either cbow or skipgram.")
# Compute the loss for positive and negative context representations
loss_pos = self.criterion(
pred_pos.double(),
torch.ones(len(pred_pos)).to(pred_pos.device).double())
loss_neg = self.criterion(
pred_neg.double(),
torch.zeros(len(pred_neg)).to(pred_neg.device).double())
# The final loss is the sum of positive and negative context losses
loss = loss_pos + self.neg_samples * loss_neg
return loss
return loss
class DensityProfileLoss(Loss):
"""
Loss for the density profile entry type for Quantum Chemistry calculations.
It is an integration of the squared difference between ground truth and calculated
values over all points of the integration grid.
Examples
--------
>>> from deepchem.models.losses import DensityProfileLoss
>>> import torch
>>> volume = torch.Tensor([2.0])
>>> output = torch.Tensor([3.0])
>>> labels = torch.Tensor([4.0])
>>> loss = (DensityProfileLoss()._create_pytorch_loss(volume))(output, labels)
>>> # Generating volume tensor for an entry object:
>>> from deepchem.feat.dft_data import DFTEntry
>>> e_type = 'dens'
>>> true_val = 0
>>> systems =[{'moldesc': 'H 0.86625 0 0; F -0.86625 0 0','basis' : '6-311++G(3df,3pd)'}]
>>> dens_entry_for_HF = DFTEntry.create(e_type, true_val, systems)
>>> grid = (dens_entry_for_HF).get_integration_grid()
>>> volume = grid.get_dvolume()
References
----------
Kasim, <NAME>., and <NAME>. "Learning the exchange-correlation
functional from nature with fully differentiable density functional
theory." Physical Review Letters 127.12 (2021): 126403.
https://github.com/deepchem/deepchem/blob/0bc3139bb99ae7700ba2325a6756e33b6c327842/deepchem/models/dft/dftxc.py
"""
def _create_pytorch_loss(self, volume):
"""
Parameters
----------
volume: torch.Tensor
Shape of the tensor depends on the molecule/crystal and the integration grid
"""
import torch
def loss(output, labels):
output, labels = _make_pytorch_shapes_consistent(output, labels)
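# Approximate the spatial integral of the squared error as a weighted sum over
# grid points, with `volume` supplying each point's integration weight (the
# grid's dvolume).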
return torch.sum((labels - output)**2 * volume)
return loss
class NTXentMultiplePositives(Loss):
"""
This is a modification of the NTXent loss function from Chen [1]_. This loss is designed for contrastive learning of molecular representations, comparing the similarity of a molecule's latent representation to positive and negative samples.
The modifications proposed in [2]_ enable multiple conformers to be used as positive samples.
This loss function is designed for graph neural networks and is particularly useful for unsupervised pre-training tasks.
Parameters
----------
norm : bool, optional (default=True)
Whether to normalize the similarity matrix.
tau : float, optional (default=0.5)
Temperature parameter for the similarity matrix.
uniformity_reg : float, optional (default=0)
Regularization weight for the uniformity loss.
variance_reg : float, optional (default=0)
Regularization weight for the variance loss.
covariance_reg : float, optional (default=0)
Regularization weight for the covariance loss.
conformer_variance_reg : float, optional (default=0)
Regularization weight for the conformer variance loss.
Examples
--------
>>> import torch
>>> from deepchem.models.losses import NTXentMultiplePositives
>>> z1 = torch.randn(4, 8)
>>> z2 = torch.randn(4 * 3, 8)
>>> ntxent_loss = NTXentMultiplePositives(norm=True, tau=0.5)
>>> loss_fn = ntxent_loss._create_pytorch_loss()
>>> loss = loss_fn(z1, z2)
References
----------
.. [1] <NAME>., <NAME>., <NAME>. & <NAME>. A Simple Framework for Contrastive Learning of Visual Representations. Preprint at https://doi.org/10.48550/arXiv.2002.05709 (2020).
.. [2] <NAME>. et al. 3D Infomax improves GNNs for Molecular Property Prediction. Preprint at https://doi.org/10.48550/arXiv.2110.04126 (2022).
"""
def __init__(self,
norm: bool = True,
tau: float = 0.5,
uniformity_reg=0,
variance_reg=0,
covariance_reg=0,
conformer_variance_reg=0) -> None:
super(NTXentMultiplePositives, self).__init__()
self.norm = norm
self.tau = tau
self.uniformity_reg = uniformity_reg
self.variance_reg = variance_reg
self.covariance_reg = covariance_reg
self.conformer_variance_reg = conformer_variance_reg
def _create_pytorch_loss(self):
import torch
from torch import Tensor
def std_loss(x: Tensor) -> Tensor:
"""
Compute the standard deviation loss.
Parameters
----------
x : torch.Tensor
Input tensor.
Returns
-------
loss : torch.Tensor
The standard deviation loss.
"""
std = torch.sqrt(x.var(dim=0) + 1e-04)
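# Hinge penalty: non-zero only for dimensions whose standard deviation falls
# below 1, which discourages collapsed (low-variance) embeddings.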
return torch.mean(torch.relu(1 - std))
def uniformity_loss(x1: Tensor, x2: Tensor, t=2) -> Tensor:
"""
Compute the uniformity loss.
Parameters
----------
x1 : torch.Tensor
First input tensor.
x2 : torch.Tensor
Second input tensor.
t : int, optional (default=2)
Exponent for the squared Euclidean distance.
Returns
-------
loss : torch.Tensor
The uniformity loss.
"""
sq_pdist_x1 = torch.pdist(x1, p=2).pow(2)
uniformity_x1 = sq_pdist_x1.mul(-t).exp().mean().log()
sq_pdist_x2 = torch.pdist(x2, p=2).pow(2)
uniformity_x2 = sq_pdist_x2.mul(-t).exp().mean().log()
return (uniformity_x1 + uniformity_x2) / 2
def cov_loss(x: Tensor) -> Tensor:
"""
Compute the covariance loss.
Parameters
----------
x : torch.Tensor
Input tensor.
Returns
-------
loss : torch.Tensor
The covariance loss.
"""
batch_size, metric_dim = x.size()
x = x - x.mean(dim=0)
cov = (x.T @ x) / (batch_size - 1)
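# Flattening to (metric_dim - 1, metric_dim + 1) and dropping the first column
# removes exactly the diagonal entries, leaving only the off-diagonal
# covariances to be penalized.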
off_diag_cov = cov.flatten()[:-1].view(metric_dim - 1, metric_dim +
1)[:, 1:].flatten()
return off_diag_cov.pow_(2).sum() / metric_dim
def loss(z1: Tensor, z2: Tensor) -> Tensor:
"""
Compute the NTXentMultiplePositives loss.
Parameters
----------
z1 : torch.Tensor
First input tensor with shape (batch_size, metric_dim).
z2 : torch.Tensor
Second input tensor with shape (batch_size * num_conformers, metric_dim).
Returns
-------
loss : torch.Tensor
The NTXentMultiplePositives loss.
"""
batch_size, metric_dim = z1.size()
z2 = z2.view(batch_size, -1,
metric_dim) # [batch_size, num_conformers, metric_dim]
sim_matrix = torch.einsum(
'ik,juk->iju', z1,
z2) # [batch_size, batch_size, num_conformers]
if self.norm:
z1_abs = z1.norm(dim=1)
z2_abs = z2.norm(dim=2)
sim_matrix = sim_matrix / torch.einsum('i,ju->iju', z1_abs,
z2_abs)
sim_matrix = torch.exp(
sim_matrix /
self.tau) # [batch_size, batch_size, num_conformers]
sim_matrix = sim_matrix.sum(dim=2) # [batch_size, batch_size]
pos_sim = torch.diagonal(sim_matrix) # [batch_size]
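# NT-Xent ratio: summed similarity to a molecule's own conformers (the diagonal)
# over its summed similarity to every other molecule's conformers.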
loss = pos_sim / (sim_matrix.sum(dim=1) - pos_sim)
loss = -torch.log(loss).mean()
if self.variance_reg > 0:
loss += self.variance_reg * (std_loss(z1) + std_loss(z2))
if self.conformer_variance_reg > 0:
std = torch.sqrt(z2.var(dim=1) + 1e-04)
std_conf_loss = torch.mean(torch.relu(1 - std))
loss += self.conformer_variance_reg * std_conf_loss
if self.covariance_reg > 0:
loss += self.covariance_reg * (cov_loss(z1) + cov_loss(z2))
if self.uniformity_reg > 0:
loss += self.uniformity_reg * uniformity_loss(z1, z2)
return loss
return loss
def _make_tf_shapes_consistent(output, labels):
"""Try to make inputs have the same shape by adding dimensions of size 1."""
import tensorflow as tf
shape1 = output.shape
shape2 = labels.shape
len1 = len(shape1)
len2 = len(shape2)
if len1 == len2:
return (output, labels)
if isinstance(shape1, tf.TensorShape):
shape1 = tuple(shape1.as_list())
if isinstance(shape2, tf.TensorShape):
shape2 = tuple(shape2.as_list())
if len1 > len2 and all(i == 1 for i in shape1[len2:]):
for i in range(len1 - len2):
labels = tf.expand_dims(labels, -1)
return (output, labels)
if len2 > len1 and all(i == 1 for i in shape2[len1:]):
for i in range(len2 - len1):
output = tf.expand_dims(output, -1)
return (output, labels)
raise ValueError(
"Incompatible shapes for outputs and labels: %s versus %s" %
(str(shape1), str(shape2)))
def _make_pytorch_shapes_consistent(output, labels):
"""Try to make inputs have the same shape by adding dimensions of size 1."""
import torch
shape1 = output.shape
shape2 = labels.shape
len1 = len(shape1)
len2 = len(shape2)
if len1 == len2:
return (output, labels)
shape1 = tuple(shape1)
shape2 = tuple(shape2)
if len1 > len2 and all(i == 1 for i in shape1[len2:]):
for i in range(len1 - len2):
labels = torch.unsqueeze(labels, -1)
return (output, labels)
if len2 > len1 and all(i == 1 for i in shape2[len1:]):
for i in range(len2 - len1):
output = torch.unsqueeze(output, -1)
return (output, labels)
raise ValueError(
"Incompatible shapes for outputs and labels: %s versus %s" %
(str(shape1), str(shape2)))
def _ensure_float(output, labels):
"""Make sure the outputs and labels are both floating point types."""
import tensorflow as tf
if output.dtype not in (tf.float32, tf.float64):
output = tf.cast(output, tf.float32)
if labels.dtype not in (tf.float32, tf.float64):
labels = tf.cast(labels, tf.float32)
return (output, labels)
<file_sep># This script creates the new deepchem environment
# This script works only on Bash and Zsh
set -e # Exit if any command fails.
CMDNAME=`basename ${BASH_SOURCE:-$0}`
if [ $# -ne 2 ]; then
echo "Please set two arguments."
echo "Usage) source $CMDNAME python_version cpu_or_gpu" 1>&2
echo "Example) source $CMDNAME 3.6 gpu" 1>&2
return 1
fi
# This is roughly equivalent to running `conda init`
# and is required so that `conda activate` works inside this script
eval "$(conda shell.bash hook)"
# Create deepchem environment
conda config --set always_yes yes
conda create --name deepchem python=$1
conda install -c conda-forge conda-merge
dir="$PWD/requirements"
if [ "$2" = "gpu" ];
then
# We expect the CUDA version to be 10.1.
conda-merge $dir/env_common.yml $dir/torch/env_torch.gpu.yml $dir/env_test.yml $dir/jax/env_jax.gpu.yml > $PWD/env.yml
echo "Installing DeepChem in the GPU environment"
else
if [ "$(uname)" = 'Darwin' ]; then
conda-merge $dir/env_common.yml $dir/env_mac.yml $dir/env_test.yml $dir/tensorflow/env_tensorflow.cpu.yml $dir/torch/env_torch.mac.cpu.yml $dir/jax/env_jax.cpu.yml > $PWD/env.yml
elif [ "$(uname)" = 'Linux' ]; then
conda-merge $dir/env_common.yml $dir/env_test.yml $dir/env_ubuntu.yml $dir/tensorflow/env_tensorflow.cpu.yml $dir/torch/env_torch.cpu.yml $dir/jax/env_jax.cpu.yml > $PWD/env.yml
fi
echo "Installing DeepChem in the CPU environment"
fi
# Install all dependencies
conda env update --file $PWD/env.yml
<file_sep>"""
Imports all submodules
"""
# If you push the tag, please remove `.dev`
__version__ = '2.7.2.dev'
import deepchem.data
import deepchem.feat
import deepchem.hyper
import deepchem.metalearning
import deepchem.metrics
import deepchem.models
import deepchem.splits
import deepchem.trans
import deepchem.utils
import deepchem.dock
import deepchem.molnet
import deepchem.rl
<file_sep>"""
Contains wrapper class for datasets.
"""
import json
import os
import csv
import math
import random
import logging
import tempfile
import time
import shutil
import multiprocessing
from multiprocessing.dummy import Pool
from ast import literal_eval as make_tuple
from typing import Any, Dict, Iterable, Iterator, List, Optional, Sequence, Tuple, Union
import numpy as np
from numpy.typing import ArrayLike
import pandas as pd
import deepchem as dc
from deepchem.utils.typing import OneOrMany, Shape
from deepchem.utils.data_utils import save_to_disk, load_from_disk, load_image_files
Batch = Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]
logger = logging.getLogger(__name__)
def sparsify_features(X: np.ndarray) -> np.ndarray:
"""Extracts a sparse feature representation from dense feature array.
Parameters
----------
X: np.ndarray
A numpy array of shape `(n_samples, ...)`.
Returns
-------
X_sparse: np.ndarray
A numpy array with `dtype=object` where `X_sparse[i]` is a
tuple of `(nonzero_inds, nonzero_vals)` with nonzero indices and
values in the i-th sample of `X`.
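Examples
--------
A minimal usage sketch; the array values are purely illustrative:
>>> import numpy as np
>>> X = np.array([[0.0, 1.0, 0.0], [2.0, 0.0, 3.0]])
>>> X_sparse = sparsify_features(X)
>>> nonzero_inds, nonzero_vals = X_sparse[0]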
"""
n_samples = len(X)
X_sparse = []
for i in range(n_samples):
nonzero_inds = np.nonzero(X[i])[0]
nonzero_vals = X[i][nonzero_inds]
X_sparse.append((nonzero_inds, nonzero_vals))
return np.array(X_sparse, dtype=object)
def densify_features(X_sparse: np.ndarray, num_features: int) -> np.ndarray:
"""Expands sparse feature representation to dense feature array.
Assumes that the sparse representation was constructed from an array
which had original shape `(n_samples, num_features)` so doesn't
support reconstructing multidimensional dense arrays.
Parameters
----------
X_sparse: np.ndarray
Must have `dtype=object`. `X_sparse[i]` must be a tuple of nonzero
indices and values.
num_features: int
Number of features in dense array.
Returns
-------
X: np.ndarray
A numpy array of shape `(n_samples, num_features)`.
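Examples
--------
A minimal round trip with `sparsify_features`; the values are purely illustrative:
>>> import numpy as np
>>> X = np.array([[0.0, 1.0], [2.0, 0.0]])
>>> X_dense = densify_features(sparsify_features(X), num_features=2)
>>> assert (X_dense == X).all()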
"""
n_samples = len(X_sparse)
X = np.zeros((n_samples, num_features))
for i in range(n_samples):
nonzero_inds, nonzero_vals = X_sparse[i]
X[i][nonzero_inds.astype(int)] = nonzero_vals
return X
def pad_features(batch_size: int, X_b: np.ndarray) -> np.ndarray:
"""Pads a batch of features to have precisely batch_size elements.
Given an array of features with length less than or equal to
batch-size, pads it to `batch_size` length. It does this by
repeating the original features in tiled fashion. For illustration,
suppose that `len(X_b) == 3` and `batch_size == 10`.
>>> X_b = np.arange(3)
>>> X_b
array([0, 1, 2])
>>> batch_size = 10
>>> X_manual = np.array([0, 1, 2, 0, 1, 2, 0, 1, 2, 0])
>>> X_out = pad_features(batch_size, X_b)
>>> assert (X_manual == X_out).all()
This function is similar to `pad_batch` but doesn't handle labels
`y` or weights `w` and is intended to be used for inference-time
query processing.
Parameters
----------
batch_size: int
The number of datapoints in a batch
X_b: np.ndarray
Must be such that `len(X_b) <= batch_size`
Returns
-------
X_out: np.ndarray
A numpy array with `len(X_out) == batch_size`.
"""
num_samples = len(X_b)
if num_samples > batch_size:
raise ValueError("Cannot pad an array longer than `batch_size`")
elif num_samples == batch_size:
return X_b
else:
# By invariant of when this is called, can assume num_samples > 0
# and num_samples < batch_size
if len(X_b.shape) > 1:
feature_shape = X_b.shape[1:]
X_out = np.zeros((batch_size,) + feature_shape, dtype=X_b.dtype)
else:
X_out = np.zeros((batch_size,), dtype=X_b.dtype)
# Fill in batch arrays
start = 0
while start < batch_size:
num_left = batch_size - start
if num_left < num_samples:
increment = num_left
else:
increment = num_samples
X_out[start:start + increment] = X_b[:increment]
start += increment
return X_out
def pad_batch(batch_size: int, X_b: np.ndarray, y_b: np.ndarray,
w_b: np.ndarray, ids_b: np.ndarray) -> Batch:
"""Pads batch to have size precisely batch_size elements.
Given arrays of features `X_b`, labels `y_b`, weights `w_b`, and
identifiers `ids_b` all with length less than or equal to
batch-size, pads them to `batch_size` length. It does this by
repeating the original entries in tiled fashion. Note that `X_b,
y_b, w_b, ids_b` must all have the same length.
Parameters
----------
batch_size: int
The number of datapoints in a batch
X_b: np.ndarray
Must be such that `len(X_b) <= batch_size`
y_b: np.ndarray
Must be such that `len(y_b) <= batch_size`
w_b: np.ndarray
Must be such that `len(w_b) <= batch_size`
ids_b: np.ndarray
Must be such that `len(ids_b) <= batch_size`
Returns
-------
Batch
The batch is a tuple of `(X_out, y_out, w_out, ids_out)`,
all numpy arrays with length `batch_size`.
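Examples
--------
A minimal usage sketch; the arrays are purely illustrative:
>>> import numpy as np
>>> X_b, y_b = np.arange(3), np.zeros(3)
>>> w_b, ids_b = np.ones(3), np.arange(3)
>>> X_out, y_out, w_out, ids_out = pad_batch(5, X_b, y_b, w_b, ids_b)
>>> len(X_out)
5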
"""
num_samples = len(X_b)
if num_samples == batch_size:
return (X_b, y_b, w_b, ids_b)
# By invariant of when this is called, can assume num_samples > 0
# and num_samples < batch_size
if len(X_b.shape) > 1:
feature_shape = X_b.shape[1:]
X_out = np.zeros((batch_size,) + feature_shape, dtype=X_b.dtype)
else:
X_out = np.zeros((batch_size,), dtype=X_b.dtype)
if y_b is None:
y_out = None
elif len(y_b.shape) < 2:
y_out = np.zeros(batch_size, dtype=y_b.dtype)
else:
y_out = np.zeros((batch_size,) + y_b.shape[1:], dtype=y_b.dtype)
if w_b is None:
w_out = None
elif len(w_b.shape) < 2:
w_out = np.zeros(batch_size, dtype=w_b.dtype)
else:
w_out = np.zeros((batch_size,) + w_b.shape[1:], dtype=w_b.dtype)
ids_out = np.zeros((batch_size,), dtype=ids_b.dtype)
# Fill in batch arrays
start = 0
# Only the first copy of the weights is filled in, so the padded (repeated) samples do not contribute to the training loss
if w_out is not None:
w_out[start:start + num_samples] = w_b[:]
while start < batch_size:
num_left = batch_size - start
if num_left < num_samples:
increment = num_left
else:
increment = num_samples
X_out[start:start + increment] = X_b[:increment]
if y_out is not None:
y_out[start:start + increment] = y_b[:increment]
ids_out[start:start + increment] = ids_b[:increment]
start += increment
return (X_out, y_out, w_out, ids_out)
class Dataset(object):
"""Abstract base class for datasets defined by X, y, w elements.
`Dataset` objects are used to store representations of a dataset as
used in a machine learning task. Datasets contain features `X`,
labels `y`, weights `w` and identifiers `ids`. Different subclasses
of `Dataset` may choose to hold `X, y, w, ids` in memory or on disk.
The `Dataset` class attempts to provide for strong interoperability
with other machine learning representations for datasets.
Interconversion methods allow for `Dataset` objects to be converted
to and from numpy arrays, pandas dataframes, tensorflow datasets,
and pytorch datasets (only to and not from for pytorch at present).
Note that you can never instantiate a `Dataset` object directly.
Instead you will need to instantiate one of the concrete subclasses.
"""
def __init__(self) -> None:
raise NotImplementedError()
def __len__(self) -> int:
"""Get the number of elements in the dataset.
Returns
-------
int
The number of elements in the dataset.
"""
raise NotImplementedError()
def get_shape(self) -> Tuple[Shape, Shape, Shape, Shape]:
"""Get the shape of the dataset.
Returns four tuples, giving the shape of the X, y, w, and ids
arrays.
Returns
-------
Tuple
The tuple contains four elements, which are the shapes of
the X, y, w, and ids arrays.
"""
raise NotImplementedError()
def get_task_names(self) -> np.ndarray:
"""Get the names of the tasks associated with this dataset."""
raise NotImplementedError()
@property
def X(self) -> np.ndarray:
"""Get the X vector for this dataset as a single numpy array.
Returns
-------
np.ndarray
A numpy array of features `X`.
Note
----
If data is stored on disk, accessing this field may involve loading
data from disk and could potentially be slow. Using
`iterbatches()` or `itersamples()` may be more efficient for
larger datasets.
"""
raise NotImplementedError()
@property
def y(self) -> np.ndarray:
"""Get the y vector for this dataset as a single numpy array.
Returns
-------
np.ndarray
A numpy array of labels `y`.
Note
----
If data is stored on disk, accessing this field may involve loading
data from disk and could potentially be slow. Using
`iterbatches()` or `itersamples()` may be more efficient for
larger datasets.
"""
raise NotImplementedError()
@property
def ids(self) -> np.ndarray:
"""Get the ids vector for this dataset as a single numpy array.
Returns
-------
np.ndarray
A numpy array of identifiers `ids`.
Note
----
If data is stored on disk, accessing this field may involve loading
data from disk and could potentially be slow. Using
`iterbatches()` or `itersamples()` may be more efficient for
larger datasets.
"""
raise NotImplementedError()
@property
def w(self) -> np.ndarray:
"""Get the weight vector for this dataset as a single numpy array.
Returns
-------
np.ndarray
A numpy array of weights `w`.
Note
----
If data is stored on disk, accessing this field may involve loading
data from disk and could potentially be slow. Using
`iterbatches()` or `itersamples()` may be more efficient for
larger datasets.
"""
raise NotImplementedError()
def __repr__(self) -> str:
"""Convert self to REPL print representation."""
threshold = dc.utils.get_print_threshold()
task_str = np.array2string(np.array(self.get_task_names()),
threshold=threshold)
X_shape, y_shape, w_shape, _ = self.get_shape()
if self.__len__() < dc.utils.get_max_print_size():
id_str = np.array2string(self.ids, threshold=threshold)
return "<%s X.shape: %s, y.shape: %s, w.shape: %s, ids: %s, task_names: %s>" % (
self.__class__.__name__, str(X_shape), str(y_shape),
str(w_shape), id_str, task_str)
else:
return "<%s X.shape: %s, y.shape: %s, w.shape: %s, task_names: %s>" % (
self.__class__.__name__, str(X_shape), str(y_shape),
str(w_shape), task_str)
def __str__(self) -> str:
"""Convert self to str representation."""
return self.__repr__()
def iterbatches(self,
batch_size: Optional[int] = None,
epochs: int = 1,
deterministic: bool = False,
pad_batches: bool = False) -> Iterator[Batch]:
"""Get an object that iterates over minibatches from the dataset.
Each minibatch is returned as a tuple of four numpy arrays:
`(X, y, w, ids)`.
Parameters
----------
batch_size: int, optional (default None)
Number of elements in each batch.
epochs: int, optional (default 1)
Number of epochs to walk over dataset.
deterministic: bool, optional (default False)
If True, follow deterministic order.
pad_batches: bool, optional (default False)
If True, pad each batch to `batch_size`.
Returns
-------
Iterator[Batch]
Generator which yields tuples of four numpy arrays `(X, y, w, ids)`.
"""
raise NotImplementedError()
def itersamples(self) -> Iterator[Batch]:
"""Get an object that iterates over the samples in the dataset.
Examples
--------
>>> dataset = NumpyDataset(np.ones((2,2)))
>>> for x, y, w, id in dataset.itersamples():
... print(x.tolist(), y.tolist(), w.tolist(), id)
[1.0, 1.0] [0.0] [0.0] 0
[1.0, 1.0] [0.0] [0.0] 1
"""
raise NotImplementedError()
def transform(self, transformer: "dc.trans.Transformer",
**args) -> "Dataset":
"""Construct a new dataset by applying a transformation to every sample in this dataset.
The argument is a function that can be called as follows:
>> newx, newy, neww = fn(x, y, w)
It might be called only once with the whole dataset, or multiple
times with different subsets of the data. Each time it is called,
it should transform the samples and return the transformed data.
Parameters
----------
transformer: dc.trans.Transformer
The transformation to apply to each sample in the dataset.
Returns
-------
Dataset
A newly constructed Dataset object.
"""
raise NotImplementedError()
def select(self,
indices: Union[Sequence[int], np.ndarray],
select_dir: Optional[str] = None) -> "Dataset":
"""Creates a new dataset from a selection of indices from self.
Parameters
----------
indices: Sequence
List of indices to select.
select_dir: str, optional (default None)
Path to new directory that the selected indices will be copied to.
"""
raise NotImplementedError()
def get_statistics(self,
X_stats: bool = True,
y_stats: bool = True) -> Tuple[np.ndarray, ...]:
"""Compute and return statistics of this dataset.
Uses `self.itersamples()` to compute means and standard deviations
of the dataset. Can compute on large datasets that don't fit in
memory.
Parameters
----------
X_stats: bool, optional (default True)
If True, compute feature-level mean and standard deviations.
y_stats: bool, optional (default True)
If True, compute label-level mean and standard deviations.
Returns
-------
Tuple
- If `X_stats == True`, returns `(X_means, X_stds)`.
- If `y_stats == True`, returns `(y_means, y_stds)`.
- If both are true, returns `(X_means, X_stds, y_means, y_stds)`.
"""
x_shape, y_shape, w_shape, ids_shape = self.get_shape()
X_means = np.zeros(x_shape[1:])
X_m2 = np.zeros(x_shape[1:])
y_means = np.zeros(y_shape[1:])
y_m2 = np.zeros(y_shape[1:])
n = 0
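# Welford's online update: accumulate running means and sums of squared
# deviations one sample at a time so the statistics can be computed without
# loading the whole dataset into memory.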
for X, y, _, _ in self.itersamples():
n += 1
if X_stats:
dx = X - X_means
X_means += dx / n
X_m2 += dx * (X - X_means)
if y_stats:
dy = y - y_means
y_means += dy / n
y_m2 += dy * (y - y_means)
if n < 2:
X_stds = np.zeros(x_shape[1:])
y_stds = np.zeros(y_shape[1:])
else:
X_stds = np.sqrt(X_m2 / n)
y_stds = np.sqrt(y_m2 / n)
if X_stats and not y_stats:
return X_means, X_stds
elif y_stats and not X_stats:
return y_means, y_stds
elif X_stats and y_stats:
return X_means, X_stds, y_means, y_stds
else:
return tuple()
def make_tf_dataset(self,
batch_size: int = 100,
epochs: int = 1,
deterministic: bool = False,
pad_batches: bool = False):
"""Create a tf.data.Dataset that iterates over the data in this Dataset.
Each value returned by the Dataset's iterator is a tuple of (X, y,
w) for one batch.
Parameters
----------
batch_size: int, default 100
The number of samples to include in each batch.
epochs: int, default 1
The number of times to iterate over the Dataset.
deterministic: bool, default False
If True, the data is produced in order. If False, a different
random permutation of the data is used for each epoch.
pad_batches: bool, default False
If True, batches are padded as necessary to make the size of
each batch exactly equal batch_size.
Returns
-------
tf.data.Dataset
TensorFlow Dataset that iterates over the same data.
Note
----
This class requires TensorFlow to be installed.
"""
try:
import tensorflow as tf
except:
raise ImportError(
"This method requires TensorFlow to be installed.")
# Retrieve the first sample so we can determine the dtypes.
X, y, w, ids = next(self.itersamples())
dtypes = (tf.as_dtype(X.dtype), tf.as_dtype(y.dtype),
tf.as_dtype(w.dtype))
shapes = (
tf.TensorShape([None] + list(X.shape)), # type: ignore
tf.TensorShape([None] + list(y.shape)), # type: ignore
tf.TensorShape([None] + list(w.shape))) # type: ignore
# Create a Tensorflow Dataset.
def gen_data():
for X, y, w, ids in self.iterbatches(batch_size, epochs,
deterministic, pad_batches):
yield (X, y, w)
return tf.data.Dataset.from_generator(gen_data, dtypes, shapes)
def make_pytorch_dataset(self,
epochs: int = 1,
deterministic: bool = False,
batch_size: Optional[int] = None):
"""Create a torch.utils.data.IterableDataset that iterates over the data in this Dataset.
Each value returned by the Dataset's iterator is a tuple of (X, y, w, id)
containing the data for one batch, or for a single sample if batch_size is None.
Parameters
----------
epochs: int, default 1
The number of times to iterate over the Dataset.
deterministic: bool, default False
If True, the data is produced in order. If False, a different
random permutation of the data is used for each epoch.
batch_size: int, optional (default None)
The number of samples to return in each batch. If None, each returned
value is a single sample.
Returns
-------
torch.utils.data.IterableDataset
`torch.utils.data.IterableDataset` that iterates over the data in
this dataset.
Note
----
This class requires PyTorch to be installed.
"""
raise NotImplementedError()
def to_dataframe(self) -> pd.DataFrame:
"""Construct a pandas DataFrame containing the data from this Dataset.
Returns
-------
pd.DataFrame
Pandas dataframe. If there is only a single feature per datapoint,
will have column "X" else will have columns "X1,X2,..." for
features. If there is only a single label per datapoint, will
have column "y" else will have columns "y1,y2,..." for labels. If
there is only a single weight per datapoint will have column "w"
else will have columns "w1,w2,...". Will have column "ids" for
identifiers.
"""
X = self.X
y = self.y
w = self.w
ids = self.ids
if len(X.shape) == 1 or X.shape[1] == 1:
columns = ['X']
else:
columns = [f'X{i+1}' for i in range(X.shape[1])]
X_df = pd.DataFrame(X, columns=columns)
if len(y.shape) == 1 or y.shape[1] == 1:
columns = ['y']
else:
columns = [f'y{i+1}' for i in range(y.shape[1])]
y_df = pd.DataFrame(y, columns=columns)
if len(w.shape) == 1 or w.shape[1] == 1:
columns = ['w']
else:
columns = [f'w{i+1}' for i in range(w.shape[1])]
w_df = pd.DataFrame(w, columns=columns)
ids_df = pd.DataFrame(ids, columns=['ids'])
return pd.concat([X_df, y_df, w_df, ids_df], axis=1, sort=False)
@staticmethod
def from_dataframe(df: pd.DataFrame,
X: Optional[OneOrMany[str]] = None,
y: Optional[OneOrMany[str]] = None,
w: Optional[OneOrMany[str]] = None,
ids: Optional[str] = None):
"""Construct a Dataset from the contents of a pandas DataFrame.
Parameters
----------
df: pd.DataFrame
The pandas DataFrame
X: str or List[str], optional (default None)
The name of the column or columns containing the X array. If
this is None, it will look for default column names that match
those produced by to_dataframe().
y: str or List[str], optional (default None)
The name of the column or columns containing the y array. If
this is None, it will look for default column names that match
those produced by to_dataframe().
w: str or List[str], optional (default None)
The name of the column or columns containing the w array. If
this is None, it will look for default column names that match
those produced by to_dataframe().
ids: str, optional (default None)
The name of the column containing the ids. If this is None, it
will look for default column names that match those produced by
to_dataframe().
"""
# Find the X values.
if X is not None:
X_val = df[X]
elif 'X' in df.columns:
X_val = df['X']
else:
columns = []
i = 1
while f'X{i}' in df.columns:
columns.append(f'X{i}')
i += 1
X_val = df[columns]
if len(X_val.shape) == 1:
X_val = np.expand_dims(X_val, 1)
# Find the y values.
if y is not None:
y_val = df[y]
elif 'y' in df.columns:
y_val = df['y']
else:
columns = []
i = 1
while f'y{i}' in df.columns:
columns.append(f'y{i}')
i += 1
y_val = df[columns]
if len(y_val.shape) == 1:
y_val = np.expand_dims(y_val, 1)
# Find the w values.
if w is not None:
w_val = df[w]
elif 'w' in df.columns:
w_val = df['w']
else:
columns = []
i = 1
while f'w{i}' in df.columns:
columns.append(f'w{i}')
i += 1
w_val = df[columns]
if len(w_val.shape) == 1:
w_val = np.expand_dims(w_val, 1)
# Find the ids.
if ids is not None:
ids_val = df[ids]
elif 'ids' in df.columns:
ids_val = df['ids']
else:
ids_val = None
return NumpyDataset(X_val, y_val, w_val, ids_val)
def to_csv(self, path: str) -> None:
"""Write object to a comma-seperated values (CSV) file
Example
-------
>>> import numpy as np
>>> X = np.random.rand(10, 10)
>>> dataset = dc.data.DiskDataset.from_numpy(X)
>>> dataset.to_csv('out.csv') # doctest: +SKIP
Parameters
----------
path: str
File path or object
Returns
-------
None
"""
columns = []
X_shape, y_shape, w_shape, id_shape = self.get_shape()
assert len(
X_shape) == 2, "dataset's X values should be scalar or 1-D arrays"
assert len(
y_shape) == 2, "dataset's y values should be scalar or 1-D arrays"
if X_shape[1] == 1:
columns.append('X')
else:
columns.extend([f'X{i+1}' for i in range(X_shape[1])])
if y_shape[1] == 1:
columns.append('y')
else:
columns.extend([f'y{i+1}' for i in range(y_shape[1])])
if w_shape[1] == 1:
columns.append('w')
else:
columns.extend([f'w{i+1}' for i in range(w_shape[1])])
columns.append('ids')
with open(path, 'w', newline='') as csvfile:
writer = csv.writer(csvfile)
writer.writerow(columns)
for (x, y, w, ids) in self.itersamples():
writer.writerow(list(x) + list(y) + list(w) + [ids])
return None
class NumpyDataset(Dataset):
"""A Dataset defined by in-memory numpy arrays.
This subclass of `Dataset` stores arrays `X,y,w,ids` in memory as
numpy arrays. This makes it very easy to construct `NumpyDataset`
objects.
Examples
--------
>>> import numpy as np
>>> dataset = NumpyDataset(X=np.random.rand(5, 3), y=np.random.rand(5,), ids=np.arange(5))
"""
def __init__(self,
X: ArrayLike,
y: Optional[ArrayLike] = None,
w: Optional[ArrayLike] = None,
ids: Optional[ArrayLike] = None,
n_tasks: int = 1) -> None:
"""Initialize this object.
Parameters
----------
X: np.ndarray
Input features. A numpy array of shape `(n_samples,...)`.
y: np.ndarray, optional (default None)
Labels. A numpy array of shape `(n_samples, ...)`. Note that each label can
have an arbitrary shape.
w: np.ndarray, optional (default None)
Weights. Should either be 1D array of shape `(n_samples,)` or if
there's more than one task, of shape `(n_samples, n_tasks)`.
ids: np.ndarray, optional (default None)
Identifiers. A numpy array of shape `(n_samples,)`
n_tasks: int, default 1
Number of learning tasks.
"""
n_samples = np.shape(X)[0]
if n_samples > 0:
if y is None:
# Set labels to be zero, with zero weights
y = np.zeros((n_samples, n_tasks), np.float32)
w = np.zeros((n_samples, 1), np.float32)
if ids is None:
ids = np.arange(n_samples)
if not isinstance(X, np.ndarray):
X = np.array(X)
if not isinstance(y, np.ndarray):
y = np.array(y)
if w is None:
if len(y.shape) == 1:
w = np.ones(y.shape[0], np.float32)
else:
w = np.ones((y.shape[0], 1), np.float32)
if not isinstance(w, np.ndarray):
w = np.array(w)
self._X = X
self._y = y
self._w = w
self._ids = np.array(ids, dtype=object)
def __len__(self) -> int:
"""Get the number of elements in the dataset."""
return len(self._y)
def get_shape(self) -> Tuple[Shape, Shape, Shape, Shape]:
"""Get the shape of the dataset.
Returns four tuples, giving the shape of the X, y, w, and ids arrays.
"""
return self._X.shape, self._y.shape, self._w.shape, self._ids.shape
def get_task_names(self) -> np.ndarray:
"""Get the names of the tasks associated with this dataset."""
if len(self._y.shape) < 2:
return np.array([0])
return np.arange(self._y.shape[1])
@property
def X(self) -> np.ndarray:
"""Get the X vector for this dataset as a single numpy array."""
return self._X
@property
def y(self) -> np.ndarray:
"""Get the y vector for this dataset as a single numpy array."""
return self._y
@property
def ids(self) -> np.ndarray:
"""Get the ids vector for this dataset as a single numpy array."""
return self._ids
@property
def w(self) -> np.ndarray:
"""Get the weight vector for this dataset as a single numpy array."""
return self._w
def iterbatches(self,
batch_size: Optional[int] = None,
epochs: int = 1,
deterministic: bool = False,
pad_batches: bool = False) -> Iterator[Batch]:
"""Get an object that iterates over minibatches from the dataset.
Each minibatch is returned as a tuple of four numpy arrays:
`(X, y, w, ids)`.
Parameters
----------
batch_size: int, optional (default None)
Number of elements in each batch.
epochs: int, default 1
Number of epochs to walk over dataset.
deterministic: bool, optional (default False)
If True, follow deterministic order.
pad_batches: bool, optional (default False)
If True, pad each batch to `batch_size`.
Returns
-------
Iterator[Batch]
Generator which yields tuples of four numpy arrays `(X, y, w, ids)`.
"""
def iterate(dataset: NumpyDataset, batch_size: Optional[int],
epochs: int, deterministic: bool, pad_batches: bool):
n_samples = dataset._X.shape[0]
if deterministic:
sample_perm = np.arange(n_samples)
if batch_size is None:
batch_size = n_samples
for epoch in range(epochs):
if not deterministic:
sample_perm = np.random.permutation(n_samples)
batch_idx = 0
num_batches = math.ceil(n_samples / batch_size)
while batch_idx < num_batches:
start = batch_idx * batch_size
end = min(n_samples, (batch_idx + 1) * batch_size)
indices = range(start, end)
perm_indices = sample_perm[indices]
X_batch = dataset._X[perm_indices]
y_batch = dataset._y[perm_indices]
w_batch = dataset._w[perm_indices]
ids_batch = dataset._ids[perm_indices]
if pad_batches:
(X_batch, y_batch, w_batch,
ids_batch) = pad_batch(batch_size, X_batch, y_batch,
w_batch, ids_batch)
batch_idx += 1
yield (X_batch, y_batch, w_batch, ids_batch)
return iterate(self, batch_size, epochs, deterministic, pad_batches)
def itersamples(self) -> Iterator[Batch]:
"""Get an object that iterates over the samples in the dataset.
Returns
-------
Iterator[Batch]
Iterator which yields tuples of four numpy arrays `(X, y, w, ids)`.
Examples
--------
>>> dataset = NumpyDataset(np.ones((2,2)))
>>> for x, y, w, id in dataset.itersamples():
... print(x.tolist(), y.tolist(), w.tolist(), id)
[1.0, 1.0] [0.0] [0.0] 0
[1.0, 1.0] [0.0] [0.0] 1
"""
n_samples = self._X.shape[0]
return ((self._X[i], self._y[i], self._w[i], self._ids[i])
for i in range(n_samples))
def transform(self, transformer: "dc.trans.Transformer",
**args) -> "NumpyDataset":
"""Construct a new dataset by applying a transformation to every sample in this dataset.
The argument is a function that can be called as follows:
>> newx, newy, neww = fn(x, y, w)
It might be called only once with the whole dataset, or multiple
times with different subsets of the data. Each time it is called,
it should transform the samples and return the transformed data.
Parameters
----------
transformer: dc.trans.Transformer
The transformation to apply to each sample in the dataset
Returns
-------
NumpyDataset
A newly constructed NumpyDataset object
"""
newx, newy, neww, newids = transformer.transform_array(
self._X, self._y, self._w, self._ids)
return NumpyDataset(newx, newy, neww, newids)
def select(self,
indices: Union[Sequence[int], np.ndarray],
select_dir: Optional[str] = None) -> "NumpyDataset":
"""Creates a new dataset from a selection of indices from self.
Parameters
----------
indices: List[int]
List of indices to select.
select_dir: str, optional (default None)
Used to provide same API as `DiskDataset`. Ignored since
`NumpyDataset` is purely in-memory.
Returns
-------
NumpyDataset
A selected NumpyDataset object
"""
X = self.X[indices]
y = self.y[indices]
w = self.w[indices]
ids = self.ids[indices]
return NumpyDataset(X, y, w, ids)
def make_pytorch_dataset(self,
epochs: int = 1,
deterministic: bool = False,
batch_size: Optional[int] = None):
"""Create a torch.utils.data.IterableDataset that iterates over the data in this Dataset.
Each value returned by the Dataset's iterator is a tuple of (X, y, w, id)
containing the data for one batch, or for a single sample if batch_size is None.
Parameters
----------
epochs: int, default 1
The number of times to iterate over the Dataset
deterministic: bool, default False
If True, the data is produced in order. If False, a different
random permutation of the data is used for each epoch.
batch_size: int, optional (default None)
The number of samples to return in each batch. If None, each returned
value is a single sample.
Returns
-------
torch.utils.data.IterableDataset
`torch.utils.data.IterableDataset` that iterates over the data in
this dataset.
Note
----
This method requires PyTorch to be installed.
"""
try:
from deepchem.data.pytorch_datasets import _TorchNumpyDataset
except:
raise ImportError("This method requires PyTorch to be installed.")
pytorch_ds = _TorchNumpyDataset(numpy_dataset=self,
epochs=epochs,
deterministic=deterministic,
batch_size=batch_size)
return pytorch_ds
@staticmethod
def from_DiskDataset(ds: "DiskDataset") -> "NumpyDataset":
"""Convert DiskDataset to NumpyDataset.
Parameters
----------
ds: DiskDataset
DiskDataset to transform to NumpyDataset.
Returns
-------
NumpyDataset
A new NumpyDataset created from DiskDataset.
"""
return NumpyDataset(ds.X, ds.y, ds.w, ds.ids)
def to_json(self, fname: str) -> None:
"""Dump NumpyDataset to the json file .
Parameters
----------
fname: str
The name of the json file.
"""
d = {
'X': self.X.tolist(),
'y': self.y.tolist(),
'w': self.w.tolist(),
'ids': self.ids.tolist()
}
with open(fname, 'w') as fout:
json.dump(d, fout)
@staticmethod
def from_json(fname: str) -> "NumpyDataset":
"""Create NumpyDataset from the json file.
Parameters
----------
fname: str
The name of the json file.
Returns
-------
NumpyDataset
A new NumpyDataset created from the json file.
"""
with open(fname) as fin:
d = json.load(fin)
return NumpyDataset(d['X'], d['y'], d['w'], d['ids'])
@staticmethod
def merge(datasets: Sequence[Dataset]) -> "NumpyDataset":
"""Merge multiple NumpyDatasets.
Parameters
----------
datasets: List[Dataset]
List of datasets to merge.
Returns
-------
NumpyDataset
A single NumpyDataset containing all the samples from all datasets.
Example
-------
>>> X1, y1 = np.random.rand(5, 3), np.random.randn(5, 1)
>>> first_dataset = dc.data.NumpyDataset(X1, y1)
>>> X2, y2 = np.random.rand(5, 3), np.random.randn(5, 1)
>>> second_dataset = dc.data.NumpyDataset(X2, y2)
>>> merged_dataset = dc.data.NumpyDataset.merge([first_dataset, second_dataset])
>>> print(len(merged_dataset) == len(first_dataset) + len(second_dataset))
True
"""
X, y, w, ids = datasets[0].X, datasets[0].y, datasets[0].w, datasets[
0].ids
for dataset in datasets[1:]:
X = np.concatenate([X, dataset.X], axis=0)
y = np.concatenate([y, dataset.y], axis=0)
w = np.concatenate([w, dataset.w], axis=0)
ids = np.concatenate(
[ids, dataset.ids],
axis=0,
)
if y.ndim == 1:
y = y.reshape(-1, 1)
return NumpyDataset(X, y, w, ids, n_tasks=y.shape[1])
class _Shard(object):
def __init__(self, X, y, w, ids):
self.X = X
self.y = y
self.w = w
self.ids = ids
class DiskDataset(Dataset):
"""
A Dataset that is stored as a set of files on disk.
The DiskDataset is the workhorse class of DeepChem that facilitates analyses
on large datasets. Use this class whenever you're working with a large
dataset that can't be easily manipulated in RAM.
On disk, a `DiskDataset` has a simple structure. All files for a given
`DiskDataset` are stored in a `data_dir`. The contents of `data_dir` should
be laid out as follows:
| data_dir/
| |
| ---> metadata.csv.gzip
| |
| ---> tasks.json
| |
| ---> shard-0-X.npy
| |
| ---> shard-0-y.npy
| |
| ---> shard-0-w.npy
| |
| ---> shard-0-ids.npy
| |
| ---> shard-1-X.npy
| .
| .
| .
The metadata is constructed by static method
`DiskDataset._construct_metadata` and saved to disk by
`DiskDataset._save_metadata`. The metadata itself consists of a csv file
which has columns `('ids', 'X', 'y', 'w', 'ids_shape', 'X_shape', 'y_shape',
'w_shape')`. `tasks.json` consists of a list of task names for this dataset.
The actual data is stored in `.npy` files (numpy array files) of the form
'shard-0-X.npy', 'shard-0-y.npy', etc.
The basic structure of `DiskDataset` is quite robust and will likely serve
you well for datasets up to about 100 GB. However, note that
`DiskDataset` has not been tested for very large datasets at the terabyte
range and beyond. You may be better served by implementing a custom
`Dataset` class for those use cases.
Examples
--------
Let's walk through a simple example of constructing a new `DiskDataset`.
>>> import deepchem as dc
>>> import numpy as np
>>> X = np.random.rand(10, 10)
>>> dataset = dc.data.DiskDataset.from_numpy(X)
If you have already saved a `DiskDataset` to `data_dir`, you can reinitialize it with
>> data_dir = "/path/to/my/data"
>> dataset = dc.data.DiskDataset(data_dir)
Once you have a dataset you can access its attributes as follows
>>> X = np.random.rand(10, 10)
>>> y = np.random.rand(10,)
>>> w = np.ones_like(y)
>>> dataset = dc.data.DiskDataset.from_numpy(X)
>>> X, y, w = dataset.X, dataset.y, dataset.w
One thing to beware of is that `dataset.X`, `dataset.y`, `dataset.w` are
loading data from disk! If you have a large dataset, these operations can be
extremely slow. Instead, try iterating through the dataset.
>>> for (xi, yi, wi, idi) in dataset.itersamples():
... pass
Attributes
----------
data_dir: str
Location of directory where this `DiskDataset` is stored to disk
metadata_df: pd.DataFrame
Pandas Dataframe holding metadata for this `DiskDataset`
legacy_metadata: bool
Whether this `DiskDataset` uses legacy format.
Note
----
`DiskDataset` originally had a simpler metadata format without shape
information. Older `DiskDataset` objects had metadata files with columns
`('ids', 'X', 'y', 'w')` and not additional shape columns. `DiskDataset`
maintains backwards compatibility with this older metadata format, but for
performance reasons we recommend not using legacy metadata for new
projects.
"""
def __init__(self, data_dir: str) -> None:
"""Load a constructed DiskDataset from disk
Note that this method cannot construct a new disk dataset. Instead use
static methods `DiskDataset.create_dataset` or `DiskDataset.from_numpy`
for that purpose. Use this constructor instead to load a `DiskDataset`
that has already been created on disk.
Parameters
----------
data_dir: str
Location on disk of an existing `DiskDataset`.
"""
self.data_dir = data_dir
logger.info("Loading dataset from disk.")
tasks, self.metadata_df = self.load_metadata()
self.tasks = np.array(tasks)
if len(self.metadata_df.columns) == 4 and list(
self.metadata_df.columns) == ['ids', 'X', 'y', 'w']:
logger.info(
"Detected legacy metatadata on disk. You can upgrade from legacy metadata "
"to the more efficient current metadata by resharding this dataset "
"by calling the reshard() method of this object.")
self.legacy_metadata = True
elif len(self.metadata_df.columns) == 8 and list(
self.metadata_df.columns) == [
'ids', 'X', 'y', 'w', 'ids_shape', 'X_shape', 'y_shape',
'w_shape'
]: # noqa
self.legacy_metadata = False
else:
raise ValueError(
"Malformed metadata on disk. Metadata must have columns 'ids', 'X', 'y', 'w', "
"'ids_shape', 'X_shape', 'y_shape', 'w_shape' (or if in legacy metadata format,"
"columns 'ids', 'X', 'y', 'w')")
self._cached_shards: Optional[List] = None
self._memory_cache_size = 20 * (1 << 20) # 20 MB
self._cache_used = 0
@staticmethod
def create_dataset(shard_generator: Iterable[Batch],
data_dir: Optional[str] = None,
tasks: Optional[ArrayLike] = None) -> "DiskDataset":
"""Creates a new DiskDataset
Parameters
----------
shard_generator: Iterable[Batch]
An iterable (either a list or generator) that provides tuples of data
(X, y, w, ids). Each tuple will be written to a separate shard on disk.
data_dir: str, optional (default None)
Filename for data directory. Creates a temp directory if none specified.
tasks: Sequence, optional (default [])
List of tasks for this dataset.
Returns
-------
DiskDataset
A new `DiskDataset` constructed from the given data
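Examples
--------
A minimal sketch that writes two shards from an in-memory list of tuples
(the shapes are arbitrary; a temporary data_dir is used because none is given):
>> import numpy as np
>> shards = [(np.random.rand(10, 5), np.random.rand(10, 1), np.ones((10, 1)), np.arange(10)) for _ in range(2)]
>> dataset = DiskDataset.create_dataset(shards)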
"""
if data_dir is None:
data_dir = tempfile.mkdtemp()
elif not os.path.exists(data_dir):
os.makedirs(data_dir)
metadata_rows = []
time1 = time.time()
for shard_num, (X, y, w, ids) in enumerate(shard_generator):
if shard_num == 0:
if tasks is None and y is not None:
# The line here assumes that y generated by shard_generator is a numpy array
tasks = np.array([0]) if y.ndim < 2 else np.arange(
y.shape[1])
basename = "shard-%d" % shard_num
metadata_rows.append(
DiskDataset.write_data_to_disk(data_dir, basename, X, y, w,
ids))
metadata_df = DiskDataset._construct_metadata(metadata_rows)
DiskDataset._save_metadata(metadata_df, data_dir, tasks)
time2 = time.time()
logger.info("TIMING: dataset construction took %0.3f s" %
(time2 - time1))
return DiskDataset(data_dir)
def load_metadata(self) -> Tuple[List[str], pd.DataFrame]:
"""Helper method that loads metadata from disk."""
try:
tasks_filename, metadata_filename = self._get_metadata_filename()
with open(tasks_filename) as fin:
tasks = json.load(fin)
metadata_df = pd.read_csv(metadata_filename,
compression='gzip',
dtype=object)
metadata_df = metadata_df.where((pd.notnull(metadata_df)), None)
return tasks, metadata_df
except Exception:
pass
# Load obsolete format -> save in new format
metadata_filename = os.path.join(self.data_dir, "metadata.joblib")
if os.path.exists(metadata_filename):
tasks, metadata_df = load_from_disk(metadata_filename)
del metadata_df['task_names']
del metadata_df['basename']
DiskDataset._save_metadata(metadata_df, self.data_dir, tasks)
return tasks, metadata_df
raise ValueError("No Metadata Found On Disk")
@staticmethod
def _save_metadata(metadata_df: pd.DataFrame, data_dir: str,
tasks: Optional[ArrayLike]) -> None:
"""Saves the metadata for a DiskDataset
Parameters
----------
metadata_df: pd.DataFrame
The dataframe which will be written to disk.
data_dir: str
Directory to store metadata.
tasks: Sequence, optional
Tasks of DiskDataset. If `None`, an empty list of tasks is written to
disk.
"""
if tasks is None:
tasks = []
elif isinstance(tasks, np.ndarray):
tasks = tasks.tolist()
metadata_filename = os.path.join(data_dir, "metadata.csv.gzip")
tasks_filename = os.path.join(data_dir, "tasks.json")
with open(tasks_filename, 'w') as fout:
json.dump(tasks, fout)
metadata_df.to_csv(metadata_filename, index=False, compression='gzip')
@staticmethod
def _construct_metadata(metadata_entries: List) -> pd.DataFrame:
"""Construct a dataframe containing metadata.
Parameters
----------
metadata_entries: List
`metadata_entries` should have elements returned by write_data_to_disk
above.
Returns
-------
pd.DataFrame
A Pandas Dataframe object contains metadata.
"""
columns = ('ids', 'X', 'y', 'w', 'ids_shape', 'X_shape', 'y_shape',
'w_shape')
metadata_df = pd.DataFrame(metadata_entries, columns=columns)
return metadata_df
@staticmethod
def write_data_to_disk(data_dir: str,
basename: str,
X: Optional[np.ndarray] = None,
y: Optional[np.ndarray] = None,
w: Optional[np.ndarray] = None,
ids: Optional[np.ndarray] = None) -> List[Any]:
"""Static helper method to write data to disk.
This helper method is used to write a shard of data to disk.
Parameters
----------
data_dir: str
Data directory to write shard to.
basename: str
Basename for the shard in question.
X: np.ndarray, optional (default None)
The features array.
y: np.ndarray, optional (default None)
The labels array.
w: np.ndarray, optional (default None)
The weights array.
ids: np.ndarray, optional (default None)
The identifiers array.
Returns
-------
List[Optional[str]]
List with values `[out_ids, out_X, out_y, out_w, out_ids_shape,
out_X_shape, out_y_shape, out_w_shape]` with filenames of locations to
disk which these respective arrays were written.
"""
if X is not None:
out_X: Optional[str] = "%s-X.npy" % basename
save_to_disk(X, os.path.join(data_dir, out_X)) # type: ignore
out_X_shape: Optional[Tuple[int, ...]] = X.shape
else:
out_X = None
out_X_shape = None
if y is not None:
out_y: Optional[str] = "%s-y.npy" % basename
save_to_disk(y, os.path.join(data_dir, out_y)) # type: ignore
out_y_shape: Optional[Tuple[int, ...]] = y.shape
else:
out_y = None
out_y_shape = None
if w is not None:
out_w: Optional[str] = "%s-w.npy" % basename
save_to_disk(w, os.path.join(data_dir, out_w)) # type: ignore
out_w_shape: Optional[Tuple[int, ...]] = w.shape
else:
out_w = None
out_w_shape = None
if ids is not None:
out_ids: Optional[str] = "%s-ids.npy" % basename
save_to_disk(ids, os.path.join(data_dir, out_ids)) # type: ignore
out_ids_shape: Optional[Tuple[int, ...]] = ids.shape
else:
out_ids = None
out_ids_shape = None
# note that this corresponds to the _construct_metadata column order
return [
out_ids, out_X, out_y, out_w, out_ids_shape, out_X_shape,
out_y_shape, out_w_shape
]
def save_to_disk(self) -> None:
"""Save dataset to disk."""
DiskDataset._save_metadata(self.metadata_df, self.data_dir, self.tasks)
self._cached_shards = None
def move(self,
new_data_dir: str,
delete_if_exists: Optional[bool] = True) -> None:
"""Moves dataset to new directory.
Parameters
----------
new_data_dir: str
The new directory name to move this dataset to.
delete_if_exists: bool, optional (default True)
If this option is set, delete the destination directory if it exists
before moving. This is set to True by default to be backwards compatible
with behavior in earlier versions of DeepChem.
Note
----
This is a stateful operation! `self.data_dir` will be moved into
`new_data_dir`. If `delete_if_exists` is set to `True` (by default this is
set `True`), then `new_data_dir` is deleted if it's a pre-existing
directory.
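Examples
--------
A minimal sketch (the destination path is illustrative only):
>> dataset = DiskDataset.from_numpy(np.random.rand(10, 10))
>> dataset.move("/tmp/moved_dataset")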
"""
if delete_if_exists and os.path.isdir(new_data_dir):
shutil.rmtree(new_data_dir)
shutil.move(self.data_dir, new_data_dir)
if delete_if_exists:
self.data_dir = new_data_dir
else:
self.data_dir = os.path.join(new_data_dir,
os.path.basename(self.data_dir))
def copy(self, new_data_dir: str) -> "DiskDataset":
"""Copies dataset to new directory.
Parameters
----------
new_data_dir: str
The new directory name to copy this dataset to.
Returns
-------
DiskDataset
A copied DiskDataset object.
Note
----
This is a stateful operation! Any data at `new_data_dir` will be deleted
and `self.data_dir` will be deep copied into `new_data_dir`.
"""
if os.path.isdir(new_data_dir):
shutil.rmtree(new_data_dir)
shutil.copytree(self.data_dir, new_data_dir)
return DiskDataset(new_data_dir)
def get_task_names(self) -> np.ndarray:
"""Gets learning tasks associated with this dataset."""
return self.tasks
def reshard(self, shard_size: int) -> None:
"""Reshards data to have specified shard size.
Parameters
----------
shard_size: int
The size of shard.
Examples
--------
>>> import deepchem as dc
>>> import numpy as np
>>> X = np.random.rand(100, 10)
>>> d = dc.data.DiskDataset.from_numpy(X)
>>> d.reshard(shard_size=10)
>>> d.get_number_shards()
10
Note
----
If this `DiskDataset` is in `legacy_metadata` format, reshard will
convert this dataset to have non-legacy metadata.
"""
# Create temp directory to store resharded version
reshard_dir = tempfile.mkdtemp()
n_shards = self.get_number_shards()
# Get correct shapes for y/w
tasks = self.get_task_names()
_, y_shape, w_shape, _ = self.get_shape()
if len(y_shape) == 1:
y_shape = (len(y_shape), len(tasks))
if len(w_shape) == 1:
w_shape = (len(w_shape), len(tasks))
# Write data in new shards
def generator():
X_next = np.zeros((0,) + self.get_data_shape())
y_next = np.zeros((0,) + y_shape[1:])
w_next = np.zeros((0,) + w_shape[1:])
ids_next = np.zeros((0,), dtype=object)
for shard_num, (X, y, w, ids) in enumerate(self.itershards()):
logger.info("Resharding shard %d/%d" %
(shard_num + 1, n_shards))
# Handle shapes
X = np.reshape(X, (len(X),) + self.get_data_shape())
# Note that this means that DiskDataset resharding currently doesn't
# work for datasets that aren't regression/classification.
if y is None: # datasets without label
y = y_next
w = w_next
else:
y = np.reshape(y, (len(y),) + y_shape[1:])
w = np.reshape(w, (len(w),) + w_shape[1:])
X_next = np.concatenate([X_next, X], axis=0)
y_next = np.concatenate([y_next, y], axis=0)
w_next = np.concatenate([w_next, w], axis=0)
ids_next = np.concatenate([ids_next, ids])
while len(X_next) > shard_size:
X_batch, X_next = X_next[:shard_size], X_next[shard_size:]
y_batch, y_next = y_next[:shard_size], y_next[shard_size:]
w_batch, w_next = w_next[:shard_size], w_next[shard_size:]
ids_batch, ids_next = ids_next[:shard_size], ids_next[
shard_size:]
yield (X_batch, y_batch, w_batch, ids_batch)
# Handle spillover from last shard
yield (X_next, y_next, w_next, ids_next)
resharded_dataset = DiskDataset.create_dataset(generator(),
data_dir=reshard_dir,
tasks=self.tasks)
shutil.rmtree(self.data_dir)
shutil.move(reshard_dir, self.data_dir)
# Should have updated to non-legacy metadata
self.legacy_metadata = False
self.metadata_df = resharded_dataset.metadata_df
# Note that this resets the cache internally
self.save_to_disk()
def get_data_shape(self) -> Shape:
"""Gets array shape of datapoints in this dataset."""
if not len(self.metadata_df):
raise ValueError("No data in dataset.")
if self.legacy_metadata:
sample_X = load_from_disk(
os.path.join(self.data_dir,
next(self.metadata_df.iterrows())[1]['X']))
return np.shape(sample_X)[1:]
else:
X_shape, _, _, _ = self.get_shape()
return X_shape[1:]
def get_shard_size(self) -> int:
"""Gets size of shards on disk."""
if not len(self.metadata_df):
raise ValueError("No data in dataset.")
sample_ids = load_from_disk(
os.path.join(self.data_dir,
next(self.metadata_df.iterrows())[1]['ids']))
return len(sample_ids)
def _get_metadata_filename(self) -> Tuple[str, str]:
"""Get standard location for metadata file."""
metadata_filename = os.path.join(self.data_dir, "metadata.csv.gzip")
tasks_filename = os.path.join(self.data_dir, "tasks.json")
return tasks_filename, metadata_filename
def get_number_shards(self) -> int:
"""Returns the number of shards for this dataset."""
return self.metadata_df.shape[0]
def itershards(self) -> Iterator[Batch]:
"""Return an object that iterates over all shards in dataset.
Datasets are stored in sharded fashion on disk. Each call to next() for the
generator defined by this function returns the data from a particular shard.
The order of shards returned is guaranteed to remain fixed.
Returns
-------
Iterator[Batch]
Generator which yields tuples of four numpy arrays `(X, y, w, ids)`.
"""
return (self.get_shard(i) for i in range(self.get_number_shards()))
def iterbatches(self,
batch_size: Optional[int] = None,
epochs: int = 1,
deterministic: bool = False,
pad_batches: bool = False) -> Iterator[Batch]:
""" Get an object that iterates over minibatches from the dataset.
It is guaranteed that the number of batches returned is
`math.ceil(len(dataset)/batch_size)`. Each minibatch is returned as
a tuple of four numpy arrays: `(X, y, w, ids)`.
Parameters
----------
batch_size: int, optional (default None)
Number of elements in a batch. If None, then it yields batches
with size equal to the size of each individual shard.
epochs: int, default 1
Number of epochs to walk over the dataset.
deterministic: bool, default False
Whether to iterate over shards and samples in a fixed order. If False,
each shard is shuffled before its batches are generated. Note that
shuffling is only local: samples are never mixed between different shards.
pad_batches: bool, default False
Whether to pad the last batch, globally, so that it has exactly
batch_size elements.
Returns
-------
Iterator[Batch]
Generator which yields tuples of four numpy arrays `(X, y, w, ids)`.
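Examples
--------
A minimal sketch, assuming `dataset` is an existing `DiskDataset` (the
batch size shown is arbitrary):
>> batches = list(dataset.iterbatches(batch_size=32, deterministic=True))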
"""
shard_indices = list(range(self.get_number_shards()))
return self._iterbatches_from_shards(shard_indices, batch_size, epochs,
deterministic, pad_batches)
def _iterbatches_from_shards(self,
shard_indices: Sequence[int],
batch_size: Optional[int] = None,
epochs: int = 1,
deterministic: bool = False,
pad_batches: bool = False) -> Iterator[Batch]:
"""Get an object that iterates over batches from a restricted set of shards."""
def iterate(dataset: DiskDataset, batch_size: Optional[int],
epochs: int):
num_shards = len(shard_indices)
if deterministic:
shard_perm = np.arange(num_shards)
# (ytz): Depending on the application, thread-based pools may be faster
# than process based pools, since process based pools need to pickle/serialize
# objects as an extra overhead. Also, as hideously thread-unsafe as this looks,
# we're actually protected by the GIL.
# mp.dummy aliases ThreadPool to Pool
pool = Pool(1)
if batch_size is None:
num_global_batches = num_shards
else:
num_global_batches = math.ceil(dataset.get_shape()[0][0] /
batch_size)
for epoch in range(epochs):
if not deterministic:
shard_perm = np.random.permutation(num_shards)
next_shard = pool.apply_async(dataset.get_shard,
(shard_indices[shard_perm[0]],))
cur_global_batch = 0
cur_shard = 0
carry = None
while cur_global_batch < num_global_batches:
X, y, w, ids = next_shard.get()
if cur_shard < num_shards - 1:
next_shard = pool.apply_async(
dataset.get_shard,
(shard_indices[shard_perm[cur_shard + 1]],))
elif epoch == epochs - 1:
pool.close()
if carry is not None:
X = np.concatenate([carry[0], X], axis=0)
if y is not None:
y = np.concatenate([carry[1], y], axis=0)
if w is not None:
w = np.concatenate([carry[2], w], axis=0)
ids = np.concatenate([carry[3], ids], axis=0)
carry = None
n_shard_samples = X.shape[0]
cur_local_batch = 0
if batch_size is None:
shard_batch_size = n_shard_samples
else:
shard_batch_size = batch_size
if n_shard_samples == 0:
cur_shard += 1
if batch_size is None:
cur_global_batch += 1
continue
num_local_batches = math.ceil(n_shard_samples /
shard_batch_size)
if not deterministic:
sample_perm = np.random.permutation(n_shard_samples)
else:
sample_perm = np.arange(n_shard_samples)
while cur_local_batch < num_local_batches:
start = cur_local_batch * shard_batch_size
end = min(n_shard_samples,
(cur_local_batch + 1) * shard_batch_size)
indices = range(start, end)
perm_indices = sample_perm[indices]
X_b = X[perm_indices]
if y is not None:
y_b = y[perm_indices]
else:
y_b = None
if w is not None:
w_b = w[perm_indices]
else:
w_b = None
ids_b = ids[perm_indices]
assert len(X_b) <= shard_batch_size
if len(
X_b
) < shard_batch_size and cur_shard != num_shards - 1:
assert carry is None
carry = [X_b, y_b, w_b, ids_b]
else:
# (ytz): this skips everything except possibly the last shard
if pad_batches:
(X_b, y_b, w_b,
ids_b) = pad_batch(shard_batch_size, X_b, y_b,
w_b, ids_b)
yield X_b, y_b, w_b, ids_b
cur_global_batch += 1
cur_local_batch += 1
cur_shard += 1
return iterate(self, batch_size, epochs)
def itersamples(self) -> Iterator[Batch]:
"""Get an object that iterates over the samples in the dataset.
Returns
-------
Iterator[Batch]
Generator which yields tuples of four numpy arrays `(X, y, w, ids)`.
Examples
--------
>>> dataset = DiskDataset.from_numpy(np.ones((2,2)), np.ones((2,1)))
>>> for x, y, w, id in dataset.itersamples():
... print(x.tolist(), y.tolist(), w.tolist(), id)
[1.0, 1.0] [1.0] [1.0] 0
[1.0, 1.0] [1.0] [1.0] 1
"""
def iterate(dataset):
for (X_shard, y_shard, w_shard, ids_shard) in dataset.itershards():
n_samples = X_shard.shape[0]
for i in range(n_samples):
def sanitize(elem):
if elem is None:
return None
else:
return elem[i]
yield map(sanitize, [X_shard, y_shard, w_shard, ids_shard])
return iterate(self)
def transform(self,
transformer: "dc.trans.Transformer",
parallel: bool = False,
out_dir: Optional[str] = None,
**args) -> "DiskDataset":
"""Construct a new dataset by applying a transformation to every sample in this dataset.
The argument is a function that can be called as follows:
>> newx, newy, neww = fn(x, y, w)
It might be called only once with the whole dataset, or multiple times
with different subsets of the data. Each time it is called, it should
transform the samples and return the transformed data.
Parameters
----------
transformer: dc.trans.Transformer
The transformation to apply to each sample in the dataset.
parallel: bool, default False
If True, use multiple processes to transform the dataset in parallel.
out_dir: str, optional (default None)
The directory to save the new dataset in. If this is omitted, a
temporary directory is created automatically.
Returns
-------
DiskDataset
A newly constructed Dataset object
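Examples
--------
A minimal sketch, assuming `dataset` is an existing `DiskDataset` with labels;
the normalization transformer is just one of the transformers available in
`dc.trans`:
>> transformer = dc.trans.NormalizationTransformer(transform_y=True, dataset=dataset)
>> transformed = dataset.transform(transformer)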
"""
if out_dir is None:
out_dir = tempfile.mkdtemp()
tasks = self.get_task_names()
n_shards = self.get_number_shards()
time1 = time.time()
if parallel:
results = []
pool = multiprocessing.Pool()
for i in range(self.get_number_shards()):
row = self.metadata_df.iloc[i]
X_file = os.path.join(self.data_dir, row['X'])
if row['y'] is not None:
y_file: Optional[str] = os.path.join(
self.data_dir, row['y'])
else:
y_file = None
if row['w'] is not None:
w_file: Optional[str] = os.path.join(
self.data_dir, row['w'])
else:
w_file = None
ids_file = os.path.join(self.data_dir, row['ids'])
results.append(
pool.apply_async(DiskDataset._transform_shard,
(transformer, i, X_file, y_file, w_file,
ids_file, out_dir, tasks)))
pool.close()
metadata_rows = [r.get() for r in results]
metadata_df = DiskDataset._construct_metadata(metadata_rows)
DiskDataset._save_metadata(metadata_df, out_dir, tasks)
dataset = DiskDataset(out_dir)
else:
def generator():
for shard_num, row in self.metadata_df.iterrows():
logger.info("Transforming shard %d/%d" %
(shard_num, n_shards))
X, y, w, ids = self.get_shard(shard_num)
newx, newy, neww, newids = transformer.transform_array(
X, y, w, ids)
yield (newx, newy, neww, newids)
dataset = DiskDataset.create_dataset(generator(),
data_dir=out_dir,
tasks=tasks)
time2 = time.time()
logger.info("TIMING: transforming took %0.3f s" % (time2 - time1))
return dataset
@staticmethod
def _transform_shard(transformer: "dc.trans.Transformer", shard_num: int,
X_file: str, y_file: str, w_file: str, ids_file: str,
out_dir: str,
tasks: np.ndarray) -> List[Optional[str]]:
"""This is called by transform() to transform a single shard."""
X = None if X_file is None else np.array(load_from_disk(X_file))
y = None if y_file is None else np.array(load_from_disk(y_file))
w = None if w_file is None else np.array(load_from_disk(w_file))
ids = np.array(load_from_disk(ids_file))
X, y, w, ids = transformer.transform_array(X, y, w, ids)
basename = "shard-%d" % shard_num
return DiskDataset.write_data_to_disk(out_dir, basename, X, y, w, ids)
def make_pytorch_dataset(self,
epochs: int = 1,
deterministic: bool = False,
batch_size: Optional[int] = None):
"""Create a torch.utils.data.IterableDataset that iterates over the data in this Dataset.
Each value returned by the Dataset's iterator is a tuple of (X, y, w, id)
containing the data for one batch, or for a single sample if batch_size is None.
Parameters
----------
epochs: int, default 1
The number of times to iterate over the Dataset
deterministic: bool, default False
If True, the data is produced in order. If False, a different
random permutation of the data is used for each epoch.
batch_size: int, optional (default None)
The number of samples to return in each batch. If None, each returned
value is a single sample.
Returns
-------
torch.utils.data.IterableDataset
`torch.utils.data.IterableDataset` that iterates over the data in
this dataset.
Note
----
This method requires PyTorch to be installed.
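Examples
--------
A minimal sketch, assuming PyTorch is installed and `dataset` is an existing
`DiskDataset` (the batch size shown is arbitrary):
>> torch_ds = dataset.make_pytorch_dataset(epochs=1, batch_size=32)
>> first_batch = next(iter(torch_ds))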
"""
try:
from deepchem.data.pytorch_datasets import _TorchDiskDataset
except ImportError:
raise ImportError("This method requires PyTorch to be installed.")
pytorch_ds = _TorchDiskDataset(disk_dataset=self,
epochs=epochs,
deterministic=deterministic,
batch_size=batch_size)
return pytorch_ds
@staticmethod
def from_numpy(X: ArrayLike,
y: Optional[ArrayLike] = None,
w: Optional[ArrayLike] = None,
ids: Optional[ArrayLike] = None,
tasks: Optional[ArrayLike] = None,
data_dir: Optional[str] = None) -> "DiskDataset":
"""Creates a DiskDataset object from specified Numpy arrays.
Parameters
----------
X: np.ndarray
Feature array.
y: np.ndarray, optional (default None)
Labels array.
w: np.ndarray, optional (default None)
Weights array.
ids: np.ndarray, optional (default None)
Identifiers array.
tasks: Sequence, optional (default None)
Tasks in this dataset
data_dir: str, optional (default None)
The directory to write this dataset to. If none is specified, will use
a temporary directory instead.
Returns
-------
DiskDataset
A new `DiskDataset` constructed from the provided information.
"""
# To unify shape handling so from_numpy behaves like NumpyDataset, we just
# make a NumpyDataset under the hood
dataset = NumpyDataset(X, y, w, ids)
if tasks is None:
tasks = dataset.get_task_names()
# raw_data = (X, y, w, ids)
return DiskDataset.create_dataset(
[(dataset.X, dataset.y, dataset.w, dataset.ids)],
data_dir=data_dir,
tasks=tasks)
@staticmethod
def merge(datasets: Iterable["Dataset"],
merge_dir: Optional[str] = None) -> "DiskDataset":
"""Merges provided datasets into a merged dataset.
Parameters
----------
datasets: Iterable[Dataset]
List of datasets to merge.
merge_dir: str, optional (default None)
The new directory path to store the merged DiskDataset.
Returns
-------
DiskDataset
A merged DiskDataset.
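Examples
--------
A minimal sketch merging two small datasets (shapes are arbitrary; the merged
dataset is written to a temporary directory because no `merge_dir` is given):
>> first = DiskDataset.from_numpy(np.random.rand(10, 5), np.random.rand(10, 1))
>> second = DiskDataset.from_numpy(np.random.rand(10, 5), np.random.rand(10, 1))
>> merged = DiskDataset.merge([first, second])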
"""
if merge_dir is not None:
if not os.path.exists(merge_dir):
os.makedirs(merge_dir)
else:
merge_dir = tempfile.mkdtemp()
# Protect against generator exhaustion
datasets = list(datasets)
# This ensures tasks are consistent for all datasets
tasks = []
for dataset in datasets:
try:
tasks.append(dataset.tasks) # type: ignore
except AttributeError:
pass
if tasks:
task_tuples = [tuple(task_list) for task_list in tasks]
if len(tasks) < len(datasets) or len(set(task_tuples)) > 1:
raise ValueError(
'Cannot merge datasets with different task specifications')
merge_tasks = tasks[0]
else:
merge_tasks = []
# determine the shard sizes of the datasets to merge
shard_sizes = []
for dataset in datasets:
if hasattr(dataset, 'get_shard_size'):
shard_sizes.append(dataset.get_shard_size()) # type: ignore
# otherwise the entire dataset is the "shard size"
else:
shard_sizes.append(len(dataset))
def generator():
for ind, dataset in enumerate(datasets):
logger.info("Merging in dataset %d/%d" % (ind, len(datasets)))
if hasattr(dataset, 'itershards'):
for (X, y, w, ids) in dataset.itershards():
yield (X, y, w, ids)
else:
yield (dataset.X, dataset.y, dataset.w, dataset.ids)
merged_dataset = DiskDataset.create_dataset(generator(),
data_dir=merge_dir,
tasks=merge_tasks)
# we must reshard the dataset to have a uniform size
# choose the smallest shard size
if len(set(shard_sizes)) > 1:
merged_dataset.reshard(min(shard_sizes))
return merged_dataset
def subset(self,
shard_nums: Sequence[int],
subset_dir: Optional[str] = None) -> "DiskDataset":
"""Creates a subset of the original dataset on disk.
Parameters
----------
shard_nums: Sequence[int]
The indices of shard to extract from the original DiskDataset.
subset_dir: str, optional (default None)
The new directory path to store the subset DiskDataset.
Returns
-------
DiskDataset
A subset DiskDataset.
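Examples
--------
A minimal sketch, assuming `dataset` is an existing `DiskDataset` with at
least two shards:
>> first_two_shards = dataset.subset([0, 1])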
"""
if subset_dir is not None:
if not os.path.exists(subset_dir):
os.makedirs(subset_dir)
else:
subset_dir = tempfile.mkdtemp()
tasks = self.get_task_names()
def generator():
for shard_num, row in self.metadata_df.iterrows():
if shard_num not in shard_nums:
continue
X, y, w, ids = self.get_shard(shard_num)
yield (X, y, w, ids)
return DiskDataset.create_dataset(generator(),
data_dir=subset_dir,
tasks=tasks)
def sparse_shuffle(self) -> None:
"""Shuffling that exploits data sparsity to shuffle large datasets.
If feature vectors are sparse, say circular fingerprints or any other
representation that contains few nonzero values, it can be possible to
exploit the sparsity of the vector to simplify shuffles. This method
implements a sparse shuffle by compressing sparse feature vectors down
into a compressed representation, then shuffles this compressed dataset in
memory and writes the results to disk.
Note
----
This method only works for 1-dimensional feature vectors (does not work
for tensorial featurizations). Note that this shuffle is performed in
place.
"""
time1 = time.time()
shard_size = self.get_shard_size()
num_shards = self.get_number_shards()
X_sparse_list: List[np.ndarray] = []
y_list: List[np.ndarray] = []
w_list: List[np.ndarray] = []
ids_list: List[np.ndarray] = []
num_features = -1
for i in range(num_shards):
logger.info("Sparsifying shard %d/%d" % (i, num_shards))
(X_s, y_s, w_s, ids_s) = self.get_shard(i)
if num_features == -1:
num_features = X_s.shape[1]
X_sparse = sparsify_features(X_s)
X_sparse_list, y_list, w_list, ids_list = (
X_sparse_list + [X_sparse], y_list + [y_s], w_list + [w_s],
ids_list + [np.atleast_1d(np.squeeze(ids_s))])
# Get full dataset in memory
(X_sparse, y, w, ids) = (np.vstack(X_sparse_list), np.vstack(y_list),
np.vstack(w_list), np.concatenate(ids_list))
# Shuffle in memory
num_samples = len(X_sparse)
permutation = np.random.permutation(num_samples)
X_sparse, y, w, ids = (X_sparse[permutation], y[permutation],
w[permutation], ids[permutation])
# Write shuffled shards out to disk
for i in range(num_shards):
logger.info("Sparse shuffling shard %d/%d" % (i, num_shards))
start, stop = i * shard_size, (i + 1) * shard_size
(X_sparse_s, y_s, w_s,
ids_s) = (X_sparse[start:stop], y[start:stop], w[start:stop],
ids[start:stop])
X_s = densify_features(X_sparse_s, num_features)
self.set_shard(i, X_s, y_s, w_s, ids_s)
time2 = time.time()
logger.info("TIMING: sparse_shuffle took %0.3f s" % (time2 - time1))
def complete_shuffle(self, data_dir: Optional[str] = None) -> Dataset:
"""Completely shuffle across all data, across all shards.
Note
----
The algorithm used for this complete shuffle is O(N^2) where N is the
number of shards. It simply constructs each shard of the output dataset
one at a time. Since the complete shuffle can take a long time, it's
useful to watch the logging output. Each shuffled shard is constructed
using select() which logs as it selects from each original shard. This
will result in O(N^2) logging statements, one for each extraction of
shuffled shard i's contributions from original shard j.
Parameters
----------
data_dir: Optional[str], (default None)
Directory to write the shuffled dataset to. If none is specified a
temporary directory will be used.
Returns
-------
DiskDataset
A DiskDataset whose data is a randomly shuffled version of this dataset.
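Examples
--------
A minimal sketch, assuming `dataset` is an existing `DiskDataset`; the result
is written to a temporary directory because no `data_dir` is given:
>> shuffled = dataset.complete_shuffle()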
"""
N = len(self)
perm = np.random.permutation(N).tolist()
shard_size = self.get_shard_size()
return self.select(perm, data_dir, shard_size)
def shuffle_each_shard(self,
shard_basenames: Optional[List[str]] = None) -> None:
"""Shuffles elements within each shard of the dataset.
Parameters
----------
shard_basenames: List[str], optional (default None)
The basenames for each shard. If this isn't specified, will assume the
basenames of form "shard-i" used by `create_dataset` and `reshard`.
"""
# Shuffle the arrays corresponding to each row in metadata_df
n_rows = len(self.metadata_df.index)
if shard_basenames is not None:
if len(shard_basenames) != n_rows:
raise ValueError(
"shard_basenames must provide a basename for each shard in this DiskDataset."
)
else:
shard_basenames = [
"shard-%d" % shard_num for shard_num in range(n_rows)
]
for i, basename in zip(range(n_rows), shard_basenames):
logger.info("Shuffling shard %d/%d" % (i, n_rows))
X, y, w, ids = self.get_shard(i)
n = X.shape[0]
permutation = np.random.permutation(n)
X, y, w, ids = (X[permutation], y[permutation], w[permutation],
ids[permutation])
DiskDataset.write_data_to_disk(self.data_dir, basename, X, y, w,
ids)
# Reset cache
self._cached_shards = None
def shuffle_shards(self) -> None:
"""Shuffles the order of the shards for this dataset."""
metadata_rows = self.metadata_df.values.tolist()
random.shuffle(metadata_rows)
self.metadata_df = DiskDataset._construct_metadata(metadata_rows)
self.save_to_disk()
def get_shard(self, i: int) -> Batch:
"""Retrieves data for the i-th shard from disk.
Parameters
----------
i: int
Shard index for shard to retrieve batch from.
Returns
-------
Batch
A batch data for i-th shard.
"""
# See if we have a cached copy of this shard.
if self._cached_shards is None:
self._cached_shards = [None] * self.get_number_shards()
self._cache_used = 0
if self._cached_shards[i] is not None:
shard = self._cached_shards[i]
return (shard.X, shard.y, shard.w, shard.ids)
# We don't, so load it from disk.
row = self.metadata_df.iloc[i]
X = np.array(load_from_disk(os.path.join(self.data_dir, row['X'])))
if row['y'] is not None:
y: Optional[np.ndarray] = np.array(
load_from_disk(os.path.join(self.data_dir, row['y'])))
else:
y = None
if row['w'] is not None:
# TODO (ytz): Under what condition does this exist but the file itself doesn't?
w_filename = os.path.join(self.data_dir, row['w'])
if os.path.exists(w_filename):
w: Optional[np.ndarray] = np.array(load_from_disk(w_filename))
elif y is not None:
if len(y.shape) == 1:
w = np.ones(y.shape[0], np.float32)
else:
w = np.ones((y.shape[0], 1), np.float32)
else:
w = None
else:
w = None
ids = np.array(load_from_disk(os.path.join(self.data_dir, row['ids'])),
dtype=object)
# Try to cache this shard for later use. Since the normal usage pattern is
# a series of passes through the whole dataset, there's no point doing
# anything fancy. It never makes sense to evict another shard from the
# cache to make room for this one, because we'll probably want that other
# shard again before the next time we want this one. So just cache as many
# as we can and then stop.
shard = _Shard(X, y, w, ids)
shard_size = X.nbytes + ids.nbytes
if y is not None:
shard_size += y.nbytes
if w is not None:
shard_size += w.nbytes
if self._cache_used + shard_size < self._memory_cache_size:
self._cached_shards[i] = shard
self._cache_used += shard_size
return (shard.X, shard.y, shard.w, shard.ids)
def get_shard_ids(self, i: int) -> np.ndarray:
"""Retrieves the list of IDs for the i-th shard from disk.
Parameters
----------
i: int
Shard index for shard to retrieve ids from.
Returns
-------
np.ndarray
A numpy array of ids for i-th shard.
"""
if self._cached_shards is not None and self._cached_shards[
i] is not None:
return self._cached_shards[i].ids
row = self.metadata_df.iloc[i]
return np.array(load_from_disk(os.path.join(self.data_dir, row['ids'])),
dtype=object)
def get_shard_y(self, i: int) -> np.ndarray:
"""Retrieves the labels for the i-th shard from disk.
Parameters
----------
i: int
Shard index for shard to retrieve labels from.
Returns
-------
np.ndarray
A numpy array of labels for i-th shard.
"""
if self._cached_shards is not None and self._cached_shards[
i] is not None:
return self._cached_shards[i].y
row = self.metadata_df.iloc[i]
return np.array(load_from_disk(os.path.join(self.data_dir, row['y'])))
def get_shard_w(self, i: int) -> np.ndarray:
"""Retrieves the weights for the i-th shard from disk.
Parameters
----------
i: int
Shard index for shard to retrieve weights from.
Returns
-------
np.ndarray
A numpy array of weights for i-th shard.
"""
if self._cached_shards is not None and self._cached_shards[
i] is not None:
return self._cached_shards[i].w
row = self.metadata_df.iloc[i]
return np.array(load_from_disk(os.path.join(self.data_dir, row['w'])))
def add_shard(self,
X: np.ndarray,
y: Optional[np.ndarray] = None,
w: Optional[np.ndarray] = None,
ids: Optional[np.ndarray] = None) -> None:
"""Adds a data shard.
Parameters
----------
X: np.ndarray
Feature array.
y: np.ndarray, optional (default None)
Labels array.
w: np.ndarray, optional (default None)
Weights array.
ids: np.ndarray, optional (default None)
Identifiers array.
"""
metadata_rows = self.metadata_df.values.tolist()
shard_num = len(metadata_rows)
basename = "shard-%d" % shard_num
metadata_rows.append(
DiskDataset.write_data_to_disk(self.data_dir, basename, X, y, w,
ids))
self.metadata_df = DiskDataset._construct_metadata(metadata_rows)
self.save_to_disk()
def set_shard(self,
shard_num: int,
X: np.ndarray,
y: Optional[np.ndarray] = None,
w: Optional[np.ndarray] = None,
ids: Optional[np.ndarray] = None) -> None:
"""Writes data shard to disk.
Parameters
----------
shard_num: int
Shard index for shard to set new data.
X: np.ndarray
Feature array.
y: np.ndarray, optional (default None)
Labels array.
w: np.ndarray, optional (default None)
Weights array.
ids: np.ndarray, optional (default None)
Identifiers array.
"""
basename = "shard-%d" % shard_num
DiskDataset.write_data_to_disk(self.data_dir, basename, X, y, w, ids)
self._cached_shards = None
self.legacy_metadata = True
def select(self,
indices: Union[Sequence[int], np.ndarray],
select_dir: Optional[str] = None,
select_shard_size: Optional[int] = None,
output_numpy_dataset: Optional[bool] = False) -> Dataset:
"""Creates a new dataset from a selection of indices from self.
Examples
--------
>>> import numpy as np
>>> X = np.random.rand(10, 10)
>>> dataset = dc.data.DiskDataset.from_numpy(X)
>>> selected = dataset.select([1, 3, 4])
>>> len(selected)
3
Parameters
----------
indices: Sequence
List of indices to select.
select_dir: str, optional (default None)
Path to new directory that the selected indices will be copied to.
select_shard_size: Optional[int], (default None)
If specified, the shard size to use for the selected output `DiskDataset`.
If not specified and `output_numpy_dataset` is False, this defaults to the
current dataset's shard size.
output_numpy_dataset: Optional[bool], (default False)
If True, output an in-memory `NumpyDataset` instead of a `DiskDataset`.
Note that `select_dir` and `select_shard_size` must be `None` if this
is `True`
Returns
-------
Dataset
A dataset containing the selected samples. The default dataset is `DiskDataset`.
If `output_numpy_dataset` is True, the dataset is `NumpyDataset`.
"""
if output_numpy_dataset and (select_dir is not None or
select_shard_size is not None):
raise ValueError(
"If output_numpy_dataset is set, then select_dir and select_shard_size must both be None"
)
if output_numpy_dataset:
# When outputting a NumpyDataset, we have 1 in-memory shard
select_shard_size = len(indices)
else:
if select_dir is not None:
if not os.path.exists(select_dir):
os.makedirs(select_dir)
else:
select_dir = tempfile.mkdtemp()
if select_shard_size is None:
select_shard_size = self.get_shard_size()
tasks = self.get_task_names()
N = len(indices)
n_shards = self.get_number_shards()
# Handle edge case with empty indices
if not N:
if not output_numpy_dataset:
return DiskDataset.create_dataset([],
data_dir=select_dir,
tasks=tasks)
else:
return NumpyDataset(np.array([]), np.array([]), np.array([]),
np.array([]))
# We use two loops here. The outer while loop walks over selection shards
# (the chunks of the indices to select that should go into separate
# output shards), while the inner for loop walks over the shards in the
# source datasets to select out the shard indices from that source shard
def generator():
start = 0
select_shard_num = 0
while start < N:
logger.info("Constructing selection output shard %d" %
(select_shard_num + 1))
end = min(start + select_shard_size, N)
select_shard_indices = indices[start:end]
sorted_indices = np.array(
sorted(select_shard_indices)).astype(int)
Xs, ys, ws, ids_s = [], [], [], []
count, indices_count = 0, 0
for shard_num in range(self.get_number_shards()):
logger.info(
"Selecting from input shard %d/%d for selection output shard %d"
% (shard_num + 1, n_shards, select_shard_num + 1))
if self.legacy_metadata:
ids = self.get_shard_ids(shard_num)
shard_len = len(ids)
else:
shard_X_shape, _, _, _ = self._get_shard_shape(
shard_num)
if len(shard_X_shape) > 0:
shard_len = shard_X_shape[0]
else:
shard_len = 0
# Find indices which rest in this shard
num_shard_elts = 0
while sorted_indices[indices_count +
num_shard_elts] < count + shard_len:
num_shard_elts += 1
if (indices_count +
num_shard_elts) >= len(sorted_indices):
break
if num_shard_elts == 0:
count += shard_len
continue
else:
X, y, w, ids = self.get_shard(shard_num)
# Need to offset indices to fit within shard_size
shard_inds = sorted_indices[indices_count:indices_count +
num_shard_elts] - count
# Handle empty case where no data from this shard needed
X_sel = X[shard_inds]
# Handle the case of datasets with y/w missing
if y is not None:
y_sel = y[shard_inds]
else:
y_sel = np.array([])
if w is not None:
w_sel = w[shard_inds]
else:
w_sel = np.array([])
ids_sel = ids[shard_inds]
Xs.append(X_sel)
ys.append(y_sel)
ws.append(w_sel)
ids_s.append(ids_sel)
indices_count += num_shard_elts
count += shard_len
# Break if all indices have been used up already
if indices_count >= len(sorted_indices):
break
# Note these will be in the sorted order
X = np.concatenate(Xs, axis=0)
y = np.concatenate(ys, axis=0)
w = np.concatenate(ws, axis=0)
ids = np.concatenate(ids_s, axis=0)
# We need to recover the original ordering. We can do this by using
# np.where to find the locations of the original indices in the sorted
# indices.
reverted_indices = np.array(
# We know there's only one match for np.where since this is a
# permutation, so the [0][0] pulls out the exact match location.
[
np.where(sorted_indices == orig_index)[0][0]
for orig_index in select_shard_indices
])
if y.size == 0:
tup_y = y
else:
tup_y = y[reverted_indices]
if w.size == 0:
tup_w = w
else:
tup_w = w[reverted_indices]
X, ids = X[reverted_indices], ids[reverted_indices]
yield (X, tup_y, tup_w, ids)
start = end
select_shard_num += 1
if not output_numpy_dataset:
return DiskDataset.create_dataset(generator(),
data_dir=select_dir,
tasks=tasks)
else:
X, y, w, ids = next(generator())
return NumpyDataset(X, y, w, ids)
@property
def ids(self) -> np.ndarray:
"""Get the ids vector for this dataset as a single numpy array."""
if len(self) == 0:
return np.array([])
ids = []
for i in range(self.get_number_shards()):
ids.append(np.atleast_1d(np.squeeze(self.get_shard_ids(i))))
return np.concatenate(ids)
@property
def X(self) -> np.ndarray:
"""Get the X vector for this dataset as a single numpy array."""
Xs = []
one_dimensional = False
for (X_b, _, _, _) in self.itershards():
Xs.append(X_b)
if len(X_b.shape) == 1:
one_dimensional = True
if not one_dimensional:
return np.vstack(Xs)
else:
return np.concatenate(Xs)
@property
def y(self) -> np.ndarray:
"""Get the y vector for this dataset as a single numpy array."""
if len(self) == 0:
return np.array([])
ys = []
one_dimensional = False
for i in range(self.get_number_shards()):
y_b = self.get_shard_y(i)
ys.append(y_b)
if len(y_b.shape) == 1:
one_dimensional = True
if not one_dimensional:
return np.vstack(ys)
else:
return np.concatenate(ys)
@property
def w(self) -> np.ndarray:
"""Get the weight vector for this dataset as a single numpy array."""
ws = []
one_dimensional = False
for i in range(self.get_number_shards()):
w_b = self.get_shard_w(i)
ws.append(w_b)
if len(w_b.shape) == 1:
one_dimensional = True
if not one_dimensional:
return np.vstack(ws)
else:
return np.concatenate(ws)
@property
def memory_cache_size(self) -> int:
"""Get the size of the memory cache for this dataset, measured in bytes."""
return self._memory_cache_size
@memory_cache_size.setter
def memory_cache_size(self, size: int) -> None:
"""Get the size of the memory cache for this dataset, measured in bytes."""
self._memory_cache_size = size
if self._cache_used > size:
self._cached_shards = None
def __len__(self) -> int:
"""Finds number of elements in dataset."""
total = 0
for _, row in self.metadata_df.iterrows():
y = load_from_disk(os.path.join(self.data_dir, row['ids']))
total += len(y)
return total
def _get_shard_shape(self,
shard_num: int) -> Tuple[Shape, Shape, Shape, Shape]:
"""Finds the shape of the specified shard."""
if self.legacy_metadata:
raise ValueError(
"This function requires the new metadata format to be called. Please reshard this dataset by calling the reshard() method."
)
n_tasks = len(self.get_task_names())
row = self.metadata_df.iloc[shard_num]
if row['X_shape'] is not None:
shard_X_shape = make_tuple(str(row['X_shape']))
else:
shard_X_shape = tuple()
if n_tasks > 0:
if row['y_shape'] is not None:
shard_y_shape = make_tuple(str(row['y_shape']))
else:
shard_y_shape = tuple()
if row['w_shape'] is not None:
shard_w_shape = make_tuple(str(row['w_shape']))
else:
shard_w_shape = tuple()
else:
shard_y_shape = tuple()
shard_w_shape = tuple()
if row['ids_shape'] is not None:
shard_ids_shape = make_tuple(str(row['ids_shape']))
else:
shard_ids_shape = tuple()
X_shape, y_shape, w_shape, ids_shape = tuple(
np.array(shard_X_shape)), tuple(np.array(shard_y_shape)), tuple(
np.array(shard_w_shape)), tuple(np.array(shard_ids_shape))
return X_shape, y_shape, w_shape, ids_shape
def get_shape(self) -> Tuple[Shape, Shape, Shape, Shape]:
"""Finds shape of dataset.
Returns four tuples, giving the shape of the X, y, w, and ids arrays.
"""
n_tasks = len(self.get_task_names())
n_rows = len(self.metadata_df.index)
# If shape metadata is available use it to directly compute shape from
# metadata
if not self.legacy_metadata:
for shard_num in range(n_rows):
shard_X_shape, shard_y_shape, shard_w_shape, shard_ids_shape = self._get_shard_shape(
shard_num)
if shard_num == 0:
X_shape, y_shape, w_shape, ids_shape = np.array(
shard_X_shape), np.array(shard_y_shape), np.array(
shard_w_shape), np.array(shard_ids_shape)
else:
X_shape[0] += shard_X_shape[0]
if n_tasks > 0:
y_shape[0] += shard_y_shape[0]
w_shape[0] += shard_w_shape[0]
ids_shape[0] += shard_ids_shape[0]
return tuple(X_shape), tuple(y_shape), tuple(w_shape), tuple(
ids_shape)
# In absence of shape metadata, fall back to loading data from disk to
# find shape.
else:
for shard_num, (X, y, w, ids) in enumerate(self.itershards()):
if shard_num == 0:
X_shape = np.array(X.shape)
if n_tasks > 0:
y_shape = np.array(y.shape)
w_shape = np.array(w.shape)
else:
y_shape = np.array([])
w_shape = np.array([])
ids_shape = np.array(ids.shape)
else:
X_shape[0] += np.array(X.shape)[0]
if n_tasks > 0:
y_shape[0] += np.array(y.shape)[0]
w_shape[0] += np.array(w.shape)[0]
ids_shape[0] += np.array(ids.shape)[0]
return tuple(X_shape), tuple(y_shape), tuple(w_shape), tuple(
ids_shape)
def get_label_means(self) -> pd.DataFrame:
"""Return pandas series of label means."""
return self.metadata_df["y_means"]
def get_label_stds(self) -> pd.DataFrame:
"""Return pandas series of label stds."""
return self.metadata_df["y_stds"]
class ImageDataset(Dataset):
"""A Dataset that loads data from image files on disk."""
def __init__(self,
X: Union[np.ndarray, List[str]],
y: Optional[Union[np.ndarray, List[str]]],
w: Optional[ArrayLike] = None,
ids: Optional[ArrayLike] = None) -> None:
"""Create a dataset whose X and/or y array is defined by image files on disk.
Parameters
----------
X: np.ndarray or List[str]
The dataset's input data. This may be either a single NumPy
array directly containing the data, or a list containing the
paths to the image files
y: np.ndarray or List[str]
The dataset's labels. This may be either a single NumPy array
directly containing the data, or a list containing the paths to
the image files
w: np.ndarray, optional (default None)
a 1D or 2D array containing the weights for each sample or
sample/task pair
ids: np.ndarray, optional (default None)
the sample IDs
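Examples
--------
A minimal sketch using in-memory arrays standing in for image data; a list of
paths to image files on disk would work the same way:
>> X = np.random.rand(4, 32, 32)
>> y = np.random.rand(4)
>> dataset = ImageDataset(X, y)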
"""
n_samples = len(X)
if y is None:
y = np.zeros((n_samples,))
self._X_shape = self._find_array_shape(X)
self._y_shape = self._find_array_shape(y)
if w is None:
if len(self._y_shape) == 0:
# Case n_samples should be 1
if n_samples != 1:
raise ValueError("y can only be a scalar if n_samples == 1")
w = np.ones_like(y)
elif len(self._y_shape) == 1:
w = np.ones(self._y_shape[0], np.float32)
else:
w = np.ones((self._y_shape[0], 1), np.float32)
if ids is None:
if not isinstance(X, np.ndarray):
ids = X
elif not isinstance(y, np.ndarray):
ids = y
else:
ids = np.arange(n_samples)
self._X = X
self._y = y
self._w = np.asarray(w)
self._ids = np.array(ids, dtype=object)
def _find_array_shape(self, array: Union[np.ndarray, List[str]]) -> Shape:
if isinstance(array, np.ndarray):
return array.shape
image_shape = load_image_files([array[0]]).shape[1:]
return tuple(np.concatenate([[len(array)], image_shape]))
def __len__(self) -> int:
"""Get the number of elements in the dataset."""
return self._X_shape[0]
def get_shape(self) -> Tuple[Shape, Shape, Shape, Shape]:
"""Get the shape of the dataset.
Returns four tuples, giving the shape of the X, y, w, and ids arrays.
"""
return self._X_shape, self._y_shape, self._w.shape, self._ids.shape
def get_task_names(self) -> np.ndarray:
"""Get the names of the tasks associated with this dataset."""
if len(self._y_shape) < 2:
return np.array([0])
return np.arange(self._y_shape[1])
@property
def X(self) -> np.ndarray:
"""Get the X vector for this dataset as a single numpy array."""
if isinstance(self._X, np.ndarray):
return self._X
return load_image_files(self._X)
@property
def y(self) -> np.ndarray:
"""Get the y vector for this dataset as a single numpy array."""
if isinstance(self._y, np.ndarray):
return self._y
return load_image_files(self._y)
@property
def ids(self) -> np.ndarray:
"""Get the ids vector for this dataset as a single numpy array."""
return self._ids
@property
def w(self) -> np.ndarray:
"""Get the weight vector for this dataset as a single numpy array."""
return self._w
def iterbatches(self,
batch_size: Optional[int] = None,
epochs: int = 1,
deterministic: bool = False,
pad_batches: bool = False) -> Iterator[Batch]:
"""Get an object that iterates over minibatches from the dataset.
Each minibatch is returned as a tuple of four numpy arrays:
`(X, y, w, ids)`.
Parameters
----------
batch_size: int, optional (default None)
Number of elements in each batch.
epochs: int, default 1
Number of epochs to walk over dataset.
deterministic: bool, default False
If True, follow deterministic order.
pad_batches: bool, default False
If True, pad each batch to `batch_size`.
Returns
-------
Iterator[Batch]
Generator which yields tuples of four numpy arrays `(X, y, w, ids)`.
"""
def iterate(dataset, batch_size, epochs, deterministic, pad_batches):
n_samples = dataset._X_shape[0]
if deterministic:
sample_perm = np.arange(n_samples)
if batch_size is None:
batch_size = n_samples
for epoch in range(epochs):
if not deterministic:
sample_perm = np.random.permutation(n_samples)
batch_idx = 0
num_batches = math.ceil(n_samples / batch_size)
while batch_idx < num_batches:
start = batch_idx * batch_size
end = min(n_samples, (batch_idx + 1) * batch_size)
indices = range(start, end)
perm_indices = sample_perm[indices]
if isinstance(dataset._X, np.ndarray):
X_batch = dataset._X[perm_indices]
else:
X_batch = load_image_files(
[dataset._X[i] for i in perm_indices])
if isinstance(dataset._y, np.ndarray):
y_batch = dataset._y[perm_indices]
else:
y_batch = load_image_files(
[dataset._y[i] for i in perm_indices])
w_batch = dataset._w[perm_indices]
ids_batch = dataset._ids[perm_indices]
if pad_batches:
(X_batch, y_batch, w_batch,
ids_batch) = pad_batch(batch_size, X_batch, y_batch,
w_batch, ids_batch)
batch_idx += 1
yield (X_batch, y_batch, w_batch, ids_batch)
return iterate(self, batch_size, epochs, deterministic, pad_batches)
def _get_image(self, array: Union[np.ndarray, List[str]],
indices: Union[int, np.ndarray]) -> np.ndarray:
"""Method for loading an image
Parameters
----------
array: Union[np.ndarray, List[str]]
A numpy array which contains images or List of image filenames
indices: Union[int, np.ndarray]
Index you want to get the images
Returns
-------
np.ndarray
Loaded images
"""
if isinstance(array, np.ndarray):
return array[indices]
if isinstance(indices, np.ndarray):
return load_image_files([array[i] for i in indices])
return load_image_files([array[indices]])[0]
def itersamples(self) -> Iterator[Batch]:
"""Get an object that iterates over the samples in the dataset.
Returns
-------
Iterator[Batch]
Iterator which yields tuples of four numpy arrays `(X, y, w, ids)`.
"""
n_samples = self._X_shape[0]
return ((self._get_image(self._X, i), self._get_image(self._y, i),
self._w[i], self._ids[i]) for i in range(n_samples))
def transform(
self,
transformer: "dc.trans.Transformer",
**args,
) -> "NumpyDataset":
"""Construct a new dataset by applying a transformation to every sample in this dataset.
The argument is a function that can be called as follows:
>> newx, newy, neww = fn(x, y, w)
It might be called only once with the whole dataset, or multiple times with
different subsets of the data. Each time it is called, it should transform
the samples and return the transformed data.
Parameters
----------
transformer: dc.trans.Transformer
The transformation to apply to each sample in the dataset
Returns
-------
NumpyDataset
A newly constructed NumpyDataset object
"""
newx, newy, neww, newids = transformer.transform_array(
self.X, self.y, self.w, self.ids)
return NumpyDataset(newx, newy, neww, newids)
def select(self,
indices: Union[Sequence[int], np.ndarray],
select_dir: Optional[str] = None) -> "ImageDataset":
"""Creates a new dataset from a selection of indices from self.
Parameters
----------
indices: Sequence
List of indices to select.
select_dir: str, optional (default None)
Used to provide same API as `DiskDataset`. Ignored since
`ImageDataset` is purely in-memory.
Returns
-------
ImageDataset
A selected ImageDataset object
"""
X: Union[List[str], np.ndarray]
y: Union[List[str], np.ndarray]
if isinstance(self._X, np.ndarray):
X = self._X[indices]
else:
X = [self._X[i] for i in indices]
if isinstance(self._y, np.ndarray):
y = self._y[indices]
else:
y = [self._y[i] for i in indices]
w = self._w[indices]
ids = self._ids[indices]
return ImageDataset(X, y, w, ids)
def make_pytorch_dataset(self,
epochs: int = 1,
deterministic: bool = False,
batch_size: Optional[int] = None):
"""Create a torch.utils.data.IterableDataset that iterates over the data in this Dataset.
Each value returned by the Dataset's iterator is a tuple of (X, y, w, id)
containing the data for one batch, or for a single sample if batch_size is None.
Parameters
----------
epochs: int, default 1
The number of times to iterate over the Dataset.
deterministic: bool, default False
If True, the data is produced in order. If False, a different
random permutation of the data is used for each epoch.
batch_size: int, optional (default None)
The number of samples to return in each batch. If None, each returned
value is a single sample.
Returns
-------
torch.utils.data.IterableDataset
`torch.utils.data.IterableDataset` that iterates over the data in
this dataset.
Note
----
This method requires PyTorch to be installed.
"""
try:
from deepchem.data.pytorch_datasets import _TorchImageDataset
except ImportError:
raise ImportError("This method requires PyTorch to be installed.")
pytorch_ds = _TorchImageDataset(image_dataset=self,
epochs=epochs,
deterministic=deterministic,
batch_size=batch_size)
return pytorch_ds
class Databag(object):
"""A utility class to iterate through multiple datasets together.
A `Databag` is useful when you have multiple datasets that you want
to iterate in locksteps. This might be easiest to grasp with a
simple code example.
>>> ones_dataset = NumpyDataset(X=np.ones((5, 3)))
>>> zeros_dataset = NumpyDataset(X=np.zeros((5, 3)))
>>> databag = Databag({"ones": ones_dataset, "zeros": zeros_dataset})
>>> for sample_dict in databag.iterbatches(batch_size=1):
... print(sample_dict)
{'ones': array([[1., 1., 1.]]), 'zeros': array([[0., 0., 0.]])}
{'ones': array([[1., 1., 1.]]), 'zeros': array([[0., 0., 0.]])}
{'ones': array([[1., 1., 1.]]), 'zeros': array([[0., 0., 0.]])}
{'ones': array([[1., 1., 1.]]), 'zeros': array([[0., 0., 0.]])}
{'ones': array([[1., 1., 1.]]), 'zeros': array([[0., 0., 0.]])}
Note how we get a batch at a time from each of the datasets in the
`Databag`. This can be useful for training models that combine data
from multiple `Dataset` objects at a time.
"""
def __init__(self, datasets: Optional[Dict[Any, Dataset]] = None) -> None:
"""Initialize this `Databag`.
Parameters
----------
datasets: dict, optional (default None)
A dictionary mapping keys to `Dataset` objects.
"""
if datasets is None:
self.datasets = dict()
else:
self.datasets = datasets
def add_dataset(self, key: Any, dataset: Dataset) -> None:
"""Adds a dataset to this databag.
Parameters
----------
key: Any, hashable value
Key to be added
dataset: Dataset
The dataset that `key` should point to.
"""
self.datasets[key] = dataset
def iterbatches(self, **kwargs) -> Iterator[Dict[str, np.ndarray]]:
"""Loop through all internal datasets in the same order.
Parameters
----------
batch_size: int
Number of samples from each dataset to return
epochs: int
Number of times to loop through the datasets
pad_batches: bool
Whether every batch should be padded to exactly `batch_size` samples.
Returns
-------
Iterator[Dict[str, np.ndarray]]
Generator which yields a dictionary {key: dataset.X[batch]}
"""
key_order = [x for x in self.datasets.keys()]
if "epochs" in kwargs:
epochs = kwargs['epochs']
del kwargs['epochs']
else:
epochs = 1
kwargs['deterministic'] = True
for epoch in range(epochs):
iterators = [
self.datasets[x].iterbatches(**kwargs) for x in key_order
]
for tup in zip(*iterators):
m_d = {key_order[i]: tup[i][0] for i in range(len(key_order))}
yield m_d
<file_sep>from deepchem.feat.huggingface_featurizer import HuggingFaceFeaturizer
def testHuggingFaceFeaturizer():
# NOTE: The test depends on the pretrained vocabulary
# (seyonec/PubChem10M_SMILES_BPE_60k). If the pretrained vocabulary is modified
# (which can happen, since it is an external resource), the test might fail.
from transformers import RobertaTokenizerFast
hf_tokenizer = RobertaTokenizerFast.from_pretrained(
"seyonec/PubChem10M_SMILES_BPE_60k")
featurizer = HuggingFaceFeaturizer(tokenizer=hf_tokenizer)
output = featurizer.featurize(['CC(=O)C', 'CC'])
assert len(output) == 2
assert output[0]['input_ids'] == [0, 262, 263, 51, 13, 39, 2]
assert output[0]['attention_mask'] == [1, 1, 1, 1, 1, 1, 1]
assert output[1]['input_ids'] == [0, 262, 2]
assert output[1]['attention_mask'] == [1, 1, 1]
<file_sep>import os
import tempfile
import deepchem as dc
def test_load_singleton_csv():
fin = tempfile.NamedTemporaryFile(mode='w', delete=False)
fin.write("smiles,endpoint\nc1ccccc1,1")
fin.close()
featurizer = dc.feat.CircularFingerprint(size=1024)
tasks = ["endpoint"]
loader = dc.data.CSVLoader(tasks=tasks,
feature_field="smiles",
featurizer=featurizer)
X = loader.create_dataset(fin.name)
assert len(X) == 1
os.remove(fin.name)
<file_sep>from rdkit import Chem
import numpy as np
import logging
from typing import List, Tuple, Union, Dict, Set, Sequence, Optional
from deepchem.utils.typing import RDKitAtom, RDKitMol, RDKitBond
from deepchem.feat.base_classes import MolecularFeaturizer
from deepchem.feat.graph_data import GraphData
from deepchem.feat.molecule_featurizers.circular_fingerprint import CircularFingerprint
from deepchem.feat.molecule_featurizers.rdkit_descriptors import RDKitDescriptors
from deepchem.utils.molecule_feature_utils import one_hot_encode
from deepchem.utils.molecule_feature_utils import get_atom_total_degree_one_hot
from deepchem.utils.molecule_feature_utils import get_atom_formal_charge_one_hot
from deepchem.utils.molecule_feature_utils import get_atom_total_num_Hs_one_hot
from deepchem.utils.molecule_feature_utils import get_atom_hybridization_one_hot
from deepchem.utils.molecule_feature_utils import get_atom_is_in_aromatic_one_hot
from deepchem.feat.graph_features import bond_features as b_Feats
logger = logging.getLogger(__name__)
class GraphConvConstants(object):
"""
A class for holding featurization parameters.
"""
MAX_ATOMIC_NUM = 100
ATOM_FEATURES: Dict[str, List[int]] = {
'atomic_num': list(range(MAX_ATOMIC_NUM)),
'degree': [0, 1, 2, 3, 4, 5],
'formal_charge': [-1, -2, 1, 2, 0],
'chiral_tag': [0, 1, 2, 3],
'num_Hs': [0, 1, 2, 3, 4]
}
ATOM_FEATURES_HYBRIDIZATION: List[str] = [
"SP", "SP2", "SP3", "SP3D", "SP3D2"
]
# Dimension of atom feature vector
ATOM_FDIM = sum(len(choices) + 1 for choices in ATOM_FEATURES.values()
) + len(ATOM_FEATURES_HYBRIDIZATION) + 1 + 2
# len(choices) +1 and len(ATOM_FEATURES_HYBRIDIZATION) +1 to include room for unknown set
# + 2 at end for is_in_aromatic and mass
BOND_FDIM = 14
# dictionary of available feature generators
FEATURE_GENERATORS: Dict[str, MolecularFeaturizer] = {
"morgan":
CircularFingerprint(radius=2, size=2048, sparse=False),
"morgan_count":
CircularFingerprint(radius=2,
size=2048,
sparse=False,
is_counts_based=True),
"rdkit_desc":
RDKitDescriptors(use_bcut2d=False),
"rdkit_desc_normalized":
RDKitDescriptors(use_bcut2d=False, is_normalized=True)
}
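# Worked arithmetic for the constants above, for reference:
#   ATOM_FDIM = (100+1) + (6+1) + (5+1) + (4+1) + (5+1)  # = 125, one-hots plus "unknown" slots
#             + (5+1)                                    # = 131, hybridization one-hot plus "unknown"
#             + 2                                        # = 133, is_in_aromatic flag and scaled mass
#   BOND_FDIM = 14: a leading "null bond" flag plus the 13 values returned by
#   b_Feats(bond, use_extended_chirality=True) (see bond_features below).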
def get_atomic_num_one_hot(atom: RDKitAtom,
allowable_set: List[int],
include_unknown_set: bool = True) -> List[float]:
"""Get a one-hot feature about atomic number of the given atom.
Parameters
----------
atom: RDKitAtom
RDKit atom object
allowable_set: List[int]
The range of atomic numbers to consider.
include_unknown_set: bool, default True
If True, the index of all types not in `allowable_set` is `len(allowable_set)`.
Returns
-------
List[float]
A one-hot vector of atomic number of the given atom.
If `include_unknown_set` is False, the length is `len(allowable_set)`.
If `include_unknown_set` is True, the length is `len(allowable_set) + 1`.
"""
return one_hot_encode(atom.GetAtomicNum() - 1, allowable_set,
include_unknown_set)
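# Example sketch (illustrative): for a carbon atom (atomic number 6) and the
# allowable_set list(range(100)) used above, the encoded value is 6 - 1 = 5, so
# the returned vector has length 101 with a 1.0 at index 5.
#
#     mol = Chem.MolFromSmiles('C')
#     vec = get_atomic_num_one_hot(mol.GetAtomWithIdx(0),
#                                  GraphConvConstants.ATOM_FEATURES['atomic_num'])
#     len(vec)        # 101
#     vec.index(1.0)  # 5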
def get_atom_chiral_tag_one_hot(
atom: RDKitAtom,
allowable_set: List[int],
include_unknown_set: bool = True) -> List[float]:
"""Get a one-hot feature about chirality of the given atom.
Parameters
----------
atom: RDKitAtom
RDKit atom object
allowable_set: List[int]
The list of chirality tags to consider.
include_unknown_set: bool, default True
If True, the index of all types not in `allowable_set` is `len(allowable_set)`.
Returns
-------
List[float]
A one-hot vector of chirality of the given atom.
If `include_unknown_set` is False, the length is `len(allowable_set)`.
If `include_unknown_set` is True, the length is `len(allowable_set) + 1`.
"""
return one_hot_encode(atom.GetChiralTag(), allowable_set,
include_unknown_set)
def get_atom_mass(atom: RDKitAtom) -> List[float]:
"""Get vector feature containing downscaled mass of the given atom.
Parameters
----------
atom: RDKitAtom
RDKit atom object
Returns
-------
List[float]
A vector of downscaled mass of the given atom.
"""
return [atom.GetMass() * 0.01]
def atom_features(
atom: RDKitAtom,
functional_groups: Optional[List[int]] = None,
only_atom_num: bool = False) -> Sequence[Union[bool, int, float]]:
"""Helper method used to compute atom feature vector.
DeepChem already contains an `atom_features` function; however, we define a new one here to handle features specific to DMPNN.
Parameters
----------
atom: RDKitAtom
Atom to compute features on.
functional_groups: List[int]
A k-hot vector indicating the functional groups the atom belongs to.
Default value is None
only_atom_num: bool
Toggle to build a feature vector for an atom containing only the atom number information.
Returns
-------
features: Sequence[Union[bool, int, float]]
A list of atom features.
Examples
--------
>>> import deepchem as dc
>>> from rdkit import Chem
>>> mol = Chem.MolFromSmiles('C')
>>> atom = mol.GetAtoms()[0]
>>> features = dc.feat.molecule_featurizers.dmpnn_featurizer.atom_features(atom)
>>> type(features)
<class 'list'>
>>> len(features)
133
"""
if atom is None:
features: Sequence[Union[bool, int,
float]] = [0] * GraphConvConstants.ATOM_FDIM
elif only_atom_num:
features = []
features += get_atomic_num_one_hot(
atom, GraphConvConstants.ATOM_FEATURES['atomic_num'])
features += [0] * (
GraphConvConstants.ATOM_FDIM - GraphConvConstants.MAX_ATOMIC_NUM - 1
) # set other features to zero
else:
features = []
features += get_atomic_num_one_hot(
atom, GraphConvConstants.ATOM_FEATURES['atomic_num'])
features += get_atom_total_degree_one_hot(
atom, GraphConvConstants.ATOM_FEATURES['degree'])
features += get_atom_formal_charge_one_hot(
atom, GraphConvConstants.ATOM_FEATURES['formal_charge'])
features += get_atom_chiral_tag_one_hot(
atom, GraphConvConstants.ATOM_FEATURES['chiral_tag'])
features += get_atom_total_num_Hs_one_hot(
atom, GraphConvConstants.ATOM_FEATURES['num_Hs'])
features += get_atom_hybridization_one_hot(
atom, GraphConvConstants.ATOM_FEATURES_HYBRIDIZATION, True)
features += get_atom_is_in_aromatic_one_hot(atom)
features = [int(feature) for feature in features]
features += get_atom_mass(atom)
if functional_groups is not None:
features += functional_groups
return features
def bond_features(bond: RDKitBond) -> Sequence[Union[bool, int, float]]:
Wrapper around the bond_features() function already available in DeepChem, used to compute the bond feature vector.
Parameters
----------
bond: RDKitBond
Bond to compute features on.
Returns
-------
features: Sequence[Union[bool, int, float]]
A list of bond features.
Examples
--------
>>> import deepchem as dc
>>> from rdkit import Chem
>>> mol = Chem.MolFromSmiles('CC')
>>> bond = mol.GetBondWithIdx(0)
>>> b_features = dc.feat.molecule_featurizers.dmpnn_featurizer.bond_features(bond)
>>> type(b_features)
<class 'list'>
>>> len(b_features)
14
"""
if bond is None:
b_features: Sequence[Union[
bool, int, float]] = [1] + [0] * (GraphConvConstants.BOND_FDIM - 1)
else:
b_features = [0] + b_Feats(bond, use_extended_chirality=True)
return b_features
def map_reac_to_prod(
mol_reac: RDKitMol,
mol_prod: RDKitMol) -> Tuple[Dict[int, int], List[int], List[int]]:
"""Function to build a dictionary of mapping atom indices in the reactants to the products.
Parameters
----------
mol_reac: RDKitMol
An RDKit molecule of the reactants.
mol_prod: RDKitMol
An RDKit molecule of the products.
Returns
-------
mappings: Tuple[Dict[int,int],List[int],List[int]]
A tuple containing a dictionary of corresponding reactant and product atom indices,
list of atom ids of product not part of the mapping and
list of atom ids of reactant not part of the mapping
"""
only_prod_ids: List[int] = []
prod_map_to_id: Dict[int, int] = {}
mapnos_reac: Set[int] = set(
[atom.GetAtomMapNum() for atom in mol_reac.GetAtoms()])
for atom in mol_prod.GetAtoms():
mapno = atom.GetAtomMapNum()
if (mapno > 0):
prod_map_to_id[mapno] = atom.GetIdx()
if (mapno not in mapnos_reac):
only_prod_ids.append(atom.GetIdx())
else:
only_prod_ids.append(atom.GetIdx())
only_reac_ids: List[int] = []
reac_id_to_prod_id: Dict[int, int] = {}
for atom in mol_reac.GetAtoms():
mapno = atom.GetAtomMapNum()
if (mapno > 0):
try:
reac_id_to_prod_id[atom.GetIdx()] = prod_map_to_id[mapno]
except KeyError:
only_reac_ids.append(atom.GetIdx())
else:
only_reac_ids.append(atom.GetIdx())
mappings: Tuple[Dict[int, int], List[int],
List[int]] = (reac_id_to_prod_id, only_prod_ids,
only_reac_ids)
return mappings
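# Example sketch (illustrative): for an atom-mapped reaction such as
# [CH3:1][OH:2] >> [O:2]=[CH2:1] (methanol to formaldehyde, hydrogens implicit),
# reactant atom 0 (C, map 1) corresponds to product atom 1 and reactant atom 1
# (O, map 2) to product atom 0, with no unmapped atoms on either side.
#
#     mol_reac = Chem.MolFromSmiles('[CH3:1][OH:2]')
#     mol_prod = Chem.MolFromSmiles('[O:2]=[CH2:1]')
#     map_reac_to_prod(mol_reac, mol_prod)  # ({0: 1, 1: 0}, [], [])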
def generate_global_features(mol: RDKitMol,
features_generators: List[str]) -> np.ndarray:
"""Helper function for generating global features for a RDKit mol based on the given list of feature generators to be used.
Parameters
----------
mol: RDKitMol
RDKit molecule to be featurized
features_generators: List[str]
List of names of the feature generators to be used for featurization
Returns
-------
global_features_array: np.ndarray
Array of global features
Examples
--------
>>> from rdkit import Chem
>>> import deepchem as dc
>>> mol = Chem.MolFromSmiles('C')
>>> features_generators = ['morgan']
>>> global_features = dc.feat.molecule_featurizers.dmpnn_featurizer.generate_global_features(mol, features_generators)
>>> type(global_features)
<class 'numpy.ndarray'>
>>> len(global_features)
2048
>>> nonzero_features_indices = global_features.nonzero()[0]
>>> nonzero_features_indices
array([1264])
>>> global_features[nonzero_features_indices[0]]
1.0
"""
global_features: List[np.ndarray] = []
available_generators = GraphConvConstants.FEATURE_GENERATORS
for generator in features_generators:
if generator in available_generators:
global_featurizer = available_generators[generator]
if mol.GetNumHeavyAtoms() > 0:
global_features.extend(global_featurizer.featurize(mol)[0])
# for H2
elif mol.GetNumHeavyAtoms() == 0:
# not all global features have the same length, so use methane as a dummy molecule to determine the length
global_features.extend(
np.zeros(
len(
global_featurizer.featurize(
Chem.MolFromSmiles('C'))[0])))
else:
logger.warning(f"{generator} generator is not available in DMPNN")
global_features_array: np.ndarray = np.asarray(global_features)
# Fix nans in features
replace_token = 0
global_features_array = np.where(np.isnan(global_features_array),
replace_token, global_features_array)
return global_features_array
class DMPNNFeaturizer(MolecularFeaturizer):
"""This class is a featurizer for Directed Message Passing Neural Network (D-MPNN) implementation
The default node(atom) and edge(bond) representations are based on
`Analyzing Learned Molecular Representations for Property Prediction paper <https://arxiv.org/pdf/1904.01561.pdf>`_.
The default node representation are constructed by concatenating the following values,
and the feature length is 133.
- Atomic num: A one-hot vector of this atom, in a range of first 100 atoms.
- Degree: A one-hot vector of the degree (0-5) of this atom.
- Formal charge: Integer electronic charge, -1, -2, 1, 2, 0.
- Chirality: A one-hot vector of the chirality tag (0-3) of this atom.
- Number of Hydrogens: A one-hot vector of the number of hydrogens (0-4) that this atom connected.
- Hybridization: A one-hot vector of "SP", "SP2", "SP3", "SP3D", "SP3D2".
- Aromatic: A one-hot vector of whether the atom belongs to an aromatic ring.
- Mass: Atomic mass * 0.01
The default edge representation are constructed by concatenating the following values,
and the feature length is 14.
- Bond type: A one-hot vector of the bond type, "single", "double", "triple", or "aromatic".
- Same ring: A one-hot vector of whether the atoms in the pair are in the same ring.
- Conjugated: A one-hot vector of whether this bond is conjugated or not.
- Stereo: A one-hot vector of the stereo configuration (0-5) of a bond.
If you want to know more details about features, please check the paper [1]_ and
utilities in deepchem.utils.molecule_feature_utils.py.
Examples
--------
>>> smiles = ["C1=CC=CN=C1", "C1CCC1"]
>>> featurizer = DMPNNFeaturizer()
>>> out = featurizer.featurize(smiles)
>>> type(out[0])
<class 'deepchem.feat.graph_data.GraphData'>
>>> out[0].num_nodes
6
>>> out[0].num_node_features
133
>>> out[0].node_features.shape
(6, 133)
>>> out[0].num_edge_features
14
>>> out[0].num_edges
12
>>> out[0].edge_features.shape
(12, 14)
References
----------
.. [1] Kearnes, Steven, et al. "Molecular graph convolutions: moving beyond fingerprints."
Journal of computer-aided molecular design 30.8 (2016):595-608.
Note
----
This class requires RDKit to be installed.
"""
def __init__(self,
features_generators: Optional[List[str]] = None,
is_adding_hs: bool = False,
use_original_atom_ranks: bool = False):
"""
Parameters
----------
features_generators: List[str], default None
List of global feature generators to be used.
is_adding_hs: bool, default False
Whether to add Hs or not.
use_original_atom_ranks: bool, default False
Whether to use original atom mapping or canonical atom mapping
"""
self.features_generators = features_generators
self.is_adding_hs = is_adding_hs
super().__init__(use_original_atom_ranks)
def _construct_bond_index(self, datapoint: RDKitMol) -> np.ndarray:
"""
Construct edge (bond) index
Parameters
----------
datapoint: RDKitMol
RDKit mol object.
Returns
-------
edge_index: np.ndarray
Edge (Bond) index
"""
src: List[int] = []
dest: List[int] = []
for bond in datapoint.GetBonds():
# add edge list considering a directed graph
start, end = bond.GetBeginAtomIdx(), bond.GetEndAtomIdx()
src += [start, end]
dest += [end, start]
return np.asarray([src, dest], dtype=int)
def _get_bond_features(self, datapoint: RDKitMol) -> np.ndarray:
"""
Construct bond(edge) features for the given datapoint
For each bond index, 2 bond feature arrays are added to the main features array,
for the current bond and its reverse bond respectively.
Note: This method of generating bond features ensures that the shape of the bond features array
is always equal to (number of bonds, number of bond features), even if the number of bonds
is equal to 0.
Parameters
----------
datapoint: RDKitMol
RDKit mol object.
Returns
-------
f_bonds: np.ndarray
Bond features array
"""
bonds: Chem.rdchem._ROBondSeq = datapoint.GetBonds()
bond_fdim: int = GraphConvConstants.BOND_FDIM
number_of_bonds: int = len(
bonds) * 2 # Note the value is doubled to account for reverse bonds
f_bonds: np.ndarray = np.empty((number_of_bonds, bond_fdim))
for index in range(0, number_of_bonds, 2):
bond_id: int = index // 2
bond_feature: np.ndarray = np.asarray(bond_features(bonds[bond_id]),
dtype=float)
f_bonds[index] = bond_feature # bond
f_bonds[index + 1] = bond_feature # reverse bond
return f_bonds
def _featurize(self, datapoint: RDKitMol, **kwargs) -> GraphData:
"""Calculate molecule graph features from RDKit mol object.
Parameters
----------
datapoint: RDKitMol
RDKit mol object.
Returns
-------
graph: GraphData
A molecule graph object with features:
- node_features: Node feature matrix with shape [num_nodes, num_node_features]
- edge_index: Graph connectivity in COO format with shape [2, num_edges]
- edge_features: Edge feature matrix with shape [num_edges, num_edge_features]
- global_features: Array of global molecular features
"""
if isinstance(datapoint, Chem.rdchem.Mol):
if self.is_adding_hs:
datapoint = Chem.AddHs(datapoint)
else:
raise ValueError(
"Feature field should contain smiles for DMPNN featurizer!")
# get atom features
f_atoms: np.ndarray = np.asarray(
[atom_features(atom) for atom in datapoint.GetAtoms()], dtype=float)
# get edge(bond) features
f_bonds: np.ndarray = self._get_bond_features(datapoint)
# get edge index
edge_index: np.ndarray = self._construct_bond_index(datapoint)
# get global features
global_features: np.ndarray = np.empty(0)
if self.features_generators is not None:
global_features = generate_global_features(datapoint,
self.features_generators)
return GraphData(node_features=f_atoms,
edge_index=edge_index,
edge_features=f_bonds,
global_features=global_features)
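# Usage sketch (illustrative): requesting global molecular features alongside
# the graph. 'morgan' is one of the keys registered in
# GraphConvConstants.FEATURE_GENERATORS; the resulting vector has length 2048,
# assuming GraphData exposes the extra keyword field as an attribute.
#
#     featurizer = DMPNNFeaturizer(features_generators=['morgan'])
#     graph = featurizer.featurize(['CCO'])[0]
#     graph.global_features  # numpy array of length 2048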
<file_sep>"""PyTorch implementation of fully connected networks.
"""
import logging
import numpy as np
import torch
import torch.nn.functional as F
from collections.abc import Sequence as SequenceCollection
import deepchem as dc
from deepchem.models.torch_models.torch_model import TorchModel
from deepchem.models.losses import _make_pytorch_shapes_consistent
from deepchem.metrics import to_one_hot
from typing import Any, Callable, Iterable, List, Optional, Sequence, Tuple, Union
from deepchem.utils.typing import ActivationFn, LossFn, OneOrMany
from deepchem.utils.pytorch_utils import get_activation
logger = logging.getLogger(__name__)
class MultitaskClassifier(TorchModel):
"""A fully connected network for multitask classification.
This class provides lots of options for customizing aspects of the model: the
number and widths of layers, the activation functions, regularization methods,
etc.
It optionally can compose the model from pre-activation residual blocks, as
described in https://arxiv.org/abs/1603.05027, rather than a simple stack of
dense layers. This often leads to easier training, especially when using a
large number of layers. Note that residual blocks can only be used when
successive layers have the same width. Wherever the layer width changes, a
simple dense layer will be used even if residual=True.
"""
def __init__(self,
n_tasks: int,
n_features: int,
layer_sizes: Sequence[int] = [1000],
weight_init_stddevs: OneOrMany[float] = 0.02,
bias_init_consts: OneOrMany[float] = 1.0,
weight_decay_penalty: float = 0.0,
weight_decay_penalty_type: str = 'l2',
dropouts: OneOrMany[float] = 0.5,
activation_fns: OneOrMany[ActivationFn] = 'relu',
n_classes: int = 2,
residual: bool = False,
**kwargs) -> None:
"""Create a MultitaskClassifier.
In addition to the following arguments, this class also accepts
all the keyword arguments from TorchModel.
Parameters
----------
n_tasks: int
number of tasks
n_features: int
number of features
layer_sizes: list
the size of each dense layer in the network. The length of
this list determines the number of layers.
weight_init_stddevs: list or float
the standard deviation of the distribution to use for weight
initialization of each layer. The length of this list should
equal len(layer_sizes). Alternatively this may be a single
value instead of a list, in which case the same value is used
for every layer.
bias_init_consts: list or float
the value to initialize the biases in each layer to. The
length of this list should equal len(layer_sizes).
Alternatively this may be a single value instead of a list, in
which case the same value is used for every layer.
weight_decay_penalty: float
the magnitude of the weight decay penalty to use
weight_decay_penalty_type: str
the type of penalty to use for weight decay, either 'l1' or 'l2'
dropouts: list or float
the dropout probability to use for each layer. The length of this list should equal len(layer_sizes).
Alternatively this may be a single value instead of a list, in which case the same value is used for every layer.
activation_fns: list or object
the PyTorch activation function to apply to each layer. The length of this list should equal
len(layer_sizes). Alternatively this may be a single value instead of a list, in which case the
same value is used for every layer. Standard activation functions from torch.nn.functional can be specified by name.
n_classes: int
the number of classes
residual: bool
if True, the model will be composed of pre-activation residual blocks instead
of a simple stack of dense layers.
"""
self.n_tasks = n_tasks
self.n_features = n_features
self.n_classes = n_classes
n_layers = len(layer_sizes)
if not isinstance(weight_init_stddevs, SequenceCollection):
weight_init_stddevs = [weight_init_stddevs] * n_layers
if not isinstance(bias_init_consts, SequenceCollection):
bias_init_consts = [bias_init_consts] * n_layers
if not isinstance(dropouts, SequenceCollection):
dropouts = [dropouts] * n_layers
if isinstance(
activation_fns,
str) or not isinstance(activation_fns, SequenceCollection):
activation_fns = [activation_fns] * n_layers
activation_fns = [get_activation(f) for f in activation_fns]
# Define the PyTorch Module that implements the model.
class PytorchImpl(torch.nn.Module):
def __init__(self):
super(PytorchImpl, self).__init__()
self.layers = torch.nn.ModuleList()
prev_size = n_features
for size, weight_stddev, bias_const in zip(
layer_sizes, weight_init_stddevs, bias_init_consts):
layer = torch.nn.Linear(prev_size, size)
torch.nn.init.normal_(layer.weight, 0, weight_stddev)
torch.nn.init.constant_(layer.bias, bias_const)
self.layers.append(layer)
prev_size = size
self.output_layer = torch.nn.Linear(prev_size,
n_tasks * n_classes)
torch.nn.init.xavier_uniform_(self.output_layer.weight)
torch.nn.init.constant_(self.output_layer.bias, 0)
def forward(self, x):
prev_size = n_features
next_activation = None
for size, layer, dropout, activation_fn in zip(
layer_sizes, self.layers, dropouts, activation_fns):
y = x
if next_activation is not None:
y = next_activation(x)
y = layer(y)
if dropout > 0.0 and self.training:
y = F.dropout(y, dropout)
if residual and prev_size == size:
y = x + y
x = y
prev_size = size
next_activation = activation_fn
if next_activation is not None:
y = next_activation(y)
neural_fingerprint = y
y = self.output_layer(y)
logits = torch.reshape(y, (-1, n_tasks, n_classes))
output = F.softmax(logits, dim=2)
return (output, logits, neural_fingerprint)
model = PytorchImpl()
regularization_loss: Optional[Callable]
if weight_decay_penalty != 0:
weights = [layer.weight for layer in model.layers]
if weight_decay_penalty_type == 'l1':
regularization_loss = lambda: weight_decay_penalty * torch.sum( # noqa: E731
torch.stack([torch.abs(w).sum() for w in weights]))
else:
regularization_loss = lambda: weight_decay_penalty * torch.sum( # noqa: E731
torch.stack([torch.square(w).sum() for w in weights]))
else:
regularization_loss = None
super(MultitaskClassifier,
self).__init__(model,
dc.models.losses.SoftmaxCrossEntropy(),
output_types=['prediction', 'loss', 'embedding'],
regularization_loss=regularization_loss,
**kwargs)
def default_generator(
self,
dataset: dc.data.Dataset,
epochs: int = 1,
mode: str = 'fit',
deterministic: bool = True,
pad_batches: bool = True) -> Iterable[Tuple[List, List, List]]:
for epoch in range(epochs):
for (X_b, y_b, w_b,
ids_b) in dataset.iterbatches(batch_size=self.batch_size,
deterministic=deterministic,
pad_batches=pad_batches):
if y_b is not None:
y_b = to_one_hot(y_b.flatten(), self.n_classes).reshape(
-1, self.n_tasks, self.n_classes)
yield ([X_b], [y_b], [w_b])
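# Usage sketch (illustrative, random data): fitting and predicting with the
# classifier defined above. numpy and deepchem are already imported in this
# module as np and dc.
#
#     X = np.random.rand(20, 10)
#     y = np.random.randint(2, size=(20, 3))
#     dataset = dc.data.NumpyDataset(X, y)
#     model = MultitaskClassifier(n_tasks=3, n_features=10, layer_sizes=[64, 64])
#     model.fit(dataset, nb_epoch=5)
#     probs = model.predict(dataset)  # probabilities, shape (n_samples, n_tasks, n_classes)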
class MultitaskRegressor(TorchModel):
"""A fully connected network for multitask regression.
This class provides lots of options for customizing aspects of the model: the
number and widths of layers, the activation functions, regularization methods,
etc.
It optionally can compose the model from pre-activation residual blocks, as
described in https://arxiv.org/abs/1603.05027, rather than a simple stack of
dense layers. This often leads to easier training, especially when using a
large number of layers. Note that residual blocks can only be used when
successive layers have the same width. Wherever the layer width changes, a
simple dense layer will be used even if residual=True.
"""
def __init__(self,
n_tasks: int,
n_features: int,
layer_sizes: Sequence[int] = [1000],
weight_init_stddevs: OneOrMany[float] = 0.02,
bias_init_consts: OneOrMany[float] = 1.0,
weight_decay_penalty: float = 0.0,
weight_decay_penalty_type: str = 'l2',
dropouts: OneOrMany[float] = 0.5,
activation_fns: OneOrMany[ActivationFn] = 'relu',
uncertainty: bool = False,
residual: bool = False,
**kwargs) -> None:
"""Create a MultitaskRegressor.
In addition to the following arguments, this class also accepts all the keyword arguments
from TorchModel.
Parameters
----------
n_tasks: int
number of tasks
n_features: int
number of features
layer_sizes: list
the size of each dense layer in the network. The length of this list determines the number of layers.
weight_init_stddevs: list or float
the standard deviation of the distribution to use for weight initialization of each layer. The length
of this list should equal len(layer_sizes)+1. The final element corresponds to the output layer.
Alternatively this may be a single value instead of a list, in which case the same value is used for every layer.
bias_init_consts: list or float
the value to initialize the biases in each layer to. The length of this list should equal len(layer_sizes)+1.
The final element corresponds to the output layer. Alternatively this may be a single value instead of a list,
in which case the same value is used for every layer.
weight_decay_penalty: float
the magnitude of the weight decay penalty to use
weight_decay_penalty_type: str
the type of penalty to use for weight decay, either 'l1' or 'l2'
dropouts: list or float
the dropout probability to use for each layer. The length of this list should equal len(layer_sizes).
Alternatively this may be a single value instead of a list, in which case the same value is used for every layer.
activation_fns: list or object
the PyTorch activation function to apply to each layer. The length of this list should equal
len(layer_sizes). Alternatively this may be a single value instead of a list, in which case the
same value is used for every layer. Standard activation functions from torch.nn.functional can be specified by name.
uncertainty: bool
if True, include extra outputs and loss terms to enable the uncertainty
in outputs to be predicted
residual: bool
if True, the model will be composed of pre-activation residual blocks instead
of a simple stack of dense layers.
"""
self.n_tasks = n_tasks
self.n_features = n_features
n_layers = len(layer_sizes)
if not isinstance(weight_init_stddevs, SequenceCollection):
weight_init_stddevs = [weight_init_stddevs] * (n_layers + 1)
if not isinstance(bias_init_consts, SequenceCollection):
bias_init_consts = [bias_init_consts] * (n_layers + 1)
if not isinstance(dropouts, SequenceCollection):
dropouts = [dropouts] * n_layers
if isinstance(
activation_fns,
str) or not isinstance(activation_fns, SequenceCollection):
activation_fns = [activation_fns] * n_layers
activation_fns = [get_activation(f) for f in activation_fns]
if uncertainty:
if any(d == 0.0 for d in dropouts):
raise ValueError(
'Dropout must be included in every layer to predict uncertainty'
)
# Define the PyTorch Module that implements the model.
class PytorchImpl(torch.nn.Module):
def __init__(self):
super(PytorchImpl, self).__init__()
self.layers = torch.nn.ModuleList()
prev_size = n_features
for size, weight_stddev, bias_const in zip(
layer_sizes, weight_init_stddevs, bias_init_consts):
layer = torch.nn.Linear(prev_size, size)
torch.nn.init.normal_(layer.weight, 0, weight_stddev)
torch.nn.init.constant_(layer.bias, bias_const)
self.layers.append(layer)
prev_size = size
self.output_layer = torch.nn.Linear(prev_size, n_tasks)
torch.nn.init.normal_(self.output_layer.weight, 0,
weight_init_stddevs[-1])
torch.nn.init.constant_(self.output_layer.bias,
bias_init_consts[-1])
self.uncertainty_layer = torch.nn.Linear(prev_size, n_tasks)
# initialize the uncertainty head (separate from the output layer above)
torch.nn.init.normal_(self.uncertainty_layer.weight, 0,
weight_init_stddevs[-1])
torch.nn.init.constant_(self.uncertainty_layer.bias, 0)
def forward(self, inputs):
x, dropout_switch = inputs
prev_size = n_features
next_activation = None
for size, layer, dropout, activation_fn in zip(
layer_sizes, self.layers, dropouts, activation_fns):
y = x
if next_activation is not None:
y = next_activation(x)
y = layer(y)
if dropout > 0.0 and dropout_switch:
y = F.dropout(y, dropout)
if residual and prev_size == size:
y = x + y
x = y
prev_size = size
next_activation = activation_fn
if next_activation is not None:
y = next_activation(y)
neural_fingerprint = y
output = torch.reshape(self.output_layer(y), (-1, n_tasks, 1))
if uncertainty:
log_var = torch.reshape(self.uncertainty_layer(y),
(-1, n_tasks, 1))
var = torch.exp(log_var)
return (output, var, output, log_var, neural_fingerprint)
else:
return (output, neural_fingerprint)
model = PytorchImpl()
regularization_loss: Optional[Callable]
if weight_decay_penalty != 0:
weights = [layer.weight for layer in model.layers]
if weight_decay_penalty_type == 'l1':
regularization_loss = lambda: weight_decay_penalty * torch.sum( # noqa: E731
torch.stack([torch.abs(w).sum() for w in weights]))
else:
regularization_loss = lambda: weight_decay_penalty * torch.sum( # noqa: E731
torch.stack([torch.square(w).sum() for w in weights]))
else:
regularization_loss = None
loss: Union[dc.models.losses.Loss, LossFn]
if uncertainty:
output_types = [
'prediction', 'variance', 'loss', 'loss', 'embedding'
]
def loss(outputs, labels, weights):
output, labels = _make_pytorch_shapes_consistent(
outputs[0], labels[0])
diff = labels - output
losses = diff * diff / torch.exp(outputs[1]) + outputs[1]
w = weights[0]
if len(w.shape) < len(losses.shape):
if isinstance(w, torch.Tensor):
shape = tuple(w.shape)
else:
shape = w.shape
shape = tuple(-1 if x is None else x for x in shape)
w = w.reshape(shape + (1,) *
(len(losses.shape) - len(w.shape)))
loss = losses * w
loss = loss.mean()
if regularization_loss is not None:
loss += regularization_loss()
return loss
else:
output_types = ['prediction', 'embedding']
loss = dc.models.losses.L2Loss()
super(MultitaskRegressor,
self).__init__(model,
loss,
output_types=output_types,
regularization_loss=regularization_loss,
**kwargs)
def default_generator(
self,
dataset: dc.data.Dataset,
epochs: int = 1,
mode: str = 'fit',
deterministic: bool = True,
pad_batches: bool = True) -> Iterable[Tuple[List, List, List]]:
for epoch in range(epochs):
for (X_b, y_b, w_b,
ids_b) in dataset.iterbatches(batch_size=self.batch_size,
deterministic=deterministic,
pad_batches=pad_batches):
if mode == 'predict':
dropout = np.array(0.0)
else:
dropout = np.array(1.0)
yield ([X_b, dropout], [y_b], [w_b])
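# Usage sketch (illustrative, random data): with uncertainty=True the model can
# also report estimated standard deviations through predict_uncertainty().
#
#     X = np.random.rand(20, 10)
#     y = np.random.rand(20, 2)
#     dataset = dc.data.NumpyDataset(X, y)
#     model = MultitaskRegressor(n_tasks=2, n_features=10,
#                                layer_sizes=[64], uncertainty=True)
#     model.fit(dataset, nb_epoch=5)
#     y_pred, y_std = model.predict_uncertainty(dataset)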
class MultitaskFitTransformRegressor(MultitaskRegressor):
"""Implements a MultitaskRegressor that performs on-the-fly transformation during fit/predict.
Examples
--------
>>> n_samples = 10
>>> n_features = 3
>>> n_tasks = 1
>>> ids = np.arange(n_samples)
>>> X = np.random.rand(n_samples, n_features, n_features)
>>> y = np.zeros((n_samples, n_tasks))
>>> w = np.ones((n_samples, n_tasks))
>>> dataset = dc.data.NumpyDataset(X, y, w, ids)
>>> fit_transformers = [dc.trans.CoulombFitTransformer(dataset)]
>>> model = dc.models.MultitaskFitTransformRegressor(n_tasks, [n_features, n_features],
... dropouts=[0.], learning_rate=0.003, weight_init_stddevs=[np.sqrt(6)/np.sqrt(1000)],
... batch_size=n_samples, fit_transformers=fit_transformers)
>>> model.n_features
12
"""
def __init__(self,
n_tasks: int,
n_features: int,
fit_transformers: Sequence[dc.trans.Transformer] = [],
batch_size: int = 50,
**kwargs):
"""Create a MultitaskFitTransformRegressor.
In addition to the following arguments, this class also accepts all the keyword arguments
from MultitaskRegressor.
Parameters
----------
n_tasks: int
number of tasks
n_features: list or int
number of features
fit_transformers: list
List of dc.trans.FitTransformer objects
"""
self.fit_transformers = fit_transformers
# Run fit transformers on dummy dataset to determine n_features after transformation
if isinstance(n_features, list):
X_b = np.ones([batch_size] + n_features)
elif isinstance(n_features, int):
X_b = np.ones([batch_size, n_features])
else:
raise ValueError("n_features should be list or int")
empty: np.ndarray = np.array([])
for transformer in fit_transformers:
assert transformer.transform_X and not (transformer.transform_y or
transformer.transform_w)
X_b, _, _, _ = transformer.transform_array(X_b, empty, empty, empty)
n_features = X_b.shape[1]
logger.info("n_features after fit_transform: %d", int(n_features))
super(MultitaskFitTransformRegressor,
self).__init__(n_tasks,
n_features,
batch_size=batch_size,
**kwargs)
def default_generator(
self,
dataset: dc.data.Dataset,
epochs: int = 1,
mode: str = 'fit',
deterministic: bool = True,
pad_batches: bool = True) -> Iterable[Tuple[List, List, List]]:
empty: np.ndarray = np.array([])
for epoch in range(epochs):
for (X_b, y_b, w_b,
ids_b) in dataset.iterbatches(batch_size=self.batch_size,
deterministic=deterministic,
pad_batches=pad_batches):
if y_b is not None:
y_b = y_b.reshape(-1, self.n_tasks, 1)
if X_b is not None:
if mode == 'fit':
for transformer in self.fit_transformers:
X_b, _, _, _ = transformer.transform_array(
X_b, empty, empty, empty)
if mode == 'predict':
dropout = np.array(0.0)
else:
dropout = np.array(1.0)
yield ([X_b, dropout], [y_b], [w_b])
def predict_on_generator(
self,
generator: Iterable[Tuple[Any, Any, Any]],
transformers: List[dc.trans.Transformer] = [],
output_types: Optional[OneOrMany[str]] = None
) -> OneOrMany[np.ndarray]:
def transform_generator():
for inputs, labels, weights in generator:
X_t = inputs[0]
for transformer in self.fit_transformers:
X_t = transformer.X_transform(X_t)
yield ([X_t] + inputs[1:], labels, weights)
return super(MultitaskFitTransformRegressor,
self).predict_on_generator(transform_generator(),
transformers, output_types)
<file_sep>import deepchem as dc
import pytest
try:
import tensorflow as tf # noqa: F401
from tensorflow.python.eager import context # noqa: F401
has_tensorflow = True
except:
has_tensorflow = False
@pytest.mark.tensorflow
def test_interatomic_l2_distance():
N_atoms = 10
M_nbrs = 15
ndim = 20
layer = dc.models.layers.InteratomicL2Distances(N_atoms=N_atoms,
M_nbrs=M_nbrs,
ndim=ndim)
config = layer.get_config()
layer_copied = dc.models.layers.InteratomicL2Distances.from_config(config)
assert layer_copied.N_atoms == layer.N_atoms
assert layer_copied.M_nbrs == layer.M_nbrs
assert layer_copied.ndim == layer.ndim
@pytest.mark.tensorflow
def test_graph_conv():
out_channel = 10
min_deg = 0
max_deg = 10
activation_fn = 'relu'
layer = dc.models.layers.GraphConv(out_channel=out_channel,
min_deg=min_deg,
max_deg=max_deg,
activation_fn=activation_fn)
config = layer.get_config()
layer_copied = dc.models.layers.GraphConv.from_config(config)
assert layer_copied.out_channel == layer.out_channel
assert layer_copied.activation_fn == layer.activation_fn
assert layer_copied.max_degree == layer.max_degree
assert layer_copied.min_degree == layer.min_degree
@pytest.mark.tensorflow
def test_graph_gather():
batch_size = 10
activation_fn = 'relu'
layer = dc.models.layers.GraphGather(batch_size=batch_size,
activation_fn=activation_fn)
config = layer.get_config()
layer_copied = dc.models.layers.GraphGather.from_config(config)
assert layer_copied.batch_size == layer.batch_size
assert layer_copied.activation_fn == layer.activation_fn
@pytest.mark.tensorflow
def test_graph_pool():
min_degree = 0
max_degree = 10
layer = dc.models.layers.GraphPool(min_degree=min_degree,
max_degree=max_degree)
config = layer.get_config()
layer_copied = dc.models.layers.GraphPool.from_config(config)
assert layer_copied.max_degree == layer.max_degree
assert layer_copied.min_degree == layer.min_degree
@pytest.mark.tensorflow
def test_lstmstep():
output_dim = 100
input_dim = 50
init_fn = 'glorot_uniform'
inner_init_fn = 'orthogonal'
activation_fn = 'tanh'
inner_activation_fn = 'hard_sigmoid'
layer = dc.models.layers.LSTMStep(output_dim, input_dim, init_fn,
inner_init_fn, activation_fn,
inner_activation_fn)
config = layer.get_config()
layer_copied = dc.models.layers.LSTMStep.from_config(config)
assert layer_copied.output_dim == layer.output_dim
assert layer_copied.input_dim == layer.input_dim
assert layer_copied.init == layer.init
assert layer_copied.inner_init == layer.inner_init
assert layer_copied.activation == layer.activation
assert layer_copied.inner_activation == layer.inner_activation
@pytest.mark.tensorflow
def test_attn_lstm_embedding():
n_test = 10
n_support = 100
n_feat = 20
max_depth = 3
layer = dc.models.layers.AttnLSTMEmbedding(n_test, n_support, n_feat,
max_depth)
config = layer.get_config()
layer_copied = dc.models.layers.AttnLSTMEmbedding.from_config(config)
assert layer_copied.n_test == layer.n_test
assert layer_copied.n_support == layer.n_support
assert layer_copied.n_feat == layer.n_feat
assert layer_copied.max_depth == layer.max_depth
@pytest.mark.tensorflow
def test_iterref_lstm_embedding():
n_test = 10
n_support = 100
n_feat = 20
max_depth = 3
layer = dc.models.layers.IterRefLSTMEmbedding(n_test, n_support, n_feat,
max_depth)
config = layer.get_config()
layer_copied = dc.models.layers.IterRefLSTMEmbedding.from_config(config)
assert layer_copied.n_test == layer.n_test
assert layer_copied.n_support == layer.n_support
assert layer_copied.n_feat == layer.n_feat
assert layer_copied.max_depth == layer.max_depth
@pytest.mark.tensorflow
def test_switched_dropout():
rate = 0.1
layer = dc.models.layers.SwitchedDropout(rate=rate)
config = layer.get_config()
layer_copied = dc.models.layers.SwitchedDropout.from_config(config)
assert layer_copied.rate == layer.rate
@pytest.mark.tensorflow
def test_weighted_linearcombo():
std = 0.1
layer = dc.models.layers.WeightedLinearCombo(std=std)
config = layer.get_config()
layer_copied = dc.models.layers.WeightedLinearCombo.from_config(config)
assert layer_copied.std == layer.std
@pytest.mark.tensorflow
def test_combine_mean_std():
training_only = True
noise_epsilon = 0.001
layer = dc.models.layers.CombineMeanStd(training_only, noise_epsilon)
config = layer.get_config()
layer_copied = dc.models.layers.CombineMeanStd.from_config(config)
assert layer_copied.training_only == layer.training_only
assert layer_copied.noise_epsilon == layer.noise_epsilon
@pytest.mark.tensorflow
def test_stack():
axis = 2
layer = dc.models.layers.Stack(axis=axis)
config = layer.get_config()
layer_copied = dc.models.layers.Stack.from_config(config)
assert layer_copied.axis == layer.axis
@pytest.mark.tensorflow
def test_variable():
initial_value = 10
layer = dc.models.layers.Variable(initial_value)
config = layer.get_config()
layer_copied = dc.models.layers.Variable.from_config(config)
assert layer_copied.initial_value == layer.initial_value
@pytest.mark.tensorflow
def test_vina_free_energy():
N_atoms = 10
M_nbrs = 15
ndim = 20
nbr_cutoff = 5
start = 1
stop = 7
stddev = 0.3
Nrot = 1
layer = dc.models.layers.VinaFreeEnergy(N_atoms, M_nbrs, ndim, nbr_cutoff,
start, stop, stddev, Nrot)
config = layer.get_config()
layer_copied = dc.models.layers.VinaFreeEnergy.from_config(config)
assert layer_copied.N_atoms == layer.N_atoms
assert layer_copied.M_nbrs == layer.M_nbrs
assert layer_copied.ndim == layer.ndim
assert layer_copied.nbr_cutoff == layer.nbr_cutoff
assert layer_copied.start == layer.start
assert layer_copied.stop == layer.stop
assert layer_copied.stddev == layer.stddev
assert layer_copied.Nrot == layer.Nrot
@pytest.mark.tensorflow
def test_neighbor_list():
N_atoms = 10
M_nbrs = 15
ndim = 20
nbr_cutoff = 5
start = 1
stop = 7
layer = dc.models.layers.NeighborList(N_atoms, M_nbrs, ndim, nbr_cutoff,
start, stop)
config = layer.get_config()
layer_copied = dc.models.layers.NeighborList.from_config(config)
assert layer_copied.N_atoms == layer.N_atoms
assert layer_copied.M_nbrs == layer.M_nbrs
assert layer_copied.ndim == layer.ndim
assert layer_copied.nbr_cutoff == layer.nbr_cutoff
assert layer_copied.start == layer.start
assert layer_copied.stop == layer.stop
@pytest.mark.tensorflow
def test_atomic_convolution():
atom_types = None
radial_params = list()
boxsize = None
layer = dc.models.layers.AtomicConvolution(atom_types, radial_params,
boxsize)
config = layer.get_config()
layer_copied = dc.models.layers.AtomicConvolution.from_config(config)
assert layer_copied.atom_types == layer.atom_types
assert layer_copied.radial_params == layer.radial_params
assert layer_copied.boxsize == layer.boxsize
@pytest.mark.tensorflow
def test_ani_feat():
max_atoms = 23
radial_cutoff = 4.6
angular_cutoff = 3.1
radial_length = 32
angular_length = 8
atom_cases = [1, 6, 7, 8, 16]
atomic_number_differentiated = True
coordinates_in_bohr = True
layer = dc.models.layers.ANIFeat(max_atoms, radial_cutoff, angular_cutoff,
radial_length, angular_length, atom_cases,
atomic_number_differentiated,
coordinates_in_bohr)
config = layer.get_config()
layer_copied = dc.models.layers.ANIFeat.from_config(config)
assert layer_copied.max_atoms == layer.max_atoms
assert layer_copied.radial_cutoff == layer.radial_cutoff
assert layer_copied.angular_cutoff == layer.angular_cutoff
assert layer_copied.radial_length == layer.radial_length
assert layer_copied.angular_length == layer.angular_length
assert layer_copied.atom_cases == layer.atom_cases
assert layer_copied.atomic_number_differentiated == layer.atomic_number_differentiated
assert layer_copied.coordinates_in_bohr == layer.coordinates_in_bohr
@pytest.mark.tensorflow
def test_graph_embed_pool():
num_vertices = 100
layer = dc.models.layers.GraphEmbedPoolLayer(num_vertices)
config = layer.get_config()
layer_copied = dc.models.layers.GraphEmbedPoolLayer.from_config(config)
assert layer_copied.num_vertices == layer.num_vertices
@pytest.mark.tensorflow
def test_graph_cnn():
num_filters = 20
layer = dc.models.layers.GraphCNN(num_filters)
config = layer.get_config()
layer_copied = dc.models.layers.GraphCNN.from_config(config)
assert layer_copied.num_filters == layer.num_filters
@pytest.mark.tensorflow
def test_highway():
activation_fn = 'relu'
biases_initializer = 'zeros'
weights_initializer = None
layer = dc.models.layers.Highway(activation_fn, biases_initializer,
weights_initializer)
config = layer.get_config()
layer_copied = dc.models.layers.Highway.from_config(config)
assert layer_copied.activation_fn == layer.activation_fn
assert layer_copied.biases_initializer == layer.biases_initializer
assert layer_copied.weights_initializer == layer.weights_initializer
@pytest.mark.tensorflow
def test_weave():
n_atom_input_feat = 75
n_pair_input_feat = 14
n_atom_output_feat = 50
n_pair_output_feat = 50
n_hidden_AA = 50
n_hidden_PA = 50
n_hidden_AP = 50
n_hidden_PP = 50
update_pair = True
init = 'glorot_uniform'
activation = 'relu'
batch_normalize = True
batch_normalize_kwargs = {"renorm": True}
layer = dc.models.layers.WeaveLayer(n_atom_input_feat, n_pair_input_feat,
n_atom_output_feat, n_pair_output_feat,
n_hidden_AA, n_hidden_PA, n_hidden_AP,
n_hidden_PP, update_pair, init,
activation, batch_normalize,
batch_normalize_kwargs)
config = layer.get_config()
layer_copied = dc.models.layers.WeaveLayer.from_config(config)
assert layer_copied.n_atom_input_feat == layer.n_atom_input_feat
assert layer_copied.n_pair_input_feat == layer.n_pair_input_feat
assert layer_copied.n_atom_output_feat == layer.n_atom_output_feat
assert layer_copied.n_pair_output_feat == layer.n_pair_output_feat
assert layer_copied.n_hidden_AA == layer.n_hidden_AA
assert layer_copied.n_hidden_PA == layer.n_hidden_PA
assert layer_copied.n_hidden_AP == layer.n_hidden_AP
assert layer_copied.n_hidden_PP == layer.n_hidden_PP
assert layer_copied.update_pair == layer.update_pair
assert layer_copied.init == layer.init
assert layer_copied.activation == layer.activation
assert layer_copied.batch_normalize == layer.batch_normalize
assert layer_copied.batch_normalize_kwargs == layer.batch_normalize_kwargs
@pytest.mark.tensorflow
def test_weave_gather():
batch_size = 32
n_input = 128
gaussian_expand = True
compress_post_gaussian_expansion = False
init = 'glorot_uniform'
activation = 'tanh'
layer = dc.models.layers.WeaveGather(batch_size, n_input, gaussian_expand,
compress_post_gaussian_expansion, init,
activation)
config = layer.get_config()
layer_copied = dc.models.layers.WeaveGather.from_config(config)
assert layer_copied.batch_size == layer.batch_size
assert layer_copied.n_input == layer.n_input
assert layer_copied.gaussian_expand == layer.gaussian_expand
assert layer_copied.compress_post_gaussian_expansion == layer.compress_post_gaussian_expansion
assert layer_copied.init == layer.init
assert layer_copied.activation == layer.activation
@pytest.mark.tensorflow
def test_dtnn_embedding():
n_embedding = 30
periodic_table_length = 30
init = 'glorot_uniform'
layer = dc.models.layers.DTNNEmbedding(n_embedding, periodic_table_length,
init)
config = layer.get_config()
layer_copied = dc.models.layers.DTNNEmbedding.from_config(config)
assert layer_copied.n_embedding == layer.n_embedding
assert layer_copied.periodic_table_length == layer.periodic_table_length
assert layer_copied.init == layer.init
@pytest.mark.tensorflow
def test_dtnn_step():
n_embedding = 30
n_distance = 100
n_hidden = 60
init = 'glorot_uniform'
activation = 'tanh'
layer = dc.models.layers.DTNNStep(n_embedding, n_distance, n_hidden, init,
activation)
config = layer.get_config()
layer_copied = dc.models.layers.DTNNStep.from_config(config)
assert layer_copied.n_embedding == layer.n_embedding
assert layer_copied.n_distance == layer.n_distance
assert layer_copied.n_hidden == layer.n_hidden
assert layer_copied.init == layer.init
assert layer_copied.activation == layer.activation
@pytest.mark.tensorflow
def test_dtnn_gather():
n_embedding = 30
n_outputs = 100
layer_sizes = [100]
output_activation = True
init = 'glorot_uniform'
activation = 'tanh'
layer = dc.models.layers.DTNNGather(n_embedding, n_outputs, layer_sizes,
output_activation, init, activation)
config = layer.get_config()
layer_copied = dc.models.layers.DTNNGather.from_config(config)
assert layer_copied.n_embedding == layer.n_embedding
assert layer_copied.n_outputs == layer.n_outputs
assert layer_copied.layer_sizes == layer.layer_sizes
assert layer_copied.output_activation == layer.output_activation
assert layer_copied.init == layer.init
assert layer_copied.activation == layer.activation
@pytest.mark.tensorflow
def test_dag():
n_graph_feat = 30
n_atom_feat = 75
max_atoms = 50
layer_sizes = [100]
init = 'glorot_uniform'
activation = 'relu'
dropout = None
batch_size = 64
layer = dc.models.layers.DAGLayer(n_graph_feat, n_atom_feat, max_atoms,
layer_sizes, init, activation, dropout,
batch_size)
config = layer.get_config()
layer_copied = dc.models.layers.DAGLayer.from_config(config)
assert layer_copied.n_graph_feat == layer.n_graph_feat
assert layer_copied.n_atom_feat == layer.n_atom_feat
assert layer_copied.max_atoms == layer.max_atoms
assert layer_copied.layer_sizes == layer.layer_sizes
assert layer_copied.init == layer.init
assert layer_copied.activation == layer.activation
assert layer_copied.dropout == layer.dropout
assert layer_copied.batch_size == layer.batch_size
@pytest.mark.tensorflow
def test_dag_gather():
n_graph_feat = 30
n_outputs = 30
max_atoms = 50
layer_sizes = [100]
init = 'glorot_uniform'
activation = 'relu'
dropout = None
layer = dc.models.layers.DAGGather(n_graph_feat, n_outputs, max_atoms,
layer_sizes, init, activation, dropout)
config = layer.get_config()
layer_copied = dc.models.layers.DAGGather.from_config(config)
assert layer_copied.n_graph_feat == layer.n_graph_feat
assert layer_copied.n_outputs == layer.n_outputs
assert layer_copied.max_atoms == layer.max_atoms
assert layer_copied.layer_sizes == layer.layer_sizes
assert layer_copied.init == layer.init
assert layer_copied.activation == layer.activation
assert layer_copied.dropout == layer.dropout
@pytest.mark.tensorflow
def test_message_passing():
T = 20
message_fn = 'enn'
update_fn = 'gru'
n_hidden = 100
layer = dc.models.layers.MessagePassing(T, message_fn, update_fn, n_hidden)
config = layer.get_config()
layer_copied = dc.models.layers.MessagePassing.from_config(config)
assert layer_copied.T == layer.T
assert layer_copied.message_fn == layer.message_fn
assert layer_copied.update_fn == layer.update_fn
assert layer_copied.n_hidden == layer.n_hidden
@pytest.mark.tensorflow
def test_edge_network():
n_pair_features = 8
n_hidden = 100
init = 'glorot_uniform'
layer = dc.models.layers.EdgeNetwork(n_pair_features, n_hidden, init)
config = layer.get_config()
layer_copied = dc.models.layers.EdgeNetwork.from_config(config)
assert layer_copied.n_pair_features == layer.n_pair_features
assert layer_copied.n_hidden == layer.n_hidden
assert layer_copied.init == layer.init
@pytest.mark.tensorflow
def test_gru():
n_hidden = 100
init = 'glorot_uniform'
layer = dc.models.layers.GatedRecurrentUnit(n_hidden, init)
config = layer.get_config()
layer_copied = dc.models.layers.GatedRecurrentUnit.from_config(config)
assert layer_copied.n_hidden == layer.n_hidden
assert layer_copied.init == layer.init
@pytest.mark.tensorflow
def test_set_gather():
M = 10
batch_size = 16
n_hidden = 100
init = 'orthogonal'
layer = dc.models.layers.SetGather(M, batch_size, n_hidden, init)
config = layer.get_config()
layer_copied = dc.models.layers.SetGather.from_config(config)
assert layer_copied.M == layer.M
assert layer_copied.batch_size == layer.batch_size
assert layer_copied.n_hidden == layer.n_hidden
assert layer_copied.init == layer.init
<file_sep>"""
DGL-based GCN for graph property prediction.
"""
import torch.nn as nn
import torch.nn.functional as F
from deepchem.models.losses import Loss, L2Loss, SparseSoftmaxCrossEntropy
from deepchem.models.torch_models.torch_model import TorchModel
from typing import Optional
class GCN(nn.Module):
"""Model for Graph Property Prediction Based on Graph Convolution Networks (GCN).
This model proceeds as follows:
* Update node representations in graphs with a variant of GCN
* For each graph, compute its representation by 1) a weighted sum of the node
representations in the graph, where the weights are computed by applying a
gating function to the node representations 2) a max pooling of the node
representations 3) concatenating the output of 1) and 2)
* Perform the final prediction using an MLP
Examples
--------
>>> import deepchem as dc
>>> import dgl
>>> from deepchem.models import GCN
>>> smiles = ["C1CCC1", "C1=CC=CN=C1"]
>>> featurizer = dc.feat.MolGraphConvFeaturizer()
>>> graphs = featurizer.featurize(smiles)
>>> print(type(graphs[0]))
<class 'deepchem.feat.graph_data.GraphData'>
>>> dgl_graphs = [graphs[i].to_dgl_graph(self_loop=True) for i in range(len(graphs))]
>>> # Batch two graphs into a graph of two connected components
>>> batch_dgl_graph = dgl.batch(dgl_graphs)
>>> model = GCN(n_tasks=1, mode='regression')
>>> preds = model(batch_dgl_graph)
>>> print(type(preds))
<class 'torch.Tensor'>
>>> preds.shape == (2, 1)
True
References
----------
.. [1] Thomas N. Kipf and Max Welling. "Semi-Supervised Classification with Graph
Convolutional Networks." ICLR 2017.
Notes
-----
This class requires DGL (https://github.com/dmlc/dgl) and DGL-LifeSci
(https://github.com/awslabs/dgl-lifesci) to be installed.
This model is different from deepchem.models.GraphConvModel as follows:
* For each graph convolution, the learnable weight in this model is shared across all nodes.
``GraphConvModel`` employs separate learnable weights for nodes of different degrees. A
learnable weight is shared across all nodes of a particular degree.
* For ``GraphConvModel``, there is an additional GraphPool operation after each
graph convolution. The operation updates the representation of a node by applying an
element-wise maximum over the representations of its neighbors and itself.
* For computing graph-level representations, this model computes a weighted sum and an
element-wise maximum of the representations of all nodes in a graph and concatenates them.
The node weights are obtained by using a linear/dense layer followed by a sigmoid function.
For ``GraphConvModel``, the sum over node representations is unweighted.
* There are various minor differences in using dropout, skip connection and batch
normalization.
"""
def __init__(self,
n_tasks: int,
graph_conv_layers: Optional[list] = None,
activation=None,
residual: bool = True,
batchnorm: bool = False,
dropout: float = 0.,
predictor_hidden_feats: int = 128,
predictor_dropout: float = 0.,
mode: str = 'regression',
number_atom_features: int = 30,
n_classes: int = 2,
nfeat_name: str = 'x'):
"""
Parameters
----------
n_tasks: int
Number of tasks.
graph_conv_layers: list of int
Width of channels for GCN layers. graph_conv_layers[i] gives the width of channel
for the i-th GCN layer. If not specified, the default value will be [64, 64].
activation: callable
The activation function to apply to the output of each GCN layer.
By default, no activation function will be applied.
residual: bool
Whether to add a residual connection within each GCN layer. Default to True.
batchnorm: bool
Whether to apply batch normalization to the output of each GCN layer.
Default to False.
dropout: float
The dropout probability for the output of each GCN layer. Default to 0.
predictor_hidden_feats: int
The size for hidden representations in the output MLP predictor. Default to 128.
predictor_dropout: float
The dropout probability in the output MLP predictor. Default to 0.
mode: str
The model type, 'classification' or 'regression'. Default to 'regression'.
number_atom_features: int
The length of the initial atom feature vectors. Default to 30.
n_classes: int
The number of classes to predict per task
(only used when ``mode`` is 'classification'). Default to 2.
nfeat_name: str
For an input graph ``g``, the model assumes that it stores node features in
``g.ndata[nfeat_name]`` and will retrieve input node features from that.
Default to 'x'.
"""
try:
import dgl # noqa: F401
except:
raise ImportError('This class requires dgl.')
try:
import dgllife # noqa: F401
except:
raise ImportError('This class requires dgllife.')
if mode not in ['classification', 'regression']:
raise ValueError(
"mode must be either 'classification' or 'regression'")
super(GCN, self).__init__()
self.n_tasks = n_tasks
self.mode = mode
self.n_classes = n_classes
self.nfeat_name = nfeat_name
if mode == 'classification':
out_size = n_tasks * n_classes
else:
out_size = n_tasks
from dgllife.model import GCNPredictor as DGLGCNPredictor
if graph_conv_layers is None:
graph_conv_layers = [64, 64]
num_gnn_layers = len(graph_conv_layers)
if activation is not None:
activation = [activation] * num_gnn_layers
self.model = DGLGCNPredictor(
in_feats=number_atom_features,
hidden_feats=graph_conv_layers,
activation=activation,
residual=[residual] * num_gnn_layers,
batchnorm=[batchnorm] * num_gnn_layers,
dropout=[dropout] * num_gnn_layers,
n_tasks=out_size,
predictor_hidden_feats=predictor_hidden_feats,
predictor_dropout=predictor_dropout)
def forward(self, g):
"""Predict graph labels
Parameters
----------
g: DGLGraph
A DGLGraph for a batch of graphs. It stores the node features in
``dgl_graph.ndata[self.nfeat_name]``.
Returns
-------
torch.Tensor
The model output.
* When self.mode = 'regression',
its shape will be ``(dgl_graph.batch_size, self.n_tasks)``.
* When self.mode = 'classification', the output consists of probabilities
for classes. Its shape will be ``(dgl_graph.batch_size, self.n_tasks, self.n_classes)``
if self.n_tasks > 1; its shape will be ``(dgl_graph.batch_size, self.n_classes)`` if
self.n_tasks is 1.
torch.Tensor, optional
This is only returned when self.mode = 'classification', the output consists of the
logits for classes before softmax.
"""
node_feats = g.ndata[self.nfeat_name]
out = self.model(g, node_feats)
if self.mode == 'classification':
if self.n_tasks == 1:
logits = out.view(-1, self.n_classes)
softmax_dim = 1
else:
logits = out.view(-1, self.n_tasks, self.n_classes)
softmax_dim = 2
proba = F.softmax(logits, dim=softmax_dim)
return proba, logits
else:
return out
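# Classification sketch (illustrative): with mode='classification' the forward
# pass returns a (probabilities, logits) pair instead of a single tensor, and
# the probabilities have shape (batch_size, n_classes) when n_tasks == 1 or
# (batch_size, n_tasks, n_classes) otherwise.
#
#     model = GCN(n_tasks=1, mode='classification', n_classes=2)
#     proba, logits = model(batch_dgl_graph)  # batch_dgl_graph as in the class docstring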
class GCNModel(TorchModel):
"""Model for Graph Property Prediction Based on Graph Convolution Networks (GCN).
This model proceeds as follows:
* Update node representations in graphs with a variant of GCN
* For each graph, compute its representation by 1) a weighted sum of the node
representations in the graph, where the weights are computed by applying a
gating function to the node representations 2) a max pooling of the node
representations 3) concatenating the output of 1) and 2)
* Perform the final prediction using an MLP
Examples
--------
>>> import deepchem as dc
>>> from deepchem.models import GCNModel
>>> # preparing dataset
>>> smiles = ["C1CCC1", "CCC"]
>>> labels = [0., 1.]
>>> featurizer = dc.feat.MolGraphConvFeaturizer()
>>> X = featurizer.featurize(smiles)
>>> dataset = dc.data.NumpyDataset(X=X, y=labels)
>>> # training model
>>> model = GCNModel(mode='classification', n_tasks=1,
... batch_size=16, learning_rate=0.001)
>>> loss = model.fit(dataset, nb_epoch=5)
References
----------
.. [1] <NAME> and <NAME>. "Semi-Supervised Classification with Graph
Convolutional Networks." ICLR 2017.
Notes
-----
This class requires DGL (https://github.com/dmlc/dgl) and DGL-LifeSci
(https://github.com/awslabs/dgl-lifesci) to be installed.
This model is different from deepchem.models.GraphConvModel as follows:
* For each graph convolution, the learnable weight in this model is shared across all nodes.
``GraphConvModel`` employs separate learnable weights for nodes of different degrees. A
learnable weight is shared across all nodes of a particular degree.
* For ``GraphConvModel``, there is an additional GraphPool operation after each
graph convolution. The operation updates the representation of a node by applying an
element-wise maximum over the representations of its neighbors and itself.
* For computing graph-level representations, this model computes a weighted sum and an
element-wise maximum of the representations of all nodes in a graph and concatenates them.
The node weights are obtained by using a linear/dense layer followed by a sigmoid function.
For ``GraphConvModel``, the sum over node representations is unweighted.
* There are various minor differences in using dropout, skip connection and batch
normalization.
"""
def __init__(self,
n_tasks: int,
graph_conv_layers: Optional[list] = None,
activation=None,
residual: bool = True,
batchnorm: bool = False,
dropout: float = 0.,
predictor_hidden_feats: int = 128,
predictor_dropout: float = 0.,
mode: str = 'regression',
number_atom_features=30,
n_classes: int = 2,
self_loop: bool = True,
**kwargs):
"""
Parameters
----------
n_tasks: int
Number of tasks.
graph_conv_layers: list of int
Width of channels for GCN layers. graph_conv_layers[i] gives the width of channel
for the i-th GCN layer. If not specified, the default value will be [64, 64].
activation: callable
The activation function to apply to the output of each GCN layer.
By default, no activation function will be applied.
residual: bool
Whether to add a residual connection within each GCN layer. Default to True.
batchnorm: bool
Whether to apply batch normalization to the output of each GCN layer.
Default to False.
dropout: float
The dropout probability for the output of each GCN layer. Default to 0.
predictor_hidden_feats: int
The size for hidden representations in the output MLP predictor. Default to 128.
predictor_dropout: float
The dropout probability in the output MLP predictor. Default to 0.
mode: str
The model type, 'classification' or 'regression'. Default to 'regression'.
number_atom_features: int
The length of the initial atom feature vectors. Default to 30.
n_classes: int
The number of classes to predict per task
(only used when ``mode`` is 'classification'). Default to 2.
self_loop: bool
Whether to add self loops for the nodes, i.e. edges from nodes to themselves.
When input graphs have isolated nodes, self loops allow preserving the original feature
of them in message passing. Default to True.
kwargs
This can include any keyword argument of TorchModel.
"""
model = GCN(n_tasks=n_tasks,
graph_conv_layers=graph_conv_layers,
activation=activation,
residual=residual,
batchnorm=batchnorm,
dropout=dropout,
predictor_hidden_feats=predictor_hidden_feats,
predictor_dropout=predictor_dropout,
mode=mode,
number_atom_features=number_atom_features,
n_classes=n_classes)
if mode == 'regression':
loss: Loss = L2Loss()
output_types = ['prediction']
else:
loss = SparseSoftmaxCrossEntropy()
output_types = ['prediction', 'loss']
super(GCNModel, self).__init__(model,
loss=loss,
output_types=output_types,
**kwargs)
self._self_loop = self_loop
def _prepare_batch(self, batch):
"""Create batch data for GCN.
Parameters
----------
batch: tuple
The tuple is ``(inputs, labels, weights)``.
Returns
-------
inputs: DGLGraph
DGLGraph for a batch of graphs.
labels: list of torch.Tensor or None
The graph labels.
weights: list of torch.Tensor or None
The weights for each sample or sample/task pair converted to torch.Tensor.
"""
try:
import dgl
except:
raise ImportError('This class requires dgl.')
inputs, labels, weights = batch
dgl_graphs = [
graph.to_dgl_graph(self_loop=self._self_loop) for graph in inputs[0]
]
inputs = dgl.batch(dgl_graphs).to(self.device)
_, labels, weights = super(GCNModel, self)._prepare_batch(
([], labels, weights))
return inputs, labels, weights
<file_sep>import os
import numpy as np
import deepchem as dc
def test_DAG_transformer():
"""Tests the DAG transformer."""
np.random.seed(123)
# Load mini log-solubility dataset.
current_dir = os.path.dirname(os.path.abspath(__file__))
featurizer = dc.feat.ConvMolFeaturizer()
tasks = ["outcome"]
input_file = os.path.join(
current_dir, "../../models/tests/assets/example_regression.csv")
loader = dc.data.CSVLoader(tasks=tasks,
feature_field="smiles",
featurizer=featurizer)
dataset = loader.create_dataset(input_file)
transformer = dc.trans.DAGTransformer(max_atoms=50)
dataset = transformer.transform(dataset)
# The transformer generates n DAGs for a molecule with n
# atoms. These are denoted the "parents"
for idm, mol in enumerate(dataset.X):
assert dataset.X[idm].get_num_atoms() == len(dataset.X[idm].parents)
<file_sep># DeepChem ADME
ADME (Absorption, Distribution, Metabolism, Excretion) is a core part of the drug discovery process. In-silico models for ADME tasks span a wide variety of pharmacokinetic endpoints across multiple species.
The ADME benchmark contains three of the larger datasets that were released by AstraZeneca on ChEMBL: human plasma protein binding (PPB), lipophilicity, and human clearance. While this data is small relative to full industrial datasets, it is high quality and diverse.
Note that PPB dataset labels are transformed using %bound -> log(1 - %bound).
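For reference, below is a minimal sketch of that label transform (illustrative only: the column name `ppb_fraction_bound` is assumed, and values are assumed to be fractions in [0, 1) rather than percentages).

```python
import numpy as np
import pandas as pd

# Hypothetical PPB labels expressed as fraction bound
df = pd.DataFrame({"ppb_fraction_bound": [0.10, 0.50, 0.95]})

# Apply the %bound -> log(1 - %bound) transform described above
df["label"] = np.log(1.0 - df["ppb_fraction_bound"])
print(df)
```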
| Dataset | Examples | GC-DNN Val R2 (Scaffold Split) |
| ------ | ------ | ------ |
| Lipophilicity | 4200 | .653 |
| PPB | 1614 | .404 |
| Clearance | 1102 | .319 |
# Running Benchmark
```sh
$ python run_benchmark.py model split dataset
```
- models: {GraphConv, PDNN, RF, SVR}
- splits: {scaffold, random, index}
- dataset: {az_clearance.csv, az_hppb.csv, az_logd.csv}
Paper
----
www.arxiv.org/00000000
License
----
MIT<file_sep>"""
Clinical Toxicity (clintox) dataset loader.
@author <NAME>
"""
import os
import deepchem as dc
from deepchem.molnet.load_function.molnet_loader import TransformerGenerator, _MolnetLoader
from deepchem.data import Dataset
from typing import List, Optional, Tuple, Union
CLINTOX_URL = "https://deepchemdata.s3-us-west-1.amazonaws.com/datasets/clintox.csv.gz"
CLINTOX_TASKS = ['FDA_APPROVED', 'CT_TOX']
class _ClintoxLoader(_MolnetLoader):
def create_dataset(self) -> Dataset:
dataset_file = os.path.join(self.data_dir, "clintox.csv.gz")
if not os.path.exists(dataset_file):
dc.utils.data_utils.download_url(url=CLINTOX_URL,
dest_dir=self.data_dir)
loader = dc.data.CSVLoader(tasks=self.tasks,
feature_field="smiles",
featurizer=self.featurizer)
return loader.create_dataset(dataset_file, shard_size=8192)
def load_clintox(
featurizer: Union[dc.feat.Featurizer, str] = 'ECFP',
splitter: Union[dc.splits.Splitter, str, None] = 'scaffold',
transformers: List[Union[TransformerGenerator, str]] = ['balancing'],
reload: bool = True,
data_dir: Optional[str] = None,
save_dir: Optional[str] = None,
**kwargs
) -> Tuple[List[str], Tuple[Dataset, ...], List[dc.trans.Transformer]]:
"""Load ClinTox dataset
The ClinTox dataset compares drugs approved by the FDA and
drugs that have failed clinical trials for toxicity reasons.
The dataset includes two classification tasks for 1491 drug
compounds with known chemical structures:
#. clinical trial toxicity (or absence of toxicity)
#. FDA approval status.
List of FDA-approved drugs are compiled from the SWEETLEAD
database, and list of drugs that failed clinical trials for
toxicity reasons are compiled from the Aggregate Analysis of
ClinicalTrials.gov (AACT) database.
Random splitting is recommended for this dataset.
The raw data csv file contains columns below:
- "smiles" - SMILES representation of the molecular structure
- "FDA_APPROVED" - FDA approval status
- "CT_TOX" - Clinical trial results
Parameters
----------
featurizer: Featurizer or str
the featurizer to use for processing the data. Alternatively you can pass
one of the names from dc.molnet.featurizers as a shortcut.
splitter: Splitter or str
the splitter to use for splitting the data into training, validation, and
test sets. Alternatively you can pass one of the names from
dc.molnet.splitters as a shortcut. If this is None, all the data
will be included in a single dataset.
transformers: list of TransformerGenerators or strings
the Transformers to apply to the data. Each one is specified by a
TransformerGenerator or, as a shortcut, one of the names from
dc.molnet.transformers.
reload: bool
if True, the first call for a particular featurizer and splitter will cache
the datasets to disk, and subsequent calls will reload the cached datasets.
data_dir: str
a directory to save the raw data in
save_dir: str
a directory to save the dataset in
References
----------
.. [1] Gayvert, <NAME>., <NAME>, and <NAME>.
"A data-driven approach to predicting successes and failures of clinical
trials."
Cell chemical biology 23.10 (2016): 1294-1301.
.. [2] <NAME>., et al. "Integrated deep learned transcriptomic and
structure-based predictor of clinical trials outcomes." bioRxiv (2016):
095653.
.. [3] <NAME>., et al. "SWEETLEAD: an in silico database of approved
drugs, regulated chemicals, and herbal isolates for computer-aided drug
discovery." PloS one 8.11 (2013): e79568.
.. [4] Aggregate Analysis of ClinicalTrials.gov (AACT) Database.
https://www.ctti-clinicaltrials.org/aact-database
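Examples
--------
A minimal usage sketch (illustrative; the first call downloads the dataset, so
the doctest is skipped):

>>> import deepchem as dc
>>> tasks, datasets, transformers = dc.molnet.load_clintox()  # doctest: +SKIP
>>> train, valid, test = datasets  # doctest: +SKIP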
"""
loader = _ClintoxLoader(featurizer, splitter, transformers, CLINTOX_TASKS,
data_dir, save_dir, **kwargs)
return loader.load_dataset('clintox', reload)
<file_sep>"""
Script that trains Tensorflow models on PDBbind dataset.
"""
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
__author__ = "<NAME>"
__copyright__ = "Copyright 2016, Stanford University"
__license__ = "MIT"
import os
import numpy as np
import tensorflow as tf
# For stable runs
np.random.seed(123)
tf.random.set_seed(123)
import deepchem as dc
from deepchem.molnet import load_pdbbind_grid
split = "random"
subset = "full"
pdbbind_tasks, pdbbind_datasets, transformers = load_pdbbind_grid(
split=split, subset=subset)
train_dataset, valid_dataset, test_dataset = pdbbind_datasets
metric = dc.metrics.Metric(dc.metrics.pearson_r2_score)
current_dir = os.path.dirname(os.path.realpath(__file__))
model_dir = os.path.join(current_dir, "%s_%s_DNN" % (split, subset))
n_features = train_dataset.X.shape[1]
model = dc.models.MultitaskRegressor(
len(pdbbind_tasks),
n_features,
model_dir=model_dir,
dropouts=[.25],
learning_rate=0.0003,
weight_init_stddevs=[.1],
batch_size=64)
# Fit trained model
model.fit(train_dataset, nb_epoch=100)
print("Evaluating model")
train_scores = model.evaluate(train_dataset, [metric], transformers)
valid_scores = model.evaluate(valid_dataset, [metric], transformers)
print("Train scores")
print(train_scores)
print("Validation scores")
print(valid_scores)
<file_sep>"""
Script that trains Sklearn RF models on PDBbind dataset.
"""
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
__author__ = "<NAME>"
__copyright__ = "Copyright 2016, Stanford University"
__license__ = "MIT"
import os
import deepchem as dc
import numpy as np
from sklearn.ensemble import RandomForestRegressor
from deepchem.molnet import load_pdbbind
# For stable runs
np.random.seed(123)
pdbbind_tasks, pdbbind_datasets, transformers = load_pdbbind(
featurizer="grid", split="random", subset="core")
train_dataset, valid_dataset, test_dataset = pdbbind_datasets
metric = dc.metrics.Metric(dc.metrics.pearson_r2_score)
sklearn_model = RandomForestRegressor(n_estimators=500)
model = dc.models.SklearnModel(sklearn_model)
# Fit trained model
print("Fitting model on train dataset")
model.fit(train_dataset)
print("Evaluating model")
train_scores = model.evaluate(train_dataset, [metric], transformers)
valid_scores = model.evaluate(valid_dataset, [metric], transformers)
print("Train scores")
print(train_scores)
print("Validation scores")
print(valid_scores)
<file_sep># 2017 DeepCrystal Technologies - <NAME>
#
# Message Passing Neural Network SELU [MPNN-S] for Chemical Multigraphs
#
# MIT License - have fun!!
# ===========================================================
import math
import deepchem as dc
from rdkit import Chem, DataStructs
from rdkit.Chem import AllChem
import torch
import torch.nn as nn
from torch.autograd import Variable
import torch.optim as optim
import torch.nn.functional as F
from sklearn.metrics import r2_score
from sklearn.ensemble import RandomForestRegressor
from sklearn import preprocessing
import numpy as np
import random
from collections import OrderedDict
from scipy.stats import pearsonr
import donkey
random.seed(2)
torch.manual_seed(2)
np.random.seed(2)
DATASET = 'az_ppb.csv'
print(DATASET)
T = 3
BATCH_SIZE = 48
MAXITER = 40000
LIMIT = 0
LR = 5e-4
R = nn.Linear(150, 128)
U = {0: nn.Linear(156, 75), 1: nn.Linear(156, 75), 2: nn.Linear(156, 75)}
V = {0: nn.Linear(75, 75), 1: nn.Linear(75, 75), 2: nn.Linear(75, 75)}
E = nn.Linear(6, 6)
def adjust_learning_rate(optimizer, epoch):
"""Sets the learning rate to the initial LR decayed by .8 every 5 epochs"""
lr = LR * (0.9 ** (epoch // 10))
print('new lr [%.5f]' % lr)
for param_group in optimizer.param_groups:
param_group['lr'] = lr
def load_dataset():
train_features, train_labels, val_features, val_labels = donkey.load_dataset(DATASET)
scaler = preprocessing.StandardScaler().fit(train_labels)
train_labels = scaler.transform(train_labels)
val_labels = scaler.transform(val_labels)
train_labels = Variable(torch.FloatTensor(train_labels), requires_grad=False)
val_labels = Variable(torch.FloatTensor(val_labels), requires_grad=False)
return train_features, train_labels, val_features, val_labels
def readout(h, h2):
catted_reads = map(lambda x: torch.cat([h[x[0]], h2[x[1]]], 1), zip(h2.keys(), h.keys()))
activated_reads = map(lambda x: F.selu( R(x) ), catted_reads)
readout = Variable(torch.zeros(1, 128))
for read in activated_reads:
readout = readout + read
return F.tanh( readout )
def message_pass(g, h, k):
for v in g.keys():
neighbors = g[v]
for neighbor in neighbors:
e_vw = neighbor[0] # feature variable
w = neighbor[1]
m_w = V[k](h[w])
m_e_vw = E(e_vw)
reshaped = torch.cat( (h[v], m_w, m_e_vw), 1)
h[v] = F.selu(U[k](reshaped))
def construct_multigraph(smile):
g = OrderedDict({})
h = OrderedDict({})
molecule = Chem.MolFromSmiles(smile)
for i in range(0, molecule.GetNumAtoms()):
atom_i = molecule.GetAtomWithIdx(i)
h[i] = Variable(torch.FloatTensor(dc.feat.graph_features.atom_features(atom_i))).view(1, 75)
for j in range(0, molecule.GetNumAtoms()):
e_ij = molecule.GetBondBetweenAtoms(i, j)
if e_ij is not None:
e_ij = list(map(lambda x: 1 if x == True else 0, dc.feat.graph_features.bond_features(e_ij))) # ADDED edge feat
e_ij = Variable(torch.FloatTensor(e_ij).view(1, 6))
atom_j = molecule.GetAtomWithIdx(j)
if i not in g:
g[i] = []
g[i].append( (e_ij, j) )
return g, h
train_smiles, train_labels, val_smiles, val_labels = load_dataset()
linear = nn.Linear(128, 1)
params = [{'params': R.parameters()},
{'params': U[0].parameters()},
{'params': U[1].parameters()},
{'params': U[2].parameters()},
{'params': E.parameters()},
{'params': V[0].parameters()},
{'params': V[1].parameters()},
{'params': V[2].parameters()},
{'params': linear.parameters()}]
num_epoch = 0
optimizer = optim.Adam(params, lr=LR, weight_decay=1e-4)
for i in range(0, MAXITER):
optimizer.zero_grad()
train_loss = Variable(torch.zeros(1, 1))
y_hats_train = []
for j in range(0, BATCH_SIZE):
sample_index = random.randint(0, len(train_smiles) - 2)
smile = train_smiles[sample_index]
g, h = construct_multigraph(smile) # TODO: cache this
g2, h2 = construct_multigraph(smile)
for k in range(0, T):
message_pass(g, h, k)
x = readout(h, h2)
#x = F.selu( fc(x) )
y_hat = linear(x)
y = train_labels[sample_index]
y_hats_train.append(y_hat)
error = (y_hat - y)*(y_hat - y) / Variable(torch.FloatTensor([BATCH_SIZE])).view(1, 1)
train_loss = train_loss + error
train_loss.backward()
optimizer.step()
if i % int(len(train_smiles) / BATCH_SIZE) == 0:
val_loss = Variable(torch.zeros(1, 1), requires_grad=False)
y_hats_val = []
for j in range(0, len(val_smiles)):
g, h = construct_multigraph(val_smiles[j])
g2, h2 = construct_multigraph(val_smiles[j])
for k in range(0, T):
message_pass(g, h, k)
x = readout(h, h2)
#x = F.selu( fc(x) )
y_hat = linear(x)
y = val_labels[j]
y_hats_val.append(y_hat)
error = (y_hat - y)*(y_hat - y) / Variable(torch.FloatTensor([len(val_smiles)])).view(1, 1)
val_loss = val_loss + error
y_hats_val = np.array(list(map(lambda x: x.data.numpy(), y_hats_val)))
y_val = np.array(list(map(lambda x: x.data.numpy(), val_labels)))
y_hats_val = y_hats_val.reshape(-1, 1)
y_val = y_val.reshape(-1, 1)
r2_val_old = r2_score(y_val, y_hats_val)
r2_val_new = pearsonr(y_val, y_hats_val)[0]**2
train_loss_ = train_loss.data.numpy()[0]
val_loss_ = val_loss.data.numpy()[0]
print('epoch [%i/%i] train_loss [%f] val_loss [%f] r2_val_old [%.4f], r2_val_new [%.4f]'
% (num_epoch, 100, train_loss_, val_loss_, r2_val_old, r2_val_new))
num_epoch += 1
<file_sep>"""
Making it easy to import in classes.
"""
# flake8: noqa
# base classes for featurizers
from deepchem.feat.base_classes import Featurizer
from deepchem.feat.base_classes import MolecularFeaturizer
from deepchem.feat.base_classes import MaterialStructureFeaturizer
from deepchem.feat.base_classes import MaterialCompositionFeaturizer
from deepchem.feat.base_classes import ComplexFeaturizer
from deepchem.feat.base_classes import UserDefinedFeaturizer
from deepchem.feat.base_classes import DummyFeaturizer
from deepchem.feat.graph_features import ConvMolFeaturizer
from deepchem.feat.graph_features import WeaveFeaturizer
from deepchem.feat.graph_data import GraphData
from deepchem.feat.binding_pocket_features import BindingPocketFeaturizer
# molecule featurizers
from deepchem.feat.molecule_featurizers import AtomicCoordinates
from deepchem.feat.molecule_featurizers import BPSymmetryFunctionInput
from deepchem.feat.molecule_featurizers import CircularFingerprint
from deepchem.feat.molecule_featurizers import CoulombMatrix
from deepchem.feat.molecule_featurizers import CoulombMatrixEig
from deepchem.feat.molecule_featurizers import MACCSKeysFingerprint
from deepchem.feat.molecule_featurizers import MordredDescriptors
from deepchem.feat.molecule_featurizers import Mol2VecFingerprint
from deepchem.feat.molecule_featurizers import MolGraphConvFeaturizer
from deepchem.feat.molecule_featurizers import PagtnMolGraphFeaturizer
from deepchem.feat.molecule_featurizers import MolGanFeaturizer
from deepchem.feat.molecule_featurizers import OneHotFeaturizer
from deepchem.feat.molecule_featurizers import SparseMatrixOneHotFeaturizer
from deepchem.feat.molecule_featurizers import PubChemFingerprint
from deepchem.feat.molecule_featurizers import RawFeaturizer
from deepchem.feat.molecule_featurizers import RDKitDescriptors
from deepchem.feat.molecule_featurizers import SmilesToImage
from deepchem.feat.molecule_featurizers import SmilesToSeq, create_char_to_idx
from deepchem.feat.molecule_featurizers import MATFeaturizer
from deepchem.feat.molecule_featurizers import DMPNNFeaturizer
from deepchem.feat.molecule_featurizers import GroverFeaturizer
from deepchem.feat.molecule_featurizers import SNAPFeaturizer
from deepchem.feat.molecule_featurizers import RDKitConformerFeaturizer
from deepchem.feat.molecule_featurizers import MXMNetFeaturizer
# complex featurizers
from deepchem.feat.complex_featurizers import RdkitGridFeaturizer
from deepchem.feat.complex_featurizers import NeighborListAtomicCoordinates
from deepchem.feat.complex_featurizers import NeighborListComplexAtomicCoordinates
from deepchem.feat.complex_featurizers import AtomicConvFeaturizer
from deepchem.feat.complex_featurizers import ComplexNeighborListFragmentAtomicCoordinates
from deepchem.feat.complex_featurizers import ContactCircularFingerprint
from deepchem.feat.complex_featurizers import ContactCircularVoxelizer
from deepchem.feat.complex_featurizers import SplifFingerprint
from deepchem.feat.complex_featurizers import SplifVoxelizer
from deepchem.feat.complex_featurizers import ChargeVoxelizer
from deepchem.feat.complex_featurizers import SaltBridgeVoxelizer
from deepchem.feat.complex_featurizers import CationPiVoxelizer
from deepchem.feat.complex_featurizers import PiStackVoxelizer
from deepchem.feat.complex_featurizers import HydrogenBondVoxelizer
from deepchem.feat.complex_featurizers import HydrogenBondCounter
# material featurizers
from deepchem.feat.material_featurizers import ElementPropertyFingerprint
from deepchem.feat.material_featurizers import SineCoulombMatrix
from deepchem.feat.material_featurizers import CGCNNFeaturizer
from deepchem.feat.material_featurizers import ElemNetFeaturizer
from deepchem.feat.material_featurizers import LCNNFeaturizer
from deepchem.feat.atomic_conformation import AtomicConformation
from deepchem.feat.atomic_conformation import AtomicConformationFeaturizer
from deepchem.feat.huggingface_featurizer import HuggingFaceFeaturizer
# tokenizers
try:
from deepchem.feat.smiles_tokenizer import SmilesTokenizer
from deepchem.feat.smiles_tokenizer import BasicSmilesTokenizer
from deepchem.feat.bert_tokenizer import BertFeaturizer
from deepchem.feat.roberta_tokenizer import RobertaFeaturizer
from deepchem.feat.reaction_featurizer import RxnFeaturizer
except ModuleNotFoundError:
pass
from deepchem.feat.vocabulary_builders import HuggingFaceVocabularyBuilder
# support classes
from deepchem.feat.molecule_featurizers import GraphMatrix
<file_sep>import unittest
import numpy as np
from deepchem.trans.transformers import RxnSplitTransformer
reactions: np.ndarray = np.array([
"CC(C)C[Mg+].CON(C)C(=O)c1ccc(O)nc1>C1CCOC1.[Cl-]>CC(C)CC(=O)c1ccc(O)nc1",
"CCn1cc(C(=O)O)c(=O)c2cc(F)c(-c3ccc(N)cc3)cc21.O=CO>>CCn1cc(C(=O)O)c(=O)c2cc(F)c(-c3ccc(NC=O)cc3)cc21"
],
dtype=object)
split: np.ndarray = np.array(
[[
"CC(C)C[Mg+].CON(C)C(=O)c1ccc(O)nc1>C1CCOC1.[Cl-]",
"CC(C)CC(=O)c1ccc(O)nc1"
],
[
"CCn1cc(C(=O)O)c(=O)c2cc(F)c(-c3ccc(N)cc3)cc21.O=CO>",
"CCn1cc(C(=O)O)c(=O)c2cc(F)c(-c3ccc(NC=O)cc3)cc21"
]],
dtype=object)
sep: np.ndarray = np.array(
[[
"CC(C)C[Mg+].CON(C)C(=O)c1ccc(O)nc1.C1CCOC1.[Cl-]>",
"CC(C)CC(=O)c1ccc(O)nc1"
],
[
"CCn1cc(C(=O)O)c(=O)c2cc(F)c(-c3ccc(N)cc3)cc21.O=CO>",
"CCn1cc(C(=O)O)c(=O)c2cc(F)c(-c3ccc(NC=O)cc3)cc21"
]],
dtype=object)
class TestRxnSplitTransformer(unittest.TestCase):
"""
Tests the Reaction split transformer for the source/target splitting and
for the reagent mixing operation.
"""
def test_split(self):
"""Tests the source/target split from an input reaction SMILES."""
trans = RxnSplitTransformer(sep_reagent=True)
split_reactions = trans.transform_array(X=reactions,
y=np.array([]),
w=np.array([]),
ids=np.array([]))
assert split_reactions[0].shape == (2, 2)
assert (split_reactions[0] == split).all()
def test_mixing(self):
"""Tests the reagent - reactant mixing toggle."""
trans = RxnSplitTransformer(sep_reagent=False)
split_reactions = trans.transform_array(X=reactions,
y=np.array([]),
w=np.array([]),
ids=np.array([]))
assert split_reactions[0].shape == (2, 2)
assert (split_reactions[0] == sep).all()
<file_sep>import deepchem as dc
import deepchem.models.optimizers as optimizers
import unittest
import pytest
try:
import tensorflow as tf
has_tensorflow = True
except:
has_tensorflow = False
try:
import tensorflow_addons as tfa
has_tensorflow_addons = True
except:
has_tensorflow_addons = False
try:
import torch
has_pytorch = True
except:
has_pytorch = False
try:
import jax # noqa: F401
import optax
has_jax = True
except:
has_jax = False
class TestOptimizers(unittest.TestCase):
"""Test optimizers and related classes."""
@pytest.mark.tensorflow
def test_adam_tf(self):
"""Test creating an Adam optimizer."""
opt = optimizers.Adam(learning_rate=0.01)
global_step = tf.Variable(0)
tfopt = opt._create_tf_optimizer(global_step)
assert isinstance(tfopt, tf.keras.optimizers.Adam)
@pytest.mark.torch
def test_adam_pytorch(self):
"""Test creating an Adam optimizer."""
opt = optimizers.Adam(learning_rate=0.01)
params = [torch.nn.Parameter(torch.Tensor([1.0]))]
torchopt = opt._create_pytorch_optimizer(params)
assert isinstance(torchopt, torch.optim.Adam)
@pytest.mark.jax
def test_adam_jax(self):
"""Test creating an Adam optimizer."""
opt = optimizers.Adam(learning_rate=0.01)
jaxopt = opt._create_jax_optimizer()
assert isinstance(jaxopt, optax.GradientTransformation)
@pytest.mark.tensorflow
def test_adamw_tf(self):
"""Test creating an AdamW optimizer."""
opt = optimizers.AdamW(learning_rate=0.01)
global_step = tf.Variable(0)
tfopt = opt._create_tf_optimizer(global_step)
assert isinstance(tfopt, tfa.optimizers.AdamW)
@pytest.mark.torch
def test_adamw_pytorch(self):
"""Test creating an AdamW optimizer."""
opt = optimizers.AdamW(learning_rate=0.01)
params = [torch.nn.Parameter(torch.Tensor([1.0]))]
torchopt = opt._create_pytorch_optimizer(params)
assert isinstance(torchopt, torch.optim.AdamW)
@pytest.mark.jax
def test_adamw_jax(self):
"""Test creating an AdamW optimizer."""
opt = optimizers.AdamW(learning_rate=0.01)
jaxopt = opt._create_jax_optimizer()
assert isinstance(jaxopt, optax.GradientTransformation)
@pytest.mark.tensorflow
def test_sparseadam_tf(self):
"""Test creating a SparseAdam optimizer."""
opt = optimizers.SparseAdam(learning_rate=0.01)
global_step = tf.Variable(0)
tfopt = opt._create_tf_optimizer(global_step)
assert isinstance(tfopt, tfa.optimizers.LazyAdam)
@pytest.mark.torch
def test_sparseadam_pytorch(self):
"""Test creating a SparseAdam optimizer."""
opt = optimizers.SparseAdam(learning_rate=0.01)
params = [torch.nn.Parameter(torch.Tensor([1.0]))]
torchopt = opt._create_pytorch_optimizer(params)
assert isinstance(torchopt, torch.optim.SparseAdam)
@pytest.mark.tensorflow
def test_adagrad_tf(self):
"""Test creating an AdaGrad optimizer."""
opt = optimizers.AdaGrad(learning_rate=0.01)
global_step = tf.Variable(0)
tfopt = opt._create_tf_optimizer(global_step)
assert isinstance(tfopt, tf.keras.optimizers.Adagrad)
@pytest.mark.torch
def test_adagrad_pytorch(self):
"""Test creating an AdaGrad optimizer."""
opt = optimizers.AdaGrad(learning_rate=0.01)
params = [torch.nn.Parameter(torch.Tensor([1.0]))]
torchopt = opt._create_pytorch_optimizer(params)
assert isinstance(torchopt, torch.optim.Adagrad)
@pytest.mark.jax
def test_adagrad_jax(self):
"""Test creating an AdaGrad optimizer."""
opt = optimizers.AdaGrad(learning_rate=0.01)
jaxopt = opt._create_jax_optimizer()
assert isinstance(jaxopt, optax.GradientTransformation)
@pytest.mark.tensorflow
def test_rmsprop_tf(self):
"""Test creating an RMSProp Optimizer."""
opt = optimizers.RMSProp(learning_rate=0.01)
global_step = tf.Variable(0)
tfopt = opt._create_tf_optimizer(global_step)
assert isinstance(tfopt, tf.keras.optimizers.RMSprop)
@pytest.mark.torch
def test_rmsprop_pytorch(self):
"""Test creating an RMSProp Optimizer."""
opt = optimizers.RMSProp(learning_rate=0.01)
params = [torch.nn.Parameter(torch.Tensor([1.0]))]
torchopt = opt._create_pytorch_optimizer(params)
assert isinstance(torchopt, torch.optim.RMSprop)
@pytest.mark.jax
def test_rmsprop_jax(self):
"""Test creating an RMSProp Optimizer."""
opt = optimizers.RMSProp(learning_rate=0.01)
jaxopt = opt._create_jax_optimizer()
assert isinstance(jaxopt, optax.GradientTransformation)
@pytest.mark.tensorflow
def test_gradient_descent_tf(self):
"""Test creating a Gradient Descent optimizer."""
opt = optimizers.GradientDescent(learning_rate=0.01)
global_step = tf.Variable(0)
tfopt = opt._create_tf_optimizer(global_step)
assert isinstance(tfopt, tf.keras.optimizers.SGD)
@pytest.mark.torch
def test_gradient_descent_pytorch(self):
"""Test creating a Gradient Descent optimizer."""
opt = optimizers.GradientDescent(learning_rate=0.01)
params = [torch.nn.Parameter(torch.Tensor([1.0]))]
torchopt = opt._create_pytorch_optimizer(params)
assert isinstance(torchopt, torch.optim.SGD)
@pytest.mark.jax
def test_gradient_descent_jax(self):
"""Test creating an Gradient Descent Optimizer."""
opt = optimizers.GradientDescent(learning_rate=0.01)
jaxopt = opt._create_jax_optimizer()
assert isinstance(jaxopt, optax.GradientTransformation)
@pytest.mark.tensorflow
def test_exponential_decay_tf(self):
"""Test creating an optimizer with an exponentially decaying learning rate."""
rate = optimizers.ExponentialDecay(initial_rate=0.001,
decay_rate=0.99,
decay_steps=10000)
opt = optimizers.Adam(learning_rate=rate)
global_step = tf.Variable(0)
_ = opt._create_tf_optimizer(global_step)
@pytest.mark.torch
def test_exponential_decay_pytorch(self):
"""Test creating an optimizer with an exponentially decaying learning rate."""
rate = optimizers.ExponentialDecay(initial_rate=0.001,
decay_rate=0.99,
decay_steps=10000)
opt = optimizers.Adam(learning_rate=rate)
params = [torch.nn.Parameter(torch.Tensor([1.0]))]
torchopt = opt._create_pytorch_optimizer(params)
_ = rate._create_pytorch_schedule(torchopt)
@pytest.mark.jax
def test_exponential_decay_jax(self):
"""Test creating an optimizer with an exponentially decaying learning rate."""
rate = optimizers.ExponentialDecay(initial_rate=0.001,
decay_rate=0.99,
decay_steps=10000)
opt = optimizers.Adam(learning_rate=rate)
jaxopt = opt._create_jax_optimizer()
assert isinstance(jaxopt, optax.GradientTransformation)
@pytest.mark.tensorflow
def test_polynomial_decay_tf(self):
"""Test creating an optimizer with a polynomially decaying learning rate."""
rate = optimizers.PolynomialDecay(initial_rate=0.001,
final_rate=0.0001,
decay_steps=10000)
opt = optimizers.Adam(learning_rate=rate)
global_step = tf.Variable(0)
_ = opt._create_tf_optimizer(global_step)
@pytest.mark.torch
def test_polynomial_decay_pytorch(self):
"""Test creating an optimizer with a polynomially decaying learning rate."""
rate = optimizers.PolynomialDecay(initial_rate=0.001,
final_rate=0.0001,
decay_steps=10000)
opt = optimizers.Adam(learning_rate=rate)
params = [torch.nn.Parameter(torch.Tensor([1.0]))]
torchopt = opt._create_pytorch_optimizer(params)
_ = rate._create_pytorch_schedule(torchopt)
@pytest.mark.jax
def test_polynomial_decay_jax(self):
"""Test creating an optimizer with a polynomially decaying learning rate."""
rate = optimizers.PolynomialDecay(initial_rate=0.001,
final_rate=0.0001,
decay_steps=10000)
opt = optimizers.Adam(learning_rate=rate)
jaxopt = opt._create_jax_optimizer()
assert isinstance(jaxopt, optax.GradientTransformation)
@pytest.mark.tensorflow
def test_linearCosine_decay_tf(self):
"""test creating an optimizer with a linear cosine decay to the learning rate"""
rate = optimizers.LinearCosineDecay(initial_rate=0.1, decay_steps=10000)
opt = optimizers.Adam(learning_rate=rate)
global_step = tf.Variable(0)
_ = opt._create_tf_optimizer(global_step)
@pytest.mark.torch
def test_linearCosine_decay_pytorch(self):
"""test creating an optimizer with a linear cosine decay to the learning rate"""
rate = optimizers.LinearCosineDecay(initial_rate=0.1, decay_steps=10000)
opt = optimizers.Adam(learning_rate=rate)
params = [torch.nn.Parameter(torch.Tensor([1.0]))]
torchopt = opt._create_pytorch_optimizer(params)
_ = rate._create_pytorch_schedule(torchopt)
@pytest.mark.jax
def test_linearCosine_decay_jax(self):
"""test creating an optimizer with a linear cosine decay to the learning rate"""
rate = optimizers.LinearCosineDecay(initial_rate=0.1, decay_steps=10000)
opt = optimizers.Adam(learning_rate=rate)
jaxopt = opt._create_jax_optimizer()
assert isinstance(jaxopt, optax.GradientTransformation)
@pytest.mark.jax
def test_PieceWise_decay_jax(self):
"""test creating an optimizer with a PeiceWise constant decay to the learning rate"""
rate = optimizers.PiecewiseConstantSchedule(initial_rate=0.1,
boundaries_and_scales={
5000: 0.1,
10000: 0.1,
15000: 0.1
})
opt = optimizers.Adam(learning_rate=rate)
jaxopt = opt._create_jax_optimizer()
assert isinstance(jaxopt, optax.GradientTransformation)
@pytest.mark.torch
def test_KFAC(self):
"""test creating a KFAC optimizer"""
import numpy as np
np.random.seed(123)
# Conv2d and Linear layers test(CNN classification)
n_samples = 10
n_features = 10
n_tasks = 1
X = np.random.rand(n_samples, 1, n_features, n_features)
y = np.random.randint(2, size=(n_samples, n_tasks)).astype(np.float32)
dataset = dc.data.NumpyDataset(X, y)
metric = dc.metrics.Metric(dc.metrics.roc_auc_score)
model = torch.nn.Sequential(
torch.nn.Conv2d(1, 32, kernel_size=3, padding=1),
torch.nn.Conv2d(32, 64, kernel_size=3,
padding=1), torch.nn.Flatten(),
torch.nn.Linear(64 * n_features * n_features, 20), torch.nn.ReLU(),
torch.nn.Linear(20, n_tasks))
model = dc.models.TorchModel(model,
dc.models.losses.L2Loss(),
optimizers=optimizers.KFAC(
model=model,
learning_rate=0.003,
Tinv=10))
# Fit trained model
model.fit(
dataset,
nb_epoch=100,
)
# Eval model on train
scores = model.evaluate(dataset, [metric])
assert scores[metric.name] > 0.9
<file_sep>import numpy as np
from flask import request, abort, Flask
import flask
webapp = Flask(__name__)
@webapp.route('/potential', methods=["POST"])
def potential():
content = request.get_json(force=True)
if not content or not 'X' in content:
abort(400)
X = np.array(content['X'])
x0 = X[:, 1:]
a0 = X[:, :1]
result = webapp.model.pred_one(x0, a0)
return flask.jsonify({'y': result.tolist()[0]}), 200
@webapp.route('/gradient', methods=["POST"])
def gradient():
content = request.get_json(force=True)
if not content or not 'X' in content:
abort(400)
X = np.array(content['X'])
num_atoms = X.shape[0]
x0 = X[:, 1:]
a0 = X[:, :1]
res = webapp.model.grad_one(x0, a0)
res = res.reshape((num_atoms, 3))
return flask.jsonify({'grad': res.tolist()}), 200
@webapp.route('/minimize', methods=["POST"])
def minimize():
content = request.get_json(force=True)
if not content or not 'X' in content:
abort(400)
X = np.array(content['X'])
constraints = None
if 'constraints' in content:
constraints = content['constraints']
print('setting constraints')
num_atoms = X.shape[0]
x0 = X[:, 1:]
a0 = X[:, :1]
res = webapp.model.minimize_structure(x0, a0, constraints)
res = res.reshape((num_atoms, 3))
y = webapp.model.pred_one(res, a0).tolist()[0]
return flask.jsonify({'X': res.tolist(), 'y': y}), 200
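# Example client usage (illustrative sketch only, not part of the API; it assumes
# the server is running on http://localhost:5000 and that `webapp.model` has been
# attached elsewhere before serving):
#
# import numpy as np
# import requests
# X = np.random.rand(5, 4)  # assumed layout: column 0 = atomic number, columns 1-3 = xyz coordinates
# resp = requests.post("http://localhost:5000/potential", json={"X": X.tolist()})
# print(resp.json()["y"])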
<file_sep>def test_conformer_featurizer():
from deepchem.feat.molecule_featurizers.conformer_featurizer import RDKitConformerFeaturizer
from deepchem.feat.graph_data import BatchGraphData
import numpy as np
smiles = ["C1=CC=NC=C1", "CC(=O)C", "C"]
featurizer = RDKitConformerFeaturizer(num_conformers=5, rmsd_cutoff=1)
features_list = featurizer.featurize(smiles)
features = BatchGraphData(np.concatenate(features_list).ravel())
assert features.num_edge_features == 3 # 3 bond features
assert features.num_node_features == 9 # 9 atom features
assert features.num_nodes == len(features.graph_index)
assert features.num_edges == 240 # 240 edges
<file_sep>"""
Copied from https://github.com/tencent-ailab/grover/blob/0421d97a5e1bd1b59d1923e3afd556afbe4ff782/grover/model/layers.py
"""
from typing import List, Dict
try:
import torch
import torch.nn as nn
except ModuleNotFoundError:
raise ImportError('These classes require PyTorch to be installed.')
import numpy as np
from scipy import stats
from deepchem.models.torch_models.readout import GroverReadout
from deepchem.models.torch_models.layers import SublayerConnection, PositionwiseFeedForward
class GroverEmbedding(nn.Module):
"""GroverEmbedding layer.
This layer is a simple wrapper over the GroverTransEncoder layer and is used to retrieve embeddings from it.
Parameters
----------
edge_fdim: int
the dimension of additional feature for edge/bond.
node_fdim: int
the dimension of additional feature for node/atom.
depth: int
Dynamic message passing depth for use in MPNEncoder
undirected: bool
The message passing is undirected or not
num_mt_block: int
the number of message passing blocks.
num_heads: int
the number of attention heads.
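Example
-------
A minimal construction sketch (the feature dimensions below are placeholders,
not the values used by the Grover pretrained models):

>>> from deepchem.models.torch_models.grover_layers import GroverEmbedding
>>> layer = GroverEmbedding(node_fdim=151, edge_fdim=165)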
"""
def __init__(self,
node_fdim,
edge_fdim,
hidden_size=128,
depth=1,
undirected=False,
dropout=0.2,
activation='relu',
num_mt_block=1,
num_heads=4,
bias=False,
res_connection=False):
super(GroverEmbedding, self).__init__()
self.encoders = GroverTransEncoder(hidden_size=hidden_size,
edge_fdim=edge_fdim,
node_fdim=node_fdim,
depth=depth,
undirected=undirected,
dropout=dropout,
activation=activation,
num_mt_block=num_mt_block,
num_heads=num_heads,
bias=bias,
res_connection=res_connection)
def forward(self, graph_batch: List[torch.Tensor]):
"""Forward function
Parameters
----------
graph_batch: List[torch.Tensor]
A list containing f_atoms, f_bonds, a2b, b2a, b2revb, a_scope, b_scope, a2a
Returns
-------
embedding: Dict[str, torch.Tensor]
Returns a dictionary of embeddings. The embeddings are:
- atom_from_atom: node messages aggregated from node hidden states
- bond_from_atom: bond messages aggregated from bond hidden states
- atom_from_bond: node message aggregated from bond hidden states
- bond_from_bond: bond messages aggregated from bond hidden states.
"""
# TODO Explain in detail what the four outcomes are
output = self.encoders(graph_batch)
return {
"atom_from_atom": output[0][0],
"bond_from_atom": output[0][1],
"atom_from_bond": output[1][0],
"bond_from_bond": output[1][1]
}
class GroverBondVocabPredictor(nn.Module):
"""Layer for learning contextual information for bonds.
The layer is used in Grover architecture to learn contextual information of a bond by predicting
the context of a bond from the bond embedding in a multi-class classification setting.
The contextual information of a bond is encoded as a string (ex: '(DOUBLE-STEREONONE-NONE)_C-(SINGLE-STEREONONE-NONE)2').
Example
-------
>>> from deepchem.models.torch_models.grover_layers import GroverBondVocabPredictor
>>> num_bonds = 20
>>> in_features, vocab_size = 16, 10
>>> layer = GroverBondVocabPredictor(vocab_size, in_features)
>>> embedding = torch.randn(num_bonds * 2, in_features)
>>> result = layer(embedding)
>>> result.shape
torch.Size([20, 10])
Reference
---------
.. <NAME>, et al. "Self-supervised graph transformer on large-scale molecular data." Advances in Neural Information Processing Systems 33 (2020): 12559-12571.
"""
def __init__(self, vocab_size: int, in_features: int = 128):
"""Initializes GroverBondVocabPredictor
Parameters
----------
vocab_size: int
Size of vocabulary, used for number of classes in prediction.
in_features: int, default: 128
Input feature size of bond embeddings.
"""
super(GroverBondVocabPredictor, self).__init__()
self.linear = nn.Linear(in_features, vocab_size)
self.linear_rev = nn.Linear(in_features, vocab_size)
self.logsoftmax = nn.LogSoftmax(dim=1)
def forward(self, embeddings):
"""
Parameters
----------
embeddings: torch.Tensor
bond embeddings of shape (num_bond, in_features)
Returns
-------
logits: torch.Tensor
the prediction for each bond, (num_bond, vocab_size)
"""
nm_bonds = embeddings.shape[0]
# The bond and rev bond have even and odd ids respectively.
ids1 = list(range(0, nm_bonds, 2))
ids2 = list(range(1, nm_bonds, 2))
logits = self.linear(embeddings[ids1]) + self.linear_rev(
embeddings[ids2])
return self.logsoftmax(logits)
class GroverAtomVocabPredictor(nn.Module):
"""Grover Atom Vocabulary Prediction Module.
The GroverAtomVocabPredictor module is used for predicting atom-vocabulary
for the self-supervision task in Grover architecture. In the self-supervision tasks,
one task is to learn contextual-information of nodes (atoms).
Contextual information is encoded as a string, like `C_N-DOUBLE1_O-SINGLE1`.
The module accepts an atom encoding and learns to predict the contextual information
of the atom as a multi-class classification problem.
Example
-------
>>> from deepchem.models.torch_models.grover_layers import GroverAtomVocabPredictor
>>> num_atoms, in_features, vocab_size = 30, 16, 10
>>> layer = GroverAtomVocabPredictor(vocab_size, in_features)
>>> embedding = torch.randn(num_atoms, in_features)
>>> result = layer(embedding)
>>> result.shape
torch.Size([30, 10])
Reference
---------
.. <NAME>, et al. "Self-supervised graph transformer on large-scale molecular data." Advances in Neural Information Processing Systems 33 (2020): 12559-12571.
"""
def __init__(self, vocab_size: int, in_features: int = 128):
"""Initializing Grover Atom Vocabulary Predictor
Parameters
----------
vocab_size: int
size of vocabulary (vocabulary here is the total number of different possible contexts)
in_features: int
feature size of atom embeddings.
"""
super(GroverAtomVocabPredictor, self).__init__()
self.linear = nn.Linear(in_features, vocab_size)
self.logsoftmax = nn.LogSoftmax(dim=1)
def forward(self, embeddings):
"""
Parameters
----------
embeddings: torch.Tensor
the atom embeddings of shape (num_atoms, in_features)
Returns
-------
logits: torch.Tensor
the prediction for each atom of shape (num_atoms, vocab_size)
"""
return self.logsoftmax(self.linear(embeddings))
class GroverFunctionalGroupPredictor(nn.Module):
"""The functional group prediction task for self-supervised learning.
Molecules have functional groups in them. This module is used for predicting
the functional group and the problem is formulated as a multi-label classification problem.
Parameters
----------
functional_group_size: int,
size of functional group
in_features: int,
hidden_layer size, default 128
Example
-------
>>> from deepchem.models.torch_models.grover_layers import GroverFunctionalGroupPredictor
>>> in_features, functional_group_size = 8, 20
>>> num_atoms, num_bonds = 10, 20
>>> predictor = GroverFunctionalGroupPredictor(functional_group_size=20, in_features=8)
>>> atom_scope, bond_scope = [(0, 3), (3, 3), (6, 4)], [(0, 5), (5, 4), (9, 11)]
>>> embeddings = {}
>>> embeddings['bond_from_atom'] = torch.randn(num_bonds, in_features)
>>> embeddings['bond_from_bond'] = torch.randn(num_bonds, in_features)
>>> embeddings['atom_from_atom'] = torch.randn(num_atoms, in_features)
>>> embeddings['atom_from_bond'] = torch.randn(num_atoms, in_features)
>>> result = predictor(embeddings, atom_scope, bond_scope)
Reference
---------
.. <NAME>, et al. "Self-supervised graph transformer on large-scale molecular data." Advances in Neural Information Processing Systems 33 (2020): 12559-12571.
"""
def __init__(self, functional_group_size: int, in_features=128):
super(GroverFunctionalGroupPredictor, self).__init__()
self.readout = GroverReadout(rtype="mean", in_features=in_features)
self.linear_atom_from_atom = nn.Linear(in_features,
functional_group_size)
self.linear_atom_from_bond = nn.Linear(in_features,
functional_group_size)
self.linear_bond_from_atom = nn.Linear(in_features,
functional_group_size)
self.linear_bond_from_bond = nn.Linear(in_features,
functional_group_size)
def forward(self, embeddings: Dict, atom_scope: List, bond_scope: List):
"""
The forward function for the GroverFunctionalGroupPredictor (semantic motif prediction) layer.
It takes atom/bond embeddings produced from node and bond hidden states from GroverEmbedding module
and the atom and bond scopes, and produces prediction logits for each embedding type.
The scopes are used to differentiate atoms/bonds belonging to a molecule in a batched molecular graph.
Parameters
----------
embeddings: Dict
The input embeddings organized as a dictionary. The input embeddings are the output of the GroverEmbedding layer.
atom_scope: List
The scope for atoms.
bond_scope: List
The scope for bonds
Returns
-------
preds: Dict
A dictionary containing the predicted functional group logits from four different types of input embeddings. The keys and their corresponding predictions
are described below.
- atom_from_atom - prediction logits from atom embeddings generated via node hidden states
- atom_from_bond - prediction logits from atom embeddings generated via bond hidden states
- bond_from_atom - prediction logits from bond embeddings generated via node hidden states
- bond_from_bond - prediction logits from bond embeddings generated via bond hidden states
"""
preds_bond_from_atom = self.linear_bond_from_atom(
self.readout(embeddings["bond_from_atom"], bond_scope))
preds_bond_from_bond = self.linear_bond_from_bond(
self.readout(embeddings["bond_from_bond"], bond_scope))
preds_atom_from_atom = self.linear_atom_from_atom(
self.readout(embeddings["atom_from_atom"], atom_scope))
preds_atom_from_bond = self.linear_atom_from_bond(
self.readout(embeddings["atom_from_bond"], atom_scope))
return {
"atom_from_atom": preds_atom_from_atom,
"atom_from_bond": preds_atom_from_bond,
"bond_from_atom": preds_bond_from_atom,
"bond_from_bond": preds_bond_from_bond
}
def _index_select_nd(source: torch.Tensor, index: torch.Tensor) -> torch.Tensor:
"""
Selects the message features from source corresponding to the atom or bond indices in index.
Parameters
----------
source: torch.Tensor
A tensor of shape (num_bonds, hidden_size) containing message features.
index: torch.Tensor
A tensor of shape (num_atoms/num_bonds, max_num_bonds) containing the atom or bond indices to select from source.
Returns
----------
message_features: torch.Tensor
A tensor of shape (num_atoms/num_bonds, max_num_bonds, hidden_size) containing the message features corresponding to the atoms/bonds specified in index.
"""
index_size = index.size() # (num_atoms/num_bonds, max_num_bonds)
suffix_dim = source.size()[1:] # (hidden_size,)
final_size = index_size + suffix_dim # (num_atoms/num_bonds, max_num_bonds, hidden_size)
target = source.index_select(dim=0, index=index.view(
-1)) # (num_atoms/num_bonds * max_num_bonds, hidden_size)
target = target.view(
final_size) # (num_atoms/num_bonds, max_num_bonds, hidden_size)
return target
def _select_neighbor_and_aggregate(feature, index):
"""The basic operation in message passing.
Caution: `_index_select_nd` can cause reproducibility issues when training on CUDA.
The operation is like map and reduce. `_index_select_nd` maps message features to bonds/atoms and
this method aggregates the results using `sum` as the pooling operation. A configuration to use
`mean` as the pooling operation could be added here, but that is left to a future implementation.
References
----------
See: https://pytorch.org/docs/stable/notes/randomness.html
Parameters
----------
feature: torch.Tensor
The candidate features to aggregate, of shape (n_nodes, hidden).
index: torch.Tensor
The selected neighbor indices.
Returns
-------
torch.Tensor
The aggregated neighbor features of shape (n_nodes, hidden).
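Example
-------
An illustrative sketch on toy tensors (shapes chosen arbitrarily):

>>> import torch
>>> feature = torch.arange(8.0).reshape(4, 2)  # 4 messages with hidden size 2
>>> index = torch.tensor([[1, 2], [0, 3]])     # 2 nodes, 2 neighbors each
>>> _select_neighbor_and_aggregate(feature, index).shape
torch.Size([2, 2])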
"""
neighbor = _index_select_nd(feature, index)
return neighbor.sum(dim=1)
class GroverMPNEncoder(nn.Module):
"""Performs Message Passing to generate encodings for the molecule.
Parameters
----------
atom_messages: bool
True if encoding atom-messages else False.
init_message_dim: int
Dimension of embedding message.
attach_feats: bool
Set to `True` if additional features are passed along with node/edge embeddings.
attached_feat_fdim: int
Dimension of additional features when `attach_feats` is `True`
undirected: bool
If set to `True`, the graph is considered as an undirected graph.
depth: int
number of hops in a message passing iteration
dynamic_depth: str, default: none
If set to `uniform`, the dynamic depth is randomly sampled from a uniform distribution; if set to `truncnorm`, it is sampled from a truncated normal distribution.
input_layer: str
If set to `fc`, adds an initial feed-forward layer. If set to `none`, does not add an initial feed forward layer.
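Example
-------
A minimal construction sketch (the dimensions below are illustrative only;
`input_layer='none'` requires `init_message_dim == hidden_size`):

>>> from deepchem.models.torch_models.grover_layers import GroverMPNEncoder
>>> layer = GroverMPNEncoder(atom_messages=False, init_message_dim=16,
...                          hidden_size=16, depth=3, undirected=False,
...                          attach_feats=False, input_layer='none')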
"""
# FIXME This layer is similar to DMPNNEncoderLayer and they
# must be unified.
def __init__(self,
atom_messages: bool,
init_message_dim: int,
hidden_size: int,
depth: int,
undirected: bool,
attach_feats: bool,
attached_feat_fdim: int = 0,
bias: bool = True,
dropout: float = 0.2,
activation: str = 'relu',
input_layer: str = 'fc',
dynamic_depth: str = 'none'):
super(GroverMPNEncoder, self).__init__()
if input_layer == 'none':
assert init_message_dim == hidden_size
assert dynamic_depth in [
'uniform', 'truncnorm', 'none'
], 'dynamic depth should be one of uniform, truncnorm, none'
self.init_message_dim = init_message_dim
self.depth = depth
self.input_layer = input_layer
self.layers_per_message = 1
self.undirected = undirected
self.atom_messages = atom_messages
self.attached_feat = attach_feats
assert dynamic_depth in [
'none', 'truncnorm', 'uniform'
], 'If dynamic depth, it should be truncnorm or uniform'
self.dynamic_depth = dynamic_depth
self.dropout_layer = nn.Dropout(p=dropout)
if activation == 'relu':
self.act_func = nn.ReLU()
else:
raise ValueError('Only ReLU activation function is supported')
if self.input_layer == "fc":
self.W_i = nn.Linear(self.init_message_dim, hidden_size, bias=bias)
w_h_input_size = hidden_size
elif input_layer == 'none':
w_h_input_size = self.init_message_dim
if self.attached_feat:
w_h_input_size += attached_feat_fdim
# Shared weight matrix across depths (default)
self.W_h = nn.Linear(w_h_input_size, hidden_size, bias=bias)
def forward(self,
init_messages,
init_attached_features,
a2nei,
a2attached,
b2a=None,
b2revb=None,
adjs=None) -> torch.FloatTensor:
if self.input_layer == 'fc':
message = self.act_func(self.W_i(init_messages))
elif self.input_layer == 'none':
message = init_messages
attached_feats = init_attached_features
if self.training and self.dynamic_depth != 'none':
if self.dynamic_depth == 'uniform':
ndepth = abs(
int(np.random.uniform(self.depth - 3, self.depth + 3)))
elif self.dynamic_depth == 'truncnorm':
mu, sigma = self.depth, 1
lower, upper = mu - 3 * sigma, mu + 3 * sigma
X = stats.truncnorm((lower - mu) / sigma, (upper - mu) / sigma,
loc=mu,
scale=sigma)
ndepth = int(X.rvs(1))
else:
ndepth = self.depth
for _ in range(ndepth - 1):
if self.undirected:
message = (message + message[b2revb]) / 2
nei_message = _select_neighbor_and_aggregate(message, a2nei)
a_message = nei_message
if self.attached_feat:
attached_nei_feats = _select_neighbor_and_aggregate(
attached_feats, a2attached)
a_message = torch.cat((nei_message, attached_nei_feats), dim=1)
if not self.atom_messages:
rev_message = message[b2revb]
if self.attached_feat:
atom_rev_message = attached_feats[b2a[b2revb]]
rev_message = torch.cat((rev_message, atom_rev_message),
dim=1)
# Exclude the reverse bond itself (w): \sum_{k \in N(u) \setminus w}
message = a_message[b2a] - rev_message # num_bonds x hidden
else:
message = a_message
# FIXME When input_layer is none, for the first iteration of message passing, we should ideally
# be using different weight matrix since message will be of shape num_bonds x f_bonds_dim
# in the first iteration and in the subsequent iterations, it will be num_bonds x hidden_size
message = self.W_h(message)
message = self.dropout_layer(self.act_func(message))
return message
class GroverAttentionHead(nn.Module):
"""Generates attention head using GroverMPNEncoder for generating query, key and value
Parameters
----------
hidden_size: int
Dimension of hidden layer
undirected: bool
If set to `True`, the graph is considered as an undirected graph.
depth: int
number of hops in a message passing iteration
atom_messages: bool
True if encoding atom-messages else False.
"""
def __init__(self,
hidden_size: int = 128,
bias: bool = True,
depth: int = 1,
dropout: float = 0.0,
undirected: bool = False,
atom_messages: bool = False):
super(GroverAttentionHead, self).__init__()
self.atom_messages = atom_messages
# FIXME We assume that we are using a hidden layer to transform the initial atom message
# and bond messages to hidden dimension size.
self.mpn_q = GroverMPNEncoder(atom_messages=atom_messages,
init_message_dim=hidden_size,
attached_feat_fdim=hidden_size,
hidden_size=hidden_size,
bias=bias,
depth=depth,
dropout=dropout,
undirected=undirected,
attach_feats=False,
input_layer='none',
dynamic_depth='truncnorm')
self.mpn_k = GroverMPNEncoder(atom_messages=atom_messages,
init_message_dim=hidden_size,
attached_feat_fdim=hidden_size,
hidden_size=hidden_size,
bias=bias,
depth=depth,
dropout=dropout,
undirected=undirected,
attach_feats=False,
input_layer='none',
dynamic_depth='truncnorm')
self.mpn_v = GroverMPNEncoder(atom_messages=atom_messages,
init_message_dim=hidden_size,
attached_feat_fdim=hidden_size,
hidden_size=hidden_size,
bias=bias,
depth=depth,
dropout=dropout,
undirected=undirected,
attach_feats=False,
input_layer='none',
dynamic_depth='truncnorm')
def forward(self, f_atoms, f_bonds, a2b, a2a, b2a, b2revb):
if self.atom_messages:
init_messages = f_atoms
init_attached_features = f_bonds
a2nei = a2a
a2attached = a2b
b2a = b2a
b2revb = b2revb
else:
# self.atom_messages is False
init_messages = f_bonds
init_attached_features = f_atoms
a2nei = a2b
a2attached = a2a
b2a = b2a
b2revb = b2revb
q = self.mpn_q(init_messages=init_messages,
init_attached_features=init_attached_features,
a2nei=a2nei,
a2attached=a2attached,
b2a=b2a,
b2revb=b2revb)
k = self.mpn_k(init_messages=init_messages,
init_attached_features=init_attached_features,
a2nei=a2nei,
a2attached=a2attached,
b2a=b2a,
b2revb=b2revb)
v = self.mpn_v(init_messages=init_messages,
init_attached_features=init_attached_features,
a2nei=a2nei,
a2attached=a2attached,
b2a=b2a,
b2revb=b2revb)
return q, k, v
class GroverMTBlock(nn.Module):
"""Message passing combined with transformer architecture
The layer combines message passing performed using GroverMPNEncoder and uses it
to generate query, key and value for multi-headed Attention block.
Parameters
----------
atom_messages: bool
True if encoding atom-messages else False.
input_dim: int
Dimension of input features
num_heads: int
Number of attention heads
depth: int
Number of hops in a message passing iteration
undirected: bool
If set to `True`, the graph is considered as an undirected graph.
"""
def __init__(self,
atom_messages: bool,
input_dim: int,
num_heads: int,
depth: int,
undirected: bool = False,
hidden_size: int = 128,
dropout: float = 0.0,
bias: bool = True,
res_connection: bool = True,
activation: str = 'relu'):
super(GroverMTBlock, self).__init__()
self.hidden_size = hidden_size
self.atom_messages = atom_messages
self.res_connection = res_connection
self.num_heads = num_heads
if activation == 'relu':
self.act_func = nn.ReLU()
else:
raise ValueError('Only relu activation is supported')
self.dropout_layer = nn.Dropout(p=dropout)
# Note: Elementwise affine has to be consistent with the pre-training phase
self.layernorm = nn.LayerNorm(hidden_size, elementwise_affine=True)
self.attn = torch.nn.MultiheadAttention(embed_dim=hidden_size,
num_heads=num_heads,
dropout=dropout,
bias=bias,
batch_first=True)
self.W_i = nn.Linear(input_dim, hidden_size, bias)
self.W_o = nn.Linear(hidden_size * num_heads, hidden_size, bias)
self.sublayer = SublayerConnection(hidden_size, dropout)
self.attention_heads = nn.ModuleList()
for i in range(num_heads):
self.attention_heads.append(
GroverAttentionHead(hidden_size=hidden_size,
bias=bias,
dropout=dropout,
depth=depth,
atom_messages=atom_messages,
undirected=undirected))
def forward(self, batch):
f_atoms, f_bonds, a2b, b2a, b2revb, a_scope, b_scope, a2a = batch
if self.atom_messages:
if f_atoms.shape[1] != self.hidden_size:
f_atoms = self.W_i(f_atoms)
f_atoms = self.dropout_layer(
self.layernorm(self.act_func(f_atoms)))
else:
if f_bonds.shape[1] != self.hidden_size:
f_bonds = self.W_i(f_bonds)
f_bonds = self.dropout_layer(
self.layernorm(self.act_func(f_bonds)))
queries, keys, values = [], [], []
for head in self.attention_heads:
q, k, v = head(f_atoms,
f_bonds,
a2b=a2b,
b2a=b2a,
b2revb=b2revb,
a2a=a2a)
queries.append(q.unsqueeze(1))
keys.append(k.unsqueeze(1))
values.append(v.unsqueeze(1))
queries = torch.cat(queries, dim=1)
keys = torch.cat(keys, dim=1)
values = torch.cat(values, dim=1)
# multi-headed attention
x_out, _ = self.attn(queries, keys, values)
x_out = x_out.reshape(x_out.shape[0], -1)
x_out = self.W_o(x_out)
# support no residual connection in MTBlock.
if self.res_connection:
if self.atom_messages:
f_atoms = self.sublayer(f_atoms, x_out)
else:
f_bonds = self.sublayer(f_bonds, x_out)
batch = f_atoms, f_bonds, a2b, b2a, b2revb, a_scope, b_scope, a2a
return batch
class GroverTransEncoder(nn.Module):
"""GroverTransEncoder for encoding a molecular graph
The GroverTransEncoder layer is used for encoding a molecular graph.
The layer returns 4 outputs. They are atom messages aggregated from atom hidden states,
atom messages aggregated from bond hidden states, bond messages aggregated from atom hidden
states, bond messages aggregated from bond hidden states.
Parameters
----------
hidden_size: int
the hidden size of the model.
edge_fdim: int
the dimension of additional feature for edge/bond.
node_fdim: int
the dimension of additional feature for node/atom.
depth: int
Dynamic message passing depth for use in MPNEncoder
undirected: bool
The message passing is undirected or not
dropout: float
the dropout ratio
activation: str
the activation function
num_mt_block: int
the number of MT blocks.
num_heads: int
the number of attention heads.
bias: bool
enable bias term in all linear layers.
res_connection: bool
enables the skip-connection in MTBlock.
"""
def __init__(self,
node_fdim: int,
edge_fdim: int,
depth: int = 3,
undirected: bool = False,
num_mt_block: int = 2,
num_heads: int = 2,
hidden_size: int = 64,
dropout: float = 0.2,
res_connection: bool = True,
bias: bool = True,
activation: str = 'relu'):
super(GroverTransEncoder, self).__init__()
self.hidden_size = hidden_size
self.edge_fdim = edge_fdim
self.node_fdim = node_fdim
self.edge_blocks = nn.ModuleList()
self.node_blocks = nn.ModuleList()
for i in range(num_mt_block):
if i == 0:
node_input_fdim, edge_input_fdim = node_fdim, edge_fdim
else:
node_input_fdim, edge_input_fdim = hidden_size, hidden_size
self.edge_blocks.append(
GroverMTBlock(num_heads=num_heads,
input_dim=edge_input_fdim,
hidden_size=hidden_size,
activation=activation,
dropout=dropout,
bias=bias,
atom_messages=False,
res_connection=res_connection,
depth=depth,
undirected=undirected))
self.node_blocks.append(
GroverMTBlock(num_heads=num_heads,
input_dim=node_input_fdim,
hidden_size=hidden_size,
activation=activation,
dropout=dropout,
bias=bias,
atom_messages=True,
res_connection=res_connection,
depth=depth,
undirected=undirected))
self.ffn_atom_from_atom = PositionwiseFeedForward(
d_input=self.hidden_size + node_fdim,
d_hidden=self.hidden_size * 4,
d_output=self.hidden_size,
n_layers=2,
activation=activation,
dropout_p=dropout)
self.ffn_atom_from_bond = PositionwiseFeedForward(
d_input=self.hidden_size + node_fdim,
d_hidden=self.hidden_size * 4,
d_output=self.hidden_size,
n_layers=2,
activation=activation,
dropout_p=dropout)
self.ffn_bond_from_atom = PositionwiseFeedForward(
d_input=self.hidden_size + edge_fdim,
d_hidden=self.hidden_size * 4,
d_output=self.hidden_size,
n_layers=2,
activation=activation,
dropout_p=dropout)
self.ffn_bond_from_bond = PositionwiseFeedForward(
d_input=self.hidden_size + edge_fdim,
d_hidden=self.hidden_size * 4,
d_output=self.hidden_size,
n_layers=2,
activation=activation,
dropout_p=dropout)
self.atom_from_atom_sublayer = SublayerConnection(size=self.hidden_size,
dropout_p=dropout)
self.atom_from_bond_sublayer = SublayerConnection(size=self.hidden_size,
dropout_p=dropout)
self.bond_from_atom_sublayer = SublayerConnection(size=self.hidden_size,
dropout_p=dropout)
self.bond_from_bond_sublayer = SublayerConnection(size=self.hidden_size,
dropout_p=dropout)
if activation == 'relu':
self.act_func_node = nn.ReLU()
self.act_func_edge = nn.ReLU()
else:
raise ValueError('Only relu activation is supported')
self.dropout_layer = nn.Dropout(p=dropout)
def _pointwise_feed_forward_to_atom_embedding(self, emb_output, atom_feat,
index, ffn_layer):
aggr_output = _select_neighbor_and_aggregate(emb_output, index)
aggr_outputx = torch.cat([atom_feat, aggr_output], dim=1)
return ffn_layer(aggr_outputx), aggr_outputx
def _pointwise_feed_forward_to_bond_embedding(self, emb_output, bond_feat,
a2nei, b2revb, ffn_layer):
aggr_output = _select_neighbor_and_aggregate(emb_output, a2nei)
aggr_output = self._remove_rev_bond_message(emb_output, aggr_output,
b2revb)
aggr_outputx = torch.cat([bond_feat, aggr_output], dim=1)
return ffn_layer(aggr_outputx), aggr_outputx
@staticmethod
def _remove_rev_bond_message(original_message, aggr_message, b2revb):
rev_message = original_message[b2revb]
return aggr_message - rev_message
def _atom_bond_transform(
self,
to_atom=True, # False: to bond
atomwise_input=None,
bondwise_input=None,
original_f_atoms=None,
original_f_bonds=None,
a2a=None,
a2b=None,
b2a=None,
b2revb=None):
"""Transfer the output of atom/bond multi-head attention to the final atom/bond output.
"""
if to_atom:
# atom input to atom output
atomwise_input, _ = self._pointwise_feed_forward_to_atom_embedding(
atomwise_input, original_f_atoms, a2a, self.ffn_atom_from_atom)
atom_in_atom_out = self.atom_from_atom_sublayer(
None, atomwise_input)
# bond to atom
bondwise_input, _ = self._pointwise_feed_forward_to_atom_embedding(
bondwise_input, original_f_atoms, a2b, self.ffn_atom_from_bond)
bond_in_atom_out = self.atom_from_bond_sublayer(
None, bondwise_input)
return atom_in_atom_out, bond_in_atom_out
else: # to bond embeddings
# atom input to bond output
atom_list_for_bond = torch.cat([b2a.unsqueeze(dim=1), a2a[b2a]],
dim=1)
atomwise_input, _ = self._pointwise_feed_forward_to_bond_embedding(
atomwise_input, original_f_bonds, atom_list_for_bond,
b2a[b2revb], self.ffn_bond_from_atom)
atom_in_bond_out = self.bond_from_atom_sublayer(
None, atomwise_input)
# bond input to bond output
bond_list_for_bond = a2b[b2a]
bondwise_input, _ = self._pointwise_feed_forward_to_bond_embedding(
bondwise_input, original_f_bonds, bond_list_for_bond, b2revb,
self.ffn_bond_from_bond)
bond_in_bond_out = self.bond_from_bond_sublayer(
None, bondwise_input)
return atom_in_bond_out, bond_in_bond_out
def forward(self, batch):
"""Forward layer
Parameters
----------
batch: Tuple
A tuple of tensors representing grover attributes
Returns
-------
embeddings: Tuple[Tuple[torch.Tensor, torch.Tensor], Tuple[torch.Tensor, torch.Tensor]]
Atom embeddings generated from node and bond hidden states, and bond embeddings generated from node and bond hidden states.
"""
f_atoms, f_bonds, a2b, b2a, b2revb, a_scope, b_scope, a2a = batch
node_batch = f_atoms, f_bonds, a2b, b2a, b2revb, a_scope, b_scope, a2a
edge_batch = f_atoms, f_bonds, a2b, b2a, b2revb, a_scope, b_scope, a2a
original_f_atoms, original_f_bonds = f_atoms, f_bonds
for nb in self.node_blocks: # atom messages. Multi-headed attention
node_batch = nb(node_batch)
for eb in self.edge_blocks: # bond messages. Multi-headed attention
edge_batch = eb(edge_batch)
atom_output, _, _, _, _, _, _, _ = node_batch # atom hidden states
_, bond_output, _, _, _, _, _, _ = edge_batch # bond hidden states
atom_embeddings = self._atom_bond_transform(
to_atom=True, # False: to bond
atomwise_input=atom_output,
bondwise_input=bond_output,
original_f_atoms=original_f_atoms,
original_f_bonds=original_f_bonds,
a2a=a2a,
a2b=a2b,
b2a=b2a,
b2revb=b2revb)
bond_embeddings = self._atom_bond_transform(
to_atom=False, # False: to bond
atomwise_input=atom_output,
bondwise_input=bond_output,
original_f_atoms=original_f_atoms,
original_f_bonds=original_f_bonds,
a2a=a2a,
a2b=a2b,
b2a=b2a,
b2revb=b2revb)
return ((atom_embeddings[0], bond_embeddings[0]), (atom_embeddings[1],
bond_embeddings[1]))
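# A minimal usage sketch (the feature dimensions below are illustrative, not
# taken from this file). Following the return statement above, the forward pass
# consumes the batched grover graph attributes and yields
# ((atom_from_atom, bond_from_atom), (atom_from_bond, bond_from_bond)):
#
#   encoder = GroverTransEncoder(node_fdim=151, edge_fdim=165, hidden_size=128)
#   (atom_from_atom, bond_from_atom), (atom_from_bond, bond_from_bond) = encoder(batch)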
<file_sep>"""
Tests that sequence handling utilities work.
"""
__author__ = "<NAME>"
__license__ = "MIT"
import unittest
import os
import numpy as np
import deepchem as dc
LETTERS = "XYZ"
class TestSeq(unittest.TestCase):
"""
Tests sequence handling utilities.
"""
def setUp(self):
super(TestSeq, self).setUp()
self.current_dir = os.path.dirname(os.path.abspath(__file__))
def test_one_hot_simple(self):
sequences = np.array(["ACGT", "GATA", "CGCG"])
sequences = dc.utils.genomics_utils.seq_one_hot_encode(sequences)
self.assertEqual(sequences.shape, (3, 5, 4, 1))
def test_one_hot_mismatch(self):
# One sequence has length longer than others. This should throw a
# ValueError.
with self.assertRaises(ValueError):
sequences = np.array(["ACGTA", "GATA", "CGCG"])
sequences = dc.utils.genomics_utils.seq_one_hot_encode(sequences)
def test_encode_fasta_sequence(self):
# Test it's possible to load a sequence with an arbitrary alphabet from a fasta file.
fname = os.path.join(self.current_dir, "./assets/example.fasta")
encoded_seqs = dc.utils.genomics_utils.encode_bio_sequence(
fname, letters=LETTERS)
expected = np.expand_dims(
np.array([
[[1, 0], [0, 1], [0, 0]],
[[0, 1], [0, 0], [1, 0]],
]), -1)
np.testing.assert_array_equal(expected, encoded_seqs)
def test_encode_fastq_sequence(self):
fname = os.path.join(self.current_dir, "./assets/example.fastq")
encoded_seqs = dc.utils.genomics_utils.encode_bio_sequence(
fname, file_type="fastq", letters=LETTERS)
expected = np.expand_dims(
np.array([
[[1, 0], [0, 1], [0, 0]],
[[0, 1], [0, 0], [1, 0]],
]), -1)
np.testing.assert_array_equal(expected, encoded_seqs)
<file_sep>import numpy as np
import pytest
from flaky import flaky
from deepchem.data import NumpyDataset
from deepchem.metrics import Metric, roc_auc_score, mean_absolute_error
from deepchem.molnet import load_bace_classification, load_delaney
from deepchem.feat import WeaveFeaturizer
try:
import tensorflow as tf
from deepchem.models import WeaveModel
has_tensorflow = True
except:
has_tensorflow = False
def get_dataset(mode='classification',
featurizer='GraphConv',
num_tasks=2,
data_points=20):
if mode == 'classification':
tasks, all_dataset, transformers = load_bace_classification(
featurizer, reload=False)
else:
tasks, all_dataset, transformers = load_delaney(featurizer,
reload=False)
train, _, _ = all_dataset
for _ in range(1, num_tasks):
tasks.append("random_task")
w = np.ones(shape=(data_points, len(tasks)))
if mode == 'classification':
y = np.random.randint(0, 2, size=(data_points, len(tasks)))
metric = Metric(roc_auc_score, np.mean, mode="classification")
else:
y = np.random.normal(size=(data_points, len(tasks)))
metric = Metric(mean_absolute_error, mode="regression")
ds = NumpyDataset(train.X[:data_points], y, w, train.ids[:data_points])
return tasks, ds, transformers, metric
@pytest.mark.tensorflow
def test_compute_features_on_infinity_distance():
"""Test that WeaveModel correctly transforms WeaveMol objects into tensors with infinite max_pair_distance."""
featurizer = WeaveFeaturizer(max_pair_distance=None)
X = featurizer(["C", "CCC"])
batch_size = 20
model = WeaveModel(1,
batch_size=batch_size,
mode='classification',
fully_connected_layer_sizes=[2000, 1000],
batch_normalize=True,
batch_normalize_kwargs={
"fused": False,
"trainable": True,
"renorm": True
},
learning_rate=0.0005)
atom_feat, pair_feat, pair_split, atom_split, atom_to_pair = model.compute_features_on_batch(
X)
# There are 4 atoms each of which have 75 atom features
assert atom_feat.shape == (4, 75)
# There are 10 pairs with infinity distance and 14 pair features
assert pair_feat.shape == (10, 14)
# 4 atoms in total
assert atom_split.shape == (4,)
assert np.all(atom_split == np.array([0, 1, 1, 1]))
# 10 pairs in total
assert pair_split.shape == (10,)
assert np.all(pair_split == np.array([0, 1, 1, 1, 2, 2, 2, 3, 3, 3]))
# 10 pairs in total each with start/finish
assert atom_to_pair.shape == (10, 2)
assert np.all(
atom_to_pair == np.array([[0, 0], [1, 1], [1, 2], [1, 3], [2, 1],
[2, 2], [2, 3], [3, 1], [3, 2], [3, 3]]))
@pytest.mark.tensorflow
def test_compute_features_on_distance_1():
"""Test that WeaveModel correctly transforms WeaveMol objects into tensors with finite max_pair_distance."""
featurizer = WeaveFeaturizer(max_pair_distance=1)
X = featurizer(["C", "CCC"])
batch_size = 20
model = WeaveModel(1,
batch_size=batch_size,
mode='classification',
fully_connected_layer_sizes=[2000, 1000],
batch_normalize=True,
batch_normalize_kwargs={
"fused": False,
"trainable": True,
"renorm": True
},
learning_rate=0.0005)
atom_feat, pair_feat, pair_split, atom_split, atom_to_pair = model.compute_features_on_batch(
X)
# There are 4 atoms each of which have 75 atom features
assert atom_feat.shape == (4, 75)
# There are 8 pairs with distance 1 and 14 pair features. (To see why 8,
# there's the self pair for "C". For "CCC" there are 7 pairs including self
# connections and accounting for symmetry.)
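# Worked out explicitly under the canonical ordering used below, where atom 0
# is the lone "C" and atom 3 is the central carbon of "CCC": the pairs within
# graph distance 1 are (0, 0) plus (1, 1), (1, 3), (2, 2), (2, 3), (3, 1),
# (3, 2), (3, 3), i.e. 1 + 7 = 8 pairs.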
assert pair_feat.shape == (8, 14)
# 4 atoms in total
assert atom_split.shape == (4,)
assert np.all(atom_split == np.array([0, 1, 1, 1]))
# 8 pairs in total
assert pair_split.shape == (8,)
# The center atom is self connected and to both neighbors so it appears
# thrice. The canonical ranking used in MolecularFeaturizer means this
# central atom is ranked last in ordering.
assert np.all(pair_split == np.array([0, 1, 1, 2, 2, 3, 3, 3]))
# 8 pairs in total each with start/finish
assert atom_to_pair.shape == (8, 2)
assert np.all(atom_to_pair == np.array([[0, 0], [1, 1], [1, 3], [2, 2],
[2, 3], [3, 1], [3, 2], [3, 3]]))
@flaky
@pytest.mark.slow
@pytest.mark.tensorflow
def test_weave_model():
tasks, dataset, transformers, metric = get_dataset('classification',
'Weave',
data_points=10)
batch_size = 10
model = WeaveModel(len(tasks),
batch_size=batch_size,
mode='classification',
dropouts=0,
learning_rate=0.0001)
model.fit(dataset, nb_epoch=250)
scores = model.evaluate(dataset, [metric], transformers)
assert scores['mean-roc_auc_score'] >= 0.9
@pytest.mark.slow
@pytest.mark.tensorflow
def test_weave_regression_model():
tf.random.set_seed(123)
np.random.seed(123)
tasks, dataset, transformers, metric = get_dataset('regression',
'Weave',
data_points=10)
batch_size = 10
model = WeaveModel(len(tasks),
batch_size=batch_size,
mode='regression',
dropouts=0,
learning_rate=0.00003)
model.fit(dataset, nb_epoch=400)
scores = model.evaluate(dataset, [metric], transformers)
assert scores['mean_absolute_error'] < 0.1
# def test_weave_fit_simple_infinity_distance():
# featurizer = WeaveFeaturizer(max_pair_distance=None)
# X = featurizer(["C", "CCC"])
# y = np.array([0, 1.])
# dataset = NumpyDataset(X, y)
# batch_size = 20
# model = WeaveModel(
# 1,
# batch_size=batch_size,
# mode='classification',
# fully_connected_layer_sizes=[2000, 1000],
# batch_normalize=True,
# batch_normalize_kwargs={
# "fused": False,
# "trainable": True,
# "renorm": True
# },
# learning_rate=0.0005)
# model.fit(dataset, nb_epoch=200)
# transformers = []
# metric = Metric(
# roc_auc_score, np.mean, mode="classification")
# scores = model.evaluate(dataset, [metric], transformers)
# assert scores['mean-roc_auc_score'] >= 0.9
@pytest.mark.tensorflow
def test_weave_fit_simple_distance_1():
featurizer = WeaveFeaturizer(max_pair_distance=1)
X = featurizer(["C", "CCC"])
y = np.array([0, 1.])
dataset = NumpyDataset(X, y)
batch_size = 20
model = WeaveModel(1,
batch_size=batch_size,
mode='classification',
fully_connected_layer_sizes=[2000, 1000],
batch_normalize=True,
batch_normalize_kwargs={
"fused": False,
"trainable": True,
"renorm": True
},
learning_rate=0.0005)
model.fit(dataset, nb_epoch=200)
transformers = []
metric = Metric(roc_auc_score, np.mean, mode="classification")
scores = model.evaluate(dataset, [metric], transformers)
assert scores['mean-roc_auc_score'] >= 0.9
<file_sep>from collections import deque
import sys
import tensorflow as tf
import pickle
import os
import fnmatch
import numpy as np
from scipy.spatial.distance import pdist, squareform
import pandas as pd
from deepchem.feat.base_classes import Featurizer
from deepchem.feat.graph_features import atom_features
from scipy.sparse import csr_matrix
def get_atom_type(atom):
elem = atom.GetAtomicNum()
hyb = str(atom.GetHybridization()).lower()
if elem == 1:
return (0)
if elem == 4:
return (1)
if elem == 5:
return (2)
if elem == 6:
if "sp2" in hyb:
return (3)
elif "sp3" in hyb:
return (4)
else:
return (5)
if elem == 7:
if "sp2" in hyb:
return (6)
elif "sp3" in hyb:
return (7)
else:
return (8)
if elem == 8:
if "sp2" in hyb:
return (9)
elif "sp3" in hyb:
return (10)
else:
return (11)
if elem == 9:
return (12)
if elem == 15:
if "sp2" in hyb:
return (13)
elif "sp3" in hyb:
return (14)
else:
return (15)
if elem == 16:
if "sp2" in hyb:
return (16)
elif "sp3" in hyb:
return (17)
else:
return (18)
if elem == 17:
return (19)
if elem == 35:
return (20)
if elem == 53:
return (21)
return (22)
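# get_atom_type buckets an atom into one of 23 classes: H, Be and B, then C, N,
# O, P and S each split by sp2 / sp3 / other hybridization, the halogens F, Cl,
# Br and I, and a catch-all index (22) for every other element.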
def get_atom_adj_matrices(mol,
n_atom_types,
max_n_atoms=200,
max_valence=4,
graph_conv_features=True,
nxn=True):
if not graph_conv_features:
bond_matrix = np.zeros((max_n_atoms, 4 * max_valence)).astype(np.uint8)
if nxn:
adj_matrix = np.zeros((max_n_atoms, max_n_atoms)).astype(np.uint8)
else:
adj_matrix = np.zeros((max_n_atoms, max_valence)).astype(np.uint8)
adj_matrix += (adj_matrix.shape[0] - 1)
if not graph_conv_features:
atom_matrix = np.zeros((max_n_atoms, n_atom_types + 3)).astype(np.uint8)
atom_matrix[:, atom_matrix.shape[1] - 1] = 1
atom_arrays = []
for a_idx in range(0, mol.GetNumAtoms()):
atom = mol.GetAtomWithIdx(a_idx)
if graph_conv_features:
atom_arrays.append(atom_features(atom))
else:
atom_type = get_atom_type(atom)
atom_matrix[a_idx][-1] = 0
atom_matrix[a_idx][atom_type] = 1
for n_idx, neighbor in enumerate(atom.GetNeighbors()):
if nxn:
adj_matrix[a_idx][neighbor.GetIdx()] = 1
adj_matrix[a_idx][a_idx] = 1
else:
adj_matrix[a_idx][n_idx] = neighbor.GetIdx()
if not graph_conv_features:
bond = mol.GetBondBetweenAtoms(a_idx, neighbor.GetIdx())
bond_type = str(bond.GetBondType()).lower()
if "single" in bond_type:
bond_order = 0
elif "double" in bond_type:
bond_order = 1
elif "triple" in bond_type:
bond_order = 2
elif "aromatic" in bond_type:
bond_order = 3
bond_matrix[a_idx][(4 * n_idx) + bond_order] = 1
if graph_conv_features:
n_feat = len(atom_arrays[0])
atom_matrix = np.zeros((max_n_atoms, n_feat)).astype(np.uint8)
for idx, atom_array in enumerate(atom_arrays):
atom_matrix[idx, :] = atom_array
else:
atom_matrix = np.concatenate(
[atom_matrix, bond_matrix], axis=1).astype(np.uint8)
return (adj_matrix.astype(np.uint8), atom_matrix.astype(np.uint8))
def featurize_mol(mol, n_atom_types, max_n_atoms, max_valence,
num_atoms_feature):
adj_matrix, atom_matrix = get_atom_adj_matrices(mol, n_atom_types,
max_n_atoms, max_valence)
if num_atoms_feature:
return ((adj_matrix, atom_matrix, mol.GetNumAtoms()))
return ((adj_matrix, atom_matrix))
class AdjacencyFingerprint(Featurizer):
def __init__(self,
n_atom_types=23,
max_n_atoms=200,
add_hydrogens=False,
max_valence=4,
num_atoms_feature=False):
self.n_atom_types = n_atom_types
self.max_n_atoms = max_n_atoms
self.add_hydrogens = add_hydrogens
self.max_valence = max_valence
self.num_atoms_feature = num_atoms_feature
def featurize(self, rdkit_mols):
featurized_mols = np.empty((len(rdkit_mols)), dtype=object)
from rdkit import Chem
for idx, mol in enumerate(rdkit_mols):
if self.add_hydrogens:
mol = Chem.AddHs(mol)
featurized_mol = featurize_mol(mol, self.n_atom_types, self.max_n_atoms,
self.max_valence, self.num_atoms_feature)
featurized_mols[idx] = featurized_mol
return (featurized_mols)
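# Minimal usage sketch (the SMILES string and sizes are illustrative):
#
#   from rdkit import Chem
#   featurizer = AdjacencyFingerprint(max_n_atoms=50)
#   feats = featurizer.featurize([Chem.MolFromSmiles("CCO")])
#   adj_matrix, atom_matrix = feats[0]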
<file_sep>"""
Script that trains graphconv models on delaney dataset.
"""
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
import numpy as np
np.random.seed(123)
import tensorflow as tf
tf.random.set_seed(123)
import deepchem as dc
import csv
from sklearn.metrics import r2_score
from deepchem.trans import undo_transforms
from delaney_datasets import load_delaney
MODEL_DIR = 'model_saves'
BATCH_SIZE = 64
LR = 1e-3
ERROR_BARS = True
delaney_tasks, delaney_datasets, transformers = dc.molnet.load_delaney(
featurizer='GraphConv', split='scaffold')
train_dataset, valid_dataset, test_dataset = delaney_datasets
metric = dc.metrics.Metric(dc.metrics.pearson_r2_score, np.mean)
model = dc.models.GraphConvModel(
len(delaney_tasks),
batch_size=BATCH_SIZE,
learning_rate=LR,
use_queue=False,
mode='regression',
model_dir=MODEL_DIR,
error_bars=ERROR_BARS)
model.fit(train_dataset, nb_epoch=8)
valid_scores = model.evaluate(valid_dataset, [metric], transformers)
model.save()
model.load_from_dir('model_saves')
mu, sigma = model.bayesian_predict(
valid_dataset, transformers, untransform=True, n_passes=24)
print(mu[:4])
print(sigma[:4])
target = undo_transforms(valid_dataset.y, transformers)
print(r2_score(target, mu))
mu = mu[:, 0].tolist()
sigma = sigma[:, 0].tolist()
target = target[:, 0].tolist()
print(mu[:4])
print(sigma[:4])
print(target[:4])
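# If the predictive uncertainties were well calibrated Gaussians, roughly 68%
# of targets would fall within +/- 1 sigma of the predicted mean and roughly
# 95% within +/- 2 sigma; the counters below measure the empirical coverage.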
in_one_sigma = 0
in_two_sigma = 0
in_four_sigma = 0
for i in range(0, len(mu)):
if target[i] < (mu[i] + sigma[i]) and target[i] > (mu[i] - sigma[i]):
in_one_sigma += 1
if target[i] < (mu[i] + 2 * sigma[i]) and target[i] > (mu[i] - 2 * sigma[i]):
in_two_sigma += 1
if target[i] < (mu[i] + 4 * sigma[i]) and target[i] > (mu[i] - 4 * sigma[i]):
in_four_sigma += 1
print('percent in 1 sigma [%f]' % (in_one_sigma / float(len(mu))))
print('percent in 2 sigma [%f]' % (in_two_sigma / float(len(mu))))
print('percent in 4 sigma [%f]' % (in_four_sigma / float(len(mu))))
print(sorted(sigma))
<file_sep>from rdkit import Chem
from deepchem.data import NumpyDataset
from deepchem.models.autoencoder_models.autoencoder import TensorflowMoleculeEncoder, TensorflowMoleculeDecoder
from deepchem.feat.one_hot import OneHotFeaturizer, zinc_charset
tf_enc = TensorflowMoleculeEncoder.zinc_encoder()
smiles = ["Cn1cnc2c1c(=O)n(C)c(=O)n2C", "CC(=O)N1CN(C(C)=O)[C@@H](O)[C@@H]1O"]
mols = [Chem.MolFromSmiles(x) for x in smiles]
featurizer = OneHotFeaturizer(zinc_charset)
features = featurizer.featurize(mols)
dataset = NumpyDataset(features, features)
prediction = tf_enc.predict_on_batch(dataset.X)
tf_de = TensorflowMoleculeDecoder.zinc_decoder()
one_hot_decoded = tf_de.predict_on_batch(prediction)
print(featurizer.untransform(one_hot_decoded))
<file_sep>"""Place constraints on models."""
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
from __future__ import absolute_import
import tensorflow as tf
from deepchem.nn import model_ops
from deepchem.nn.activations import get_from_module
class Constraint(object):
def __call__(self, p):
return p
class MaxNorm(Constraint):
"""MaxNorm weight constraint.
Constrains the weights incident to each hidden unit
to have a norm less than or equal to a desired value.
Parameters
----------
m: the maximum norm for the incoming weights.
axis: integer, axis along which to calculate weight norms.
For instance, in a `Dense` layer the weight matrix
has shape (input_dim, output_dim),
set axis to 0 to constrain each weight vector
of length `(input_dim,)`.
# References
- [Dropout: A Simple Way to Prevent Neural Networks from Overfitting Srivastava, Hinton, et al. 2014](http://www.cs.toronto.edu/~rsalakhu/papers/srivastava14a.pdf)
"""
def __init__(self, m=2, axis=0):
self.m = m
self.axis = axis
def __call__(self, p):
norms = model_ops.sqrt(model_ops.sum(
tf.square(p), axis=self.axis, keepdims=True))
desired = model_ops.clip(norms, 0, self.m)
p *= (desired / (model_ops.epsilon() + norms))
return p
class NonNeg(Constraint):
"""Constrains the weights to be non-negative.
"""
def __call__(self, p):
p *= tf.cast(p >= 0., tf.float32)
return p
class UnitNorm(Constraint):
"""Constrains the weights incident to each hidden unit to have unit norm.
# Arguments
axis: integer, axis along which to calculate weight norms.
For instance, in a `Dense` layer the weight matrix
has shape `(input_dim, output_dim)`,
set `axis` to `0` to constrain each weight vector
of length `(input_dim,)`.
In a `Convolution2D` layer with `dim_ordering="tf"`,
the weight tensor has shape
`(rows, cols, input_depth, output_depth)`,
set `axis` to `[0, 1, 2]`
to constrain the weights of each filter tensor of size
`(rows, cols, input_depth)`.
"""
def __init__(self, axis=0):
self.axis = axis
def __call__(self, p):
return p / (model_ops.epsilon() + model_ops.sqrt(
model_ops.sum(tf.square(p), axis=self.axis, keepdims=True)))
# Aliases.
maxnorm = MaxNorm
nonneg = NonNeg
unitnorm = UnitNorm
def get(identifier, kwargs=None):
return get_from_module(identifier, globals(), 'constraint',
instantiate=True, kwargs=kwargs)
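# Minimal usage sketch (shapes are illustrative): a constraint is applied to a
# weight tensor after a parameter update, e.g.
#
#   constraint = MaxNorm(m=2, axis=0)
#   W = constraint(W)  # each incoming weight vector of W now has norm <= 2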
<file_sep>import math
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from typing import List, Sequence, Optional, Any, Tuple
from rdkit import Chem
from deepchem.feat.graph_data import BatchGraphData
from deepchem.models.torch_models.modular import ModularTorchModel
from deepchem.models.torch_models.grover_layers import (
GroverEmbedding, GroverBondVocabPredictor, GroverAtomVocabPredictor,
GroverFunctionalGroupPredictor)
from deepchem.models.torch_models.readout import GroverReadout
from deepchem.feat.vocabulary_builders import GroverAtomVocabularyBuilder, GroverBondVocabularyBuilder
from deepchem.utils.grover import extract_grover_attributes
class GroverPretrain(nn.Module):
"""The Grover Pretrain module.
The GroverPretrain module is used for training an embedding based on the Grover Pretraining task.
Grover pretraining is a self-supervised task where an embedding is trained to learn the contextual
information of atoms and bonds along with graph-level properties, which are functional groups
in case of molecular graphs.
Parameters
----------
embedding: nn.Module
An embedding layer to generate embedding from input molecular graph
atom_vocab_task_atom: nn.Module
A layer used for predicting atom vocabulary from atom features generated via atom hidden states.
atom_vocab_task_bond: nn.Module
A layer used for predicting atom vocabulary from atom features generated via bond hidden states.
bond_vocab_task_atom: nn.Module
A layer used for predicting bond vocabulary from bond features generated via atom hidden states.
bond_vocab_task_bond: nn.Module
A layer used for predicting bond vocabulary from bond features generated via bond hidden states.
Returns
-------
prediction_logits: Tuple
A tuple of prediction logits containing prediction logits of atom vocabulary task from atom hidden state,
prediction logits for atom vocabulary task from bond hidden states, prediction logits for bond vocabulary task
from atom hidden states, prediction logits for bond vocabulary task from bond hidden states, functional
group prediction logits from atom embedding generated from atom and bond hidden states, functional group
prediction logits from bond embedding generated from atom and bond hidden states.
Example
-------
>>> import deepchem as dc
>>> from deepchem.feat.graph_data import BatchGraphData
>>> from deepchem.utils.grover import extract_grover_attributes
>>> from deepchem.models.torch_models.grover import GroverPretrain
>>> from deepchem.models.torch_models.grover_layers import GroverEmbedding, GroverAtomVocabPredictor, GroverBondVocabPredictor, GroverFunctionalGroupPredictor
>>> smiles = ['CC', 'CCC', 'CC(=O)C']
>>> fg = dc.feat.CircularFingerprint()
>>> featurizer = dc.feat.GroverFeaturizer(features_generator=fg)
>>> graphs = featurizer.featurize(smiles)
>>> batched_graph = BatchGraphData(graphs)
>>> grover_graph_attributes = extract_grover_attributes(batched_graph)
>>> f_atoms, f_bonds, a2b, b2a, b2revb, a2a, a_scope, b_scope, _, _ = grover_graph_attributes
>>> components = {}
>>> components['embedding'] = GroverEmbedding(node_fdim=f_atoms.shape[1], edge_fdim=f_bonds.shape[1])
>>> components['atom_vocab_task_atom'] = GroverAtomVocabPredictor(vocab_size=10, in_features=128)
>>> components['atom_vocab_task_bond'] = GroverAtomVocabPredictor(vocab_size=10, in_features=128)
>>> components['bond_vocab_task_atom'] = GroverBondVocabPredictor(vocab_size=10, in_features=128)
>>> components['bond_vocab_task_bond'] = GroverBondVocabPredictor(vocab_size=10, in_features=128)
>>> components['functional_group_predictor'] = GroverFunctionalGroupPredictor(10)
>>> model = GroverPretrain(**components)
>>> inputs = f_atoms, f_bonds, a2b, b2a, b2revb, a_scope, b_scope, a2a
>>> output = model(inputs)
Reference
---------
.. <NAME>, et al. "Self-supervised graph transformer on large-scale molecular data." Advances in Neural Information Processing Systems 33 (2020): 12559-12571.
"""
def __init__(self, embedding: nn.Module, atom_vocab_task_atom: nn.Module,
atom_vocab_task_bond: nn.Module,
bond_vocab_task_atom: nn.Module,
bond_vocab_task_bond: nn.Module,
functional_group_predictor: nn.Module):
super(GroverPretrain, self).__init__()
self.embedding = embedding
self.atom_vocab_task_atom = atom_vocab_task_atom
self.atom_vocab_task_bond = atom_vocab_task_bond
self.bond_vocab_task_atom = bond_vocab_task_atom
self.bond_vocab_task_bond = bond_vocab_task_bond
self.functional_group_predictor = functional_group_predictor
def forward(self, graph_batch):
"""Forward function
Parameters
----------
graph_batch: List[torch.Tensor]
A list containing grover graph attributes
"""
_, _, _, _, _, atom_scope, bond_scope, _ = graph_batch
atom_scope = atom_scope.data.cpu().numpy().tolist()
bond_scope = bond_scope.data.cpu().numpy().tolist()
embeddings = self.embedding(graph_batch)
av_task_atom_pred = self.atom_vocab_task_atom(
embeddings["atom_from_atom"])
av_task_bond_pred = self.atom_vocab_task_bond(
embeddings["atom_from_bond"])
bv_task_atom_pred = self.bond_vocab_task_atom(
embeddings["bond_from_atom"])
bv_task_bond_pred = self.bond_vocab_task_bond(
embeddings["bond_from_bond"])
fg_prediction = self.functional_group_predictor(embeddings, atom_scope,
bond_scope)
return av_task_atom_pred, av_task_bond_pred, bv_task_atom_pred, bv_task_bond_pred, fg_prediction[
'atom_from_atom'], fg_prediction['atom_from_bond'], fg_prediction[
'bond_from_atom'], fg_prediction['bond_from_bond']
class GroverFinetune(nn.Module):
"""Grover Finetune model.
For a graph-level prediction task, the GroverFinetune model uses node/edge embeddings
produced by the GroverEmbedding layer, applies a readout function on them to get
graph embeddings and uses additional MLP layers to predict the property of the molecular graph.
Parameters
----------
embedding: nn.Module
An embedding layer to generate embedding from input molecular graph
readout: nn.Module
A readout layer to perform readout atom and bond hidden states
mol_atom_from_atom_ffn: nn.Module
A feed forward network which learns representation from atom messages generated via atom hidden states of a molecular graph
mol_atom_from_bond_ffn: nn.Module
A feed forward network which learns representation from atom messages generated via bond hidden states of a molecular graph
mode: str
classification or regression
Returns
-------
prediction_logits: torch.Tensor
prediction logits
Example
-------
>>> import deepchem as dc
>>> from deepchem.feat.graph_data import BatchGraphData
>>> from deepchem.utils.grover import extract_grover_attributes
>>> from deepchem.models.torch_models.grover_layers import GroverEmbedding
>>> from deepchem.models.torch_models.readout import GroverReadout
>>> from deepchem.models.torch_models.grover import GroverFinetune
>>> smiles = ['CC', 'CCC', 'CC(=O)C']
>>> fg = dc.feat.CircularFingerprint()
>>> featurizer = dc.feat.GroverFeaturizer(features_generator=fg)
>>> graphs = featurizer.featurize(smiles)
>>> batched_graph = BatchGraphData(graphs)
>>> attributes = extract_grover_attributes(batched_graph)
>>> components = {}
>>> f_atoms, f_bonds, a2b, b2a, b2revb, a2a, a_scope, b_scope, fg_labels, additional_features = attributes
>>> inputs = f_atoms, f_bonds, a2b, b2a, b2revb, a_scope, b_scope, a2a
>>> components = {}
>>> components['embedding'] = GroverEmbedding(node_fdim=f_atoms.shape[1], edge_fdim=f_bonds.shape[1])
>>> components['readout'] = GroverReadout(rtype="mean", in_features=128)
>>> components['mol_atom_from_atom_ffn'] = nn.Linear(in_features=additional_features.shape[1]+ 128, out_features=128)
>>> components['mol_atom_from_bond_ffn'] = nn.Linear(in_features=additional_features.shape[1] + 128, out_features=128)
>>> model = GroverFinetune(**components, mode='regression', hidden_size=128)
>>> model.training = False
>>> output = model((inputs, additional_features))
Reference
---------
.. <NAME>, et al. "Self-supervised graph transformer on large-scale molecular data." Advances in Neural Information Processing Systems 33 (2020): 12559-12571.
"""
def __init__(self,
embedding: nn.Module,
readout: nn.Module,
mol_atom_from_atom_ffn: nn.Module,
mol_atom_from_bond_ffn: nn.Module,
hidden_size: int = 128,
mode: str = 'regression',
n_tasks: int = 1,
n_classes: Optional[int] = None):
super().__init__()
self.embedding = embedding
self.readout = readout
self.mol_atom_from_atom_ffn = mol_atom_from_atom_ffn
self.mol_atom_from_bond_ffn = mol_atom_from_bond_ffn
self.n_tasks = n_tasks
self.n_classes = n_classes
self.mode = mode
# the hidden size here is the output size of the last layer in the mol_atom_from_atom_ffn and mol_atom_from_bond_ffn components.
# it is necessary that the aforementioned components produce output tensors of the same size.
if self.mode == 'classification':
assert n_classes is not None
self.linear = nn.Linear(hidden_size,
out_features=n_tasks * n_classes)
elif self.mode == 'regression':
self.linear = nn.Linear(hidden_size, out_features=n_tasks)
def forward(self, inputs):
"""
Parameters
----------
inputs: Tuple
grover batch graph attributes
"""
graphbatch, additional_features = inputs
_, _, _, _, _, atom_scope, bond_scope, _ = graphbatch
output = self.embedding(graphbatch)
mol_atom_from_bond_output = self.readout(output["atom_from_bond"],
atom_scope)
mol_atom_from_atom_output = self.readout(output["atom_from_atom"],
atom_scope)
if additional_features[0] is not None:
if len(additional_features.shape) == 1:
additional_features = additional_features.view(
1, additional_features.shape[0])
mol_atom_from_atom_output = torch.cat(
[mol_atom_from_atom_output, additional_features], 1)
mol_atom_from_bond_output = torch.cat(
[mol_atom_from_bond_output, additional_features], 1)
atom_ffn_output = self.mol_atom_from_atom_ffn(mol_atom_from_atom_output)
bond_ffn_output = self.mol_atom_from_bond_ffn(mol_atom_from_bond_output)
if self.training:
# In training mode, we return atom level aggregated output and bond level aggregated output.
# The training has an additional objective which ensures that atom and bond level aggregated outputs
# are similar to each other apart from the objective of making the aggregated output closer to each
# other.
return atom_ffn_output, bond_ffn_output
else:
if self.mode == 'classification':
atom_ffn_output = torch.sigmoid(atom_ffn_output)
bond_ffn_output = torch.sigmoid(bond_ffn_output)
output = (atom_ffn_output + bond_ffn_output) / 2
output = self.linear(output)
if self.mode == 'classification':
if self.n_tasks == 1:
logits = output.view(-1, self.n_classes)
softmax_dim = 1
else:
logits = output.view(-1, self.n_tasks, self.n_classes)
softmax_dim = 2
proba = F.softmax(logits, dim=softmax_dim)
return proba, logits
elif self.mode == 'regression':
return output
class GroverModel(ModularTorchModel):
"""GROVER model
The GROVER model employs a self-supervised message passing transformer architecture
for learning molecular representation. The pretraining task can learn rich structural
and semantic information of molecules from unlabelled molecular data, which can
be leveraged by finetuning for downstream applications. To this end, GROVER integrates message
passing networks into a transformer style architecture.
Parameters
----------
node_fdim: int
the dimension of additional feature for node/atom.
edge_fdim: int
the dimension of additional feature for edge/bond.
atom_vocab: GroverAtomVocabularyBuilder
Grover atom vocabulary builder required during pretraining.
bond_vocab: GroverBondVocabularyBuilder
Grover bond vocabulary builder required during pretraining.
hidden_size: int
Size of hidden layers
features_only: bool
Uses only additional features in the feed-forward network, no graph network
self_attention: bool, default False
When set to True, a self-attention layer is used during graph readout operation.
functional_group_size: int (default: 85)
Size of functional group used in grover.
features_dim: int
Size of additional molecular features, like fingerprints.
ffn_num_layers: int (default: 1)
Number of linear layers to use for feature extraction from embeddings
task: str (pretraining or finetuning)
Pretraining or finetuning tasks.
mode: str (classification or regression)
Training mode (used only for finetuning)
n_tasks: int, optional (default: 1)
Number of tasks
n_classes: int, optional (default: None)
Number of target classes in classification mode
model_dir: str
Directory to save model checkpoints
dropout: float, optional (default: 0.2)
dropout value
activation: str, optional (default: 'relu')
supported activation function
Example
-------
>>> import deepchem as dc
>>> from deepchem.models.torch_models.grover import GroverModel
>>> from deepchem.feat.vocabulary_builders import (GroverAtomVocabularyBuilder, GroverBondVocabularyBuilder)
>>> import pandas as pd
>>> import os
>>> import tempfile
>>> tmpdir = tempfile.mkdtemp()
>>> df = pd.DataFrame({'smiles': ['CC', 'CCC'], 'preds': [0, 0]})
>>> filepath = os.path.join(tmpdir, 'example.csv')
>>> df.to_csv(filepath, index=False)
>>> dataset_path = os.path.join(filepath)
>>> loader = dc.data.CSVLoader(tasks=['preds'], featurizer=dc.feat.DummyFeaturizer(), feature_field=['smiles'])
>>> dataset = loader.create_dataset(filepath)
>>> av = GroverAtomVocabularyBuilder()
>>> av.build(dataset)
>>> bv = GroverBondVocabularyBuilder()
>>> bv.build(dataset)
>>> fg = dc.feat.CircularFingerprint()
>>> loader2 = dc.data.CSVLoader(tasks=['preds'], featurizer=dc.feat.GroverFeaturizer(features_generator=fg), feature_field='smiles')
>>> graph_data = loader2.create_dataset(filepath)
>>> model = GroverModel(node_fdim=151, edge_fdim=165, atom_vocab=av, bond_vocab=bv, features_dim=2048, hidden_size=128, functional_group_size=85, mode='regression', task='finetuning', model_dir='gm')
>>> loss = model.fit(graph_data, nb_epoch=1)
Reference
---------
.. <NAME>, et al. "Self-supervised graph transformer on large-scale molecular data." Advances in Neural Information Processing Systems 33 (2020): 12559-12571.
"""
def __init__(self,
node_fdim: int,
edge_fdim: int,
atom_vocab: GroverAtomVocabularyBuilder,
bond_vocab: GroverBondVocabularyBuilder,
hidden_size: int,
self_attention=False,
features_only=False,
functional_group_size: int = 85,
features_dim=128,
dropout=0.2,
activation='relu',
task='pretraining',
ffn_num_layers=1,
mode: Optional[str] = None,
model_dir=None,
n_tasks: int = 1,
n_classes: Optional[int] = None,
**kwargs):
assert task in ['pretraining', 'finetuning']
self.ffn_num_layers = ffn_num_layers
self.activation = activation
self.node_fdim = node_fdim
self.edge_fdim = edge_fdim
self.atom_vocab = atom_vocab
self.bond_vocab = bond_vocab
self.atom_vocab_size = atom_vocab.size
self.bond_vocab_size = bond_vocab.size
self.task = task
self.model_dir = model_dir
self.hidden_size = hidden_size
self.attn_hidden_size = hidden_size
self.attn_out_size = hidden_size
self.functional_group_size = functional_group_size
self.self_attention = self_attention
self.features_only = features_only
self.features_dim = features_dim
self.dropout = dropout
self.mode = mode
self.n_tasks = n_tasks
self.n_classes = n_classes
self.components = self.build_components()
self.model = self.build_model()
super().__init__(self.model,
self.components,
model_dir=self.model_dir,
**kwargs)
# FIXME In the above step, we initialize modular torch model but
# something is missing here. The attribute loss from TorchModel gets assigned `loss_func`
# by super class initialization in ModularTorchModel but here we reinitialize it.
self.loss = self.get_loss_func()
def build_components(self):
"""Builds components for grover pretraining and finetuning model.
.. list-table:: Components of pretraining model
:widths: 25 25 50
:header-rows: 1
* - Component name
- Type
- Description
* - `embedding`
- Graph message passing network
- A layer which accepts a molecular graph and produces an embedding for grover pretraining task
* - `atom_vocab_task_atom`
- Feed forward layer
- A layer which accepts an embedding generated from atom hidden states and predicts atom vocabulary for grover pretraining task
* - `atom_vocab_task_bond`
- Feed forward layer
- A layer which accepts an embedding generated from bond hidden states and predicts atom vocabulary for grover pretraining task
* - `bond_vocab_task_atom`
- Feed forward layer
- A layer which accepts an embedding generated from atom hidden states and predicts bond vocabulary for grover pretraining task
* - `bond_vocab_task_bond`
- Feed forward layer
- A layer which accepts an embedding generated from bond hidden states and predicts bond vocabulary for grover pretraining task
* - `functional_group_predictor`
- Feed forward layer
- A layer which accepts an embedding generated from a graph readout and predicts functional group for grover pretraining task
.. list-table:: Components of finetuning model
* - Component name
- Type
- Description
* - `embedding`
- Graph message passing network
- An embedding layer to generate embedding from input molecular graph
* - `readout`
- Feed forward layer
- A readout layer to perform readout atom and bond hidden states
* - `mol_atom_from_atom_ffn`
- Feed forward layer
- A feed forward network which learns representation from atom messages generated via atom hidden states of a molecular graph
* - `mol_atom_from_bond_ffn`
- Feed forward layer
- A feed forward network which learns representation from atom messages generated via bond hidden states of a molecular graph
"""
if self.task == 'pretraining':
components = self._get_pretraining_components()
elif self.task == 'finetuning':
components = self._get_finetuning_components()
return components
def build_model(self):
"""Builds grover pretrain or finetune model based on task"""
if self.task == 'pretraining':
return GroverPretrain(**self.components)
elif self.task == 'finetuning':
return GroverFinetune(**self.components,
mode=self.mode,
hidden_size=self.hidden_size,
n_tasks=self.n_tasks,
n_classes=self.n_classes)
def get_loss_func(self):
"""Returns loss function based on task"""
if self.task == 'pretraining':
from deepchem.models.losses import GroverPretrainLoss
return GroverPretrainLoss()._create_pytorch_loss()
elif self.task == 'finetuning':
return self._finetuning_loss
def loss_func(self, inputs, labels, weights):
"""Returns loss function which performs forward iteration based on task type"""
if self.task == 'pretraining':
return self._pretraining_loss(inputs, labels, weights)
elif self.task == 'finetuning':
return self._finetuning_loss(inputs, labels, weights)
def _get_pretraining_components(self):
"""Return pretraining components.
The component names are described in GroverModel.build_components method.
"""
components = {}
components['embedding'] = GroverEmbedding(node_fdim=self.node_fdim,
edge_fdim=self.edge_fdim)
components['atom_vocab_task_atom'] = GroverAtomVocabPredictor(
self.atom_vocab_size, self.hidden_size)
components['atom_vocab_task_bond'] = GroverAtomVocabPredictor(
self.atom_vocab_size, self.hidden_size)
components['bond_vocab_task_atom'] = GroverBondVocabPredictor(
self.bond_vocab_size, self.hidden_size)
components['bond_vocab_task_bond'] = GroverBondVocabPredictor(
self.bond_vocab_size, self.hidden_size)
components[
'functional_group_predictor'] = GroverFunctionalGroupPredictor(
self.functional_group_size)
return components
def _get_finetuning_components(self):
"""Return finetuning components.
The component names are described in GroverModel.build_components method.
"""
components = {}
components['embedding'] = GroverEmbedding(node_fdim=self.node_fdim,
edge_fdim=self.edge_fdim)
if self.self_attention:
components['readout'] = GroverReadout(
rtype="self_attention",
in_features=self.hidden_size,
attn_hidden=self.attn_hidden_size,
attn_out=self.attn_out_size)
else:
components['readout'] = GroverReadout(rtype="mean",
in_features=self.hidden_size)
components['mol_atom_from_atom_ffn'] = self._create_ffn()
components['mol_atom_from_bond_ffn'] = self._create_ffn()
return components
def _prepare_batch(self, batch):
"""Prepare batch method for preprating batch of data for finetuning and pretraining tasks"""
if self.task == 'pretraining':
return self._prepare_batch_for_pretraining(batch)
elif self.task == 'finetuning':
return self._prepare_batch_for_finetuning(batch)
def _prepare_batch_for_pretraining(self, batch: Tuple[Any, Any, Any]):
"""Prepare batch for pretraining
This method batches a sequence of graph data objects using BatchGraphData.
It extracts attributes from the batched graph and returns them as inputs for
the model. It also generates labels for the atom vocab prediction and bond vocab
prediction tasks in grover.
Parameters
----------
batch: Tuple[Sequence[GraphData], Any, Any]
A batch of data containing grover molecular graphs, target prediction values and weights for datapoint.
Returns
-------
inputs: Tuple
Inputs for grover pretraining model
labels: Dict[str, torch.Tensor]
Labels for grover pretraining self-supervised task
w: Any
Weights of data point
"""
X, y, w = batch
batchgraph = BatchGraphData(X[0])
fgroup_label = getattr(batchgraph, 'fg_labels')
smiles_batch = getattr(batchgraph, 'smiles').reshape(-1).tolist()
f_atoms, f_bonds, a2b, b2a, b2revb, a2a, a_scope, b_scope, _, _ = extract_grover_attributes(
batchgraph)
atom_vocab_label = torch.Tensor(
self.atom_vocab_random_mask(self.atom_vocab,
smiles_batch)).long().to(self.device)
bond_vocab_label = torch.Tensor(
self.bond_vocab_random_mask(self.bond_vocab,
smiles_batch)).long().to(self.device)
labels = {
"av_task": atom_vocab_label,
"bv_task": bond_vocab_label,
"fg_task": torch.Tensor(fgroup_label).to(self.device)
}
inputs = (f_atoms.to(self.device), f_bonds.to(self.device),
a2b.to(self.device), b2a.to(self.device),
b2revb.to(self.device), a_scope.to(self.device),
b_scope.to(self.device), a2a.to(self.device))
return inputs, labels, w
def _prepare_batch_for_finetuning(self, batch: Tuple[Any, Any, Any]):
"""Prepare batch for finetuning task
The method batches a sequence of grover graph data objects using BatchGraphData utility
and extracts attributes from the batched graph. The extracted attributes are fed to
the grover model as inputs.
Parameters
----------
batch: Tuple[Sequence[GraphData], Any, Any]
A batch of data containing grover molecular graphs, target prediction values and weights for datapoint.
Returns
-------
inputs: Tuple
Inputs for grover finetuning model
labels: Dict[str, torch.Tensor]
Labels for grover finetuning task
w: Any
Weights of data point
"""
X, y, w = batch
batchgraph = BatchGraphData(X[0])
if y is not None:
labels = torch.FloatTensor(y[0]).to(self.device)
else:
labels = None
f_atoms, f_bonds, a2b, b2a, b2revb, a2a, a_scope, b_scope, _, additional_features = extract_grover_attributes(
batchgraph)
inputs = (f_atoms.to(self.device), f_bonds.to(self.device),
a2b.to(self.device), b2a.to(self.device),
b2revb.to(self.device), a_scope.to(self.device),
b_scope.to(self.device),
a2a.to(self.device)), additional_features.to(self.device)
return inputs, labels, w
def _pretraining_loss(self,
inputs,
labels,
weights: Optional[List[Sequence]] = None,
dist_coff: float = 0.1):
"""Grover pretraining loss
The Grover pretraining loss function performs a forward iteration and returns
the loss value.
Parameters
----------
inputs: Tuple[torch.Tensor]
extracted grover graph attributes
labels: Dict[str, torch.Tensor]
Target predictions
weights: List[Sequence]
Weight to assign to each datapoint
dist_coff: float, default: 0.1
Loss term weight for weighting closeness between embedding generated from atom hidden state and bond hidden state in atom vocabulary and bond vocabulary prediction tasks.
Returns
-------
loss: torch.Tensor
loss value
"""
_, _, _, _, _, atom_scope, bond_scope, _ = inputs
av_task_atom_pred, av_task_bond_pred, bv_task_atom_pred, bv_task_bond_pred, fg_prediction_atom_from_atom, fg_prediction_atom_from_bond, fg_prediction_bond_from_atom, fg_prediction_bond_from_bond = self.model(
inputs)
loss = self.loss(av_task_atom_pred,
av_task_bond_pred,
bv_task_atom_pred,
bv_task_bond_pred,
fg_prediction_atom_from_atom,
fg_prediction_atom_from_bond,
fg_prediction_bond_from_atom,
fg_prediction_bond_from_bond,
labels['av_task'],
labels['bv_task'],
labels['fg_task'],
weights=weights,
dist_coff=dist_coff) # type: ignore
return loss
def _finetuning_loss(self, inputs, labels, weights, dist_coff=0.1):
"""Loss function for finetuning task
The finetuning loss is a binary cross entropy loss for classification mode and
mean squared error loss for regression mode. During training of the model, apart from
learning the data distribution, the loss function is also used to make the embedding
generated from atom hidden state and bond hidden state close to each other.
Parameters
----------
inputs: Tuple[torch.Tensor]
extracted grover graph attributes
labels: Dict[str, torch.Tensor]
Target predictions
weights: List[Sequence]
Weight to assign to each datapoint
dist_coff: float, default: 0.1
Loss term weight for weighting closeness between embedding generated from atom hidden state and bond hidden state
Returns
-------
loss: torch.Tensor
loss value
"""
if self.mode == 'classification':
pred_loss = nn.BCEWithLogitsLoss()
elif self.mode == 'regression':
pred_loss = nn.MSELoss()
preds = self.model(inputs)
if not self.model.training:
# in eval mode.
return pred_loss(preds, labels)
elif self.model.training:
dist_loss = nn.MSELoss()
dist = dist_loss(preds[0], preds[1])
pred_loss1 = pred_loss(preds[0].mean(axis=-1).unsqueeze(-1), labels)
pred_loss2 = pred_loss(preds[1].mean(axis=-1).unsqueeze(-1), labels)
return pred_loss1 + pred_loss2 + dist_coff * dist
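# Note: in training mode the finetuning objective above is
#   L = pred_loss(mean(atom_ffn_output)) + pred_loss(mean(bond_ffn_output))
#       + dist_coff * MSE(atom_ffn_output, bond_ffn_output)
# which, besides fitting the labels, encourages the atom-derived and
# bond-derived representations to agree with each other.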
def _create_ffn(self):
"""Creates feed-forward network for the finetune task"""
if self.features_only:
first_linear_dim = self.features_size + self.features_dim
else:
if self.self_attention:
first_linear_dim = self.hidden_size * self.attn_out_size
# Also adding features, this is optional
first_linear_dim += self.features_dim
else:
first_linear_dim = self.hidden_size + self.features_dim
dropout = nn.Dropout(self.dropout)
if self.activation == 'relu':
activation = nn.ReLU()
else:
raise ValueError('Only relu activation is supported')
ffn = [dropout, nn.Linear(first_linear_dim, self.hidden_size)]
for i in range(self.ffn_num_layers - 1):
ffn.extend([
activation, dropout,
nn.Linear(self.hidden_size, self.hidden_size)
])
return nn.Sequential(*ffn)
@staticmethod
def atom_vocab_random_mask(atom_vocab: GroverAtomVocabularyBuilder,
smiles: List[str]) -> List[int]:
"""Random masking of atom labels from vocabulary
For every atom in the list of SMILES string, the algorithm fetches the atoms
context (vocab label) from the vocabulary provided and returns the vocabulary
labels with a random masking (probability of masking = 0.15).
Parameters
----------
atom_vocab: GroverAtomVocabularyBuilder
atom vocabulary
smiles: List[str]
a list of smiles string
Returns
-------
vocab_label: List[int]
atom vocab label with random masking
Example
-------
>>> import deepchem as dc
>>> from deepchem.models.torch_models.grover import GroverModel
>>> from deepchem.feat.vocabulary_builders import GroverAtomVocabularyBuilder
>>> smiles = np.array(['CC', 'CCC'])
>>> dataset = dc.data.NumpyDataset(X=smiles)
>>> atom_vocab = GroverAtomVocabularyBuilder()
>>> atom_vocab.build(dataset)
>>> vocab_labels = GroverModel.atom_vocab_random_mask(atom_vocab, smiles)
"""
vocab_label = []
percent = 0.15
for smi in smiles:
mol = Chem.MolFromSmiles(smi)
mlabel = [0] * mol.GetNumAtoms()
n_mask = math.ceil(mol.GetNumAtoms() * percent)
perm = np.random.permutation(mol.GetNumAtoms())[:n_mask]
for p in perm:
atom = mol.GetAtomWithIdx(int(p))
mlabel[p] = atom_vocab.stoi.get(
GroverAtomVocabularyBuilder.atom_to_vocab(mol, atom),
atom_vocab.other_index)
vocab_label.extend(mlabel)
return vocab_label
@staticmethod
def bond_vocab_random_mask(bond_vocab: GroverBondVocabularyBuilder,
smiles: List[str]) -> List[int]:
"""Random masking of bond labels from bond vocabulary
For every bond in the list of SMILES string, the algorithm fetches the bond
context (vocab label) from the vocabulary provided and returns the vocabulary
labels with a random masking (probability of masking = 0.15).
Parameters
----------
bond_vocab: GroverBondVocabularyBuilder
bond vocabulary
smiles: List[str]
a list of smiles string
Returns
-------
vocab_label: List[int]
bond vocab label with random masking
Example
-------
>>> import deepchem as dc
>>> from deepchem.models.torch_models.grover import GroverModel
>>> from deepchem.feat.vocabulary_builders import GroverBondVocabularyBuilder
>>> smiles = np.array(['CC', 'CCC'])
>>> dataset = dc.data.NumpyDataset(X=smiles)
>>> bond_vocab = GroverBondVocabularyBuilder()
>>> bond_vocab.build(dataset)
>>> vocab_labels = GroverModel.bond_vocab_random_mask(bond_vocab, smiles)
"""
vocab_label = []
percent = 0.15
for smi in smiles:
mol = Chem.MolFromSmiles(smi)
nm_atoms = mol.GetNumAtoms()
nm_bonds = mol.GetNumBonds()
mlabel = []
n_mask = math.ceil(nm_bonds * percent)
perm = np.random.permutation(nm_bonds)[:n_mask]
virtual_bond_id = 0
for a1 in range(nm_atoms):
for a2 in range(a1 + 1, nm_atoms):
bond = mol.GetBondBetweenAtoms(a1, a2)
if bond is None:
continue
if virtual_bond_id in perm:
label = bond_vocab.stoi.get(
GroverBondVocabularyBuilder.bond_to_vocab(
mol, bond), bond_vocab.other_index)
mlabel.extend([label])
else:
mlabel.extend([0])
virtual_bond_id += 1
vocab_label.extend(mlabel)
return vocab_label
def restore( # type: ignore
self,
checkpoint: Optional[str] = None,
model_dir: Optional[str] = None) -> None: # type: ignore
"""Reload the values of all variables from a checkpoint file.
Parameters
----------
checkpoint: str
the path to the checkpoint file to load. If this is None, the most recent
checkpoint will be chosen automatically. Call get_checkpoints() to get a
list of all available checkpoints.
model_dir: str, default None
Directory to restore checkpoint from. If None, use self.model_dir. If
checkpoint is not None, this is ignored.
"""
# FIXME I am rewriting restore because the restore method in parent class
# does not restore layers which are not components. This restore method
can restore a full model.
self._ensure_built()
if checkpoint is None:
checkpoints = sorted(self.get_checkpoints(model_dir))
if len(checkpoints) == 0:
raise ValueError('No checkpoint found')
checkpoint = checkpoints[0]
data = torch.load(checkpoint)
self.model.load_state_dict(data['model'])
self._pytorch_optimizer.load_state_dict(data['optimizer_state_dict'])
self._global_step = data['global_step']
<file_sep>import os
import deepchem as dc
import pandas as pd
import numpy as np
def test_log_trans_1D():
"""Test in 1D case without explicit task variable."""
X = np.random.rand(10, 10)
y = np.random.rand(10)
dataset = dc.data.NumpyDataset(X, y)
trans = dc.trans.LogTransformer(transform_y=True)
log_dataset = trans.transform(dataset)
assert np.isclose(np.log(y + 1), log_dataset.y).all()
untrans_y = trans.untransform(log_dataset.y)
assert np.isclose(untrans_y, y).all()
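# As the assertions above show, LogTransformer maps values v to log(1 + v) and
# untransform applies the inverse (exp(v) - 1), so a transform/untransform round
# trip recovers the original targets.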
def load_feat_multitask_data():
"""Load example with numerical features, tasks."""
current_dir = os.path.dirname(os.path.abspath(__file__))
features = ["feat0", "feat1", "feat2", "feat3", "feat4", "feat5"]
featurizer = dc.feat.UserDefinedFeaturizer(features)
tasks = ["task0", "task1", "task2", "task3", "task4", "task5"]
input_file = os.path.join(current_dir, "assets/feat_multitask_example.csv")
loader = dc.data.UserCSVLoader(tasks=tasks,
featurizer=featurizer,
id_field="id")
return loader.create_dataset(input_file)
def load_solubility_data():
"""Loads solubility dataset"""
current_dir = os.path.dirname(os.path.abspath(__file__))
featurizer = dc.feat.CircularFingerprint(size=1024)
tasks = ["log-solubility"]
input_file = os.path.join(current_dir,
"../../models/tests/assets/example.csv")
loader = dc.data.CSVLoader(tasks=tasks,
feature_field="smiles",
featurizer=featurizer)
return loader.create_dataset(input_file)
def test_y_log_transformer():
"""Tests logarithmic data transformer."""
solubility_dataset = load_solubility_data()
log_transformer = dc.trans.LogTransformer(transform_y=True,
dataset=solubility_dataset)
X, y, w, ids = (solubility_dataset.X, solubility_dataset.y,
solubility_dataset.w, solubility_dataset.ids)
solubility_dataset = log_transformer.transform(solubility_dataset)
X_t, y_t, w_t, ids_t = (solubility_dataset.X, solubility_dataset.y,
solubility_dataset.w, solubility_dataset.ids)
# Check ids are unchanged.
for id_elt, id_t_elt in zip(ids, ids_t):
assert id_elt == id_t_elt
# Check X is unchanged since this is a y transformer
np.testing.assert_allclose(X, X_t)
# Check w is unchanged since this is a y transformer
np.testing.assert_allclose(w, w_t)
# Check y is now a logarithmic version of itself
np.testing.assert_allclose(y_t, np.log(y + 1))
# Check that untransform does the right thing.
np.testing.assert_allclose(log_transformer.untransform(y_t), y)
def test_X_log_transformer():
"""Tests logarithmic data transformer."""
solubility_dataset = load_solubility_data()
log_transformer = dc.trans.LogTransformer(transform_X=True,
dataset=solubility_dataset)
X, y, w, ids = (solubility_dataset.X, solubility_dataset.y,
solubility_dataset.w, solubility_dataset.ids)
solubility_dataset = log_transformer.transform(solubility_dataset)
X_t, y_t, w_t, ids_t = (solubility_dataset.X, solubility_dataset.y,
solubility_dataset.w, solubility_dataset.ids)
# Check ids are unchanged.
for id_elt, id_t_elt in zip(ids, ids_t):
assert id_elt == id_t_elt
# Check y is unchanged since this is an X transformer
np.testing.assert_allclose(y, y_t)
# Check w is unchanged since this is an X transformer
np.testing.assert_allclose(w, w_t)
# Check X is now a logarithmic version of itself
np.testing.assert_allclose(X_t, np.log(X + 1))
# Check that untransform does the right thing.
np.testing.assert_allclose(log_transformer.untransform(X_t), X)
def test_y_log_transformer_select():
"""Tests logarithmic data transformer with selection."""
current_dir = os.path.dirname(os.path.abspath(__file__))
multitask_dataset = load_feat_multitask_data()
dfe = pd.read_csv(
os.path.join(current_dir, "assets/feat_multitask_example.csv"))
tid = []
tasklist = ["task0", "task3", "task4", "task5"]
first_task = "task0"
for task in tasklist:
tiid = dfe.columns.get_loc(task) - dfe.columns.get_loc(first_task)
tid = np.concatenate((tid, np.array([tiid])))
tasks = tid.astype(int)
log_transformer = dc.trans.LogTransformer(transform_y=True,
tasks=tasks,
dataset=multitask_dataset)
X, y, w, ids = (multitask_dataset.X, multitask_dataset.y,
multitask_dataset.w, multitask_dataset.ids)
multitask_dataset = log_transformer.transform(multitask_dataset)
X_t, y_t, w_t, ids_t = (multitask_dataset.X, multitask_dataset.y,
multitask_dataset.w, multitask_dataset.ids)
# Check ids are unchanged.
for id_elt, id_t_elt in zip(ids, ids_t):
assert id_elt == id_t_elt
# Check X is unchanged since this is a y transformer
np.testing.assert_allclose(X, X_t)
# Check w is unchanged since this is a y transformer
np.testing.assert_allclose(w, w_t)
# Check y is now a logarithmic version of itself
np.testing.assert_allclose(y_t[:, tasks], np.log(y[:, tasks] + 1))
# Check that untransform does the right thing.
np.testing.assert_allclose(log_transformer.untransform(y_t), y)
def test_X_log_transformer_select():
# Tests logarithmic data transformer with selection.
current_dir = os.path.dirname(os.path.abspath(__file__))
multitask_dataset = load_feat_multitask_data()
dfe = pd.read_csv(
os.path.join(current_dir, "assets/feat_multitask_example.csv"))
fid = []
featurelist = ["feat0", "feat1", "feat2", "feat3", "feat5"]
first_feature = "feat0"
for feature in featurelist:
fiid = dfe.columns.get_loc(feature) - dfe.columns.get_loc(first_feature)
fid = np.concatenate((fid, np.array([fiid])))
features = fid.astype(int)
log_transformer = dc.trans.LogTransformer(transform_X=True,
features=features,
dataset=multitask_dataset)
X, y, w, ids = (multitask_dataset.X, multitask_dataset.y,
multitask_dataset.w, multitask_dataset.ids)
multitask_dataset = log_transformer.transform(multitask_dataset)
X_t, y_t, w_t, ids_t = (multitask_dataset.X, multitask_dataset.y,
multitask_dataset.w, multitask_dataset.ids)
# Check ids are unchanged.
for id_elt, id_t_elt in zip(ids, ids_t):
assert id_elt == id_t_elt
    # Check y is unchanged since this is an X transformer
np.testing.assert_allclose(y, y_t)
    # Check w is unchanged since this is an X transformer
np.testing.assert_allclose(w, w_t)
    # Check X is now a logarithmic version of itself
np.testing.assert_allclose(X_t[:, features], np.log(X[:, features] + 1))
# Check that untransform does the right thing.
np.testing.assert_allclose(log_transformer.untransform(X_t), X)
<file_sep>from deepchem.feat.vocabulary_builders.vocabulary_builder import VocabularyBuilder
from typing import List, TYPE_CHECKING
if TYPE_CHECKING:
import tokenizers
import transformers
class HuggingFaceVocabularyBuilder(VocabularyBuilder):
"""Wrapper for building vocabulary from HuggingFace algorithms
    The wrapper class can be used to run HuggingFace vocabulary-building
    algorithms from within DeepChem.
Parameters
----------
model: tokenizers.models.Model
The core tokenization algorithm to learn the vocabulary.
    trainer: tokenizers.trainers.Trainer
The trainer capable of training the model
Example
-------
>>> from tokenizers import models, trainers
>>> from tokenizers.pre_tokenizers import Whitespace
>>> model = models.BPE(unk_token="[UNK]")
>>> special_tokens = ["[UNK]"]
>>> trainer = trainers.BpeTrainer(vocab_size=20, special_tokens=special_tokens)
>>> vb = HuggingFaceVocabularyBuilder(model=model, trainer=trainer)
"""
def __init__(self, model: 'tokenizers.models.Model',
trainer: 'tokenizers.trainers.Trainer'):
from tokenizers import Tokenizer
self.model = model
self.trainer = trainer
self.tokenizer = Tokenizer(model)
# superclass accepts a DeepChem dataset while huggingface vocabulary builders
    # read data from files
def build(self, paths: List[str]): # type: ignore
"""Trains the algorithm on the vocabulary
Parameters
----------
paths: List[str]
A list of file paths for training
"""
self.tokenizer.train(paths, self.trainer)
@classmethod
def load(
cls, fname: str
) -> 'transformers.tokenization_utils_fast.PreTrainedTokenizerFast':
"""Loads vocabulary from a vocabulary file
Parameters
----------
fname: str
Filename to load vocabulary from
"""
from transformers import PreTrainedTokenizerFast
tokenizer = PreTrainedTokenizerFast(tokenizer_file=fname)
return tokenizer
def save(self, fname: str) -> None:
"""Saves vocabulary to a file
Parameters
----------
fname: str
A json file path
"""
self.tokenizer.save(fname)
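
# A minimal end-to-end sketch of the build/save/load cycle described in the
# docstrings above, guarded so it only runs when this module is executed
# directly.  The corpus contents and the file names used here are made up for
# illustration, and the optional `tokenizers`/`transformers` packages must be
# installed.
if __name__ == '__main__':
    import os
    import tempfile
    from tokenizers import models, trainers

    # Write a tiny throw-away corpus to train on.
    tmpdir = tempfile.mkdtemp()
    corpus = os.path.join(tmpdir, 'example_corpus.txt')
    with open(corpus, 'w') as f:
        f.write('CCO CCN CCC c1ccccc1\n')

    model = models.BPE(unk_token="[UNK]")
    trainer = trainers.BpeTrainer(vocab_size=30, special_tokens=["[UNK]"])
    vb = HuggingFaceVocabularyBuilder(model=model, trainer=trainer)
    vb.build([corpus])  # learn the vocabulary from the corpus file

    vocab_file = os.path.join(tmpdir, 'bpe_vocab.json')
    vb.save(vocab_file)  # serialize the trained tokenizer to JSON

    # Reload as a HuggingFace PreTrainedTokenizerFast and use it.
    tokenizer = HuggingFaceVocabularyBuilder.load(vocab_file)
    print(tokenizer.tokenize('CCO'))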
<file_sep>import os
import deepchem as dc
import numpy as np
def load_gaussian_cdf_data():
"""Load example with numbers sampled from Gaussian normal distribution.
Each feature and task is a column of values that is sampled
from a normal distribution of mean 0, stdev 1."""
current_dir = os.path.dirname(os.path.abspath(__file__))
features = ["feat0", "feat1"]
featurizer = dc.feat.UserDefinedFeaturizer(features)
tasks = ["task0", "task1"]
input_file = os.path.join(current_dir, "assets/gaussian_cdf_example.csv")
loader = dc.data.UserCSVLoader(tasks=tasks,
featurizer=featurizer,
id_field="id")
return loader.create_dataset(input_file)
def test_power_X_transformer():
"""Test Power transformer on Gaussian normal dataset."""
N = 10
n_feat = 2
powers = [1, 2, 0.5]
X = np.random.rand(N, n_feat)
y = np.random.normal(size=(N,))
gaussian_dataset = dc.data.NumpyDataset(X, y)
powers = [1, 2, 0.5]
power_transformer = dc.trans.PowerTransformer(transform_X=True,
powers=powers)
X, y, w, ids = (gaussian_dataset.X, gaussian_dataset.y, gaussian_dataset.w,
gaussian_dataset.ids)
gaussian_dataset2 = power_transformer.transform(gaussian_dataset)
X_t, y_t, w_t, ids_t = (gaussian_dataset2.X, gaussian_dataset2.y,
gaussian_dataset2.w, gaussian_dataset2.ids)
# Check ids are unchanged.
for id_elt, id_t_elt in zip(ids, ids_t):
assert id_elt == id_t_elt
# Check y is unchanged since this is an X transformer
np.testing.assert_allclose(y.flatten(), y_t.flatten())
# Check w is unchanged since this is an X transformer
np.testing.assert_allclose(w, w_t)
# Check X is now holding the proper values in each column.
np.testing.assert_allclose(X_t.shape[1], len(powers) * X.shape[1])
np.testing.assert_allclose(X, X_t[:, :2])
np.testing.assert_allclose(np.power(X, 2), X_t[:, 2:4])
np.testing.assert_allclose(np.power(X, 0.5), X_t[:, 4:])
def test_power_y_transformer():
"""Test Power transformer on Gaussian normal dataset."""
N = 10
n_feat = 2
powers = [1, 2, 0.5]
X = np.random.rand(N, n_feat)
y = np.random.rand(N)
gaussian_dataset = dc.data.NumpyDataset(X, y)
power_transformer = dc.trans.PowerTransformer(transform_y=True,
powers=powers)
X, y, w, ids = (gaussian_dataset.X, gaussian_dataset.y, gaussian_dataset.w,
gaussian_dataset.ids)
gaussian_dataset2 = power_transformer.transform(gaussian_dataset)
X_t, y_t, w_t, ids_t = (gaussian_dataset2.X, gaussian_dataset2.y,
gaussian_dataset2.w, gaussian_dataset2.ids)
# Check ids are unchanged.
for id_elt, id_t_elt in zip(ids, ids_t):
assert id_elt == id_t_elt
    # Check X is unchanged since this is a y transformer
np.testing.assert_allclose(X, X_t)
    # Check w is unchanged since this is a y transformer
np.testing.assert_allclose(w, w_t)
# Check y is now holding the proper values in each column.
np.testing.assert_allclose(y_t.shape[1], len(powers))
np.testing.assert_allclose(y, y_t[:, :1].flatten())
np.testing.assert_allclose(np.power(y, 2), y_t[:, 1:2].flatten())
np.testing.assert_allclose(np.power(y, 0.5), y_t[:, 2:].flatten())
# Check that untransform does the right thing.
np.testing.assert_allclose(power_transformer.untransform(y_t).flatten(), y)
<file_sep>import os
import unittest
import numpy as np
from deepchem.utils.rdkit_utils import load_molecule
from deepchem.utils.rdkit_utils import compute_ring_center
from deepchem.utils.rdkit_utils import compute_ring_normal
from deepchem.utils.noncovalent_utils import is_pi_parallel
from deepchem.utils.noncovalent_utils import is_pi_t
from deepchem.utils.noncovalent_utils import compute_pi_stack
from deepchem.utils.noncovalent_utils import is_cation_pi
from deepchem.utils.noncovalent_utils import compute_cation_pi
from deepchem.utils.noncovalent_utils import compute_binding_pocket_cation_pi
class TestPiInteractions(unittest.TestCase):
def setUp(self):
current_dir = os.path.dirname(os.path.realpath(__file__))
# simple flat ring
from rdkit.Chem import MolFromSmiles
from rdkit.Chem.rdDepictor import Compute2DCoords
self.cycle4 = MolFromSmiles('C1CCC1')
# self.cycle4.Compute2DCoords()
Compute2DCoords(self.cycle4)
# load and sanitize two real molecules
_, self.prot = load_molecule(os.path.join(
current_dir, '../../feat/tests/data/3ws9_protein_fixer_rdkit.pdb'),
add_hydrogens=False,
calc_charges=False,
sanitize=True)
_, self.lig = load_molecule(os.path.join(
current_dir, '../../feat//tests/data/3ws9_ligand.sdf'),
add_hydrogens=False,
calc_charges=False,
sanitize=True)
def test_compute_ring_center(self):
self.assertTrue(
np.allclose(compute_ring_center(self.cycle4, range(4)), 0))
def test_compute_ring_normal(self):
normal = compute_ring_normal(self.cycle4, range(4))
self.assertTrue(
np.allclose(np.abs(normal / np.linalg.norm(normal)), [0, 0, 1]))
def test_is_pi_parallel(self):
ring1_center = np.array([0.0, 0.0, 0.0])
ring2_center_true = np.array([4.0, 0.0, 0.0])
ring2_center_false = np.array([10.0, 0.0, 0.0])
ring1_normal_true = np.array([1.0, 0.0, 0.0])
ring1_normal_false = np.array([0.0, 1.0, 0.0])
for ring2_normal in (np.array([2.0, 0, 0]), np.array([-3.0, 0, 0])):
# parallel normals
self.assertTrue(
is_pi_parallel(ring1_center, ring1_normal_true,
ring2_center_true, ring2_normal))
# perpendicular normals
self.assertFalse(
is_pi_parallel(ring1_center, ring1_normal_false,
ring2_center_true, ring2_normal))
# too far away
self.assertFalse(
is_pi_parallel(ring1_center, ring1_normal_true,
ring2_center_false, ring2_normal))
def test_is_pi_t(self):
ring1_center = np.array([0.0, 0.0, 0.0])
ring2_center_true = np.array([4.0, 0.0, 0.0])
ring2_center_false = np.array([10.0, 0.0, 0.0])
ring1_normal_true = np.array([0.0, 1.0, 0.0])
ring1_normal_false = np.array([1.0, 0.0, 0.0])
for ring2_normal in (np.array([2.0, 0, 0]), np.array([-3.0, 0, 0])):
# perpendicular normals
self.assertTrue(
is_pi_t(ring1_center, ring1_normal_true, ring2_center_true,
ring2_normal))
# parallel normals
self.assertFalse(
is_pi_t(ring1_center, ring1_normal_false, ring2_center_true,
ring2_normal))
# too far away
self.assertFalse(
is_pi_t(ring1_center, ring1_normal_true, ring2_center_false,
ring2_normal))
def test_compute_pi_stack(self):
# order of the molecules shouldn't matter
dicts1 = compute_pi_stack(self.prot, self.lig)
dicts2 = compute_pi_stack(self.lig, self.prot)
for i, j in ((0, 2), (1, 3)):
self.assertEqual(dicts1[i], dicts2[j])
self.assertEqual(dicts1[j], dicts2[i])
# with this criteria we should find both types of stacking
for d in compute_pi_stack(self.lig,
self.prot,
dist_cutoff=7,
angle_cutoff=40.):
self.assertGreater(len(d), 0)
def test_is_cation_pi(self):
cation_position = np.array([[2.0, 0.0, 0.0]])
ring_center_true = np.array([4.0, 0.0, 0.0])
ring_center_false = np.array([10.0, 0.0, 0.0])
ring_normal_true = np.array([1.0, 0.0, 0.0])
ring_normal_false = np.array([0.0, 1.0, 0.0])
# parallel normals
self.assertTrue(
is_cation_pi(cation_position, ring_center_true, ring_normal_true))
# perpendicular normals
self.assertFalse(
is_cation_pi(cation_position, ring_center_true, ring_normal_false))
# too far away
self.assertFalse(
is_cation_pi(cation_position, ring_center_false, ring_normal_true))
# def test_compute_cation_pi(self):
# # TODO(rbharath): find better example, currently dicts are empty
# dicts1 = compute_cation_pi(self.prot, self.lig)
# dicts2 = compute_cation_pi(self.lig, self.prot)
def test_compute_binding_pocket_cation_pi(self):
# TODO find better example, currently dicts are empty
prot_dict, lig_dict = compute_binding_pocket_cation_pi(
self.prot, self.lig)
exp_prot_dict, exp_lig_dict = compute_cation_pi(self.prot, self.lig)
add_lig, add_prot = compute_cation_pi(self.lig, self.prot)
for exp_dict, to_add in ((exp_prot_dict, add_prot), (exp_lig_dict,
add_lig)):
for atom_idx, count in to_add.items():
if atom_idx not in exp_dict:
exp_dict[atom_idx] = count
else:
exp_dict[atom_idx] += count
self.assertEqual(prot_dict, exp_prot_dict)
self.assertEqual(lig_dict, exp_lig_dict)
def test_compute_hydrogen_bonds(self):
pass
<file_sep>import deepchem as dc
import numpy as np
import pytest
try:
import torch
import deepchem.models.torch_models.layers as torch_layers
has_torch = True
except:
has_torch = False
@pytest.mark.torch
def test_weave_gather_without_compression():
"""Test invoking the torch equivalent of WeaveGather."""
n_atoms = 4 # In CCC and C, there are 4 atoms
raw_smiles = ['CCC', 'C']
from rdkit import Chem
mols = [Chem.MolFromSmiles(s) for s in raw_smiles]
featurizer = dc.feat.WeaveFeaturizer()
mols = featurizer.featurize(mols)
atom_feat = []
atom_split = []
for im, mol in enumerate(mols):
n_atoms = mol.get_num_atoms()
atom_split.extend([im] * n_atoms)
# atom features
atom_feat.append(mol.get_atom_features())
inputs = [
np.array(np.concatenate(atom_feat, axis=0), dtype=np.float32),
np.array(atom_split)
]
torch.set_printoptions(precision=8)
# Try without compression
gather = torch_layers.WeaveGather(batch_size=2,
n_input=75,
gaussian_expand=True)
# Outputs should be [mol1_vec, mol2_vec]
outputs = gather(inputs)
assert len(outputs) == 2
assert np.array(outputs[0]).shape == (11 * 75,)
assert np.array(outputs[1]).shape == (11 * 75,)
assert np.allclose(
outputs.numpy(),
np.load(
"deepchem/models/tests/assets/weavegather_results_without_compression.npy"
),
atol=1e-4)
@pytest.mark.torch
def test_weave_gather_with_compression():
"""Test invoking the torch equivalent of WeaveGather."""
n_atoms = 4 # In CCC and C, there are 4 atoms
raw_smiles = ['CCC', 'C']
from rdkit import Chem
mols = [Chem.MolFromSmiles(s) for s in raw_smiles]
featurizer = dc.feat.WeaveFeaturizer()
mols = featurizer.featurize(mols)
atom_feat = []
atom_split = []
for im, mol in enumerate(mols):
n_atoms = mol.get_num_atoms()
atom_split.extend([im] * n_atoms)
# atom features
atom_feat.append(mol.get_atom_features())
inputs = [
np.array(np.concatenate(atom_feat, axis=0), dtype=np.float32),
np.array(atom_split)
]
torch.set_printoptions(precision=8)
# Try with compression
gather = torch_layers.WeaveGather(batch_size=2,
n_input=75,
gaussian_expand=True,
compress_post_gaussian_expansion=True)
gather.W = torch.from_numpy(
np.load("deepchem/models/tests/assets/weavegather_weights.npy"))
# Outputs should be [mol1_vec, mol2_vec]
outputs = gather(inputs)
assert len(outputs) == 2
assert np.array(outputs[0]).shape == (75,)
assert np.array(outputs[1]).shape == (75,)
assert np.allclose(
outputs.numpy(),
np.load(
"deepchem/models/tests/assets/weavegather_results_with_compression.npy"
),
atol=1e-4)
<file_sep>Installation
============
Stable version
--------------
Install deepchem via pip or conda by simply running,
.. code-block:: bash
pip install deepchem
or
.. code-block:: bash
conda install -c conda-forge deepchem
Nightly build version
---------------------
The nightly version is built from the HEAD of the DeepChem repository.
For using general utilities like MolNet, featurizers, datasets, etc., you can install deepchem via pip.
.. code-block:: bash
pip install --pre deepchem
Deepchem provides support for tensorflow, pytorch and jax, and each requires
an individual pip installation.
For using models with tensorflow dependencies, you install using
.. code-block:: bash
pip install --pre deepchem[tensorflow]
For using models with Pytorch dependencies, you install using
.. code-block:: bash
pip install --pre deepchem[torch]
For using models with Jax dependencies, you install using
.. code-block:: bash
pip install --pre deepchem[jax]
If GPU support is required, then make sure CUDA is installed and also install the desired deep learning framework using the links below before installing deepchem:

1. tensorflow - no framework-specific step is needed; having CUDA installed is sufficient
2. pytorch - https://pytorch.org/get-started/locally/#start-locally (an example command is shown below)
3. jax - https://github.com/google/jax#pip-installation-gpu-cuda
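
As an illustration, a CUDA-enabled PyTorch installation might look like the command below; the exact index URL depends on your CUDA version, so prefer the command generated by the PyTorch page linked above.

.. code-block:: bash

    pip install torch --index-url https://download.pytorch.org/whl/cu118
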
In :code:`zsh` square brackets are used for globbing/pattern matching. This means
you need to escape the square brackets in the above installation. You can do so by
including the dependencies in quotes like :code:`pip install --pre 'deepchem[jax]'`
Note: Support for jax is not available on Windows (jax is not officially supported on Windows).
Google Colab
------------
The fastest way to get up and running with DeepChem is to run it on
Google Colab. Check out one of the `DeepChem Tutorials`_ or this
`forum post`_ for Colab quick start guides.
Docker
------
If you want to install using a docker,
you can pull two kinds of images from `DockerHub`_.
- **deepchemio/deepchem:x.x.x**
- Image built by using a conda (x.x.x is a version of deepchem)
- This image is built when we push x.x.x. tag
  - Dockerfile is put in the `docker/conda-forge`_ directory
- **deepchemio/deepchem:latest**
- Image built from source codes
- This image is built every time we commit to the master branch
  - Dockerfile is put in the `docker/master`_ directory
First, you pull the image you want to use.
.. code-block:: bash
docker pull deepchemio/deepchem:latest
Then, you create a container based on the image.
.. code-block:: bash
docker run --rm -it deepchemio/deepchem:latest
If you want GPU support:
.. code-block:: bash
# If nvidia-docker is installed
nvidia-docker run --rm -it deepchemio/deepchem:latest
docker run --runtime nvidia --rm -it deepchemio/deepchem:latest
# If nvidia-container-toolkit is installed
docker run --gpus all --rm -it deepchemio/deepchem:latest
You are now in a docker container in which deepchem is installed.
You can start playing with it in the command line.
.. code-block:: bash
(deepchem) root@xxxxxxxxxxxxx:~/mydir# python
Python 3.6.10 |Anaconda, Inc.| (default, May 8 2020, 02:54:21)
[GCC 7.3.0] on linux
Type "help", "copyright", "credits" or "license" for more information.
>>> import deepchem as dc
If you want to check the tox21 benchmark:
.. code-block:: bash
# you can run our tox21 benchmark
(deepchem) root@xxxxxxxxxxxxx:~/mydir# wget https://raw.githubusercontent.com/deepchem/deepchem/master/examples/benchmark.py
(deepchem) root@xxxxxxxxxxxxx:~/mydir# python benchmark.py -d tox21 -m graphconv -s random
Jupyter Notebook
----------------------
**Following these steps will allow you to install and import DeepChem into a Jupyter notebook within a conda virtual environment.**
**Prerequisite**
- Shell: Bash, Zsh, PowerShell
- Conda: >4.6
First, please create a conda virtual environment (here it's named "deepchem-test") and activate it.
.. code-block:: bash
conda create --name deepchem-test
conda activate deepchem-test
Install DeepChem, Jupyter and matplotlib into the conda environment.
.. code-block:: bash
conda install -y -c conda-forge nb_conda_kernels matplotlib
pip install tensorflow
pip install --pre deepchem
You may need to use :code:`pip3` depending on your Python 3 pip installation. Install pip dependencies after deepchem-test is activated.
While the deepchem-test environment is activated, open Jupyter Notebook by running :code:`jupyter notebook`. Your terminal prompt should be prefixed with (deepchem-test).
Once Jupyter Notebook opens in a browser, select the new button, and select the environment "Python[conda env:deepchem-test]." This will open a notebook running in the deepchem-test conda virtual environment.
From source with conda
----------------------
**Installing via these steps will ensure you are installing from the source**.
**Prerequisite**
- Shell: Bash, Zsh, PowerShell
- Conda: >4.6
First, please clone the deepchem repository from GitHub.
.. code-block:: bash
git clone https://github.com/deepchem/deepchem.git
cd deepchem
Then, execute the shell script. The shell scripts require two arguments,
**python version** and **gpu/cpu**.
.. code-block:: bash
source scripts/install_deepchem_conda.sh 3.8 cpu
If you want GPU support (we support only CUDA 10.1):
.. code-block:: bash
source scripts/install_deepchem_conda.sh 3.8 gpu
If you are using Windows and PowerShell:
.. code-block:: ps1
.\scripts\install_deepchem_conda.ps1 3.7 cpu
| Sometimes, PowerShell scripts can't be executed due to restrictions in the Execution Policy.
| In that case, you can either change the Execution Policy or use the bypass argument.
.. code-block:: ps1
powershell -executionpolicy bypass -File .\scripts\install_deepchem_conda.ps1 3.7 cpu
| Before activating the deepchem environment, make sure conda has been initialized.
| Check if there is a :code:`(XXXX)` prefix in your command line.
| If not, use :code:`conda init <YOUR_SHELL_NAME>` to initialize it, then:
.. code-block:: bash
conda activate deepchem
pip install -e .
pytest -m "not slow" deepchem # optional
From source lightweight guide
-------------------------------------
**Installing via these steps will ensure you are installing from the source**.
**Prerequisite**
- Shell: Bash, Zsh, PowerShell
- Conda: >4.6
First, please clone the deepchem repository from GitHub.
.. code-block:: bash
git clone https://github.com/deepchem/deepchem.git
cd deepchem
We advise all users to work inside a conda environment, as follows:
.. code-block:: bash
conda create --name deepchem python=3.8
conda activate deepchem
pip install -e .
DeepChem provides different additional packages depending on usage & contribution.
If one also wants to build the tensorflow environment, add this
.. code-block:: bash
pip install -e .[tensorflow]
If one also wants to build the Pytorch environment, add this
.. code-block:: bash
pip install -e .[torch]
If one also wants to build the Jax environment, add this
.. code-block:: bash
pip install -e .[jax]
DeepChem has soft requirements, which can be installed on the fly during development inside the environment.
If you want to install all the soft dependencies at once, take a look at
`deepchem/requirements <https://github.com/deepchem/deepchem/tree/master/requirements>`_
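
For example, a single soft dependency such as RDKit can be added to the active environment on the fly:

.. code-block:: bash

    pip install rdkit
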
.. _`DeepChem Tutorials`: https://github.com/deepchem/deepchem/tree/master/examples/tutorials
.. _`forum post`: https://forum.deepchem.io/t/getting-deepchem-running-in-colab/81/7
.. _`DockerHub`: https://hub.docker.com/repository/docker/deepchemio/deepchem
.. _`docker/conda-forge`: https://github.com/deepchem/deepchem/tree/master/docker/conda-forge
.. _`docker/master`: https://github.com/deepchem/deepchem/tree/master/docker/master
<file_sep>"""Evaluation metrics."""
import numpy as np
import scipy.stats
from sklearn.metrics import matthews_corrcoef # noqa
from sklearn.metrics import recall_score # noqa
from sklearn.metrics import cohen_kappa_score
from sklearn.metrics import r2_score # noqa
from sklearn.metrics import mean_squared_error
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import precision_score # noqa
from sklearn.metrics import precision_recall_curve
from sklearn.metrics import auc
from sklearn.metrics import jaccard_score
from sklearn.metrics import f1_score
from sklearn.metrics import roc_auc_score # noqa
from sklearn.metrics import accuracy_score # noqa
from sklearn.metrics import balanced_accuracy_score # noqa
from sklearn.metrics import top_k_accuracy_score # noqa
# kappa_score is an alias for `sklearn.metrics.cohen_kappa_score`
kappa_score = cohen_kappa_score
def pearsonr(y: np.ndarray, y_pred: np.ndarray) -> float:
"""Computes Pearson correlation coefficient.
Parameters
----------
y: np.ndarray
ground truth array
y_pred: np.ndarray
predicted array
Returns
-------
float
The Pearson correlation coefficient.
"""
return scipy.stats.pearsonr(y, y_pred)[0]
def pearson_r2_score(y: np.ndarray, y_pred: np.ndarray) -> float:
"""Computes Pearson R^2 (square of Pearson correlation).
Parameters
----------
y: np.ndarray
ground truth array
y_pred: np.ndarray
predicted array
Returns
-------
float
The Pearson-R^2 score.
"""
return scipy.stats.pearsonr(y, y_pred)[0]**2
def jaccard_index(y: np.ndarray, y_pred: np.ndarray) -> float:
"""Computes Jaccard Index which is the Intersection Over Union metric
which is commonly used in image segmentation tasks.
DEPRECATED: WILL BE REMOVED IN A FUTURE VERSION OF DEEEPCHEM. USE `jaccard_score` instead.
Parameters
----------
y: np.ndarray
ground truth array
y_pred: np.ndarray
predicted array
Returns
-------
score: float
The jaccard index. A number between 0 and 1.
"""
return jaccard_score(y, y_pred)
def pixel_error(y: np.ndarray, y_pred: np.ndarray) -> float:
"""An error metric in case y, y_pred are images.
Defined as 1 - the maximal F-score of pixel similarity, or squared
Euclidean distance between the original and the result labels.
Parameters
----------
y: np.ndarray
ground truth array
y_pred: np.ndarray
predicted array
Returns
-------
score: float
The pixel-error. A number between 0 and 1.
"""
return 1 - f1_score(y, y_pred)
def prc_auc_score(y: np.ndarray, y_pred: np.ndarray) -> float:
"""Compute area under precision-recall curve
Parameters
----------
y: np.ndarray
        A numpy array of shape `(N, n_classes)` with one-hot encoded true labels
y_pred: np.ndarray
Of shape `(N, n_classes)` with class probabilities.
Returns
-------
float
The area under the precision-recall curve. A number between 0 and 1.
"""
precision, recall, _ = precision_recall_curve(y[:, 1], y_pred[:, 1])
return auc(recall, precision)
def rms_score(y_true: np.ndarray, y_pred: np.ndarray) -> float:
"""Computes RMS error."""
return np.sqrt(mean_squared_error(y_true, y_pred))
def mae_score(y_true: np.ndarray, y_pred: np.ndarray) -> float:
"""Computes MAE."""
return mean_absolute_error(y_true, y_pred)
def bedroc_score(y_true: np.ndarray, y_pred: np.ndarray, alpha: float = 20.0):
"""Compute BEDROC metric.
    BEDROC metric implemented according to Truchon and Bayly that modifies
    the ROC score by allowing for a factor of early recognition.
    Please see [1]_ for details.
Parameters
----------
y_true: np.ndarray
Binary class labels. 1 for positive class, 0 otherwise
y_pred: np.ndarray
Predicted labels
alpha: float, default 20.0
Early recognition parameter
Returns
-------
float
Value in [0, 1] that indicates the degree of early recognition
Notes
-----
This function requires RDKit to be installed.
References
----------
.. [1] Truchon et al. "Evaluating virtual screening methods: good and bad metrics
for the “early recognition” problem." Journal of chemical information and modeling
47.2 (2007): 488-508.
"""
try:
from rdkit.ML.Scoring.Scoring import CalcBEDROC
except ModuleNotFoundError:
raise ImportError("This function requires RDKit to be installed.")
# validation
assert len(y_true) == len(y_pred), 'Number of examples do not match'
assert np.array_equal(np.unique(y_true).astype(int),
[0, 1]), ('Class labels must be binary: %s' %
np.unique(y_true))
yt = np.asarray(y_true)
yp = np.asarray(y_pred)
yt = yt.flatten()
yp = yp[:, 1].flatten() # Index 1 because one_hot predictions
scores = list(zip(yt, yp))
scores = sorted(scores, key=lambda pair: pair[1], reverse=True)
return CalcBEDROC(scores, 0, alpha)
def concordance_index(y_true: np.ndarray, y_pred: np.ndarray) -> float:
"""Compute Concordance index.
    A statistical metric that indicates the quality of the predicted ranking.
    Please see [1]_ for details.
Parameters
----------
y_true: np.ndarray
        Continuous ground truth values
y_pred: np.ndarray
Predicted value
Returns
-------
float
score between [0,1]
References
----------
.. [1] Steck, Harald, et al. "On ranking in survival analysis:
Bounds on the concordance index." Advances in neural information processing systems (2008): 1209-1216.
"""
idx = np.argsort(y_true)
y_true = y_true[idx]
y_pred = y_pred[idx]
pairs = 0
correct_pairs = 0.0
for i in range(len(y_true)):
true_a = y_true[i]
pred_a = y_pred[i]
for j in range(i + 1, len(y_true)):
true_b = y_true[j]
pred_b = y_pred[j]
if true_a != true_b:
pairs += 1
if pred_a == pred_b:
correct_pairs += 0.5
elif pred_a < pred_b:
correct_pairs += true_a < true_b
else:
correct_pairs += true_a > true_b
    assert pairs > 0, 'No pairs for comparison'
return correct_pairs / pairs
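
# A small illustrative sketch of the expected input shapes for a few of the
# metrics defined above, using made-up numbers.  It is guarded so it only runs
# when this module is executed directly.
if __name__ == '__main__':
    # prc_auc_score expects one-hot labels and per-class probabilities of
    # shape (N, n_classes); column 1 is treated as the positive class.
    y_true = np.array([[1, 0], [0, 1], [0, 1], [1, 0]])
    y_prob = np.array([[0.8, 0.2], [0.3, 0.7], [0.4, 0.6], [0.9, 0.1]])
    print('PRC-AUC:', prc_auc_score(y_true, y_prob))

    # pearson_r2_score and concordance_index take flat 1-D arrays of
    # continuous values.
    y = np.array([1.0, 2.0, 3.0, 4.0])
    y_pred = np.array([1.1, 1.9, 3.2, 3.8])
    print('Pearson R^2:', pearson_r2_score(y, y_pred))
    print('Concordance index:', concordance_index(y, y_pred))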
<file_sep>import os
import numpy as np
import deepchem as dc
def load_gaussian_cdf_data():
"""Load example with numbers sampled from Gaussian normal distribution.
Each feature and task is a column of values that is sampled
from a normal distribution of mean 0, stdev 1."""
current_dir = os.path.dirname(os.path.abspath(__file__))
features = ["feat0", "feat1"]
featurizer = dc.feat.UserDefinedFeaturizer(features)
tasks = ["task0", "task1"]
input_file = os.path.join(current_dir, "assets/gaussian_cdf_example.csv")
loader = dc.data.UserCSVLoader(tasks=tasks,
featurizer=featurizer,
id_field="id")
return loader.create_dataset(input_file)
def test_cdf_X_transformer():
"""Test CDF transformer on Gaussian normal dataset."""
target = np.array(np.transpose(np.linspace(0., 1., 1001)))
target = np.transpose(np.array(np.append([target], [target], axis=0)))
gaussian_dataset = load_gaussian_cdf_data()
bins = 1001
cdf_transformer = dc.trans.CDFTransformer(transform_X=True,
dataset=gaussian_dataset,
bins=bins)
_, y, w, ids = (gaussian_dataset.X, gaussian_dataset.y, gaussian_dataset.w,
gaussian_dataset.ids)
gaussian_dataset = cdf_transformer.transform(gaussian_dataset)
X_t, y_t, w_t, ids_t = (gaussian_dataset.X, gaussian_dataset.y,
gaussian_dataset.w, gaussian_dataset.ids)
# Check ids are unchanged.
for id_elt, id_t_elt in zip(ids, ids_t):
assert id_elt == id_t_elt
# Check y is unchanged since this is an X transformer
np.testing.assert_allclose(y, y_t)
# Check w is unchanged since this is an X transformer
np.testing.assert_allclose(w, w_t)
# Check X is now holding the proper values when sorted.
sorted = np.sort(X_t, axis=0)
np.testing.assert_allclose(sorted, target)
def test_cdf_1d_y_transformer():
"""Test on a synthetic dataset we sample with 1d y."""
N = 10
n_feat = 5
n_bins = 100
X = np.random.normal(size=(N, n_feat))
y = np.random.normal(size=(N,))
dataset = dc.data.NumpyDataset(X, y)
cdftrans = dc.trans.CDFTransformer(transform_y=True,
dataset=dataset,
bins=n_bins)
dataset = cdftrans.transform(dataset)
def test_cdf_y_transformer():
"""Test CDF transformer on Gaussian normal dataset."""
target = np.array(np.transpose(np.linspace(0., 1., 1001)))
target = np.transpose(np.array(np.append([target], [target], axis=0)))
gaussian_dataset = load_gaussian_cdf_data()
bins = 1001
cdf_transformer = dc.trans.CDFTransformer(transform_y=True,
dataset=gaussian_dataset,
bins=bins)
X, y, w, ids = (gaussian_dataset.X, gaussian_dataset.y, gaussian_dataset.w,
gaussian_dataset.ids)
gaussian_dataset = cdf_transformer.transform(gaussian_dataset, bins=bins)
X_t, y_t, w_t, ids_t = (gaussian_dataset.X, gaussian_dataset.y,
gaussian_dataset.w, gaussian_dataset.ids)
# Check ids are unchanged.
for id_elt, id_t_elt in zip(ids, ids_t):
assert id_elt == id_t_elt
# Check X is unchanged since this is an y transformer
np.testing.assert_allclose(X, X_t)
# Check w is unchanged since this is an y transformer
np.testing.assert_allclose(w, w_t)
# Check y is now holding the proper values when sorted.
sorted = np.sort(y_t, axis=0)
np.testing.assert_allclose(sorted, target)
# Check that untransform does the right thing.
y_restored = cdf_transformer.untransform(y_t)
assert np.max(y_restored - y) < 1e-5
<file_sep># -*- coding: utf-8 -*-
"""
Created on Mon Mar 06 14:25:40 2017
@author: <NAME>
"""
import os
import time
import csv
import numpy as np
import deepchem
import pickle
from deepchem.molnet.run_benchmark_models import benchmark_classification, benchmark_regression
from deepchem.molnet.check_availability import CheckFeaturizer, CheckSplit
from deepchem.molnet.preset_hyper_parameters import hps
def run_benchmark(datasets,
model,
split=None,
metric=None,
direction=True,
featurizer=None,
n_features=0,
out_path='.',
hyper_parameters=None,
hyper_param_search=False,
max_iter=20,
search_range=2,
test=False,
reload=True,
seed=123):
"""
Run benchmark test on designated datasets with deepchem(or user-defined) model
Parameters
----------
datasets: list of string
choice of which datasets to use, should be: bace_c, bace_r, bbbp, chembl,
clearance, clintox, delaney, hiv, hopv, kaggle, lipo, muv, nci, pcba,
pdbbind, ppb, qm7, qm7b, qm8, qm9, sampl, sider, tox21, toxcast, uv, factors,
kinase
    model: string or user-defined model structure
        choice of which model to use, deepchem provides implementations of
        logistic regression, random forest, multitask network,
        bypass multitask network, irv, graph convolution;
        for a user-defined model, it should include the functions: fit, evaluate
split: string, optional (default=None)
choice of splitter function, None = using the default splitter
metric: string, optional (default=None)
choice of evaluation metrics, None = using the default metrics(AUC & R2)
direction: bool, optional(default=True)
Optimization direction when doing hyperparameter search
Maximization(True) or minimization(False)
featurizer: string or dc.feat.Featurizer, optional (default=None)
choice of featurization, None = using the default corresponding to model
(string only applicable to deepchem models)
n_features: int, optional(default=0)
depending on featurizers, redefined when using deepchem featurizers,
need to be specified for user-defined featurizers(if using deepchem models)
out_path: string, optional(default='.')
path of result file
hyper_parameters: dict, optional (default=None)
hyper parameters for designated model, None = use preset values
hyper_param_search: bool, optional(default=False)
whether to perform hyper parameter search, using gaussian process by default
max_iter: int, optional(default=20)
number of optimization trials
    search_range: int(float), optional(default=2)
optimization on [initial values / search_range,
initial values * search_range]
test: boolean, optional(default=False)
whether to evaluate on test set
reload: boolean, optional(default=True)
whether to save and reload featurized datasets
"""
for dataset in datasets:
if dataset in [
'bace_c', 'bbbp', 'clintox', 'hiv', 'muv', 'pcba', 'pcba_146',
'pcba_2475', 'sider', 'tox21', 'toxcast'
]:
mode = 'classification'
if metric is None:
metric = [
deepchem.metrics.Metric(deepchem.metrics.roc_auc_score,
np.mean),
]
elif dataset in [
'bace_r', 'chembl', 'clearance', 'delaney', 'hopv', 'kaggle',
'lipo', 'nci', 'pdbbind', 'ppb', 'qm7', 'qm7b', 'qm8', 'qm9',
'sampl', 'thermosol'
]:
mode = 'regression'
if metric is None:
metric = [
deepchem.metrics.Metric(deepchem.metrics.pearson_r2_score,
np.mean)
]
else:
raise ValueError('Dataset not supported')
if featurizer is None and isinstance(model, str):
# Assigning featurizer if not user defined
pair = (dataset, model)
if pair in CheckFeaturizer:
featurizer = CheckFeaturizer[pair][0]
n_features = CheckFeaturizer[pair][1]
else:
continue
if split not in [None] + CheckSplit[dataset]:
continue
loading_functions = {
'bace_c': deepchem.molnet.load_bace_classification,
'bace_r': deepchem.molnet.load_bace_regression,
'bbbp': deepchem.molnet.load_bbbp,
'chembl': deepchem.molnet.load_chembl,
'clearance': deepchem.molnet.load_clearance,
'clintox': deepchem.molnet.load_clintox,
'delaney': deepchem.molnet.load_delaney,
'factors': deepchem.molnet.load_factors,
'hiv': deepchem.molnet.load_hiv,
'hopv': deepchem.molnet.load_hopv,
'hppb': deepchem.molnet.load_hppb,
'kaggle': deepchem.molnet.load_kaggle,
'kinase': deepchem.molnet.load_kinase,
'lipo': deepchem.molnet.load_lipo,
'muv': deepchem.molnet.load_muv,
'nci': deepchem.molnet.load_nci,
'pcba': deepchem.molnet.load_pcba,
'pdbbind': deepchem.molnet.load_pdbbind,
'ppb': deepchem.molnet.load_ppb,
'qm7': deepchem.molnet.load_qm7,
'qm8': deepchem.molnet.load_qm8,
'qm9': deepchem.molnet.load_qm9,
'sampl': deepchem.molnet.load_sampl,
'sider': deepchem.molnet.load_sider,
'thermosol': deepchem.molnet.load_thermosol,
'tox21': deepchem.molnet.load_tox21,
'toxcast': deepchem.molnet.load_toxcast,
'uv': deepchem.molnet.load_uv,
}
print('-------------------------------------')
print('Benchmark on dataset: %s' % dataset)
print('-------------------------------------')
# loading datasets
if split is not None:
print('Splitting function: %s' % split)
tasks, all_dataset, transformers = loading_functions[dataset](
featurizer=featurizer, split=split, reload=reload)
else:
tasks, all_dataset, transformers = loading_functions[dataset](
featurizer=featurizer, reload=reload)
train_dataset, valid_dataset, test_dataset = all_dataset
time_start_fitting = time.time()
train_score = {}
valid_score = {}
test_score = {}
if hyper_param_search:
if hyper_parameters is None:
hyper_parameters = hps[model]
search_mode = deepchem.hyper.GaussianProcessHyperparamOpt(model)
hyper_param_opt, _ = search_mode.hyperparam_search(
hyper_parameters,
train_dataset,
valid_dataset,
transformers,
metric,
direction=direction,
n_features=n_features,
n_tasks=len(tasks),
max_iter=max_iter,
search_range=search_range)
hyper_parameters = hyper_param_opt
if isinstance(model, str):
if mode == 'classification':
train_score, valid_score, test_score = benchmark_classification(
train_dataset,
valid_dataset,
test_dataset,
tasks,
transformers,
n_features,
metric,
model,
test=test,
hyper_parameters=hyper_parameters,
seed=seed)
elif mode == 'regression':
train_score, valid_score, test_score = benchmark_regression(
train_dataset,
valid_dataset,
test_dataset,
tasks,
transformers,
n_features,
metric,
model,
test=test,
hyper_parameters=hyper_parameters,
seed=seed)
else:
model.fit(train_dataset)
train_score['user_defined'] = model.evaluate(
train_dataset, metric, transformers)
valid_score['user_defined'] = model.evaluate(
valid_dataset, metric, transformers)
if test:
test_score['user_defined'] = model.evaluate(
test_dataset, metric, transformers)
time_finish_fitting = time.time()
with open(os.path.join(out_path, 'results.csv'), 'a') as f:
writer = csv.writer(f)
model_name = list(train_score.keys())[0]
for i in train_score[model_name]:
output_line = [
dataset,
str(split), mode, model_name, i, 'train',
train_score[model_name][i], 'valid',
valid_score[model_name][i]
]
if test:
output_line.extend(['test', test_score[model_name][i]])
output_line.extend([
'time_for_running', time_finish_fitting - time_start_fitting
])
writer.writerow(output_line)
if hyper_param_search:
with open(os.path.join(out_path, dataset + model + '.pkl'),
                      'wb') as f:
pickle.dump(hyper_parameters, f)
#
# Note by @XericZephyr. Reason why I spun off this function:
# 1. Some model needs dataset information.
# 2. It offers us possibility to **cache** the dataset
# if the featurizer runs very slow, e.g., GraphConv.
# 2+. The cache can even happen at Travis CI to accelerate
# CI testing.
#
def load_dataset(dataset, featurizer, split='random'):
"""
Load specific dataset for benchmark.
Parameters
----------
dataset: string
choice of which datasets to use, should be: tox21, muv, sider,
toxcast, pcba, delaney, factors, hiv, hopv, kaggle, kinase, nci,
clintox, hiv, pcba_128, pcba_146, pdbbind, chembl, qm7, qm7b, qm9,
sampl, uv
featurizer: string or dc.feat.Featurizer.
choice of featurization.
    split: string, optional (default='random')
choice of splitter function, None = using the default splitter
"""
dataset_loading_functions = {
'bace_c': deepchem.molnet.load_bace_classification,
'bace_r': deepchem.molnet.load_bace_regression,
'bbbp': deepchem.molnet.load_bbbp,
'chembl': deepchem.molnet.load_chembl,
'clearance': deepchem.molnet.load_clearance,
'clintox': deepchem.molnet.load_clintox,
'delaney': deepchem.molnet.load_delaney,
'factors': deepchem.molnet.load_factors,
'hiv': deepchem.molnet.load_hiv,
'hopv': deepchem.molnet.load_hopv,
'hppb': deepchem.molnet.load_hppb,
'kaggle': deepchem.molnet.load_kaggle,
'kinase': deepchem.molnet.load_kinase,
'lipo': deepchem.molnet.load_lipo,
'muv': deepchem.molnet.load_muv,
'nci': deepchem.molnet.load_nci,
'pcba': deepchem.molnet.load_pcba,
'pcba_128': deepchem.molnet.load_pcba_128,
'pcba_146': deepchem.molnet.load_pcba_146,
'pcba_2475': deepchem.molnet.load_pcba_2475,
'pdbbind': deepchem.molnet.load_pdbbind,
'ppb': deepchem.molnet.load_ppb,
'qm7': deepchem.molnet.load_qm7,
'qm8': deepchem.molnet.load_qm8,
'qm9': deepchem.molnet.load_qm9,
'sampl': deepchem.molnet.load_sampl,
'sider': deepchem.molnet.load_sider,
'thermosol': deepchem.molnet.load_thermosol,
'tox21': deepchem.molnet.load_tox21,
'toxcast': deepchem.molnet.load_toxcast,
'uv': deepchem.molnet.load_uv
}
print('-------------------------------------')
print('Loading dataset: %s' % dataset)
print('-------------------------------------')
# loading datasets
if split is not None:
print('Splitting function: %s' % split)
tasks, all_dataset, transformers = dataset_loading_functions[dataset](
featurizer=featurizer, split=split)
return tasks, all_dataset, transformers
def benchmark_model(model, all_dataset, transformers, metric, test=False):
"""
Benchmark custom model.
    model: user-defined model structure
        For a user-defined model, it should include the functions: fit, evaluate.
    all_dataset: (train, test, val) data tuple.
        Returned by the `load_dataset` function.
    transformers: list
        Transformers that were applied to the dataset, as returned by `load_dataset`.
metric: string
choice of evaluation metrics.
"""
time_start_fitting = time.time()
train_score = .0
valid_score = .0
test_score = .0
train_dataset, valid_dataset, test_dataset = all_dataset
model.fit(train_dataset)
train_score = model.evaluate(train_dataset, metric, transformers)
valid_score = model.evaluate(valid_dataset, metric, transformers)
if test:
test_score = model.evaluate(test_dataset, metric, transformers)
time_finish_fitting = time.time()
time_for_running = time_finish_fitting - time_start_fitting
return train_score, valid_score, test_score, time_for_running
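
# A minimal sketch of how `load_dataset` and `benchmark_model` are meant to be
# chained, guarded so it only runs when this module is executed directly,
# since `load_dataset` downloads the MolNet dataset.  The choice of the
# Delaney dataset, the ECFP featurizer and a scikit-learn random forest is
# arbitrary and only for illustration.
if __name__ == '__main__':
    from sklearn.ensemble import RandomForestRegressor

    tasks, all_dataset, transformers = load_dataset('delaney',
                                                    featurizer='ECFP',
                                                    split='random')
    metric = [
        deepchem.metrics.Metric(deepchem.metrics.pearson_r2_score, np.mean)
    ]
    model = deepchem.models.SklearnModel(
        RandomForestRegressor(n_estimators=50))
    train_score, valid_score, test_score, elapsed = benchmark_model(
        model, all_dataset, transformers, metric, test=True)
    print('train:', train_score)
    print('valid:', valid_score)
    print('test:', test_score)
    print('seconds:', elapsed)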
<file_sep>"""
Script that trains sklearn models on HOPV dataset.
"""
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
import numpy as np
import deepchem as dc
from deepchem.molnet import load_hopv
from sklearn.ensemble import RandomForestRegressor
# Only for debug!
np.random.seed(123)
# Load HOPV dataset
hopv_tasks, hopv_datasets, transformers = load_hopv()
(train_dataset, valid_dataset, test_dataset) = hopv_datasets
# Fit models
metric = [
dc.metrics.Metric(dc.metrics.pearson_r2_score, np.mean, mode="regression"),
dc.metrics.Metric(
dc.metrics.mean_absolute_error, np.mean, mode="regression")
]
def model_builder(model_dir):
sklearn_model = RandomForestRegressor(n_estimators=500)
return dc.models.SklearnModel(sklearn_model, model_dir)
model = dc.models.SingletaskToMultitask(hopv_tasks, model_builder)
# Fit trained model
print("About to fit model")
model.fit(train_dataset)
model.save()
print("About to evaluate model")
train_scores = model.evaluate(train_dataset, metric, transformers)
valid_scores = model.evaluate(valid_dataset, metric, transformers)
print("Train scores")
print(train_scores)
print("Validation scores")
print(valid_scores)
<file_sep>import unittest
from deepchem.feat.molecule_featurizers import MolGanFeaturizer
from deepchem.feat.molecule_featurizers import GraphMatrix
class TestMolganFeaturizer(unittest.TestCase):
def test_featurizer_smiles(self):
try:
from rdkit import Chem
except ModuleNotFoundError:
raise ImportError("This method requires RDKit to be installed.")
smiles = [
'Cc1ccccc1CO', 'CC1CCC(C)C(N)C1', 'CCC(N)=O', 'Fc1cccc(F)c1',
'CC(C)F', 'C1COC2NCCC2C1', 'C1=NCc2ccccc21'
]
invalid_smiles = ['axa', 'xyz', 'inv']
featurizer = MolGanFeaturizer()
valid_data = featurizer.featurize(smiles)
invalid_data = featurizer.featurize(invalid_smiles)
# test featurization
valid_graphs = list(
filter(lambda x: isinstance(x, GraphMatrix), valid_data))
invalid_graphs = list(
filter(lambda x: not isinstance(x, GraphMatrix), invalid_data))
assert len(valid_graphs) == len(smiles)
assert len(invalid_graphs) == len(invalid_smiles)
# test defeaturization
valid_mols = featurizer.defeaturize(valid_graphs)
invalid_mols = featurizer.defeaturize(invalid_graphs)
valid_mols = list(
filter(lambda x: isinstance(x, Chem.rdchem.Mol), valid_mols))
invalid_mols = list(
filter(lambda x: not isinstance(x, Chem.rdchem.Mol), invalid_mols))
assert len(valid_graphs) == len(valid_mols)
assert len(invalid_graphs) == len(invalid_mols)
mols = list(map(Chem.MolFromSmiles, smiles))
redone_smiles = list(map(Chem.MolToSmiles, mols))
        # sanity check; make sure nothing weird happens with rdkit
assert redone_smiles == smiles
# check if original smiles match defeaturized smiles
defe_smiles = list(map(Chem.MolToSmiles, valid_mols))
assert defe_smiles == smiles
def test_featurizer_rdkit(self):
try:
from rdkit import Chem
except ModuleNotFoundError:
raise ImportError("This method requires RDKit to be installed.")
smiles = [
'Cc1ccccc1CO', 'CC1CCC(C)C(N)C1', 'CCC(N)=O', 'Fc1cccc(F)c1',
'CC(C)F', 'C1COC2NCCC2C1', 'C1=NCc2ccccc21'
]
invalid_smiles = ['axa', 'xyz', 'inv']
valid_molecules = list(map(Chem.MolFromSmiles, smiles))
invalid_molecules = list(map(Chem.MolFromSmiles, invalid_smiles))
redone_smiles = list(map(Chem.MolToSmiles, valid_molecules))
        # sanity check; make sure nothing weird happens with rdkit
assert redone_smiles == smiles
featurizer = MolGanFeaturizer()
valid_data = featurizer.featurize(valid_molecules)
invalid_data = featurizer.featurize(invalid_molecules)
# test featurization
valid_graphs = list(
filter(lambda x: isinstance(x, GraphMatrix), valid_data))
invalid_graphs = list(
filter(lambda x: not isinstance(x, GraphMatrix), invalid_data))
assert len(valid_graphs) == len(valid_molecules)
assert len(invalid_graphs) == len(invalid_molecules)
# test defeaturization
valid_mols = featurizer.defeaturize(valid_graphs)
invalid_mols = featurizer.defeaturize(invalid_graphs)
valid_mols = list(
filter(lambda x: isinstance(x, Chem.rdchem.Mol), valid_mols))
invalid_mols = list(
filter(lambda x: not isinstance(x, Chem.rdchem.Mol), invalid_mols))
assert len(valid_mols) == len(valid_graphs)
assert len(invalid_mols) == len(invalid_graphs)
# check if original smiles match defeaturized smiles
defe_smiles = list(map(Chem.MolToSmiles, valid_mols))
assert defe_smiles == smiles
if __name__ == '__main__':
unittest.main()
<file_sep>import os
import math
import deepchem as dc
import numpy as np
import unittest
import pytest
try:
import wandb # noqa: F401
has_wandb = True
except:
has_wandb = False
try:
import tensorflow as tf
has_tensorflow = True
except:
has_tensorflow = False
@pytest.mark.tensorflow
def test_overfit_graph_model():
"""Test fitting a KerasModel defined as a graph."""
n_data_points = 10
n_features = 2
np.random.seed(1234)
X = np.random.rand(n_data_points, n_features)
y = (X[:, 0] > X[:, 1]).astype(np.float32)
dataset = dc.data.NumpyDataset(X, y)
inputs = tf.keras.Input(shape=(n_features,))
hidden = tf.keras.layers.Dense(10, activation='relu')(inputs)
logits = tf.keras.layers.Dense(1)(hidden)
outputs = tf.keras.layers.Activation('sigmoid')(logits)
keras_model = tf.keras.Model(inputs=inputs, outputs=[outputs, logits])
model = dc.models.KerasModel(keras_model,
dc.models.losses.SigmoidCrossEntropy(),
output_types=['prediction', 'loss'],
learning_rate=0.005)
model.fit(dataset, nb_epoch=1000)
prediction = np.squeeze(model.predict_on_batch(X))
assert np.array_equal(y, np.round(prediction))
metric = dc.metrics.Metric(dc.metrics.roc_auc_score)
scores = model.evaluate(dataset, [metric])
assert scores[metric.name] > 0.9
# Check that predicting internal layers works.
pred_logits = np.squeeze(model.predict_on_batch(X, outputs=logits))
pred_from_logits = 1.0 / (1.0 + np.exp(-pred_logits))
assert np.allclose(prediction, pred_from_logits, atol=1e-4)
@pytest.mark.tensorflow
def test_overfit_sequential_model():
"""Test fitting a KerasModel defined as a sequential model."""
n_data_points = 10
n_features = 2
X = np.random.rand(n_data_points, n_features)
y = (X[:, 0] > X[:, 1]).astype(np.float32)
dataset = dc.data.NumpyDataset(X, y)
keras_model = tf.keras.Sequential([
tf.keras.layers.Dense(10, activation='relu'),
tf.keras.layers.Dense(1, activation='sigmoid')
])
model = dc.models.KerasModel(keras_model,
dc.models.losses.BinaryCrossEntropy(),
learning_rate=0.005)
model.fit(dataset, nb_epoch=1000)
prediction = np.squeeze(model.predict_on_batch(X))
assert np.array_equal(y, np.round(prediction))
metric = dc.metrics.Metric(dc.metrics.roc_auc_score)
generator = model.default_generator(dataset, pad_batches=False)
scores = model.evaluate_generator(generator, [metric])
assert scores[metric.name] > 0.9
@pytest.mark.tensorflow
def test_fit_use_all_losses():
"""Test fitting a KerasModel and getting a loss curve back."""
n_data_points = 10
n_features = 2
X = np.random.rand(n_data_points, n_features)
y = (X[:, 0] > X[:, 1]).astype(np.float32)
dataset = dc.data.NumpyDataset(X, y)
keras_model = tf.keras.Sequential([
tf.keras.layers.Dense(10, activation='relu'),
tf.keras.layers.Dense(1, activation='sigmoid')
])
model = dc.models.KerasModel(keras_model,
dc.models.losses.BinaryCrossEntropy(),
learning_rate=0.005,
log_frequency=10)
losses = []
model.fit(dataset, nb_epoch=1000, all_losses=losses)
# Each epoch is a single step for this model
assert len(losses) == 100
assert np.count_nonzero(np.array(losses)) == 100
@pytest.mark.tensorflow
def test_fit_on_batch():
"""Test fitting a KerasModel to individual batches."""
n_data_points = 10
n_features = 2
X = np.random.rand(n_data_points, n_features)
y = (X[:, 0] > X[:, 1]).astype(np.float32)
dataset = dc.data.NumpyDataset(X, y)
keras_model = tf.keras.Sequential([
tf.keras.layers.Dense(10, activation='relu'),
tf.keras.layers.Dense(1, activation='sigmoid')
])
model = dc.models.KerasModel(keras_model,
dc.models.losses.BinaryCrossEntropy(),
learning_rate=0.005)
i = 0
for X, y, w, ids in dataset.iterbatches(model.batch_size, 500):
i += 1
model.fit_on_batch(X, y, w, checkpoint=False)
prediction = np.squeeze(model.predict_on_batch(X))
assert np.array_equal(y, np.round(prediction))
metric = dc.metrics.Metric(dc.metrics.roc_auc_score)
generator = model.default_generator(dataset, pad_batches=False)
scores = model.evaluate_generator(generator, [metric])
assert scores[metric.name] > 0.9
@pytest.mark.tensorflow
def test_checkpointing():
"""Test loading and saving checkpoints with KerasModel."""
# Create two models using the same model directory.
keras_model1 = tf.keras.Sequential([tf.keras.layers.Dense(10)])
keras_model2 = tf.keras.Sequential([tf.keras.layers.Dense(10)])
model1 = dc.models.KerasModel(keras_model1, dc.models.losses.L2Loss())
model2 = dc.models.KerasModel(keras_model2,
dc.models.losses.L2Loss(),
model_dir=model1.model_dir)
# Check that they produce different results.
X = np.random.rand(5, 5)
y1 = model1.predict_on_batch(X)
y2 = model2.predict_on_batch(X)
assert not np.array_equal(y1, y2)
# Save a checkpoint from the first model and load it into the second one,
# and make sure they now match.
model1.save_checkpoint()
model2.restore()
y3 = model1.predict_on_batch(X)
y4 = model2.predict_on_batch(X)
assert np.array_equal(y1, y3)
assert np.array_equal(y1, y4)
@pytest.mark.tensorflow
def test_fit_restore():
"""Test specifying restore=True when calling fit()."""
n_data_points = 10
n_features = 2
X = np.random.rand(n_data_points, n_features)
y = (X[:, 0] > X[:, 1]).astype(np.float32)
dataset = dc.data.NumpyDataset(X, y)
# Train a model to overfit the dataset.
keras_model = tf.keras.Sequential([
tf.keras.layers.Dense(10, activation='relu'),
tf.keras.layers.Dense(1, activation='sigmoid')
])
model = dc.models.KerasModel(keras_model,
dc.models.losses.BinaryCrossEntropy(),
learning_rate=0.005)
model.fit(dataset, nb_epoch=1000)
prediction = np.squeeze(model.predict_on_batch(X))
assert np.array_equal(y, np.round(prediction))
# Create an identical model, do a single step of fitting with restore=True,
# and make sure it got restored correctly.
keras_model2 = tf.keras.Sequential([
tf.keras.layers.Dense(10, activation='relu'),
tf.keras.layers.Dense(1, activation='sigmoid')
])
model2 = dc.models.KerasModel(keras_model2,
dc.models.losses.BinaryCrossEntropy(),
model_dir=model.model_dir)
model2.fit(dataset, nb_epoch=1, restore=True)
prediction = np.squeeze(model2.predict_on_batch(X))
assert np.array_equal(y, np.round(prediction))
@pytest.mark.tensorflow
def test_uncertainty():
"""Test estimating uncertainty a KerasModel."""
n_samples = 30
n_features = 1
noise = 0.1
X = np.random.rand(n_samples, n_features)
y = (10 * X + np.random.normal(scale=noise, size=(n_samples, n_features)))
dataset = dc.data.NumpyDataset(X, y)
# Build a model that predicts uncertainty.
inputs = tf.keras.Input(shape=(n_features,))
switch = tf.keras.Input(shape=tuple())
hidden = tf.keras.layers.Dense(200, activation='relu')(inputs)
dropout = dc.models.layers.SwitchedDropout(rate=0.1)([hidden, switch])
output = tf.keras.layers.Dense(n_features)(dropout)
log_var = tf.keras.layers.Dense(n_features)(dropout)
var = tf.keras.layers.Activation(tf.exp)(log_var)
keras_model = tf.keras.Model(inputs=[inputs, switch],
outputs=[output, var, output, log_var])
def loss(outputs, labels, weights):
diff = labels[0] - outputs[0]
log_var = outputs[1]
var = tf.exp(log_var)
return tf.reduce_mean(diff * diff / var + log_var)
class UncertaintyModel(dc.models.KerasModel):
def default_generator(self,
dataset,
epochs=1,
mode='fit',
deterministic=True,
pad_batches=True):
for epoch in range(epochs):
for (X_b, y_b, w_b,
ids_b) in dataset.iterbatches(batch_size=self.batch_size,
deterministic=deterministic,
pad_batches=pad_batches):
if mode == 'predict':
dropout = np.array(0.0)
else:
dropout = np.array(1.0)
yield ([X_b, dropout], [y_b], [w_b])
model = UncertaintyModel(
keras_model,
loss,
output_types=['prediction', 'variance', 'loss', 'loss'],
learning_rate=0.003)
# Fit the model and see if its predictions are correct.
model.fit(dataset, nb_epoch=2500)
pred, std = model.predict_uncertainty(dataset)
assert np.mean(np.abs(y - pred)) < 1.0
assert noise < np.mean(std) < 1.0
@pytest.mark.tensorflow
def test_saliency_mapping():
"""Test computing a saliency map."""
n_tasks = 3
n_features = 5
keras_model = tf.keras.Sequential([
tf.keras.layers.Dense(20, activation='tanh'),
tf.keras.layers.Dense(n_tasks)
])
model = dc.models.KerasModel(keras_model, dc.models.losses.L2Loss())
x = np.random.random(n_features)
s = model.compute_saliency(x)
assert s.shape[0] == n_tasks
assert s.shape[1] == n_features
# Take a tiny step in the direction of s and see if the output changes by
# the expected amount.
delta = 0.01
for task in range(n_tasks):
norm = np.sqrt(np.sum(s[task]**2))
step = 0.5 * delta / norm
pred1 = model.predict_on_batch((x + s[task] * step).reshape(
(1, n_features))).flatten()
pred2 = model.predict_on_batch((x - s[task] * step).reshape(
(1, n_features))).flatten()
assert np.allclose(pred1[task], (pred2 + norm * delta)[task])
@pytest.mark.tensorflow
def test_saliency_shapes():
"""Test computing saliency maps for multiple outputs with multiple dimensions."""
inputs = tf.keras.Input(shape=(2, 3))
flatten = tf.keras.layers.Flatten()(inputs)
output1 = tf.keras.layers.Reshape((4, 1))(tf.keras.layers.Dense(4)(flatten))
output2 = tf.keras.layers.Reshape((1, 5))(tf.keras.layers.Dense(5)(flatten))
keras_model = tf.keras.Model(inputs=inputs, outputs=[output1, output2])
model = dc.models.KerasModel(keras_model, dc.models.losses.L2Loss())
x = np.random.random((2, 3))
s = model.compute_saliency(x)
assert len(s) == 2
assert s[0].shape == (4, 1, 2, 3)
assert s[1].shape == (1, 5, 2, 3)
@pytest.mark.tensorflow
def test_tensorboard():
"""Test logging to Tensorboard."""
n_data_points = 20
n_features = 2
X = np.random.rand(n_data_points, n_features)
y = [[0.0, 1.0] for x in range(n_data_points)]
dataset = dc.data.NumpyDataset(X, y)
keras_model = tf.keras.Sequential([
tf.keras.layers.Dense(2, activation='softmax'),
])
model = dc.models.KerasModel(keras_model,
dc.models.losses.CategoricalCrossEntropy(),
tensorboard=True,
log_frequency=1)
model.fit(dataset, nb_epoch=10)
files_in_dir = os.listdir(model.model_dir)
event_file = list(filter(lambda x: x.startswith("events"), files_in_dir))
assert len(event_file) > 0
event_file = os.path.join(model.model_dir, event_file[0])
file_size = os.stat(event_file).st_size
assert file_size > 0
@pytest.mark.tensorflow
@unittest.skipIf(not has_wandb, 'Wandb is not installed')
def test_wandblogger():
"""Test logging to Weights & Biases."""
# Load dataset and Models
tasks, datasets, transformers = dc.molnet.load_delaney(featurizer='ECFP',
splitter='random')
train_dataset, valid_dataset, test_dataset = datasets
metric = dc.metrics.Metric(dc.metrics.pearson_r2_score)
wandblogger = dc.models.WandbLogger(anonymous="allow",
save_run_history=True)
keras_model = tf.keras.Sequential([
tf.keras.layers.Dense(10, activation='relu'),
tf.keras.layers.Dense(1)
])
model = dc.models.KerasModel(keras_model,
dc.models.losses.L2Loss(),
wandb_logger=wandblogger)
vc_train = dc.models.ValidationCallback(train_dataset, 1, [metric])
vc_valid = dc.models.ValidationCallback(valid_dataset, 1, [metric])
model.fit(train_dataset, nb_epoch=10, callbacks=[vc_train, vc_valid])
# call model.fit again to test multiple fit() calls
model.fit(train_dataset, nb_epoch=10, callbacks=[vc_train, vc_valid])
wandblogger.finish()
run_data = wandblogger.run_history
valid_score = model.evaluate(valid_dataset, [metric], transformers)
assert math.isclose(valid_score["pearson_r2_score"],
run_data['eval/pearson_r2_score_(1)'],
abs_tol=0.0005)
@pytest.mark.tensorflow
def test_fit_variables():
"""Test training a subset of the variables in a model."""
class VarModel(tf.keras.Model):
def __init__(self, **kwargs):
super(VarModel, self).__init__(**kwargs)
self.var1 = tf.Variable([0.5])
self.var2 = tf.Variable([0.5])
def call(self, inputs, training=False):
return [self.var1, self.var2]
def loss(outputs, labels, weights):
return (outputs[0] * outputs[1] - labels[0])**2
keras_model = VarModel()
model = dc.models.KerasModel(keras_model, loss, learning_rate=0.01)
x = np.ones((1, 1))
vars = model.predict_on_batch(x)
assert np.allclose(vars[0], 0.5)
assert np.allclose(vars[1], 0.5)
model.fit_generator([(x, x, x)] * 300)
vars = model.predict_on_batch(x)
assert np.allclose(vars[0], 1.0)
assert np.allclose(vars[1], 1.0)
model.fit_generator([(x, 2 * x, x)] * 300, variables=[keras_model.var1])
vars = model.predict_on_batch(x)
assert np.allclose(vars[0], 2.0)
assert np.allclose(vars[1], 1.0)
model.fit_generator([(x, x, x)] * 300, variables=[keras_model.var2])
vars = model.predict_on_batch(x)
assert np.allclose(vars[0], 2.0)
assert np.allclose(vars[1], 0.5)
@pytest.mark.tensorflow
def test_fit_loss():
"""Test specifying a different loss function when calling fit()."""
class VarModel(tf.keras.Model):
def __init__(self, **kwargs):
super(VarModel, self).__init__(**kwargs)
self.var1 = tf.Variable([0.5])
self.var2 = tf.Variable([0.5])
def call(self, inputs, training=False):
return [self.var1, self.var2]
def loss1(outputs, labels, weights):
return (outputs[0] * outputs[1] - labels[0])**2
def loss2(outputs, labels, weights):
return (outputs[0] + outputs[1] - labels[0])**2
keras_model = VarModel()
model = dc.models.KerasModel(keras_model, loss1, learning_rate=0.01)
x = np.ones((1, 1))
vars = model.predict_on_batch(x)
assert np.allclose(vars[0], 0.5)
assert np.allclose(vars[1], 0.5)
model.fit_generator([(x, x, x)] * 300)
vars = model.predict_on_batch(x)
assert np.allclose(vars[0], 1.0)
assert np.allclose(vars[1], 1.0)
model.fit_generator([(x, 3 * x, x)] * 300, loss=loss2)
vars = model.predict_on_batch(x)
assert np.allclose(vars[0] + vars[1], 3.0)
<file_sep># flake8: noqa
from deepchem.models.sklearn_models.sklearn_model import SklearnModel
<file_sep>"""
Script that trains progressive multitask models on Tox21 dataset.
"""
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
import os
import shutil
import numpy as np
import deepchem as dc
from deepchem.molnet import load_tox21
# Only for debug!
np.random.seed(123)
# Load Tox21 dataset
n_features = 1024
tox21_tasks, tox21_datasets, transformers = load_tox21()
train_dataset, valid_dataset, test_dataset = tox21_datasets
# Fit models
metric = dc.metrics.Metric(dc.metrics.roc_auc_score, np.mean)
model = dc.models.ProgressiveMultitaskClassifier(
len(tox21_tasks),
n_features,
layer_sizes=[1000],
dropouts=[.25],
learning_rate=0.001,
batch_size=50)
# Fit trained model
model.fit(train_dataset, nb_epoch=10)
print("Evaluating model")
train_scores = model.evaluate(train_dataset, [metric], transformers)
valid_scores = model.evaluate(valid_dataset, [metric], transformers)
print("Train scores")
print(train_scores)
print("Validation scores")
print(valid_scores)
<file_sep>import unittest
import pytest
import numpy as np
from flaky import flaky
import deepchem as dc
try:
import tensorflow as tf
has_tensorflow = True
except:
has_tensorflow = False
class TestMAML(unittest.TestCase):
@flaky
@pytest.mark.tensorflow
def test_sine(self):
"""Test meta-learning for sine function."""
# This is a MetaLearner that learns to generate sine functions with variable
# amplitude and phase.
class SineLearner(dc.metalearning.MetaLearner):
def __init__(self):
self.batch_size = 10
self.w1 = tf.Variable(np.random.normal(size=[1, 40], scale=1.0))
self.w2 = tf.Variable(
np.random.normal(size=[40, 40], scale=np.sqrt(1 / 40)))
self.w3 = tf.Variable(
np.random.normal(size=[40, 1], scale=np.sqrt(1 / 40)))
self.b1 = tf.Variable(np.zeros(40))
self.b2 = tf.Variable(np.zeros(40))
self.b3 = tf.Variable(np.zeros(1))
def compute_model(self, inputs, variables, training):
x, y = inputs
w1, w2, w3, b1, b2, b3 = variables
dense1 = tf.nn.relu(tf.matmul(x, w1) + b1)
dense2 = tf.nn.relu(tf.matmul(dense1, w2) + b2)
output = tf.matmul(dense2, w3) + b3
loss = tf.reduce_mean(tf.square(output - y))
return loss, [output]
@property
def variables(self):
return [self.w1, self.w2, self.w3, self.b1, self.b2, self.b3]
def select_task(self):
self.amplitude = 5.0 * np.random.random()
self.phase = np.pi * np.random.random()
def get_batch(self):
x = np.random.uniform(-5.0, 5.0, (self.batch_size, 1))
return [x, self.amplitude * np.sin(x + self.phase)]
# Optimize it.
learner = SineLearner()
optimizer = dc.models.optimizers.Adam(learning_rate=5e-3)
maml = dc.metalearning.MAML(learner,
meta_batch_size=4,
optimizer=optimizer)
maml.fit(9000)
# Test it out on some new tasks and see how it works.
loss1 = []
loss2 = []
for i in range(50):
learner.select_task()
maml.restore()
batch = learner.get_batch()
loss, outputs = maml.predict_on_batch(batch)
loss1.append(np.sqrt(loss))
maml.train_on_current_task()
loss, outputs = maml.predict_on_batch(batch)
loss2.append(np.sqrt(loss))
# Initially the model should do a bad job of fitting the sine function.
assert np.average(loss1) > 1.0
# After one step of optimization it should do much better.
assert np.average(loss2) < np.average(loss1)
# Verify that we can create a new MAML object, reload the parameters from the first one, and
# get the same result.
new_maml = dc.metalearning.MAML(SineLearner(), model_dir=maml.model_dir)
new_maml.restore()
loss, outputs = new_maml.predict_on_batch(batch)
assert np.sqrt(loss) == loss1[-1]
# Do the same thing, only using the "restore" argument to fit().
new_maml = dc.metalearning.MAML(SineLearner(), model_dir=maml.model_dir)
new_maml.fit(0, restore=True)
loss, outputs = new_maml.predict_on_batch(batch)
assert np.sqrt(loss) == loss1[-1]
<file_sep>"""
Simple Tests for Support Generation
"""
import logging
import numpy as np
import unittest
import deepchem as dc
logger = logging.getLogger(__name__)
class TestSupports(unittest.TestCase):
"""
Test that support generation happens properly.
"""
def test_remove_dead_examples(self):
"""Tests that examples with zero weight are removed."""
n_samples = 100
n_features = 3
n_tasks = 1
# Generate dummy dataset
np.random.seed(123)
p = .05
ids = np.arange(n_samples)
X = np.random.rand(n_samples, n_features)
y = np.random.binomial(1, p, size=(n_samples, n_tasks))
w = np.random.binomial(1, p, size=(n_samples, n_tasks))
num_nonzero = np.count_nonzero(np.sum(w, axis=1))
dataset = dc.data.NumpyDataset(X, y, w, ids)
cleared_dataset = dc.data.remove_dead_examples(dataset)
assert len(cleared_dataset) == num_nonzero
def test_get_task_support_simple(self):
"""Tests that get_task_support samples correctly."""
n_samples = 20
n_features = 3
n_tasks = 1
# Generate dummy dataset
np.random.seed(123)
ids = np.arange(n_samples)
X = np.random.rand(n_samples, n_features)
y = np.random.randint(2, size=(n_samples, n_tasks))
w = np.ones((n_samples, n_tasks))
dataset = dc.data.NumpyDataset(X, y, w, ids)
n_episodes = 20
n_pos = 1
n_neg = 5
supports = dc.data.get_task_support(dataset,
n_episodes,
n_pos,
n_neg,
task=0,
log_every_n=10)
assert len(supports) == n_episodes
for support in supports:
assert len(support) == n_pos + n_neg
assert np.count_nonzero(support.y) == n_pos
def test_get_task_support_missing(self):
"""Test that task support works in presence of missing data."""
n_samples = 20
n_features = 3
n_tasks = 1
# Generate dummy dataset
np.random.seed(123)
ids = np.arange(n_samples)
X = np.random.rand(n_samples, n_features)
y = np.random.randint(2, size=(n_samples, n_tasks))
w = np.ones((n_samples, n_tasks))
# Set last n_samples/2 weights to 0
w[n_samples // 2:] = 0
dataset = dc.data.NumpyDataset(X, y, w, ids)
n_episodes = 20
n_pos = 1
n_neg = 2
supports = dc.data.get_task_support(dataset,
n_episodes,
n_pos,
n_neg,
task=0,
log_every_n=10)
assert len(supports) == n_episodes
for support in supports:
assert len(support) == n_pos + n_neg
assert np.count_nonzero(support.y) == n_pos
            # Check that no support elements are sampled from zero-weight samples
for identifier in support.ids:
assert identifier < n_samples / 2
def test_get_task_test(self):
"""Tests that get_task_testsamples correctly."""
n_samples = 20
n_features = 3
n_tasks = 1
# Generate dummy dataset
np.random.seed(123)
ids = np.arange(n_samples)
X = np.random.rand(n_samples, n_features)
y = np.random.randint(2, size=(n_samples, n_tasks))
w = np.ones((n_samples, n_tasks))
dataset = dc.data.NumpyDataset(X, y, w, ids)
n_episodes = 20
n_test = 10
tests = dc.data.get_task_test(dataset,
n_episodes,
n_test,
task=0,
log_every_n=10)
assert len(tests) == n_episodes
for test in tests:
assert len(test) == n_test
def test_simple_support_generator(self):
"""Conducts simple test that support generator runs."""
n_samples = 20
n_features = 3
n_tasks = 1
n_pos = 1
n_neg = 5
n_trials = 10
# Generate dummy dataset
np.random.seed(123)
ids = np.arange(n_samples)
X = np.random.rand(n_samples, n_features)
y = np.random.randint(2, size=(n_samples, n_tasks))
w = np.ones((n_samples, n_tasks))
dataset = dc.data.NumpyDataset(X, y, w, ids)
# Create support generator
_ = dc.data.SupportGenerator(dataset, n_pos, n_neg, n_trials)
def test_simple_episode_generator(self):
"""Conducts simple test that episode generator runs."""
n_samples = 20
n_features = 3
n_tasks = 1
n_pos = 1
n_neg = 5
n_test = 10
n_episodes = 10
# Generate dummy dataset
np.random.seed(123)
ids = np.arange(n_samples)
X = np.random.rand(n_samples, n_features)
y = np.random.randint(2, size=(n_samples, n_tasks))
w = np.ones((n_samples, n_tasks))
dataset = dc.data.NumpyDataset(X, y, w, ids)
# Create support generator
episode_gen = dc.data.EpisodeGenerator(dataset, n_pos, n_neg, n_test,
n_episodes)
n_episodes_found = 0
for (task, support, test) in episode_gen:
assert task >= 0
assert task < n_tasks
assert len(support) == n_pos + n_neg
assert np.count_nonzero(support.y) == n_pos
assert len(test) == n_test
n_episodes_found += 1
assert n_episodes_found == n_episodes
def test_get_task_minus_support_simple(self):
"""Test that fixed index support can be removed from dataset."""
n_samples = 20
n_support = 5
n_features = 3
n_tasks = 1
# Generate dummy dataset
np.random.seed(123)
ids = np.arange(n_samples)
X = np.random.rand(n_samples, n_features)
y = np.random.randint(2, size=(n_samples, n_tasks))
w = np.ones((n_samples, n_tasks))
dataset = dc.data.NumpyDataset(X, y, w, ids)
support_dataset = dc.data.NumpyDataset(X[:n_support], y[:n_support],
w[:n_support], ids[:n_support])
task_dataset = dc.data.get_task_dataset_minus_support(dataset,
support_dataset,
task=0)
# Assert all support elements have been removed
assert len(task_dataset) == n_samples - n_support
np.testing.assert_array_equal(task_dataset.X, X[n_support:])
np.testing.assert_array_equal(task_dataset.y, y[n_support:])
np.testing.assert_array_equal(task_dataset.w, w[n_support:])
np.testing.assert_array_equal(task_dataset.ids, ids[n_support:])
def test_dataset_difference_simple(self):
"""Test that fixed index can be removed from dataset."""
n_samples = 20
n_remove = 5
n_features = 3
n_tasks = 1
# Generate dummy dataset
np.random.seed(123)
ids = np.arange(n_samples)
X = np.random.rand(n_samples, n_features)
y = np.random.randint(2, size=(n_samples, n_tasks))
w = np.ones((n_samples, n_tasks))
dataset = dc.data.NumpyDataset(X, y, w, ids)
remove_dataset = dc.data.NumpyDataset(X[:n_remove], y[:n_remove],
w[:n_remove], ids[:n_remove])
out_dataset = dc.data.dataset_difference(dataset, remove_dataset)
# Assert all remove elements have been removed
assert len(out_dataset) == n_samples - n_remove
np.testing.assert_array_equal(out_dataset.X, X[n_remove:])
np.testing.assert_array_equal(out_dataset.y, y[n_remove:])
np.testing.assert_array_equal(out_dataset.w, w[n_remove:])
np.testing.assert_array_equal(out_dataset.ids, ids[n_remove:])
def test_get_task_minus_support(self):
"""Test that random index support can be removed from dataset."""
n_samples = 10
n_support = 4
n_features = 3
n_tasks = 1
# Generate dummy dataset
np.random.seed(123)
ids = np.arange(n_samples)
X = np.random.rand(n_samples, n_features)
y = np.random.randint(2, size=(n_samples, n_tasks))
w = np.ones((n_samples, n_tasks))
dataset = dc.data.NumpyDataset(X, y, w, ids)
support_inds = sorted(
np.random.choice(np.arange(n_samples), (n_support,), replace=False))
support_dataset = dc.data.NumpyDataset(X[support_inds], y[support_inds],
w[support_inds],
ids[support_inds])
task_dataset = dc.data.get_task_dataset_minus_support(dataset,
support_dataset,
task=0)
# Assert all support elements have been removed
data_inds = sorted(list(set(range(n_samples)) - set(support_inds)))
assert len(task_dataset) == n_samples - n_support
np.testing.assert_array_equal(task_dataset.X, X[data_inds])
np.testing.assert_array_equal(task_dataset.y, y[data_inds])
np.testing.assert_array_equal(task_dataset.w, w[data_inds])
np.testing.assert_array_equal(task_dataset.ids, ids[data_inds])
def test_dataset_difference(self):
"""Test that random index can be removed from dataset."""
n_samples = 10
n_remove = 4
n_features = 3
n_tasks = 1
# Generate dummy dataset
np.random.seed(123)
ids = np.arange(n_samples)
X = np.random.rand(n_samples, n_features)
y = np.random.randint(2, size=(n_samples, n_tasks))
w = np.ones((n_samples, n_tasks))
dataset = dc.data.NumpyDataset(X, y, w, ids)
remove_inds = sorted(
np.random.choice(np.arange(n_samples), (n_remove,), replace=False))
remove_dataset = dc.data.NumpyDataset(X[remove_inds], y[remove_inds],
w[remove_inds], ids[remove_inds])
out_dataset = dc.data.dataset_difference(dataset, remove_dataset)
# Assert all remove elements have been removed
data_inds = sorted(list(set(range(n_samples)) - set(remove_inds)))
assert len(out_dataset) == n_samples - n_remove
np.testing.assert_array_equal(out_dataset.X, X[data_inds])
np.testing.assert_array_equal(out_dataset.y, y[data_inds])
np.testing.assert_array_equal(out_dataset.w, w[data_inds])
np.testing.assert_array_equal(out_dataset.ids, ids[data_inds])
def test_get_task_minus_support_missing(self):
"""Test that support can be removed from dataset with missing data"""
n_samples = 20
n_support = 4
n_features = 3
n_tasks = 1
# Generate dummy dataset
np.random.seed(123)
ids = np.arange(n_samples)
X = np.random.rand(n_samples, n_features)
y = np.random.randint(2, size=(n_samples, n_tasks))
w = np.ones((n_samples, n_tasks))
# Set last n_samples/2 weights to 0
w[n_samples // 2:] = 0
dataset = dc.data.NumpyDataset(X, y, w, ids)
# Sample from first n_samples/2 elements for support
support_inds = sorted(
np.random.choice(np.arange(n_samples // 2), (n_support,),
replace=False))
support_dataset = dc.data.NumpyDataset(X[support_inds], y[support_inds],
w[support_inds],
ids[support_inds])
task_dataset = dc.data.get_task_dataset_minus_support(dataset,
support_dataset,
task=0)
# Should lie within first n_samples/2 samples only
assert len(task_dataset) == n_samples / 2 - n_support
for identifier in task_dataset.ids:
assert identifier < n_samples / 2
def test_support_generator_correct_samples(self):
"""Tests that samples from support generator have desired shape."""
n_samples = 20
n_features = 3
n_tasks = 1
n_pos = 1
n_neg = 5
n_trials = 10
# Generate dummy dataset
np.random.seed(123)
ids = np.arange(n_samples)
X = np.random.rand(n_samples, n_features)
y = np.random.randint(2, size=(n_samples, n_tasks))
w = np.ones((n_samples, n_tasks))
dataset = dc.data.NumpyDataset(X, y, w, ids)
# Create support generator
supp_gen = dc.data.SupportGenerator(dataset, n_pos, n_neg, n_trials)
num_supports = 0
for (task, support) in supp_gen:
assert support.X.shape == (n_pos + n_neg, n_features)
num_supports += 1
assert task == 0 # Only one task in this example
n_supp_pos = np.count_nonzero(support.y)
assert n_supp_pos == n_pos
assert num_supports == n_trials
def test_evaluation_strategy(self):
"""Tests that sampling supports for eval works properly."""
n_samples = 2000
n_features = 3
n_tasks = 5
n_pos = 1
n_neg = 5
n_trials = 10
# Generate dummy dataset
np.random.seed(123)
ids = np.arange(n_samples)
X = np.random.rand(n_samples, n_features)
y = np.random.randint(2, size=(n_samples, n_tasks))
w = np.random.randint(2, size=(n_samples, n_tasks))
dataset = dc.data.NumpyDataset(X, y, w, ids)
support_generator = dc.data.SupportGenerator(dataset, n_pos, n_neg,
n_trials)
for ind, (task, support) in enumerate(support_generator):
task_dataset = dc.data.get_task_dataset_minus_support(
dataset, support, task)
task_y = dataset.y[:, task]
task_w = dataset.w[:, task]
task_y = task_y[task_w != 0]
assert len(task_y) == len(support) + len(task_dataset)
logger.info(
"Verifying that task_dataset doesn't overlap with support.")
for task_id in task_dataset.ids:
assert task_id not in set(support.ids)
<file_sep>Reinforcement Learning
======================
Reinforcement learning is a powerful technique for learning when you
have access to a simulator, that is, a high fidelity way of predicting
the outcome of an experiment. This might be a physics engine, a
chemistry engine, or anything else. If you'd like to solve some task
within this engine, you can use reinforcement learning to do so.
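As a rough sketch of how the pieces documented below fit together
(illustrative only: it assumes TensorFlow and ``gym`` are installed, and
the network size and training length are arbitrary), you wrap a simulator
in an ``Environment``, subclass ``Policy``, and hand both to an optimizer
such as A2C:

.. code-block:: python

    import deepchem as dc
    import tensorflow as tf

    # Wrap an OpenAI Gym task as a DeepChem Environment.
    env = dc.rl.GymEnvironment('CartPole-v1')

    class CartPolePolicy(dc.rl.Policy):
        """Tiny policy network emitting action probabilities and a value estimate."""

        def __init__(self):
            super(CartPolePolicy, self).__init__(['action_prob', 'value'])

        def create_model(self, **kwargs):
            state = tf.keras.Input(shape=env.state_shape)
            hidden = tf.keras.layers.Dense(64, activation='relu')(state)
            action_prob = tf.keras.layers.Dense(
                env.n_actions, activation='softmax')(hidden)
            value = tf.keras.layers.Dense(1)(hidden)
            return tf.keras.Model(inputs=state, outputs=[action_prob, value])

    # Train with advantage actor-critic, then query the learned policy.
    a2c = dc.rl.a2c.A2C(env, CartPolePolicy())
    a2c.fit(100000)
    env.reset()
    action = a2c.select_action(env.state)
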
Environments
------------
.. autoclass:: deepchem.rl.Environment
:members:
.. autoclass:: deepchem.rl.GymEnvironment
:members:
Policies
--------
.. autoclass:: deepchem.rl.Policy
:members:
A2C
---
.. autoclass:: deepchem.rl.a2c.A2C
:members:
.. autoclass:: deepchem.rl.a2c.A2CLossDiscrete
:members:
PPO
---
.. autoclass:: deepchem.rl.ppo.PPO
:members:
.. autoclass:: deepchem.rl.ppo.PPOLoss
:members:
<file_sep>"""
HOPV dataset loader.
"""
import os
import deepchem as dc
from deepchem.molnet.load_function.molnet_loader import TransformerGenerator, _MolnetLoader
from deepchem.data import Dataset
from typing import List, Optional, Tuple, Union
HOPV_URL = "https://deepchemdata.s3-us-west-1.amazonaws.com/datasets/hopv.tar.gz"
HOPV_TASKS = [
'HOMO', 'LUMO', 'electrochemical_gap', 'optical_gap', 'PCE', 'V_OC', 'J_SC',
'fill_factor'
]
class _HOPVLoader(_MolnetLoader):
def create_dataset(self) -> Dataset:
dataset_file = os.path.join(self.data_dir, "hopv.csv")
if not os.path.exists(dataset_file):
dc.utils.data_utils.download_url(url=HOPV_URL,
dest_dir=self.data_dir)
dc.utils.data_utils.untargz_file(
os.path.join(self.data_dir, 'hopv.tar.gz'), self.data_dir)
loader = dc.data.CSVLoader(tasks=self.tasks,
feature_field="smiles",
featurizer=self.featurizer)
return loader.create_dataset(dataset_file, shard_size=8192)
def load_hopv(
featurizer: Union[dc.feat.Featurizer, str] = 'ECFP',
splitter: Union[dc.splits.Splitter, str, None] = 'scaffold',
transformers: List[Union[TransformerGenerator, str]] = ['normalization'],
reload: bool = True,
data_dir: Optional[str] = None,
save_dir: Optional[str] = None,
**kwargs
) -> Tuple[List[str], Tuple[Dataset, ...], List[dc.trans.Transformer]]:
"""Load HOPV datasets. Does not do train/test split
The HOPV datasets consist of the "Harvard Organic
    Photovoltaic Dataset". This dataset includes 350 small
molecules and polymers that were utilized as p-type materials
in OPVs. Experimental properties include: HOMO [a.u.], LUMO
[a.u.], Electrochemical gap [a.u.], Optical gap [a.u.], Power
conversion efficiency [%], Open circuit potential [V], Short
circuit current density [mA/cm^2], and fill factor [%].
Theoretical calculations in the original dataset have been
removed (for now).
Lopez, <NAME>., et al. "The Harvard organic photovoltaic dataset." Scientific data 3.1 (2016): 1-7.
Parameters
----------
featurizer: Featurizer or str
the featurizer to use for processing the data. Alternatively you can pass
one of the names from dc.molnet.featurizers as a shortcut.
splitter: Splitter or str
the splitter to use for splitting the data into training, validation, and
test sets. Alternatively you can pass one of the names from
dc.molnet.splitters as a shortcut. If this is None, all the data
will be included in a single dataset.
transformers: list of TransformerGenerators or strings
the Transformers to apply to the data. Each one is specified by a
TransformerGenerator or, as a shortcut, one of the names from
dc.molnet.transformers.
reload: bool
if True, the first call for a particular featurizer and splitter will cache
the datasets to disk, and subsequent calls will reload the cached datasets.
data_dir: str
a directory to save the raw data in
save_dir: str
a directory to save the dataset in
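    Examples
    --------
    A minimal usage sketch; the dataset is downloaded on the first call, so
    the doctest is skipped.

    >>> import deepchem as dc  # doctest: +SKIP
    >>> tasks, datasets, transformers = dc.molnet.load_hopv()  # doctest: +SKIP
    >>> train_dataset, valid_dataset, test_dataset = datasets  # doctest: +SKIP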
"""
loader = _HOPVLoader(featurizer, splitter, transformers, HOPV_TASKS,
data_dir, save_dir, **kwargs)
return loader.load_dataset('hopv', reload)
<file_sep>"""
DGL-based PAGTN for graph property prediction.
"""
import torch.nn as nn
import torch.nn.functional as F
from deepchem.models.losses import Loss, L2Loss, SparseSoftmaxCrossEntropy
from deepchem.models.torch_models.torch_model import TorchModel
class Pagtn(nn.Module):
"""Model for Graph Property Prediction
This model proceeds as follows:
* Update node representations in graphs with a variant of GAT, where a
linear additive form of attention is applied. Attention Weights are derived
by concatenating the node and edge features for each bond.
* Update node representations with multiple rounds of message passing.
    * Each layer has residual connections with its previous layer.
* The final molecular representation is computed by combining the representations
of all nodes in the molecule.
* Perform the final prediction using a linear layer
Examples
--------
>>> import deepchem as dc
>>> import dgl
>>> from deepchem.models import Pagtn
>>> smiles = ["C1CCC1", "C1=CC=CN=C1"]
>>> featurizer = dc.feat.PagtnMolGraphFeaturizer(max_length=5)
>>> graphs = featurizer.featurize(smiles)
>>> print(type(graphs[0]))
<class 'deepchem.feat.graph_data.GraphData'>
>>> dgl_graphs = [graphs[i].to_dgl_graph() for i in range(len(graphs))]
>>> batch_dgl_graph = dgl.batch(dgl_graphs)
>>> model = Pagtn(n_tasks=1, mode='regression')
>>> preds = model(batch_dgl_graph)
>>> print(type(preds))
<class 'torch.Tensor'>
>>> preds.shape == (2, 1)
True
References
----------
.. [1] <NAME>, <NAME>, <NAME>. "Path-Augmented
Graph Transformer Network." arXiv:1905.12712
Notes
-----
This class requires DGL (https://github.com/dmlc/dgl) and DGL-LifeSci
(https://github.com/awslabs/dgl-lifesci) to be installed.
"""
def __init__(self,
n_tasks: int,
number_atom_features: int = 94,
number_bond_features: int = 42,
mode: str = 'regression',
n_classes: int = 2,
output_node_features: int = 256,
hidden_features: int = 32,
num_layers: int = 5,
num_heads: int = 1,
dropout: float = 0.1,
nfeat_name: str = 'x',
efeat_name: str = 'edge_attr',
pool_mode: str = 'sum'):
"""
Parameters
----------
n_tasks: int
Number of tasks.
number_atom_features : int
Size for the input node features. Default to 94.
number_bond_features : int
Size for the input edge features. Default to 42.
mode: str
The model type, 'classification' or 'regression'. Default to 'regression'.
n_classes: int
The number of classes to predict per task
(only used when ``mode`` is 'classification'). Default to 2.
output_node_features : int
Size for the output node features in PAGTN layers. Default to 256.
hidden_features : int
Size for the hidden node features in PAGTN layers. Default to 32.
num_layers : int
Number of PAGTN layers to be applied. Default to 5.
num_heads : int
Number of attention heads. Default to 1.
dropout : float
The probability for performing dropout. Default to 0.1
nfeat_name: str
For an input graph ``g``, the model assumes that it stores node features in
``g.ndata[nfeat_name]`` and will retrieve input node features from that.
Default to 'x'.
efeat_name: str
For an input graph ``g``, the model assumes that it stores edge features in
``g.edata[efeat_name]`` and will retrieve input edge features from that.
Default to 'edge_attr'.
pool_mode : 'max' or 'mean' or 'sum'
            How to combine the node representations: elementwise maximum, mean or sum.
"""
try:
import dgl # noqa: F401
except:
raise ImportError('This class requires dgl.')
try:
import dgllife # noqa: F401
except:
raise ImportError('This class requires dgllife.')
if mode not in ['classification', 'regression']:
raise ValueError(
"mode must be either 'classification' or 'regression'")
super(Pagtn, self).__init__()
self.n_tasks = n_tasks
self.mode = mode
self.n_classes = n_classes
self.nfeat_name = nfeat_name
self.efeat_name = efeat_name
if mode == 'classification':
out_size = n_tasks * n_classes
else:
out_size = n_tasks
from dgllife.model import PAGTNPredictor as DGLPAGTNPredictor
self.model = DGLPAGTNPredictor(node_in_feats=number_atom_features,
node_out_feats=output_node_features,
node_hid_feats=hidden_features,
edge_feats=number_bond_features,
depth=num_layers,
nheads=num_heads,
dropout=dropout,
n_tasks=out_size,
mode=pool_mode)
def forward(self, g):
"""Predict graph labels
Parameters
----------
g: DGLGraph
A DGLGraph for a batch of graphs. It stores the node features in
``dgl_graph.ndata[self.nfeat_name]`` and edge features in
``dgl_graph.edata[self.efeat_name]``.
Returns
-------
torch.Tensor
The model output.
* When self.mode = 'regression',
its shape will be ``(dgl_graph.batch_size, self.n_tasks)``.
* When self.mode = 'classification', the output consists of probabilities
for classes. Its shape will be
``(dgl_graph.batch_size, self.n_tasks, self.n_classes)`` if self.n_tasks > 1;
its shape will be ``(dgl_graph.batch_size, self.n_classes)`` if self.n_tasks is 1.
torch.Tensor, optional
This is only returned when self.mode = 'classification', the output consists of the
logits for classes before softmax.
"""
node_feats = g.ndata[self.nfeat_name]
edge_feats = g.edata[self.efeat_name]
out = self.model(g, node_feats, edge_feats)
if self.mode == 'classification':
if self.n_tasks == 1:
logits = out.view(-1, self.n_classes)
softmax_dim = 1
else:
logits = out.view(-1, self.n_tasks, self.n_classes)
softmax_dim = 2
proba = F.softmax(logits, dim=softmax_dim)
return proba, logits
else:
return out
class PagtnModel(TorchModel):
"""Model for Graph Property Prediction.
This model proceeds as follows:
* Update node representations in graphs with a variant of GAT, where a
linear additive form of attention is applied. Attention Weights are derived
by concatenating the node and edge features for each bond.
* Update node representations with multiple rounds of message passing.
    * Each layer has residual connections with its previous layer.
* The final molecular representation is computed by combining the representations
of all nodes in the molecule.
* Perform the final prediction using a linear layer
Examples
--------
>>> import deepchem as dc
>>> from deepchem.models import PagtnModel
>>> # preparing dataset
>>> smiles = ["C1CCC1", "CCC"]
>>> labels = [0., 1.]
>>> featurizer = dc.feat.PagtnMolGraphFeaturizer(max_length=5)
>>> X = featurizer.featurize(smiles)
>>> dataset = dc.data.NumpyDataset(X=X, y=labels)
>>> # training model
>>> model = PagtnModel(mode='classification', n_tasks=1,
... batch_size=16, learning_rate=0.001)
>>> loss = model.fit(dataset, nb_epoch=5)
References
----------
.. [1] <NAME>, <NAME>, Tommi Jaakkola. "Path-Augmented
Graph Transformer Network." arXiv:1905.12712
Notes
-----
This class requires DGL (https://github.com/dmlc/dgl) and DGL-LifeSci
(https://github.com/awslabs/dgl-lifesci) to be installed.
"""
def __init__(self,
n_tasks: int,
number_atom_features: int = 94,
number_bond_features: int = 42,
mode: str = 'regression',
n_classes: int = 2,
output_node_features: int = 256,
hidden_features: int = 32,
num_layers: int = 5,
num_heads: int = 1,
dropout: float = 0.1,
pool_mode: str = 'sum',
**kwargs):
"""
Parameters
----------
n_tasks: int
Number of tasks.
number_atom_features : int
Size for the input node features. Default to 94.
number_bond_features : int
Size for the input edge features. Default to 42.
mode: str
The model type, 'classification' or 'regression'. Default to 'regression'.
n_classes: int
The number of classes to predict per task
(only used when ``mode`` is 'classification'). Default to 2.
output_node_features : int
Size for the output node features in PAGTN layers. Default to 256.
hidden_features : int
Size for the hidden node features in PAGTN layers. Default to 32.
num_layers: int
Number of graph neural network layers, i.e. number of rounds of message passing.
            Default to 5.
num_heads : int
Number of attention heads. Default to 1.
dropout: float
Dropout probability. Default to 0.1
pool_mode : 'max' or 'mean' or 'sum'
            How to combine the node representations: elementwise maximum, mean or sum.
kwargs
This can include any keyword argument of TorchModel.
"""
model = Pagtn(n_tasks=n_tasks,
number_atom_features=number_atom_features,
number_bond_features=number_bond_features,
mode=mode,
n_classes=n_classes,
output_node_features=output_node_features,
hidden_features=hidden_features,
num_layers=num_layers,
num_heads=num_heads,
dropout=dropout,
pool_mode=pool_mode)
if mode == 'regression':
loss: Loss = L2Loss()
output_types = ['prediction']
else:
loss = SparseSoftmaxCrossEntropy()
output_types = ['prediction', 'loss']
super(PagtnModel, self).__init__(model,
loss=loss,
output_types=output_types,
**kwargs)
def _prepare_batch(self, batch):
"""Create batch data for Pagtn.
Parameters
----------
batch: tuple
The tuple is ``(inputs, labels, weights)``.
Returns
-------
inputs: DGLGraph
DGLGraph for a batch of graphs.
labels: list of torch.Tensor or None
The graph labels.
weights: list of torch.Tensor or None
The weights for each sample or sample/task pair converted to torch.Tensor.
"""
try:
import dgl
except:
raise ImportError('This class requires dgl.')
inputs, labels, weights = batch
dgl_graphs = [graph.to_dgl_graph() for graph in inputs[0]]
inputs = dgl.batch(dgl_graphs).to(self.device)
_, labels, weights = super(PagtnModel, self)._prepare_batch(
([], labels, weights))
return inputs, labels, weights
<file_sep>#!/bin/bash
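# For every subdirectory under v2015/ (one subdirectory per structure), convert
# its <name>_ligand.sdf to PDB format using Open Babel's legacy `babel` command.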
cd v2015
for x in */
do
cd $x
babel -isdf ${x%/}_ligand.sdf -opdb ${x%/}_ligand.pdb
cd ../
done
cd ../
<file_sep>import pytest
@pytest.mark.torch
def test_Net3DLayer():
import dgl
import numpy as np
import torch
from deepchem.models.torch_models.gnn3d import Net3DLayer
g = dgl.graph(([0, 1], [1, 2]))
g.ndata['feat'] = torch.tensor([[1., 2., 3.], [4., 5., 6.], [7., 8., 9.]])
g.edata['d'] = torch.tensor([[0.5, 0.6, 0.7], [0.8, 0.9, 1.0]])
hidden_dim = 3
batch_norm = True
batch_norm_momentum = 0.1
dropout = 0.1
net3d_layer = Net3DLayer(edge_dim=hidden_dim,
hidden_dim=hidden_dim,
batch_norm=batch_norm,
batch_norm_momentum=batch_norm_momentum,
dropout=dropout)
output_graph = net3d_layer(g)
assert output_graph.number_of_nodes() == g.number_of_nodes()
assert output_graph.number_of_edges() == g.number_of_edges()
output_feats = output_graph.ndata['feat'].detach().numpy()
assert output_feats.shape == (3, 3)
assert not np.allclose(output_feats, g.ndata['feat'].detach().numpy())
output_edge_feats = output_graph.edata['d'].detach().numpy()
assert output_edge_feats.shape == (2, 3)
assert not np.allclose(output_edge_feats, g.edata['d'].detach().numpy())
def get_regression_dataset():
import os
import numpy as np
import deepchem as dc
from deepchem.feat.molecule_featurizers.conformer_featurizer import (
RDKitConformerFeaturizer,)
np.random.seed(123)
featurizer = RDKitConformerFeaturizer(num_conformers=2, rmsd_cutoff=3)
dir = os.path.dirname(os.path.abspath(__file__))
input_file = os.path.join(dir, 'assets/example_regression.csv')
loader = dc.data.CSVLoader(tasks=["outcome"],
feature_field="smiles",
featurizer=featurizer)
dataset = loader.create_dataset(input_file)
metric = dc.metrics.Metric(dc.metrics.mean_absolute_error,
mode="regression")
return dataset, metric
@pytest.mark.torch
def test_net3d():
import numpy as np
from deepchem.feat.graph_data import BatchGraphData
from deepchem.models.torch_models.gnn3d import Net3D
data, _ = get_regression_dataset()
features = BatchGraphData(np.concatenate(data.X))
graph = features.to_dgl_graph()
target_dim = 2
net3d = Net3D(hidden_dim=3,
target_dim=target_dim,
readout_aggregators=['sum', 'mean'])
output = net3d(graph)
assert output.shape[1] == target_dim
def compare_weights(key, model1, model2):
import torch
return torch.all(
torch.eq(model1.components[key].weight,
model2.components[key].weight)).item()
@pytest.mark.torch
def test_InfoMax3DModular():
from deepchem.models.torch_models.gnn3d import InfoMax3DModular
data, _ = get_regression_dataset()
model = InfoMax3DModular(hidden_dim=64,
target_dim=10,
aggregators=['sum', 'mean', 'max'],
readout_aggregators=['sum', 'mean'],
scalers=['identity'])
loss1 = model.fit(data, nb_epoch=1)
loss2 = model.fit(data, nb_epoch=9)
assert loss1 > loss2
@pytest.mark.torch
def test_InfoMax3DModular_save_reload():
from deepchem.models.torch_models.gnn3d import InfoMax3DModular
data, _ = get_regression_dataset()
model = InfoMax3DModular(hidden_dim=64,
target_dim=10,
aggregators=['sum', 'mean', 'max'],
readout_aggregators=['sum', 'mean'],
scalers=['identity'])
model.fit(data, nb_epoch=1)
model2 = InfoMax3DModular(hidden_dim=64,
target_dim=10,
aggregators=['sum', 'mean', 'max'],
readout_aggregators=['sum', 'mean'],
scalers=['identity'])
model2.load_from_pretrained(model_dir=model.model_dir)
assert model.components.keys() == model2.components.keys()
keys_with_weights = [
key for key in model.components.keys()
if hasattr(model.components[key], 'weight')
]
assert all(compare_weights(key, model, model2) for key in keys_with_weights)
<file_sep>import numpy as np
import deepchem as dc
import pytest
try:
import tensorflow as tf
has_tensorflow = True
except:
has_tensorflow = False
@pytest.mark.tensorflow
def test_singletask_robust_multitask_classification():
"""Test robust multitask singletask classification."""
n_tasks = 1
n_samples = 10
n_features = 3
# Generate dummy dataset
np.random.seed(123)
ids = np.arange(n_samples)
X = np.random.rand(n_samples, n_features)
y = np.zeros((n_samples, n_tasks))
w = np.ones((n_samples, n_tasks))
dataset = dc.data.NumpyDataset(X, y, w, ids)
_ = dc.metrics.Metric(dc.metrics.accuracy_score, task_averager=np.mean)
model = dc.models.RobustMultitaskClassifier(n_tasks,
n_features,
layer_sizes=[50],
bypass_layer_sizes=[10],
dropouts=[0.],
learning_rate=0.003,
weight_init_stddevs=[.1],
batch_size=n_samples)
# Fit trained model
model.fit(dataset, nb_epoch=1)
@pytest.mark.tensorflow
def test_singletask_robust_multitask_regression():
"""Test singletask robust multitask regression."""
np.random.seed(123)
tf.random.set_seed(123)
n_tasks = 1
n_samples = 10
n_features = 3
# Generate dummy dataset
np.random.seed(123)
ids = np.arange(n_samples)
X = np.random.rand(n_samples, n_features)
y = np.zeros((n_samples, n_tasks))
w = np.ones((n_samples, n_tasks))
dataset = dc.data.NumpyDataset(X, y, w, ids)
_ = dc.metrics.Metric(dc.metrics.mean_squared_error,
task_averager=np.mean,
mode="regression")
model = dc.models.RobustMultitaskRegressor(n_tasks,
n_features,
layer_sizes=[50],
bypass_layer_sizes=[10],
dropouts=[0.],
learning_rate=0.003,
weight_init_stddevs=[.1],
batch_size=n_samples)
# Fit trained model
model.fit(dataset, nb_epoch=1)
<file_sep>"""Unit tests for evaluators."""
import pytest
import deepchem as dc
import numpy as np
import sklearn
from deepchem.utils.evaluate import Evaluator
from deepchem.utils.evaluate import GeneratorEvaluator
try:
import tensorflow as tf # noqa
has_tensorflow = True
except:
has_tensorflow = False
try:
import torch # noqa
has_pytorch = True
except:
has_pytorch = False
def test_multiclass_threshold_predictions():
"""Check prediction thresholding works correctly."""
# Construct a random class probability matrix
y = np.random.rand(10, 5)
y_sums = np.sum(y, axis=1)
y = y / y_sums[:, None]
y_out = dc.metrics.threshold_predictions(y)
assert y_out.shape == (10,)
assert np.allclose(y_out, np.argmax(y, axis=1))
def test_binary_threshold_predictions():
"""Check prediction thresholding works correctly."""
# Construct a random class probability matrix
y = np.random.rand(10, 2)
y_sums = np.sum(y, axis=1)
y = y / y_sums[:, None]
y_out = dc.metrics.threshold_predictions(y, threshold=0.3)
assert y_out.shape == (10,)
assert np.allclose(y_out, np.where(y[:, 1] >= 0.3, np.ones(10),
np.zeros(10)))
@pytest.mark.torch
def test_evaluator_dc_metric():
"""Test an evaluator on a dataset."""
X = np.random.rand(10, 5)
y = np.random.rand(10, 1)
dataset = dc.data.NumpyDataset(X, y)
model = dc.models.MultitaskRegressor(1, 5)
evaluator = Evaluator(model, dataset, [])
metric = dc.metrics.Metric(dc.metrics.mae_score)
multitask_scores = evaluator.compute_model_performance(metric)
assert isinstance(multitask_scores, dict)
assert len(multitask_scores) == 1
assert multitask_scores['mae_score'] > 0
@pytest.mark.torch
def test_multiclass_classification_singletask():
"""Test multiclass classification evaluation."""
X = np.random.rand(100, 5)
y = np.random.randint(5, size=(100,))
dataset = dc.data.NumpyDataset(X, y)
model = dc.models.MultitaskClassifier(1, 5, n_classes=5)
evaluator = Evaluator(model, dataset, [])
multitask_scores = evaluator.compute_model_performance(
dc.metrics.roc_auc_score, n_classes=5)
assert len(multitask_scores) == 1
assert multitask_scores["metric-1"] >= 0
def test_sklearn_multiclass_classification_singletask():
"""Test multiclass classification evaluation."""
X = np.random.rand(100, 5)
y = np.random.randint(5, size=(100,))
dataset = dc.data.NumpyDataset(X, y)
rf = sklearn.ensemble.RandomForestClassifier(50)
model = dc.models.SklearnModel(rf)
model.fit(dataset)
evaluator = Evaluator(model, dataset, [])
multitask_scores = evaluator.compute_model_performance(
dc.metrics.roc_auc_score, n_classes=5)
assert len(multitask_scores) == 1
assert multitask_scores["metric-1"] >= 0
@pytest.mark.torch
def test_evaluate_multiclass_classification_singletask():
"""Test multiclass classification evaluation."""
X = np.random.rand(100, 5)
y = np.random.randint(5, size=(100,))
dataset = dc.data.NumpyDataset(X, y)
model = dc.models.MultitaskClassifier(1, 5, n_classes=5)
multitask_scores = model.evaluate(dataset,
dc.metrics.roc_auc_score,
n_classes=5)
assert len(multitask_scores) == 1
assert multitask_scores["metric-1"] >= 0
@pytest.mark.torch
def test_multitask_evaluator():
"""Test evaluation of a multitask metric."""
X = np.random.rand(10, 5)
y = np.random.rand(10, 2, 1)
dataset = dc.data.NumpyDataset(X, y)
model = dc.models.MultitaskRegressor(2, 5)
evaluator = Evaluator(model, dataset, [])
metric = dc.metrics.Metric(dc.metrics.mae_score)
multitask_scores, all_task_scores = evaluator.compute_model_performance(
metric, per_task_metrics=True)
assert isinstance(multitask_scores, dict)
assert len(multitask_scores) == 1
assert multitask_scores['mae_score'] > 0
assert isinstance(all_task_scores, dict)
assert len(multitask_scores) == 1
@pytest.mark.torch
def test_model_evaluate_dc_metric():
"""Test a model evaluate on a dataset."""
X = np.random.rand(10, 5)
y = np.random.rand(10, 1)
dataset = dc.data.NumpyDataset(X, y)
model = dc.models.MultitaskRegressor(1, 5)
metric = dc.metrics.Metric(dc.metrics.mae_score)
multitask_scores = model.evaluate(dataset, metric, [])
assert isinstance(multitask_scores, dict)
assert len(multitask_scores) == 1
assert multitask_scores['mae_score'] > 0
@pytest.mark.torch
def test_multitask_model_evaluate_sklearn():
"""Test evaluation of a multitask metric."""
X = np.random.rand(10, 5)
y = np.random.rand(10, 2)
dataset = dc.data.NumpyDataset(X, y)
model = dc.models.MultitaskRegressor(2, 5)
evaluator = Evaluator(model, dataset, [])
multitask_scores, all_task_scores = evaluator.compute_model_performance(
dc.metrics.mean_absolute_error, per_task_metrics=True)
assert isinstance(multitask_scores, dict)
assert len(multitask_scores) == 1
assert multitask_scores['metric-1'] > 0
assert isinstance(all_task_scores, dict)
assert len(multitask_scores) == 1
@pytest.mark.torch
def test_multitask_model_evaluate():
"""Test evaluation of a multitask metric."""
X = np.random.rand(10, 5)
y = np.random.rand(10, 2)
dataset = dc.data.NumpyDataset(X, y)
model = dc.models.MultitaskRegressor(2, 5)
multitask_scores, all_task_scores = model.evaluate(
dataset, dc.metrics.mean_absolute_error, per_task_metrics=True)
assert isinstance(multitask_scores, dict)
assert len(multitask_scores) == 1
assert multitask_scores["metric-1"] > 0
assert isinstance(all_task_scores, dict)
@pytest.mark.torch
def test_evaluator_dc_multi_metric():
"""Test an evaluator on a dataset."""
X = np.random.rand(10, 5)
y = np.random.rand(10, 1)
dataset = dc.data.NumpyDataset(X, y)
model = dc.models.MultitaskRegressor(1, 5)
evaluator = Evaluator(model, dataset, [])
metric1 = dc.metrics.Metric(dc.metrics.mae_score, n_tasks=2)
metric2 = dc.metrics.Metric(dc.metrics.r2_score, n_tasks=2)
multitask_scores = evaluator.compute_model_performance([metric1, metric2])
assert isinstance(multitask_scores, dict)
assert len(multitask_scores) == 2
assert multitask_scores['mae_score'] > 0
assert "r2_score" in multitask_scores
@pytest.mark.torch
def test_model_evaluate_dc_multi_metric():
"""Test an evaluator on a dataset."""
X = np.random.rand(10, 5)
y = np.random.rand(10, 1)
dataset = dc.data.NumpyDataset(X, y)
model = dc.models.MultitaskRegressor(1, 5)
metric1 = dc.metrics.Metric(dc.metrics.mae_score)
metric2 = dc.metrics.Metric(dc.metrics.r2_score)
multitask_scores = model.evaluate(dataset, [metric1, metric2])
assert isinstance(multitask_scores, dict)
assert len(multitask_scores) == 2
assert multitask_scores['mae_score'] > 0
assert "r2_score" in multitask_scores
@pytest.mark.torch
def test_generator_evaluator_dc_metric_multitask_single_point():
"""Test generator evaluator on a generator."""
X = np.random.rand(10, 5)
y = np.random.rand(10, 1)
dataset = dc.data.NumpyDataset(X, y)
model = dc.models.MultitaskRegressor(1, 5)
generator = model.default_generator(dataset, pad_batches=False)
evaluator = GeneratorEvaluator(model, generator, [])
metric = dc.metrics.Metric(dc.metrics.mae_score)
multitask_scores = evaluator.compute_model_performance(metric)
assert isinstance(multitask_scores, dict)
assert len(multitask_scores) == 1
assert multitask_scores['mae_score'] > 0
assert len(multitask_scores) == 1
@pytest.mark.torch
def test_evaluator_sklearn_metric():
"""Test an evaluator on a dataset."""
X = np.random.rand(10, 5)
y = np.random.rand(10, 1)
dataset = dc.data.NumpyDataset(X, y)
model = dc.models.MultitaskRegressor(1, 5)
evaluator = Evaluator(model, dataset, [])
multitask_scores = evaluator.compute_model_performance(
dc.metrics.mean_absolute_error)
assert isinstance(multitask_scores, dict)
assert len(multitask_scores) == 1
    # Note that since no name was provided, metrics are indexed by the order given.
assert multitask_scores['metric-1'] > 0
@pytest.mark.torch
def test_generator_evaluator_dc_metric_multitask():
"""Test generator evaluator on a generator."""
X = np.random.rand(10, 5)
y = np.random.rand(10, 1)
dataset = dc.data.NumpyDataset(X, y)
model = dc.models.MultitaskRegressor(1, 5)
generator = model.default_generator(dataset, pad_batches=False)
evaluator = GeneratorEvaluator(model, generator, [])
metric = dc.metrics.Metric(dc.metrics.mae_score)
multitask_scores = evaluator.compute_model_performance(metric)
assert isinstance(multitask_scores, dict)
assert len(multitask_scores) == 1
assert multitask_scores['mae_score'] > 0
@pytest.mark.torch
def test_model_evaluate_sklearn_metric():
"""Test a model evaluate on a dataset."""
X = np.random.rand(10, 5)
y = np.random.rand(10, 1)
dataset = dc.data.NumpyDataset(X, y)
model = dc.models.MultitaskRegressor(1, 5)
multitask_scores = model.evaluate(dataset, dc.metrics.mean_absolute_error)
assert isinstance(multitask_scores, dict)
assert len(multitask_scores) == 1
    # Note that since no name was provided, metrics are indexed by the order given.
assert multitask_scores['metric-1'] > 0
@pytest.mark.torch
def test_evaluator_sklearn_multi_metric():
"""Test an evaluator on a dataset."""
X = np.random.rand(10, 5)
y = np.random.rand(10, 1)
dataset = dc.data.NumpyDataset(X, y)
model = dc.models.MultitaskRegressor(1, 5)
evaluator = Evaluator(model, dataset, [])
multitask_scores = evaluator.compute_model_performance(
[dc.metrics.mean_absolute_error, dc.metrics.r2_score])
assert isinstance(multitask_scores, dict)
assert len(multitask_scores.keys()) == 2
    # Note that since no name was provided, metrics are indexed by the order given.
assert multitask_scores['metric-1'] > 0
assert "metric-2" in multitask_scores
@pytest.mark.torch
def test_model_evaluate_sklearn_multi_metric():
"""Test an evaluator on a dataset."""
X = np.random.rand(10, 5)
y = np.random.rand(10, 1)
dataset = dc.data.NumpyDataset(X, y)
model = dc.models.MultitaskRegressor(1, 5)
multitask_scores = model.evaluate(
dataset, [dc.metrics.mean_absolute_error, dc.metrics.r2_score])
assert isinstance(multitask_scores, dict)
assert len(multitask_scores.keys()) == 2
    # Note that since no name was provided, metrics are indexed by the order given.
assert multitask_scores['metric-1'] > 0
assert "metric-2" in multitask_scores
@pytest.mark.tensorflow
def test_gc_binary_classification():
"""Test multiclass classification evaluation."""
smiles = ["C", "CC"]
featurizer = dc.feat.ConvMolFeaturizer()
X = featurizer.featurize(smiles)
y = np.random.randint(2, size=(len(smiles),))
dataset = dc.data.NumpyDataset(X, y)
model = dc.models.GraphConvModel(1, mode="classification")
# TODO: Fix this case with correct thresholding
evaluator = Evaluator(model, dataset, [])
multitask_scores = evaluator.compute_model_performance(
dc.metrics.accuracy_score, n_classes=2)
assert len(multitask_scores) == 1
assert multitask_scores["metric-1"] >= 0
@pytest.mark.tensorflow
def test_gc_binary_kappa_classification():
"""Test multiclass classification evaluation."""
np.random.seed(1234)
smiles = ["C", "CC", "CO", "CCC", "CCCC"]
featurizer = dc.feat.ConvMolFeaturizer()
X = featurizer.featurize(smiles)
y = np.random.randint(2, size=(len(smiles),))
dataset = dc.data.NumpyDataset(X, y)
model = dc.models.GraphConvModel(1, mode="classification")
# TODO: Fix this case with correct thresholding
evaluator = Evaluator(model, dataset, [])
multitask_scores = evaluator.compute_model_performance(
dc.metrics.kappa_score, n_classes=2)
assert len(multitask_scores) == 1
assert multitask_scores["metric-1"] <= 1
assert multitask_scores["metric-1"] >= -1
@pytest.mark.tensorflow
def test_gc_multiclass_classification():
"""Test multiclass classification evaluation."""
np.random.seed(1234)
smiles = ["C", "CC"]
featurizer = dc.feat.ConvMolFeaturizer()
X = featurizer.featurize(smiles)
y = np.random.randint(5, size=(len(smiles),))
dataset = dc.data.NumpyDataset(X, y)
model = dc.models.GraphConvModel(1, mode="classification", n_classes=5)
evaluator = Evaluator(model, dataset, [])
multitask_scores = evaluator.compute_model_performance(
dc.metrics.accuracy_score, n_classes=5)
assert len(multitask_scores) == 1
assert multitask_scores["metric-1"] >= 0
<file_sep>import pytest
try:
from deepchem.models.dft.scf import XCNNSCF
import torch
from deepchem.feat.dft_data import DFTEntry, DFTSystem
from deepchem.models.dft.nnxc import HybridXC
has_dqc = True
except ModuleNotFoundError:
has_dqc = False
pass
@pytest.mark.dqc
def test_scf():
torch.manual_seed(42)
nnmodel = (torch.nn.Sequential(torch.nn.Linear(2, 10), torch.nn.Softplus(),
torch.nn.Linear(10, 1, bias=False))).to(
torch.double)
hybridxc = HybridXC("lda_x", nnmodel, aweight0=0.0)
e_type = 'ae'
true_val = '0.09194410469'
systems = [{
'moldesc': 'Li 1.5070 0 0; H -1.5070 0 0',
'basis': '6-311++G(3df,3pd)'
}, {
'moldesc': 'Li 0 0 0',
'basis': '6-311++G(3df,3pd)',
'spin': 1
}, {
'moldesc': 'H 0 0 0',
'basis': '6-311++G(3df,3pd)',
'spin': 1
}]
entry = DFTEntry.create(e_type, true_val, systems)
evl = XCNNSCF(hybridxc, entry)
system = DFTSystem(systems[1])
run = evl.run(system)
output = run.energy()
expected_output = torch.tensor(-7.1914, dtype=torch.float64)
torch.testing.assert_close(output, expected_output, atol=1e-4, rtol=0)
<file_sep>import numpy as np
try:
import torch
except ImportError:
print("This module requires PyTorch to be installed.")
def fourier_encode_dist(x, num_encodings=4, include_self=True):
"""
Fourier encode the input tensor `x` based on the specified number of encodings.
This function applies a Fourier encoding to the input tensor `x` by dividing
it by a range of scales (2^i for i in range(num_encodings)) and then
concatenating the sine and cosine of the scaled values. Optionally, the
original input tensor can be included in the output.
Parameters
----------
x : torch.Tensor
Input tensor to be Fourier encoded.
num_encodings : int, optional, default=4
Number of Fourier encodings to apply.
include_self : bool, optional, default=True
Whether to include the original input tensor in the output.
Returns
-------
torch.Tensor
Fourier encoded tensor.
Examples
--------
>>> import torch
>>> x = torch.tensor([1.0, 2.0, 3.0])
>>> encoded_x = fourier_encode_dist(x, num_encodings=4, include_self=True)
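    >>> encoded_x.shape
    torch.Size([3, 9])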
"""
x = x.unsqueeze(-1)
device, dtype, orig_x = x.device, x.dtype, x
scales = 2**torch.arange(num_encodings, device=device, dtype=dtype)
x = x / scales
x = torch.cat([x.sin(), x.cos()], dim=-1)
x = torch.cat((x, orig_x), dim=-1) if include_self else x
return x.squeeze()
EPS = 1e-5
def aggregate_mean(h, **kwargs):
"""
Compute the mean of the input tensor along the second to last dimension.
Parameters
----------
h : torch.Tensor
Input tensor.
Returns
-------
torch.Tensor
Mean of the input tensor along the second to last dimension.
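    Examples
    --------
    A minimal shape check (illustrative):

    >>> import torch
    >>> h = torch.ones(2, 3, 4)
    >>> aggregate_mean(h).shape
    torch.Size([2, 4])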
"""
return torch.mean(h, dim=-2)
def aggregate_max(h, **kwargs):
"""
Compute the max of the input tensor along the second to last dimension.
Parameters
----------
h : torch.Tensor
Input tensor.
Returns
-------
torch.Tensor
Max of the input tensor along the second to last dimension.
"""
return torch.max(h, dim=-2)[0]
def aggregate_min(h, **kwargs):
"""
Compute the min of the input tensor along the second to last dimension.
Parameters
----------
h : torch.Tensor
Input tensor.
**kwargs
Additional keyword arguments.
Returns
-------
torch.Tensor
Min of the input tensor along the second to last dimension.
"""
return torch.min(h, dim=-2)[0]
def aggregate_std(h, **kwargs):
"""
Compute the standard deviation of the input tensor along the second to last dimension.
Parameters
----------
h : torch.Tensor
Input tensor.
Returns
-------
torch.Tensor
Standard deviation of the input tensor along the second to last dimension.
"""
return torch.sqrt(aggregate_var(h) + EPS)
def aggregate_var(h, **kwargs):
"""
Compute the variance of the input tensor along the second to last dimension.
Parameters
----------
h : torch.Tensor
Input tensor.
Returns
-------
torch.Tensor
Variance of the input tensor along the second to last dimension.
"""
h_mean_squares = torch.mean(h * h, dim=-2)
h_mean = torch.mean(h, dim=-2)
var = torch.relu(h_mean_squares - h_mean * h_mean)
return var
def aggregate_moment(h, n=3, **kwargs):
"""
Compute the nth moment of the input tensor along the second to last dimension.
Parameters
----------
h : torch.Tensor
Input tensor.
n : int, optional, default=3
The order of the moment to compute.
Returns
-------
torch.Tensor
Nth moment of the input tensor along the second to last dimension.
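    Examples
    --------
    A quick shape check (illustrative):

    >>> import torch
    >>> h = torch.randn(2, 3, 4)
    >>> aggregate_moment(h, n=3).shape
    torch.Size([2, 4])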
"""
# for each node (E[(X-E[X])^n])^{1/n}
# EPS is added to the absolute value of expectation before taking the nth root for stability
h_mean = torch.mean(h, dim=-2, keepdim=True)
h_n = torch.mean(torch.pow(h - h_mean, n), dim=-2)
rooted_h_n = torch.sign(h_n) * torch.pow(torch.abs(h_n) + EPS, 1.0 / n)
return rooted_h_n
def aggregate_sum(h, **kwargs):
"""
Compute the sum of the input tensor along the second to last dimension.
Parameters
----------
h : torch.Tensor
Input tensor.
Returns
-------
torch.Tensor
Sum of the input tensor along the second to last dimension.
"""
return torch.sum(h, dim=-2)
# each scaler is a function that takes as input X (B x N x Din), adj (B x N x N) and
# avg_d (dictionary containing averages over training set) and returns X_scaled (B x N x Din) as output
def scale_identity(h, D=None, avg_d=None):
"""
Identity scaling function.
Parameters
----------
h : torch.Tensor
Input tensor.
D : torch.Tensor, optional
Degree tensor.
avg_d : dict, optional
Dictionary containing averages over the training set.
Returns
-------
torch.Tensor
Scaled input tensor.
"""
return h
def scale_amplification(h, D, avg_d):
"""
Amplification scaling function. log(D + 1) / d * h where d is the average of the ``log(D + 1)`` in the training set
Parameters
----------
h : torch.Tensor
Input tensor.
D : torch.Tensor
Degree tensor.
avg_d : dict
Dictionary containing averages over the training set.
Returns
-------
torch.Tensor
Scaled input tensor.
"""
return h * (np.log(D + 1) / avg_d["log"])
def scale_attenuation(h, D, avg_d):
"""
    Attenuation scaling function. d / log(D + 1) * h where d is the average of ``log(D + 1)`` in the training set
Parameters
----------
h : torch.Tensor
Input tensor.
D : torch.Tensor
Degree tensor.
avg_d : dict
Dictionary containing averages over the training set.
Returns
-------
torch.Tensor
Scaled input tensor.
"""
return h * (avg_d["log"] / np.log(D + 1))
<file_sep>import math
from typing import Optional
try:
import torch
import torch.nn as nn
except ModuleNotFoundError:
raise ImportError("These classes require PyTorch to be installed")
class ScaledDotProductAttention(nn.Module):
"""The Scaled Dot Production Attention operation from `Attention Is All You Need <https://arxiv.org/abs/1706.03762>_` paper.
Example
-------
>>> from deepchem.models import ScaledDotProductAttention as SDPA
>>> attn = SDPA()
>>> x = torch.ones(1, 5)
>>> # Linear layers for making query, key, value
>>> Q, K, V = nn.Parameter(torch.ones(5)), nn.Parameter(torch.ones(5)), nn.Parameter(torch.ones(5))
>>> query, key, value = Q * x, K * x, V * x
>>> x_out, attn_score = attn(query, key, value)
"""
def __init__(self):
self.epsilon = -1e9
super(ScaledDotProductAttention, self).__init__()
def forward(self,
query: torch.Tensor,
key: torch.Tensor,
value: torch.Tensor,
mask: Optional[torch.Tensor] = None,
dropout: Optional[nn.Dropout] = None):
"""
Parameters
----------
query: torch.Tensor
Query tensor for attention
key: torch.Tensor
Key tensor for attention
value: torch.Tensor
Value tensor for attention
mask: torch.Tensor (optional)
Mask to apply during attention computation
dropout: nn.Dropout (optional)
Dropout layer for attention output
"""
d_k = query.size(-1)
scores = torch.matmul(query, key.transpose(-2, -1)) / math.sqrt(d_k)
if mask is not None:
scores = scores.masked_fill(mask == 0, self.epsilon)
p_attn = scores.softmax(dim=-1)
if dropout is not None:
p_attn = dropout(p_attn)
return torch.matmul(p_attn, value), p_attn
class SelfAttention(nn.Module):
"""SelfAttention Layer
Given $X\in \mathbb{R}^{n \times in_feature}$, the attention is calculated by: $a=softmax(W_2tanh(W_1X))$, where
$W_1 \in \mathbb{R}^{hidden \times in_feature}$, $W_2 \in \mathbb{R}^{out_feature \times hidden}$.
The final output is $y=aX$ where $y \in \mathbb{R}^{n \times out_feature}$.
Parameters
----------
in_features: int
Dimension of input features
out_features: int
Dimension of output features
hidden_size: int
Dimension of hidden layer
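    Examples
    --------
    A minimal shape sketch with random inputs (illustrative):

    >>> import torch
    >>> layer = SelfAttention(in_features=16, out_features=4, hidden_size=8)
    >>> X = torch.randn(10, 16)
    >>> embedding, attn = layer(X)
    >>> embedding.shape, attn.shape
    (torch.Size([4, 16]), torch.Size([4, 10]))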
"""
def __init__(self, in_features, out_features, hidden_size=128):
super(SelfAttention, self).__init__()
self.w1 = torch.nn.Parameter(torch.FloatTensor(hidden_size,
in_features))
self.w2 = torch.nn.Parameter(
torch.FloatTensor(out_features, hidden_size))
self.reset_parameters()
def reset_parameters(self):
nn.init.xavier_normal_(self.w1)
nn.init.xavier_normal_(self.w2)
def forward(self, X):
"""The forward function.
Parameters
----------
X: torch.Tensor
input feature of shape $\mathbb{R}^{n \times in_feature}$.
Returns
-------
embedding: torch.Tensor
The final embedding of shape $\mathbb{R}^{out_features \times in_feature}$
attention-matrix: torch.Tensor
The attention matrix
"""
x = torch.tanh(torch.matmul(self.w1, X.transpose(1, 0)))
x = torch.matmul(self.w2, x)
attn = torch.nn.functional.softmax(x, dim=-1)
x = torch.matmul(attn, X)
return x, attn
<file_sep>"""
Test for DMPNN Featurizer class.
"""
from deepchem.feat.molecule_featurizers.dmpnn_featurizer import DMPNNFeaturizer, GraphConvConstants
import numpy as np
import pytest
edge_index_orignal_ordering = {
"C1=CC=NC=C1":
np.asarray([[0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 0],
[1, 0, 2, 1, 3, 2, 4, 3, 5, 4, 0, 5]]),
"CC(=O)C":
np.asarray([[0, 1, 1, 2, 1, 3], [1, 0, 2, 1, 3, 1]]),
"C":
np.empty((2, 0), dtype=int)
}
# Set up tests.
smiles = ["C1=CC=NC=C1", "CC(=O)C", "C"]
edge_index_orignal_order = list(edge_index_orignal_ordering.values())
# Set up testing parameters.
Test1_params = {
'features_generators': None,
'is_adding_hs': False,
'use_original_atom_ranks': False
}
Test2_params = {
'features_generators': None,
'is_adding_hs': False,
'use_original_atom_ranks': True
}
Test3_params = {
'features_generators': None,
'is_adding_hs': True,
'use_original_atom_ranks': False
}
Test4_params = {
'features_generators': ['morgan'],
'is_adding_hs': False,
'use_original_atom_ranks': False
}
Test5_params = {
'features_generators': ['morgan'],
'is_adding_hs': True,
'use_original_atom_ranks': False
}
@pytest.mark.parametrize(
'test_parameters',
[Test1_params, Test2_params, Test3_params, Test4_params, Test5_params])
def test_featurizer_ring(test_parameters):
"""
Test for featurization of "C1=CC=NC=C1" using `DMPNNFeaturizer` class.
"""
features_generators, is_adding_hs, use_original_atom_ranks = test_parameters.values(
)
featurizer = DMPNNFeaturizer(
features_generators=features_generators,
is_adding_hs=is_adding_hs,
use_original_atom_ranks=use_original_atom_ranks)
graph_feat = featurizer.featurize(smiles)
assert len(graph_feat) == 3
if is_adding_hs:
assert graph_feat[0].num_nodes == 11
assert graph_feat[0].num_node_features == GraphConvConstants.ATOM_FDIM
assert graph_feat[0].node_features.shape == (
11, GraphConvConstants.ATOM_FDIM)
assert graph_feat[0].num_edges == 22
assert graph_feat[0].num_edge_features == GraphConvConstants.BOND_FDIM
assert graph_feat[0].edge_features.shape == (
22, GraphConvConstants.BOND_FDIM)
else:
assert graph_feat[0].num_nodes == 6
assert graph_feat[0].num_node_features == GraphConvConstants.ATOM_FDIM
assert graph_feat[0].node_features.shape == (
6, GraphConvConstants.ATOM_FDIM)
assert graph_feat[0].num_edges == 12
assert graph_feat[0].num_edge_features == GraphConvConstants.BOND_FDIM
assert graph_feat[0].edge_features.shape == (
12, GraphConvConstants.BOND_FDIM)
if features_generators:
assert len(graph_feat[0].global_features
) == 2048 # for `morgan` features generator
nonzero_features_indicies = graph_feat[0].global_features.nonzero()[0]
if is_adding_hs:
assert len(nonzero_features_indicies) == 10
else:
assert len(nonzero_features_indicies) == 9
else:
assert graph_feat[0].global_features.size == 0
if use_original_atom_ranks:
assert (graph_feat[0].edge_index == edge_index_orignal_order[0]).all()
else:
        if graph_feat[0].edge_index.shape == edge_index_orignal_order[0].shape:
assert (graph_feat[0].edge_index !=
edge_index_orignal_order[0]).any()
@pytest.mark.parametrize(
'test_parameters',
[Test1_params, Test2_params, Test3_params, Test4_params, Test5_params])
def test_featurizer_general_case(test_parameters):
"""
Test for featurization of "CC(=O)C" using `DMPNNFeaturizer` class.
"""
features_generators, is_adding_hs, use_original_atom_ranks = test_parameters.values(
)
featurizer = DMPNNFeaturizer(
features_generators=features_generators,
is_adding_hs=is_adding_hs,
use_original_atom_ranks=use_original_atom_ranks)
graph_feat = featurizer.featurize(smiles)
assert len(graph_feat) == 3
if is_adding_hs:
assert graph_feat[1].num_nodes == 10
assert graph_feat[1].num_node_features == GraphConvConstants.ATOM_FDIM
assert graph_feat[1].node_features.shape == (
10, GraphConvConstants.ATOM_FDIM)
assert graph_feat[1].num_edges == 18
assert graph_feat[1].num_edge_features == GraphConvConstants.BOND_FDIM
assert graph_feat[1].edge_features.shape == (
18, GraphConvConstants.BOND_FDIM)
else:
assert graph_feat[1].num_nodes == 4
assert graph_feat[1].num_node_features == GraphConvConstants.ATOM_FDIM
assert graph_feat[1].node_features.shape == (
4, GraphConvConstants.ATOM_FDIM)
assert graph_feat[1].num_edges == 6
assert graph_feat[1].num_edge_features == GraphConvConstants.BOND_FDIM
assert graph_feat[1].edge_features.shape == (
6, GraphConvConstants.BOND_FDIM)
if features_generators:
assert len(graph_feat[1].global_features
) == 2048 # for `morgan` features generator
nonzero_features_indicies = graph_feat[1].global_features.nonzero()[0]
if is_adding_hs:
assert len(nonzero_features_indicies) == 10
else:
assert len(nonzero_features_indicies) == 6
else:
assert graph_feat[1].global_features.size == 0
if use_original_atom_ranks:
assert (graph_feat[1].edge_index == edge_index_orignal_order[1]).all()
else:
        if graph_feat[1].edge_index.shape == edge_index_orignal_order[1].shape:
assert (graph_feat[1].edge_index !=
edge_index_orignal_order[1]).any()
@pytest.mark.parametrize(
'test_parameters',
[Test1_params, Test2_params, Test3_params, Test4_params, Test5_params])
def test_featurizer_single_atom(test_parameters):
"""
Test for featurization of "C" using `DMPNNFeaturizer` class.
"""
features_generators, is_adding_hs, use_original_atom_ranks = test_parameters.values(
)
featurizer = DMPNNFeaturizer(
features_generators=features_generators,
is_adding_hs=is_adding_hs,
use_original_atom_ranks=use_original_atom_ranks)
graph_feat = featurizer.featurize(smiles)
assert len(graph_feat) == 3
if is_adding_hs:
assert graph_feat[2].num_nodes == 5
assert graph_feat[2].num_node_features == GraphConvConstants.ATOM_FDIM
assert graph_feat[2].node_features.shape == (
5, GraphConvConstants.ATOM_FDIM)
assert graph_feat[2].num_edges == 8
assert graph_feat[2].num_edge_features == GraphConvConstants.BOND_FDIM
assert graph_feat[2].edge_features.shape == (
8, GraphConvConstants.BOND_FDIM)
else:
assert graph_feat[2].num_nodes == 1
assert graph_feat[2].num_node_features == GraphConvConstants.ATOM_FDIM
assert graph_feat[2].node_features.shape == (
1, GraphConvConstants.ATOM_FDIM)
assert graph_feat[2].num_edges == 0
assert graph_feat[2].num_edge_features == GraphConvConstants.BOND_FDIM
assert graph_feat[2].edge_features.shape == (
0, GraphConvConstants.BOND_FDIM)
if features_generators:
assert len(graph_feat[2].global_features
) == 2048 # for `morgan` features generator
nonzero_features_indicies = graph_feat[2].global_features.nonzero()[0]
if is_adding_hs:
assert len(nonzero_features_indicies) == 4
else:
assert len(nonzero_features_indicies) == 1
else:
assert graph_feat[2].global_features.size == 0
if use_original_atom_ranks:
assert (graph_feat[2].edge_index == edge_index_orignal_order[2]).all()
else:
        if graph_feat[2].edge_index.shape == edge_index_orignal_order[2].shape:
# the atom order for 'C' is same in case of canonical and original ordering
assert (
graph_feat[2].edge_index == edge_index_orignal_order[2]).all()
<file_sep>Metalearning
============
One of the hardest challenges in scientific machine learning is the lack of access to sufficient data. Sometimes experiments are slow and expensive and there's no easy way to gain access to more data. What do you do then?
This module contains a collection of techniques for doing low data
learning. "Metalearning" traditionally refers to techniques for
"learning to learn" but here we take it to mean any technique which
proves effective for learning with low amounts of data.
MetaLearner
-----------
This is the abstract superclass for metalearning algorithms.
.. autoclass:: deepchem.metalearning.MetaLearner
:members:
MAML
----
.. autoclass:: deepchem.metalearning.MAML
:members:
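
Example
-------

The sketch below shows the intended workflow: implement a ``MetaLearner``
for your problem, wrap it in ``MAML``, meta-train across tasks, and then
adapt to a new task with a handful of gradient steps. The ``SineLearner``
class is hypothetical and elided here; consult the API documentation above
for the exact methods a ``MetaLearner`` subclass must implement.

.. code-block:: python

    import deepchem as dc

    learner = SineLearner()  # hypothetical MetaLearner subclass
    maml = dc.metalearning.MAML(learner)
    maml.fit(9000)  # meta-train across many tasks

    learner.select_task()  # switch to a previously unseen task
    maml.train_on_current_task()  # adapt with a few gradient steps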
<file_sep>"""
MUV dataset loader.
"""
import os
import deepchem as dc
from deepchem.molnet.load_function.molnet_loader import TransformerGenerator, _MolnetLoader
from deepchem.data import Dataset
from typing import List, Optional, Tuple, Union
MUV_URL = "https://deepchemdata.s3-us-west-1.amazonaws.com/datasets/muv.csv.gz"
MUV_TASKS = sorted([
'MUV-692', 'MUV-689', 'MUV-846', 'MUV-859', 'MUV-644', 'MUV-548', 'MUV-852',
'MUV-600', 'MUV-810', 'MUV-712', 'MUV-737', 'MUV-858', 'MUV-713', 'MUV-733',
'MUV-652', 'MUV-466', 'MUV-832'
])
class _MuvLoader(_MolnetLoader):
def create_dataset(self) -> Dataset:
dataset_file = os.path.join(self.data_dir, "muv.csv.gz")
if not os.path.exists(dataset_file):
dc.utils.data_utils.download_url(url=MUV_URL,
dest_dir=self.data_dir)
loader = dc.data.CSVLoader(tasks=self.tasks,
feature_field="smiles",
featurizer=self.featurizer)
return loader.create_dataset(dataset_file, shard_size=8192)
def load_muv(
featurizer: Union[dc.feat.Featurizer, str] = 'ECFP',
splitter: Union[dc.splits.Splitter, str, None] = 'scaffold',
transformers: List[Union[TransformerGenerator, str]] = ['balancing'],
reload: bool = True,
data_dir: Optional[str] = None,
save_dir: Optional[str] = None,
**kwargs
) -> Tuple[List[str], Tuple[Dataset, ...], List[dc.trans.Transformer]]:
"""Load MUV dataset
The Maximum Unbiased Validation (MUV) group is a benchmark dataset selected
from PubChem BioAssay by applying a refined nearest neighbor analysis.
The MUV dataset contains 17 challenging tasks for around 90 thousand
compounds and is specifically designed for validation of virtual screening
techniques.
Scaffold splitting is recommended for this dataset.
The raw data csv file contains columns below:
- "mol_id" - PubChem CID of the compound
- "smiles" - SMILES representation of the molecular structure
- "MUV-XXX" - Measured results (Active/Inactive) for bioassays
Parameters
----------
featurizer: Featurizer or str
the featurizer to use for processing the data. Alternatively you can pass
one of the names from dc.molnet.featurizers as a shortcut.
splitter: Splitter or str
the splitter to use for splitting the data into training, validation, and
test sets. Alternatively you can pass one of the names from
dc.molnet.splitters as a shortcut. If this is None, all the data
will be included in a single dataset.
transformers: list of TransformerGenerators or strings
the Transformers to apply to the data. Each one is specified by a
TransformerGenerator or, as a shortcut, one of the names from
dc.molnet.transformers.
reload: bool
if True, the first call for a particular featurizer and splitter will cache
the datasets to disk, and subsequent calls will reload the cached datasets.
data_dir: str
a directory to save the raw data in
save_dir: str
a directory to save the dataset in
References
----------
.. [1] Rohrer, <NAME>., and <NAME>. "Maximum unbiased validation
(MUV) data sets for virtual screening based on PubChem bioactivity data."
Journal of chemical information and modeling 49.2 (2009): 169-184.
"""
loader = _MuvLoader(featurizer, splitter, transformers, MUV_TASKS, data_dir,
save_dir, **kwargs)
return loader.load_dataset('muv', reload)
<file_sep># Splitter Examples
The DeepChem library has a collection of splitters which demonstrate
how to use DeepChem to split chemical and other datasets in
interesting ways. This folder contains a number of examples which
demonstrate the use of splitters on data.
DeepChem has a number of different splitters you can use on your data. Here's the current set:
```
from deepchem.splits.splitters import RandomGroupSplitter
from deepchem.splits.splitters import RandomStratifiedSplitter
from deepchem.splits.splitters import SingletaskStratifiedSplitter
from deepchem.splits.splitters import MolecularWeightSplitter
from deepchem.splits.splitters import MaxMinSplitter
from deepchem.splits.splitters import RandomSplitter
from deepchem.splits.splitters import IndexSplitter
from deepchem.splits.splitters import IndiceSplitter
from deepchem.splits.splitters import ClusterFps
from deepchem.splits.splitters import ButinaSplitter
from deepchem.splits.splitters import ScaffoldSplitter
from deepchem.splits.splitters import FingerprintSplitter
from deepchem.splits.splitters import SpecifiedSplitter
from deepchem.splits.splitters import TimeSplitterPDBbind
from deepchem.splits.task_splitter import TaskSplitter
```
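
As a quick illustration, here is a minimal sketch that splits a small
in-memory dataset with `RandomSplitter`; the other splitters expose the same
`train_valid_test_split` interface (the random arrays below are placeholders).

```
import numpy as np
import deepchem as dc

# Build a tiny placeholder dataset
X = np.random.rand(20, 5)
y = np.random.rand(20, 1)
dataset = dc.data.NumpyDataset(X, y)

# Split 80/10/10 at random
splitter = dc.splits.RandomSplitter()
train, valid, test = splitter.train_valid_test_split(
    dataset, frac_train=0.8, frac_valid=0.1, frac_test=0.1)
```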
<file_sep>from typing import List
import torch.nn as nn
import torch
from deepchem.models.losses import L2Loss
from deepchem.models.torch_models import layers
from deepchem.models.torch_models import TorchModel
from deepchem.data.datasets import Dataset
from deepchem.utils import batch_coulomb_matrix_features
class DTNN(nn.Module):
"""Deep Tensor Neural Networks
DTNN is based on the many-body Hamiltonian concept, which is a fundamental principle in quantum mechanics.
    The DTNN receives a molecule's distance matrix and the membership of its atoms from its Coulomb Matrix representation.
Then, it iteratively refines the representation of each atom by considering its interactions with neighboring atoms.
Finally, it predicts the energy of the molecule by summing up the energies of the individual atoms.
In this class, we establish a sequential model for the Deep Tensor Neural Network (DTNN) [1]_.
Examples
--------
>>> import os
>>> import torch
>>> from deepchem.models.torch_models import DTNN
>>> from deepchem.data import SDFLoader
>>> from deepchem.feat import CoulombMatrix
>>> from deepchem.utils import batch_coulomb_matrix_features
>>> # Get Data
>>> model_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
>>> dataset_file = os.path.join(model_dir, 'tests/assets/qm9_mini.sdf')
>>> TASKS = ["alpha", "homo"]
>>> loader = SDFLoader(tasks=TASKS, featurizer=CoulombMatrix(29), sanitize=True)
>>> data = loader.create_dataset(dataset_file, shard_size=100)
>>> inputs = batch_coulomb_matrix_features(data.X)
>>> atom_number, distance, atom_membership, distance_membership_i, distance_membership_j = inputs
>>> inputs = [torch.tensor(atom_number).to(torch.int64),
... torch.tensor(distance).to(torch.float32),
... torch.tensor(atom_membership).to(torch.int64),
... torch.tensor(distance_membership_i).to(torch.int64),
... torch.tensor(distance_membership_j).to(torch.int64)]
    >>> n_tasks = data.y.shape[1]
>>> model = DTNN(n_tasks)
>>> pred = model(inputs)
References
----------
.. [1] Schütt, <NAME>., et al. "Quantum-chemical insights from deep
tensor neural networks." Nature communications 8.1 (2017): 1-8.
"""
def __init__(self,
n_tasks: int,
n_embedding: int = 30,
n_hidden: int = 100,
n_distance: int = 100,
distance_min: float = -1,
distance_max: float = 18,
output_activation: bool = True,
mode: str = "regression",
dropout: float = 0.0,
n_steps: int = 2):
"""
Parameters
----------
n_tasks: int
Number of tasks
n_embedding: int (default 30)
Number of features per atom.
n_hidden: int (default 100)
Number of features for each molecule after DTNNStep
n_distance: int (default 100)
granularity of distance matrix
step size will be (distance_max-distance_min)/n_distance
distance_min: float (default -1)
minimum distance of atom pairs (in Angstrom)
distance_max: float (default 18)
maximum distance of atom pairs (in Angstrom)
output_activation: bool (default True)
            determines whether an activation function should be applied to its output.
mode: str (default "regression")
Only "regression" is currently supported.
dropout: float (default 0.0)
            the dropout probability to use.
n_steps: int (default 2)
Number of DTNNStep Layers to use.
"""
super(DTNN, self).__init__()
self.n_tasks = n_tasks
self.n_embedding = n_embedding
self.n_hidden = n_hidden
self.n_distance = n_distance
self.distance_min = distance_min
self.distance_max = distance_max
self.output_activation = output_activation
self.mode = mode
self.dropout = dropout
self.n_steps = n_steps
# get DTNNEmbedding
self.dtnn_embedding = layers.DTNNEmbedding(n_embedding=self.n_embedding)
# get DTNNSteps
self.dtnn_step = nn.ModuleList()
for i in range(self.n_steps):
self.dtnn_step.append(
layers.DTNNStep(n_embedding=self.n_embedding,
n_distance=self.n_distance))
# get DTNNGather
self.dtnn_gather = layers.DTNNGather(
n_embedding=self.n_embedding,
layer_sizes=[self.n_hidden],
n_outputs=self.n_tasks,
output_activation=self.output_activation)
# get Final Linear Layer
self.linear = nn.LazyLinear(self.n_tasks)
def forward(self, inputs: List[torch.Tensor]):
"""
Parameters
----------
inputs: List
A list of tensors containing atom_number, distance,
atom_membership, distance_membership_i, and distance_membership_j.
Returns
-------
output: torch.Tensor
Predictions of the Molecular Energy.
"""
dtnn_embedding = self.dtnn_embedding(inputs[0])
for i in range(self.n_steps):
dtnn_embedding = nn.Dropout(self.dropout)(dtnn_embedding)
dtnn_embedding = self.dtnn_step[i](
[dtnn_embedding, inputs[1], inputs[3], inputs[4]])
dtnn_step = nn.Dropout(self.dropout)(dtnn_embedding)
dtnn_gather = self.dtnn_gather([dtnn_step, inputs[2]])
dtnn_gather = nn.Dropout(self.dropout)(dtnn_gather)
output = self.linear(dtnn_gather)
return output
class DTNNModel(TorchModel):
"""Implements DTNN models for regression.
DTNN is based on the many-body Hamiltonian concept, which is a fundamental principle in quantum mechanics.
    DTNN receives a molecule's distance matrix and the membership of its atoms from its Coulomb Matrix representation.
Then, it iteratively refines the representation of each atom by considering its interactions with neighboring atoms.
Finally, it predicts the energy of the molecule by summing up the energies of the individual atoms.
This class implements the Deep Tensor Neural Network (DTNN) [1]_.
Examples
--------
>>> import os
>>> from deepchem.data import SDFLoader
>>> from deepchem.feat import CoulombMatrix
>>> from deepchem.models.torch_models import DTNNModel
>>> model_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
>>> dataset_file = os.path.join(model_dir, 'tests/assets/qm9_mini.sdf')
>>> TASKS = ["alpha", "homo"]
>>> loader = SDFLoader(tasks=TASKS, featurizer=CoulombMatrix(29), sanitize=True)
>>> data = loader.create_dataset(dataset_file, shard_size=100)
>>> n_tasks = data.y.shape[1]
>>> model = DTNNModel(n_tasks,
... n_embedding=20,
... n_distance=100,
... learning_rate=1.0,
... mode="regression")
>>> loss = model.fit(data, nb_epoch=250)
>>> pred = model.predict(data)
References
----------
.. [1] Schütt, <NAME>., et al. "Quantum-chemical insights from deep
tensor neural networks." Nature communications 8.1 (2017): 1-8.
"""
def __init__(self,
n_tasks: int,
n_embedding: int = 30,
n_hidden: int = 100,
n_distance: int = 100,
distance_min: float = -1,
distance_max: float = 18,
output_activation: bool = True,
mode: str = "regression",
dropout: float = 0.0,
n_steps: int = 2,
**kwargs):
"""
Parameters
----------
n_tasks: int
Number of tasks
n_embedding: int (default 30)
Number of features per atom.
n_hidden: int (default 100)
Number of features for each molecule after DTNNStep
n_distance: int (default 100)
granularity of distance matrix
step size will be (distance_max-distance_min)/n_distance
distance_min: float (default -1)
minimum distance of atom pairs (in Angstrom)
        distance_max: float (default 18)
maximum distance of atom pairs (in Angstrom)
output_activation: bool (default True)
            determines whether an activation function should be applied to its output.
mode: str (default "regression")
Only "regression" is currently supported.
dropout: float (default 0.0)
            the dropout probability to use.
n_steps: int (default 2)
Number of DTNNStep Layers to use.
"""
if dropout < 0 or dropout > 1:
raise ValueError("dropout probability has to be between 0 and 1, "
"but got {}".format(dropout))
model = DTNN(n_tasks=n_tasks,
n_embedding=n_embedding,
n_hidden=n_hidden,
n_distance=n_distance,
distance_min=distance_min,
distance_max=distance_max,
output_activation=output_activation,
mode=mode,
dropout=dropout,
n_steps=n_steps)
if mode not in ['regression']:
raise ValueError("Only 'regression' mode is currently supported")
super(DTNNModel, self).__init__(model, L2Loss(), ["prediction"],
**kwargs)
def default_generator(self,
dataset: Dataset,
epochs: int = 1,
mode: str = 'fit',
deterministic: bool = True,
pad_batches: bool = True):
"""Create a generator that iterates batches for a dataset.
It processes inputs through the _compute_features_on_batch function to calculate required features of input.
Parameters
----------
dataset: Dataset
the data to iterate
epochs: int
the number of times to iterate over the full dataset
mode: str
allowed values are 'fit' (called during training), 'predict' (called
during prediction), and 'uncertainty' (called during uncertainty
prediction)
deterministic: bool
whether to iterate over the dataset in order, or randomly shuffle the
data for each epoch
pad_batches: bool
whether to pad each batch up to this model's preferred batch size
Returns
-------
a generator that iterates batches, each represented as a tuple of lists:
([inputs], [outputs], [weights])
"""
for epoch in range(epochs):
for (X_b, y_b, w_b,
ids_b) in dataset.iterbatches(batch_size=self.batch_size,
deterministic=deterministic,
pad_batches=pad_batches):
yield (batch_coulomb_matrix_features(X_b,
self.model.distance_max,
self.model.distance_min,
self.model.n_distance),
[y_b], [w_b])
<file_sep>import numpy as np
from deepchem.utils.typing import RDKitMol
from deepchem.feat.base_classes import MolecularFeaturizer
class PubChemFingerprint(MolecularFeaturizer):
"""PubChem Fingerprint.
    The PubChem fingerprint is an 881 bit structural key,
which is used by PubChem for similarity searching.
Please confirm the details in [1]_.
References
----------
.. [1] ftp://ftp.ncbi.nlm.nih.gov/pubchem/specifications/pubchem_fingerprints.pdf
Note
-----
This class requires RDKit and PubChemPy to be installed.
    PubChemPy uses the PubChem REST API to get the fingerprint, so you need internet access.
Examples
--------
>>> import deepchem as dc
>>> smiles = ['CCC']
>>> featurizer = dc.feat.PubChemFingerprint()
>>> features = featurizer.featurize(smiles)
>>> type(features[0])
<class 'numpy.ndarray'>
>>> features[0].shape
(881,)
"""
def __init__(self):
"""Initialize this featurizer."""
try:
from rdkit import Chem # noqa
import pubchempy as pcp # noqa
except ModuleNotFoundError:
raise ImportError("This class requires PubChemPy to be installed.")
self.get_pubchem_compounds = pcp.get_compounds
def _featurize(self, datapoint: RDKitMol, **kwargs) -> np.ndarray:
"""
Calculate PubChem fingerprint.
Parameters
----------
datapoint: rdkit.Chem.rdchem.Mol
RDKit Mol object
Returns
-------
np.ndarray
            1D array of PubChem fingerprint bits for `datapoint`. The length is 881.
"""
try:
from rdkit import Chem
import pubchempy as pcp
except ModuleNotFoundError:
raise ImportError("This class requires PubChemPy to be installed.")
if 'mol' in kwargs:
datapoint = kwargs.get("mol")
raise DeprecationWarning(
'Mol is being phased out as a parameter, please pass "datapoint" instead.'
)
smiles = Chem.MolToSmiles(datapoint)
pubchem_compound = pcp.get_compounds(smiles, 'smiles')[0]
feature = [int(bit) for bit in pubchem_compound.cactvs_fingerprint]
return np.asarray(feature)
<file_sep># Transformer Examples
In this example directory, we provide usage examples for the
transformers DeepChem supports. Here's a list of the transformers DeepChem
currently supports:
- `LogTransformer`
- `ClippingTransformer`
- `NormalizationTransformer`
- `BalancingTransformer`
- `CDFTransformer`
- `PowerTransformer`
- `CoulombFitTransformer`
- `IRVTransformer`
- `DAGTransformer`
- `ANITransformer`
- `MinMaxTransformer`
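
As a quick illustration, here is a minimal sketch that fits a
`NormalizationTransformer` on a small in-memory dataset and then applies it
(the random arrays below are placeholders):

```
import numpy as np
import deepchem as dc

# Build a tiny placeholder dataset
X = np.random.rand(10, 5)
y = np.random.rand(10, 1)
dataset = dc.data.NumpyDataset(X, y)

# Normalize the labels to zero mean and unit standard deviation
transformer = dc.trans.NormalizationTransformer(transform_y=True, dataset=dataset)
dataset = transformer.transform(dataset)
```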
<file_sep>Contributing to DeepChem as a Scientist
========================================
The scientific community in many ways is quite traditional.
Students typically learn in apprenticeship from advisors who
teach a small number of students directly. This system has endured
for centuries and allows for expert scientists to teach their ways of
thinking to new students.
For more context, most scientific research today is done in "labs"
run in this mostly traditional fashion. A principal investigator (PI)
will run the lab and work with undergraduate, graduate, and
postdoctoral students who produce research papers. Labs are funded by
"grants," typically from governments and philanthropic agencies.
Papers and citations are the critical currencies of this system, and a
strong publication record is necessary for any scientist to establish
themselves.
This traditional model can find it difficult to fund the development
of high quality software for a few reasons. First, students are in a
lab for limited periods of time (3-5 years often). This means there's
high turnover, and critical knowledge can be lost when a student moves
on. Second, grants for software are still new and not broadly
available. A lab might very reasonably choose to focus on scientific
discovery rather than on necessary software engineering. (Although,
it's worth noting there are many exceptions that prove the rule!
DeepChem was born in an academic lab like many other quality
projects.)
We believe that contributing to and using DeepChem can be highly
valuable for scientific careers. DeepChem can help maintain new
scientific algorithms for the long term, making sure that your
discoveries continue to be used after students graduate. We've seen
too many brilliant projects flounder after students move on, and we'd
like to help you make sure that your algorithms have the most impact.
Scientist FAQ
-------------
.. contents:: Contents
:local:
Wouldn't it be better for my career to make my own package rather than use DeepChem?
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
The answer to this really depends on what you're looking for out of
your career! Making and maintaining good software is hard. It requires
careful testing and continued maintenance. Your code will bitrot over
time without attention. If your focus is on new inventions and you
find software engineering less compelling, working with DeepChem may
enable you to go further in your career by letting you focus on new
algorithms and leveraging the DeepChem Project's infrastructure to
maintain your inventions.
In addition, you may find considerable inspiration from participating
in the DeepChem community. Looking at how other scientists solve
problems, and connecting with new collaborators across the world can
help you look at problems in a new way. Longtime DeepChem contributors
find that they often end up writing papers together!
All that said, there may be very solid reasons for you to build your
own project! Especially if you want to explore designs that we haven't
or can't easily. In that case, we'd still love to collaborate with
you. DeepChem depends on a broad constellation of scientific packages
and we'd love to make your package's features accessible to our users.
Is there a DeepChem PI?
^^^^^^^^^^^^^^^^^^^^^^^
While DeepChem was born in the Pande lab at Stanford,
the project now lives as a "decentralized research organization."
It would be more accurate to say that there are informally multiple "DeepChem PIs,"
who use it in their work. You too can be a DeepChem PI!
Do I need to add DeepChem team members as co-authors to my paper?
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Our suggestion is to use good judgment and usual scientific etiquette.
If a particular DeepChem team member has contributed a lot to your effort,
adding them might make sense. If no one person has contributed sufficiently,
an acknowledgment or citation would be great!
I want to establish my scientific niche. How can I do that as a DeepChem contributor? Won't my contribution be lost in the noise?
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
It's critically important for a new scientist to establish themselves and
their contributions in order to launch a scientific career. We believe that
DeepChem can help you do this! If you add a significant set of new features to DeepChem,
it might be appropriate for you to write a paper (as lead or corresponding author or however makes sense)
that introduces the new feature and your contribution.
As a decentralized research organization, we want to help you launch
your careers. We're very open to other collaboration structures that
work for your career needs.
I'm an aspiring scientist, not part of a lab. Can I join DeepChem?
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Yes! DeepChem's core mission is to democratize the use of deep learning for the sciences.
This means no barriers, no walls. Anyone is welcome to join and contribute.
Join our developer calls, chat one-on-one with our scientists,
many of whom are glad to work with new students. You may form connections that
help you join a more traditional lab, or you may choose to form your own path.
We're glad to support either.
Is there DeepChem Grant Money?
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Not yet, but we're actively looking into getting grants to support DeepChem researchers.
If you're a PI who wants to collaborate with us, please get in touch!
I'm an industry researcher. Can I participate too?
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Yes! The most powerful feature of DeepChem is its community.
Becoming part of the DeepChem project can let you build a network that lasts across jobs and roles.
Lifelong employment at a corporation is less and less common. Joining our community will
let you build bonds that cross jobs and could help you do your current job better too!
What about intellectual property?
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
One of the core goals for DeepChem is to build a shared set of
scientific resources and techniques that aren't locked up by patents.
Our hope is to enable your company or organization to leverage
techniques with less worry about patent infringement.
We ask in return that you act as a responsible community member
and put in as much as you get out. If you find DeepChem very
valuable, please consider contributing back some innovations or
improvements so others can benefit. If you're getting a patent on your
invention, try to make sure that you don't infringe on anything in
DeepChem. Lots of things sneak past patent review. As an open source
community, we don't have the resources to actively defend ourselves
and we rely on your good judgment and help!
If I use DeepChem on my organization's data, do I have to release the data?
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Not at all! DeepChem is released with a permissive MIT license. Any
analyses you perform belong entirely to you. You are under no
obligation to release your proprietary data or inventions.
What if I want to release data? Can DeepChem help?
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
If you are interested in open sourcing data, the DeepChem project
maintains the
`MoleculeNet <https://deepchem.readthedocs.io/en/latest/moleculenet.html>`_
suite of datasets. Adding your dataset to MoleculeNet can be a
powerful way to ensure that a broad community of users can access your
released data in convenient fashion. It's important to note that
MoleculeNet provides programmatic access to data, which may not be
appropriate for all types of data (especially for clinical or patient
data which may be governed by regulations/laws). Open source
datasets can be a powerful resource, but need to be handled with care.
Is MoleculeNet just about molecules?
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Not anymore! Any scientific datasets are welcome in MoleculeNet. At
some point in the future, we may rename the effort to avoid confusion,
but for now, we emphasize that non-molecular datasets are welcome too.
Does MoleculeNet allow for releasing data under different licenses?
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
MoleculeNet already supports datasets released under different
licenses. We can work with you to use your license of choice.
<file_sep>"""Sequence to sequence translation models."""
from deepchem.models import KerasModel, layers
from heapq import heappush, heappushpop
import numpy as np
import tensorflow as tf
from tensorflow.keras.layers import Input, Layer, Dense, Dropout, GRU, Lambda, Conv1D, Flatten, BatchNormalization
class VariationalRandomizer(Layer):
"""Add random noise to the embedding and include a corresponding loss."""
def __init__(self, embedding_dimension, annealing_start_step,
annealing_final_step, **kwargs):
super(VariationalRandomizer, self).__init__(**kwargs)
self._embedding_dimension = embedding_dimension
self._annealing_final_step = annealing_final_step
self._annealing_start_step = annealing_start_step
self.dense_mean = Dense(embedding_dimension)
self.dense_stddev = Dense(embedding_dimension)
self.combine = layers.CombineMeanStd(training_only=True)
def call(self, inputs, training=True):
input, global_step = inputs
embedding_mean = self.dense_mean(input)
embedding_stddev = self.dense_stddev(input)
embedding = self.combine([embedding_mean, embedding_stddev],
training=training)
mean_sq = embedding_mean * embedding_mean
stddev_sq = embedding_stddev * embedding_stddev
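        # KL divergence of N(mean, stddev) from N(0, 1); the factor of 1/2 is
        # applied when the loss is added below.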
kl = mean_sq + stddev_sq - tf.math.log(stddev_sq + 1e-20) - 1
anneal_steps = self._annealing_final_step - self._annealing_start_step
if anneal_steps > 0:
current_step = tf.cast(global_step,
tf.float32) - self._annealing_start_step
anneal_frac = tf.maximum(0.0, current_step) / anneal_steps
kl_scale = tf.minimum(1.0, anneal_frac * anneal_frac)
else:
kl_scale = 1.0
self.add_loss(0.5 * kl_scale * tf.reduce_mean(kl))
return embedding
class SeqToSeq(KerasModel):
"""Implements sequence to sequence translation models.
The model is based on the description in Sutskever et al., "Sequence to
Sequence Learning with Neural Networks" (https://arxiv.org/abs/1409.3215),
although this implementation uses GRUs instead of LSTMs. The goal is to
take sequences of tokens as input, and translate each one into a different
output sequence. The input and output sequences can both be of variable
length, and an output sequence need not have the same length as the input
sequence it was generated from. For example, these models were originally
developed for use in natural language processing. In that context, the
input might be a sequence of English words, and the output might be a
sequence of French words. The goal would be to train the model to translate
sentences from English to French.
The model consists of two parts called the "encoder" and "decoder". Each one
consists of a stack of recurrent layers. The job of the encoder is to
transform the input sequence into a single, fixed length vector called the
"embedding". That vector contains all relevant information from the input
sequence. The decoder then transforms the embedding vector into the output
sequence.
These models can be used for various purposes. First and most obviously,
they can be used for sequence to sequence translation. In any case where you
have sequences of tokens, and you want to translate each one into a different
sequence, a SeqToSeq model can be trained to perform the translation.
Another possible use case is transforming variable length sequences into
fixed length vectors. Many types of models require their inputs to have a
fixed shape, which makes it difficult to use them with variable sized inputs
(for example, when the input is a molecule, and different molecules have
different numbers of atoms). In that case, you can train a SeqToSeq model as
an autoencoder, so that it tries to make the output sequence identical to the
input one. That forces the embedding vector to contain all information from
the original sequence. You can then use the encoder for transforming
sequences into fixed length embedding vectors, suitable to use as inputs to
other types of models.
Another use case is to train the decoder for use as a generative model. Here
again you begin by training the SeqToSeq model as an autoencoder. Once
training is complete, you can supply arbitrary embedding vectors, and
transform each one into an output sequence. When used in this way, you
typically train it as a variational autoencoder. This adds random noise to
the encoder, and also adds a constraint term to the loss that forces the
embedding vector to have a unit Gaussian distribution. You can then pick
random vectors from a Gaussian distribution, and the output sequences should
follow the same distribution as the training data.
When training as a variational autoencoder, it is best to use KL cost
annealing, as described in https://arxiv.org/abs/1511.06349. The constraint
term in the loss is initially set to 0, so the optimizer just tries to
minimize the reconstruction loss. Once it has made reasonable progress
toward that, the constraint term can be gradually turned back on. The range
of steps over which this happens is configurable.
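
    Examples
    --------
    A minimal construction sketch; the token list and sequences below are
    illustrative, and the training call is commented out because it needs
    many batches to converge.

    >>> tokens = list("ACGT")
    >>> model = SeqToSeq(tokens, tokens, max_output_length=6,
    ...                  encoder_layers=1, decoder_layers=1,
    ...                  embedding_dimension=16, batch_size=4)
    >>> train_pairs = [("ACG", "ACG"), ("TTA", "TTA")]  # autoencoder-style pairs
    >>> # model.fit_sequences(train_pairs)
    >>> # model.predict_from_sequences(["ACG"])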
"""
sequence_end = object()
def __init__(self,
input_tokens,
output_tokens,
max_output_length,
encoder_layers=4,
decoder_layers=4,
embedding_dimension=512,
dropout=0.0,
reverse_input=True,
variational=False,
annealing_start_step=5000,
annealing_final_step=10000,
**kwargs):
"""Construct a SeqToSeq model.
In addition to the following arguments, this class also accepts all the keyword arguments
        from KerasModel.
Parameters
----------
input_tokens: list
a list of all tokens that may appear in input sequences
output_tokens: list
a list of all tokens that may appear in output sequences
max_output_length: int
the maximum length of output sequence that may be generated
encoder_layers: int
the number of recurrent layers in the encoder
decoder_layers: int
the number of recurrent layers in the decoder
embedding_dimension: int
the width of the embedding vector. This also is the width of all
recurrent layers.
dropout: float
the dropout probability to use during training
reverse_input: bool
if True, reverse the order of input sequences before sending them into
the encoder. This can improve performance when working with long sequences.
variational: bool
if True, train the model as a variational autoencoder. This adds random
noise to the encoder, and also constrains the embedding to follow a unit
Gaussian distribution.
annealing_start_step: int
the step (that is, batch) at which to begin turning on the constraint term
for KL cost annealing
annealing_final_step: int
the step (that is, batch) at which to finish turning on the constraint term
for KL cost annealing
"""
if SeqToSeq.sequence_end not in input_tokens:
input_tokens = input_tokens + [SeqToSeq.sequence_end]
if SeqToSeq.sequence_end not in output_tokens:
output_tokens = output_tokens + [SeqToSeq.sequence_end]
self._input_tokens = input_tokens
self._output_tokens = output_tokens
self._input_dict = dict((x, i) for i, x in enumerate(input_tokens))
self._output_dict = dict((x, i) for i, x in enumerate(output_tokens))
self._max_output_length = max_output_length
self._embedding_dimension = embedding_dimension
self._reverse_input = reverse_input
self.encoder = self._create_encoder(encoder_layers, dropout)
self.decoder = self._create_decoder(decoder_layers, dropout)
features = self._create_features()
gather_indices = Input(shape=(2,), dtype=tf.int32)
global_step = Input(shape=tuple(), dtype=tf.int32)
embedding = self.encoder([features, gather_indices])
self._embedding = self.encoder([features, gather_indices],
training=False)
if variational:
randomizer = VariationalRandomizer(self._embedding_dimension,
annealing_start_step,
annealing_final_step)
embedding = randomizer([self._embedding, global_step])
self._embedding = randomizer([self._embedding, global_step],
training=False)
output = self.decoder(embedding)
model = tf.keras.Model(inputs=[features, gather_indices, global_step],
outputs=output)
super(SeqToSeq, self).__init__(model, self._create_loss(), **kwargs)
def _create_features(self):
return Input(shape=(None, len(self._input_tokens)))
def _create_encoder(self, n_layers, dropout):
"""Create the encoder as a tf.keras.Model."""
input = self._create_features()
gather_indices = Input(shape=(2,), dtype=tf.int32)
prev_layer = input
for i in range(n_layers):
if dropout > 0.0:
prev_layer = Dropout(rate=dropout)(prev_layer)
prev_layer = GRU(self._embedding_dimension,
return_sequences=True)(prev_layer)
prev_layer = Lambda(lambda x: tf.gather_nd(x[0], x[1]))(
[prev_layer, gather_indices])
return tf.keras.Model(inputs=[input, gather_indices],
outputs=prev_layer)
def _create_decoder(self, n_layers, dropout):
"""Create the decoder as a tf.keras.Model."""
input = Input(shape=(self._embedding_dimension,))
prev_layer = layers.Stack()(self._max_output_length * [input])
for i in range(n_layers):
if dropout > 0.0:
prev_layer = Dropout(dropout)(prev_layer)
prev_layer = GRU(self._embedding_dimension,
return_sequences=True)(prev_layer)
output = Dense(len(self._output_tokens),
activation=tf.nn.softmax)(prev_layer)
return tf.keras.Model(inputs=input, outputs=output)
def _create_loss(self):
"""Create the loss function."""
def loss_fn(outputs, labels, weights):
prob = tf.reduce_sum(outputs[0] * labels[0], axis=2)
mask = tf.reduce_sum(labels[0], axis=2)
log_prob = tf.math.log(prob + 1e-20) * mask
loss = -tf.reduce_mean(tf.reduce_sum(log_prob, axis=1))
return loss + sum(self.model.losses)
return loss_fn
def fit_sequences(self,
sequences,
max_checkpoints_to_keep=5,
checkpoint_interval=1000,
restore=False):
"""Train this model on a set of sequences
Parameters
----------
sequences: iterable
the training samples to fit to. Each sample should be
represented as a tuple of the form (input_sequence, output_sequence).
max_checkpoints_to_keep: int
the maximum number of checkpoints to keep. Older checkpoints are discarded.
checkpoint_interval: int
the frequency at which to write checkpoints, measured in training steps.
restore: bool
if True, restore the model from the most recent checkpoint and continue training
from there. If False, retrain the model from scratch.
"""
self.fit_generator(self._generate_batches(sequences),
max_checkpoints_to_keep=max_checkpoints_to_keep,
checkpoint_interval=checkpoint_interval,
restore=restore)
def predict_from_sequences(self, sequences, beam_width=5):
"""Given a set of input sequences, predict the output sequences.
The prediction is done using a beam search with length normalization.
Parameters
----------
sequences: iterable
the input sequences to generate a prediction for
beam_width: int
the beam width to use for searching. Set to 1 to use a simple greedy search.
"""
result = []
for batch in self._batch_elements(sequences):
features = self._create_input_array(batch)
indices = np.array([(i, len(batch[i]) if i < len(batch) else 0)
for i in range(self.batch_size)])
probs = self.predict_on_generator([[
(features, indices, np.array(self.get_global_step())), None,
None
]])
for i in range(len(batch)):
result.append(self._beam_search(probs[i], beam_width))
return result
def predict_from_embeddings(self, embeddings, beam_width=5):
"""Given a set of embedding vectors, predict the output sequences.
The prediction is done using a beam search with length normalization.
Parameters
----------
embeddings: iterable
the embedding vectors to generate predictions for
beam_width: int
the beam width to use for searching. Set to 1 to use a simple greedy search.
"""
result = []
for batch in self._batch_elements(embeddings):
embedding_array = np.zeros(
(self.batch_size, self._embedding_dimension), dtype=np.float32)
for i, e in enumerate(batch):
embedding_array[i] = e
probs = self.decoder(embedding_array, training=False)
probs = probs.numpy()
for i in range(len(batch)):
result.append(self._beam_search(probs[i], beam_width))
return result
def predict_embeddings(self, sequences):
"""Given a set of input sequences, compute the embedding vectors.
Parameters
----------
sequences: iterable
the input sequences to generate an embedding vector for
"""
result = []
for batch in self._batch_elements(sequences):
features = self._create_input_array(batch)
indices = np.array([(i, len(batch[i]) if i < len(batch) else 0)
for i in range(self.batch_size)])
embeddings = self.predict_on_generator(
[[(features, indices, np.array(self.get_global_step())), None,
None]],
outputs=self._embedding)
for i in range(len(batch)):
result.append(embeddings[i])
return np.array(result, dtype=np.float32)
def _beam_search(self, probs, beam_width):
"""Perform a beam search for the most likely output sequence."""
if beam_width == 1:
# Do a simple greedy search.
s = []
for i in range(len(probs)):
token = self._output_tokens[np.argmax(probs[i])]
if token == SeqToSeq.sequence_end:
break
s.append(token)
return s
# Do a beam search with length normalization.
logprobs = np.log(probs)
# Represent each candidate as (normalized prob, raw prob, sequence)
candidates = [(0.0, 0.0, [])]
for i in range(len(logprobs)):
new_candidates = []
for c in candidates:
if len(c[2]) > 0 and c[2][-1] == SeqToSeq.sequence_end:
# This candidate sequence has already been terminated
if len(new_candidates) < beam_width:
heappush(new_candidates, c)
else:
heappushpop(new_candidates, c)
else:
# Consider all possible tokens we could add to this candidate sequence.
for j, logprob in enumerate(logprobs[i]):
new_logprob = logprob + c[1]
newc = (new_logprob / (len(c[2]) + 1), new_logprob,
c[2] + [self._output_tokens[j]])
if len(new_candidates) < beam_width:
heappush(new_candidates, newc)
else:
heappushpop(new_candidates, newc)
candidates = new_candidates
return sorted(candidates)[-1][2][:-1]
def _create_input_array(self, sequences):
"""Create the array describing the input sequences for a batch."""
lengths = [len(x) for x in sequences]
if self._reverse_input:
sequences = [reversed(s) for s in sequences]
features = np.zeros(
(self.batch_size, max(lengths) + 1, len(self._input_tokens)),
dtype=np.float32)
for i, sequence in enumerate(sequences):
for j, token in enumerate(sequence):
features[i, j, self._input_dict[token]] = 1
features[np.arange(len(sequences)), lengths,
self._input_dict[SeqToSeq.sequence_end]] = 1
return features
def _create_output_array(self, sequences):
"""Create the array describing the target sequences for a batch."""
lengths = [len(x) for x in sequences]
labels = np.zeros(
(self.batch_size, self._max_output_length, len(
self._output_tokens)),
dtype=np.float32)
end_marker_index = self._output_dict[SeqToSeq.sequence_end]
for i, sequence in enumerate(sequences):
for j, token in enumerate(sequence):
labels[i, j, self._output_dict[token]] = 1
for j in range(lengths[i], self._max_output_length):
labels[i, j, end_marker_index] = 1
return labels
def _batch_elements(self, elements):
"""Combine elements into batches."""
batch = []
for s in elements:
batch.append(s)
if len(batch) == self.batch_size:
yield batch
batch = []
if len(batch) > 0:
yield batch
def _generate_batches(self, sequences):
"""Create feed_dicts for fitting."""
for batch in self._batch_elements(sequences):
inputs = []
outputs = []
for input, output in batch:
inputs.append(input)
outputs.append(output)
for i in range(len(inputs), self.batch_size):
inputs.append([])
outputs.append([])
features = self._create_input_array(inputs)
labels = self._create_output_array(outputs)
gather_indices = np.array([(i, len(x)) for i, x in enumerate(inputs)
])
yield ([features, gather_indices,
np.array(self.get_global_step())], [labels], [])
class AspuruGuzikAutoEncoder(SeqToSeq):
"""
This is an implementation of Automatic Chemical Design Using a Continuous Representation of Molecules
http://pubs.acs.org/doi/full/10.1021/acscentsci.7b00572
Abstract
--------
We report a method to convert discrete representations of molecules to and
from a multidimensional continuous representation. This model allows us to
generate new molecules for efficient exploration and optimization through
open-ended spaces of chemical compounds. A deep neural network was trained on
hundreds of thousands of existing chemical structures to construct three
coupled functions: an encoder, a decoder, and a predictor. The encoder
converts the discrete representation of a molecule into a real-valued
continuous vector, and the decoder converts these continuous vectors back to
discrete molecular representations. The predictor estimates chemical
properties from the latent continuous vector representation of the molecule.
Continuous representations of molecules allow us to automatically generate
novel chemical structures by performing simple operations in the latent space,
such as decoding random vectors, perturbing known chemical structures, or
interpolating between molecules. Continuous representations also allow the use
of powerful gradient-based optimization to efficiently guide the search for
optimized functional compounds. We demonstrate our method in the domain of
    drug-like molecules and also in a set of molecules with fewer than nine heavy
atoms.
Notes
-------
This is currently an imperfect reproduction of the paper. One difference is
that teacher forcing in the decoder is not implemented. The paper also
discusses co-learning molecular properties at the same time as training the
encoder/decoder. This is not done here. The hyperparameters chosen are from
ZINC dataset.
This network also currently suffers from exploding gradients. Care has to be taken when training.
NOTE(LESWING): Will need to play around with annealing schedule to not have exploding gradients
TODO(LESWING): Teacher Forcing
TODO(LESWING): Sigmoid variational loss annealing schedule
The output GRU layer had one
additional input, corresponding to the character sampled from the softmax output of the
    previous time step and was trained using teacher forcing. This increased the accuracy
of generated SMILES strings, which resulted in higher fractions of valid SMILES strings
for latent points outside the training data, but also made training more difficult, since the
decoder showed a tendency to ignore the (variational) encoding and rely solely on the input
sequence. The variational loss was annealed according to sigmoid schedule after 29 epochs,
running for a total 120 epochs
    I also added a BatchNorm before the mean and std embedding layers. This has empirically
made training more stable, and is discussed in Ladder Variational Autoencoders.
https://arxiv.org/pdf/1602.02282.pdf
Maybe if Teacher Forcing and Sigmoid variational loss annealing schedule are used the
    BatchNorm will no longer be necessary.
"""
def __init__(self,
num_tokens,
max_output_length,
embedding_dimension=196,
filter_sizes=[9, 9, 10],
kernel_sizes=[9, 9, 11],
decoder_dimension=488,
**kwargs):
"""
Parameters
----------
filter_sizes: list of int
Number of filters for each 1D convolution in the encoder
kernel_sizes: list of int
Kernel size for each 1D convolution in the encoder
decoder_dimension: int
Number of channels for the GRU Decoder
"""
if len(filter_sizes) != len(kernel_sizes):
raise ValueError("Must have same number of layers and kernels")
self._filter_sizes = filter_sizes
self._kernel_sizes = kernel_sizes
self._decoder_dimension = decoder_dimension
super(AspuruGuzikAutoEncoder,
self).__init__(input_tokens=num_tokens,
output_tokens=num_tokens,
max_output_length=max_output_length,
embedding_dimension=embedding_dimension,
variational=True,
reverse_input=False,
**kwargs)
def _create_features(self):
return Input(shape=(self._max_output_length, len(self._input_tokens)))
def _create_encoder(self, n_layers, dropout):
"""Create the encoder as a tf.keras.Model."""
input = self._create_features()
gather_indices = Input(shape=(2,), dtype=tf.int32)
prev_layer = input
for i in range(len(self._filter_sizes)):
filter_size = self._filter_sizes[i]
kernel_size = self._kernel_sizes[i]
if dropout > 0.0:
prev_layer = Dropout(rate=dropout)(prev_layer)
prev_layer = Conv1D(filters=filter_size,
kernel_size=kernel_size,
activation=tf.nn.relu)(prev_layer)
prev_layer = Flatten()(prev_layer)
prev_layer = Dense(self._decoder_dimension,
activation=tf.nn.relu)(prev_layer)
prev_layer = BatchNormalization()(prev_layer)
return tf.keras.Model(inputs=[input, gather_indices],
outputs=prev_layer)
def _create_decoder(self, n_layers, dropout):
"""Create the decoder as a tf.keras.Model."""
input = Input(shape=(self._embedding_dimension,))
prev_layer = Dense(self._embedding_dimension,
activation=tf.nn.relu)(input)
prev_layer = layers.Stack()(self._max_output_length * [prev_layer])
for i in range(3):
if dropout > 0.0:
prev_layer = Dropout(dropout)(prev_layer)
prev_layer = GRU(self._decoder_dimension,
return_sequences=True)(prev_layer)
output = Dense(len(self._output_tokens),
activation=tf.nn.softmax)(prev_layer)
return tf.keras.Model(inputs=input, outputs=output)
def _create_input_array(self, sequences):
return self._create_output_array(sequences)
<file_sep># flake8: noqa
from deepchem.feat.sequence_featurizers.position_frequency_matrix_featurizer import PFMFeaturizer<file_sep>"""
Test Coordinate Boxes.
"""
import numpy as np
import unittest
from deepchem.utils import coordinate_box_utils as box_utils
class TestCoordinateBoxUtils(unittest.TestCase):
def test_make_box(self):
x_range = (-10, 10)
y_range = (-20, 20)
z_range = (-30, 30)
box = box_utils.CoordinateBox(x_range, y_range, z_range)
assert box.x_range == x_range
assert box.y_range == y_range
assert box.z_range == z_range
def test_union(self):
x_range = (-10, 10)
y_range = (-20, 20)
z_range = (-30, 30)
box = box_utils.CoordinateBox(x_range, y_range, z_range)
x_range = (-1, 1)
y_range = (-2, 2)
z_range = (-3, 3)
interior_box = box_utils.CoordinateBox(x_range, y_range, z_range)
merged_box = box_utils.union(box, interior_box)
assert merged_box.x_range == box.x_range
assert merged_box.y_range == box.y_range
assert merged_box.z_range == box.z_range
x_range = (-10, 10)
y_range = (-20, 20)
z_range = (-30, 30)
box1 = box_utils.CoordinateBox(x_range, y_range, z_range)
x_range = (-11, 9)
y_range = (-20, 20)
z_range = (-30, 30)
box2 = box_utils.CoordinateBox(x_range, y_range, z_range)
merged_box = box_utils.union(box1, box2)
assert merged_box.x_range == (-11, 10)
assert merged_box.y_range == (-20, 20)
assert merged_box.z_range == (-30, 30)
def test_get_face_boxes(self):
coords = np.array([[0, 0, 0], [1, 0, 0], [0, 1, 0], [0, 0, 1]])
boxes = box_utils.get_face_boxes(coords)
# There are 4 faces to the shape created by coords
assert len(boxes) == 4
def test_point_containment(self):
box = box_utils.CoordinateBox((0, 1), (0, 1), (0, 1))
assert (0, 0, 0) in box
assert (-0.1, -0.1, -0.1) not in box
assert (0.5, 0.5, 0.5) in box
assert (5, 5, 5) not in box
def test_volume(self):
box = box_utils.CoordinateBox((0, 1), (0, 1), (0, 1))
assert box.volume() == 1.0
box = box_utils.CoordinateBox((0, 0), (0, 1), (0, 1))
assert box.volume() == 0
def test_box_containment(self):
box = box_utils.CoordinateBox((0, 1), (0, 1), (0, 1))
int_box = box_utils.CoordinateBox((0, 1), (0, 1), (0, 1))
assert box.contains(int_box)
ext_box = box_utils.CoordinateBox((0, 2), (0, 1), (0, 1))
assert not box.contains(ext_box)
def test_box_hash(self):
box1 = box_utils.CoordinateBox((0, 1), (0, 1), (0, 1))
box2 = box_utils.CoordinateBox((0, 2), (0, 2), (0, 2))
mapping = {}
mapping[box1] = 1
mapping[box2] = 2
assert len(mapping) == 2
def test_intersect_interval(self):
int1 = (0, 1)
int2 = (0.5, 2)
inter = box_utils.intersect_interval(int1, int2)
assert inter == (0.5, 1)
int1 = (0, 1)
int2 = (1.5, 2)
inter = box_utils.intersect_interval(int1, int2)
assert inter == (0, 0)
int1 = (1.5, 2)
int2 = (0, 1)
inter = box_utils.intersect_interval(int1, int2)
assert inter == (0, 0)
def test_intersection(self):
x_range = (-10, 10)
y_range = (-20, 20)
z_range = (-30, 30)
box = box_utils.CoordinateBox(x_range, y_range, z_range)
x_range = (-1, 1)
y_range = (-2, 2)
z_range = (-3, 3)
interior_box = box_utils.CoordinateBox(x_range, y_range, z_range)
int_box = box_utils.intersection(box, interior_box)
assert int_box == interior_box
x_range = (-10, 10)
y_range = (-20, 20)
z_range = (-30, 30)
box1 = box_utils.CoordinateBox(x_range, y_range, z_range)
x_range = (-11, 9)
y_range = (-20, 20)
z_range = (-30, 30)
box2 = box_utils.CoordinateBox(x_range, y_range, z_range)
int_box = box_utils.intersection(box1, box2)
assert int_box.x_range == (-10, 9)
assert int_box.y_range == (-20, 20)
assert int_box.z_range == (-30, 30)
def test_merge_overlapping_boxes(self):
x_range = (-1, 1)
y_range = (-2, 2)
z_range = (-3, 3)
interior_box = box_utils.CoordinateBox(x_range, y_range, z_range)
x_range = (-10, 10)
y_range = (-20, 20)
z_range = (-30, 30)
box = box_utils.CoordinateBox(x_range, y_range, z_range)
boxes = [interior_box, box]
merged_boxes = box_utils.merge_overlapping_boxes(boxes)
assert len(merged_boxes) == 1
x_range = (-10, 10)
y_range = (-20, 20)
z_range = (-30, 30)
box1 = box_utils.CoordinateBox(x_range, y_range, z_range)
x_range = (-11, 9)
y_range = (-20, 20)
z_range = (-30, 30)
box2 = box_utils.CoordinateBox(x_range, y_range, z_range)
boxes = [box1, box2]
merged_boxes = box_utils.merge_overlapping_boxes(boxes)
assert len(merged_boxes) == 1
box1 = box_utils.CoordinateBox((0, 1), (0, 1), (0, 1))
box2 = box_utils.CoordinateBox((2, 3), (2, 3), (2, 3))
boxes = [box1, box2]
merged_boxes = box_utils.merge_overlapping_boxes(boxes)
assert len(merged_boxes) == 2
box1 = box_utils.CoordinateBox((1, 2), (1, 2), (1, 2))
box2 = box_utils.CoordinateBox((1, 3), (1, 3), (1, 3))
boxes = [box1, box2]
merged_boxes = box_utils.merge_overlapping_boxes(boxes)
assert len(merged_boxes) == 1
assert merged_boxes[0] == box_utils.CoordinateBox((1, 3), (1, 3),
(1, 3))
box1 = box_utils.CoordinateBox((1, 3), (1, 3), (1, 3))
box2 = box_utils.CoordinateBox((1, 2), (1, 2), (1, 2))
boxes = [box1, box2]
merged_boxes = box_utils.merge_overlapping_boxes(boxes)
assert len(merged_boxes) == 1
assert merged_boxes[0] == box_utils.CoordinateBox((1, 3), (1, 3),
(1, 3))
box1 = box_utils.CoordinateBox((1, 3), (1, 3), (1, 3))
box2 = box_utils.CoordinateBox((1, 2), (1, 2), (1, 2))
box3 = box_utils.CoordinateBox((1, 2.5), (1, 2.5), (1, 2.5))
boxes = [box1, box2, box3]
merged_boxes = box_utils.merge_overlapping_boxes(boxes)
assert len(merged_boxes) == 1
assert merged_boxes[0] == box_utils.CoordinateBox((1, 3), (1, 3),
(1, 3))
<file_sep>"""
Script that trains graph-conv models on Tox21 dataset.
"""
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np
np.random.seed(123)
import tensorflow as tf
tf.random.set_seed(123)
import deepchem as dc
from deepchem.molnet import load_tox21
from deepchem.models.graph_models import GraphConvModel
model_dir = "/tmp/graph_conv"
# Load Tox21 dataset
tox21_tasks, tox21_datasets, transformers = load_tox21(featurizer='GraphConv')
train_dataset, valid_dataset, test_dataset = tox21_datasets
print(train_dataset.data_dir)
print(valid_dataset.data_dir)
# Fit models
metric = dc.metrics.Metric(
dc.metrics.roc_auc_score, np.mean, mode="classification")
# Batch size of models
batch_size = 50
model = GraphConvModel(
len(tox21_tasks), batch_size=batch_size, mode='classification')
model.fit(train_dataset, nb_epoch=10)
print("Evaluating model")
train_scores = model.evaluate(train_dataset, [metric], transformers)
valid_scores = model.evaluate(valid_dataset, [metric], transformers)
print("Train scores")
print(train_scores)
print("Validation scores")
print(valid_scores)
<file_sep>import numpy as np
from deepchem.utils.typing import PymatgenStructure
from deepchem.feat import MaterialStructureFeaturizer
from deepchem.utils.data_utils import pad_array
from typing import Any
class SineCoulombMatrix(MaterialStructureFeaturizer):
"""
Calculate sine Coulomb matrix for crystals.
A variant of Coulomb matrix for periodic crystals.
The sine Coulomb matrix is identical to the Coulomb matrix, except
that the inverse distance function is replaced by the inverse of
sin**2 of the vector between sites which are periodic in the
dimensions of the crystal lattice.
Features are flattened into a vector of matrix eigenvalues by default
for ML-readiness. To ensure that all feature vectors are equal
length, the maximum number of atoms (eigenvalues) in the input
dataset must be specified.
This featurizer requires the optional dependencies pymatgen and
matminer. It may be useful when crystal structures with 3D coordinates
are available.
See [1]_ for more details.
References
----------
.. [1] Faber et al. "Crystal Structure Representations for Machine
Learning Models of Formation Energies", Inter. J. Quantum Chem.
115, 16, 2015. https://arxiv.org/abs/1503.07406
Examples
--------
>>> import deepchem as dc
>>> import pymatgen as mg
>>> lattice = mg.core.Lattice.cubic(4.2)
>>> structure = mg.core.Structure(lattice, ["Cs", "Cl"], [[0, 0, 0], [0.5, 0.5, 0.5]])
>>> featurizer = dc.feat.SineCoulombMatrix(max_atoms=2)
>>> features = featurizer.featurize([structure])
>>> type(features[0])
<class 'numpy.ndarray'>
>>> features[0].shape # (max_atoms,)
(2,)
Note
----
This class requires matminer and Pymatgen to be installed.
"""
def __init__(self, max_atoms: int = 100, flatten: bool = True):
"""
Parameters
----------
max_atoms: int (default 100)
Maximum number of atoms for any crystal in the dataset. Used to
pad the Coulomb matrix.
flatten: bool (default True)
Return flattened vector of matrix eigenvalues.
"""
self.max_atoms = max_atoms
self.flatten = flatten
self.scm: Any = None
def _featurize(self, datapoint: PymatgenStructure, **kwargs) -> np.ndarray:
"""
Calculate sine Coulomb matrix from pymatgen structure.
Parameters
----------
datapoint: pymatgen.core.Structure
A periodic crystal composed of a lattice and a sequence of atomic
sites with 3D coordinates and elements.
Returns
-------
features: np.ndarray
2D sine Coulomb matrix with shape (max_atoms, max_atoms),
or 1D matrix eigenvalues with shape (max_atoms,).
"""
if 'struct' in kwargs and datapoint is None:
datapoint = kwargs.get("struct")
raise DeprecationWarning(
'Struct is being phased out as a parameter, please pass "datapoint" instead.'
)
if self.scm is None:
try:
from matminer.featurizers.structure import SineCoulombMatrix as SCM
self.scm = SCM(flatten=False)
except ModuleNotFoundError:
raise ImportError(
"This class requires matminer to be installed.")
# Get full N x N SCM
sine_mat = self.scm.featurize(datapoint)
if self.flatten:
eigs, _ = np.linalg.eig(sine_mat)
zeros = np.zeros(self.max_atoms)
zeros[:len(eigs[0])] = eigs[0]
features = zeros
else:
features = pad_array(sine_mat, self.max_atoms)
features = np.asarray(features)
return features
<file_sep>import pytorch_lightning as pl
import torch
class DCLightningDatasetBatch:
def __init__(self, batch):
X = [batch[0]]
y = [batch[1]]
w = [batch[2]]
self.batch_list = [X, y, w]
def collate_dataset_wrapper(batch):
return DCLightningDatasetBatch(batch)
class DCLightningDatasetModule(pl.LightningDataModule):
"""DeepChem Lightning Dataset Module to be used with the DCLightningModule and a Lightning trainer.
This module wraps the DeepChem PyTorch dataset and dataloader, providing a generic interface to run training.
https://pytorch-lightning.readthedocs.io/en/stable/extensions/datamodules.html
Notes
-----
This class requires PyTorch to be installed.
"""
def __init__(self, dataset, batch_size, collate_fn):
"""Create a new DCLightningDatasetModule.
Parameters
----------
dataset: A deepchem dataset.
batch_size: Batch size for the dataloader.
collate_fn: Method to collate instances across batch.
"""
super().__init__()
self._batch_size = batch_size
self._dataset = dataset.make_pytorch_dataset(
batch_size=self._batch_size)
self.collate_fn = collate_fn
def setup(self, stage):
self.train_dataset = self._dataset
def train_dataloader(self):
"""Returns the train dataloader from train dataset.
Returns
-------
dataloader: train dataloader to be used with DCLightningModule.
"""
dataloader = torch.utils.data.DataLoader(
self.train_dataset,
batch_size=None,
collate_fn=self.collate_fn,
shuffle=False,
)
return dataloader
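# Usage sketch (a minimal example, not part of the module API): it assumes a
# featurized DeepChem `dataset`, an already-constructed DCLightningModule
# instance named `lightning_module`, and an arbitrary batch size of 16.
#
#   import pytorch_lightning as pl
#   datamodule = DCLightningDatasetModule(dataset, 16, collate_dataset_wrapper)
#   trainer = pl.Trainer(max_epochs=1)
#   trainer.fit(lightning_module, datamodule=datamodule)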
<file_sep>import os
import unittest
import numpy as np
import pandas as pd
import deepchem as dc
from deepchem.feat import MolGraphConvFeaturizer
from deepchem.feat import PagtnMolGraphFeaturizer
class TestMolGraphConvFeaturizer(unittest.TestCase):
def test_default_featurizer(self):
smiles = ["C1=CC=CN=C1", "O=C(NCc1cc(OC)c(O)cc1)CCCC/C=C/C(C)C"]
featurizer = MolGraphConvFeaturizer()
graph_feat = featurizer.featurize(smiles)
assert len(graph_feat) == 2
# assert "C1=CC=CN=C1"
assert graph_feat[0].num_nodes == 6
assert graph_feat[0].num_node_features == 30
assert graph_feat[0].num_edges == 12
# assert "O=C(NCc1cc(OC)c(O)cc1)CCCC/C=C/C(C)C"
assert graph_feat[1].num_nodes == 22
assert graph_feat[1].num_node_features == 30
assert graph_feat[1].num_edges == 44
def test_featurizer_with_use_edge(self):
smiles = ["C1=CC=CN=C1", "O=C(NCc1cc(OC)c(O)cc1)CCCC/C=C/C(C)C"]
featurizer = MolGraphConvFeaturizer(use_edges=True)
graph_feat = featurizer.featurize(smiles)
assert len(graph_feat) == 2
# assert "C1=CC=CN=C1"
assert graph_feat[0].num_nodes == 6
assert graph_feat[0].num_node_features == 30
assert graph_feat[0].num_edges == 12
assert graph_feat[0].num_edge_features == 11
# assert "O=C(NCc1cc(OC)c(O)cc1)CCCC/C=C/C(C)C"
assert graph_feat[1].num_nodes == 22
assert graph_feat[1].num_node_features == 30
assert graph_feat[1].num_edges == 44
assert graph_feat[1].num_edge_features == 11
def test_featurizer_with_use_chirality(self):
smiles = ["C1=CC=CN=C1", "O=C(NCc1cc(OC)c(O)cc1)CCCC/C=C/C(C)C"]
featurizer = MolGraphConvFeaturizer(use_chirality=True)
graph_feat = featurizer.featurize(smiles)
assert len(graph_feat) == 2
# assert "C1=CC=CN=C1"
assert graph_feat[0].num_nodes == 6
assert graph_feat[0].num_node_features == 32
assert graph_feat[0].num_edges == 12
# assert "O=C(NCc1cc(OC)c(O)cc1)CCCC/C=C/C(C)C"
assert graph_feat[1].num_nodes == 22
assert graph_feat[1].num_node_features == 32
assert graph_feat[1].num_edges == 44
def test_featurizer_with_use_partial_charge(self):
smiles = ["C1=CC=CN=C1", "O=C(NCc1cc(OC)c(O)cc1)CCCC/C=C/C(C)C"]
featurizer = MolGraphConvFeaturizer(use_partial_charge=True)
graph_feat = featurizer.featurize(smiles)
assert len(graph_feat) == 2
# assert "C1=CC=CN=C1"
assert graph_feat[0].num_nodes == 6
assert graph_feat[0].num_node_features == 31
assert graph_feat[0].num_edges == 12
# assert "O=C(NCc1cc(OC)c(O)cc1)CCCC/C=C/C(C)C"
assert graph_feat[1].num_nodes == 22
assert graph_feat[1].num_node_features == 31
assert graph_feat[1].num_edges == 44
def test_featurizer_with_pos_kwargs(self):
# Test featurizer with atom 3-D coordinates as kwargs
smiles = ["C1=CC=CN=C1", "CC"]
pos_x = [np.random.randn(6), np.random.randn(2)]
pos_y, pos_z = pos_x, pos_x
featurizer = MolGraphConvFeaturizer()
graph_feat = featurizer.featurize(smiles,
pos_x=pos_x,
pos_y=pos_y,
pos_z=pos_z)
assert len(graph_feat) == 2
assert graph_feat[0].num_nodes == 6
assert graph_feat[0].node_pos_features.shape == (6, 3)
assert graph_feat[1].num_nodes == 2
assert graph_feat[1].node_pos_features.shape == (2, 3)
def test_featurizer_freesolv(self):
"""
Test freesolv sample dataset on MolGraphConvFeaturizer.
It contains 5 SMILES strings, of which 3 can be featurized and 2 cannot.
"""
# load sample dataset
dir = os.path.dirname(os.path.abspath(__file__))
input_file = os.path.join(dir, 'data/freesolv_imperfect_sample.csv')
loader = dc.data.CSVLoader(tasks=["y"],
feature_field="smiles",
featurizer=MolGraphConvFeaturizer())
dataset = loader.create_dataset(input_file)
assert len(pd.read_csv(input_file)) == 5
assert len(dataset) == 3
assert isinstance(dataset.X[0], dc.feat.GraphData)
class TestPagtnMolGraphConvFeaturizer(unittest.TestCase):
def test_default_featurizer(self):
smiles = ["C1=CC=CN=C1", "O=C(NCc1cc(OC)c(O)cc1)CCCC/C=C/C(C)C"]
featurizer = PagtnMolGraphFeaturizer(max_length=5)
graph_feat = featurizer.featurize(smiles)
assert len(graph_feat) == 2
# assert "C1=CC=CN=C1"
assert graph_feat[0].num_nodes == 6
assert graph_feat[0].num_node_features == 94
assert graph_feat[0].num_edges == 36
assert graph_feat[0].num_edge_features == 42
# assert "O=C(NCc1cc(OC)c(O)cc1)CCCC/C=C/C(C)C"
assert graph_feat[1].num_nodes == 22
assert graph_feat[1].num_node_features == 94
assert graph_feat[1].num_edges == 484
assert graph_feat[1].num_edge_features == 42
<file_sep>"""
Featurizes proposed binding pockets.
"""
import numpy as np
import logging
from typing import Dict, List
from deepchem.feat import Featurizer
from deepchem.utils.coordinate_box_utils import CoordinateBox
from deepchem.utils.rdkit_utils import load_molecule
logger = logging.getLogger(__name__)
def boxes_to_atoms(
coords: np.ndarray,
boxes: List[CoordinateBox]) -> Dict[CoordinateBox, List[int]]:
"""Maps each box to a list of atoms in that box.
Given the coordinates of a macromolecule, and a collection of boxes,
returns a dictionary which maps boxes to the atom indices of the
atoms in them.
Parameters
----------
coords: np.ndarray
A numpy array of shape `(N, 3)`
boxes: list
List of `CoordinateBox` objects.
Returns
-------
Dict[CoordinateBox, List[int]]
A dictionary mapping `CoordinateBox` objects to lists of atom indices.
"""
mapping = {}
for box_ind, box in enumerate(boxes):
box_atoms = []
for atom_ind in range(len(coords)):
atom = coords[atom_ind]
if atom in box:
box_atoms.append(atom_ind)
mapping[box] = box_atoms
return mapping
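# Usage sketch (hypothetical random coordinates; `get_face_boxes` lives in
# deepchem.utils.coordinate_box_utils):
#
#   import numpy as np
#   from deepchem.utils.coordinate_box_utils import get_face_boxes
#   coords = np.random.rand(10, 3)
#   boxes = get_face_boxes(coords)
#   mapping = boxes_to_atoms(coords, boxes)
#   # mapping[box] is the list of atom indices that fall inside `box`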
class BindingPocketFeaturizer(Featurizer):
"""Featurizes binding pockets with information about chemical
environments.
In many applications, it's desirable to look at binding pockets on
macromolecules which may be good targets for potential ligands or
other molecules to interact with. A `BindingPocketFeaturizer`
expects to be given a macromolecule, and a list of pockets to
featurize on that macromolecule. These pockets should be of the form
produced by a `dc.dock.BindingPocketFinder`, that is as a list of
`dc.utils.CoordinateBox` objects.
The featurization implemented by this class is currently very simple: it
counts the number of residues of each type present in the pocket. It's
likely that you'll want to override this implementation for more
sophisticated downstream use cases. Note that this implementation will
only work for proteins and not for other macromolecules.
Note
----
This class requires mdtraj to be installed.
"""
residues = [
"ALA", "ARG", "ASN", "ASP", "CYS", "GLN", "GLU", "GLY", "HIS", "ILE",
"LEU", "LYS", "MET", "PHE", "PRO", "PYL", "SER", "SEC", "THR", "TRP",
"TYR", "VAL", "ASX", "GLX"
]
n_features = len(residues)
# FIXME: Signature of "featurize" incompatible with supertype "Featurizer"
def featurize( # type: ignore[override]
self, protein_file: str,
pockets: List[CoordinateBox]) -> np.ndarray:
"""
Compute a residue-count featurization of the given binding pockets.
Parameters
----------
protein_file: str
Location of PDB file. Will be loaded by MDTraj
pockets: List[CoordinateBox]
List of `dc.utils.CoordinateBox` objects.
Returns
-------
np.ndarray
A numpy array of shape `(len(pockets), n_residues)`
"""
try:
import mdtraj
except ModuleNotFoundError:
raise ImportError("This class requires mdtraj to be installed.")
protein_coords = load_molecule(protein_file,
add_hydrogens=False,
calc_charges=False)[0]
mapping = boxes_to_atoms(protein_coords, pockets)
protein = mdtraj.load(protein_file)
n_pockets = len(pockets)
n_residues = len(BindingPocketFeaturizer.residues)
res_map = dict(zip(BindingPocketFeaturizer.residues, range(n_residues)))
all_features = np.zeros((n_pockets, n_residues))
for pocket_num, pocket in enumerate(pockets):
pocket_atoms = mapping[pocket]
for ind, atom in enumerate(pocket_atoms):
atom_name = str(protein.top.atom(atom))
# atom_name is of format RESX-ATOMTYPE
# where X is a 1 to 4 digit number
residue = atom_name[:3]
if residue not in res_map:
logger.warning("Non-standard residue in PDB file")
continue
all_features[pocket_num, res_map[residue]] += 1
return all_features
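# Usage sketch (assumes mdtraj is installed; "protein.pdb" is a hypothetical
# path and ConvexHullPocketFinder is one way to propose candidate pockets):
#
#   from deepchem.dock import ConvexHullPocketFinder
#   finder = ConvexHullPocketFinder()
#   pockets = finder.find_pockets("protein.pdb")
#   featurizer = BindingPocketFeaturizer()
#   features = featurizer.featurize("protein.pdb", pockets)
#   # features.shape == (len(pockets), len(BindingPocketFeaturizer.residues))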
<file_sep>"""
Script that trains Tensorflow Multitask models on FACTORS datasets.
"""
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
import os
import tempfile
import shutil
import numpy as np
import deepchem as dc
from FACTORS_datasets import load_factors
###Load data###
shard_size = 2000
num_trials = 2
print("About to load FACTORS data.")
FACTORS_tasks, datasets, transformers = load_factors(shard_size=shard_size)
train_dataset, valid_dataset, test_dataset = datasets
print("Number of compounds in train set")
print(len(train_dataset))
print("Number of compounds in validation set")
print(len(valid_dataset))
print("Number of compounds in test set")
print(len(test_dataset))
all_results = []
for trial in range(num_trials):
###Create model###
n_layers = 3
nb_epoch = 125
model = dc.models.TensorflowMultitaskRegressor(
len(FACTORS_tasks),
train_dataset.get_data_shape()[0],
layer_sizes=[1000] * n_layers,
dropouts=[.25] * n_layers,
weight_init_stddevs=[.02] * n_layers,
bias_init_consts=[1.] * n_layers,
learning_rate=.0003,
penalty=.0001,
penalty_type="l2",
optimizer="adam",
batch_size=100,
logdir="FACTORS_tf_model")
#Use Pearson R^2 regression metric
metric = dc.metrics.Metric(dc.metrics.pearson_r2_score, task_averager=np.mean)
print("Training model")
model.fit(train_dataset, nb_epoch=nb_epoch)
print("Evaluating models")
train_score, train_task_scores = model.evaluate(
train_dataset, [metric], transformers, per_task_metrics=True)
valid_score, valid_task_scores = model.evaluate(
valid_dataset, [metric], transformers, per_task_metrics=True)
test_score, test_task_scores = model.evaluate(
test_dataset, [metric], transformers, per_task_metrics=True)
all_results.append((train_score, train_task_scores, valid_score,
valid_task_scores, test_score, test_task_scores))
print("Scores for trial %d" % trial)
print("----------------------------------------------------------------")
print("train_task_scores")
print(train_task_scores)
print("Mean Train score")
print(train_score)
print("valid_task_scores")
print(valid_task_scores)
print("Mean Validation score")
print(valid_score)
print("test_task_scores")
print(test_task_scores)
print("Mean Test score")
print(test_score)
print("####################################################################")
for trial in range(num_trials):
(train_score, train_task_scores, valid_score, valid_task_scores, test_score,
test_task_scores) = all_results[trial]
print("Scores for trial %d" % trial)
print("----------------------------------------------------------------")
print("train_task_scores")
print(train_task_scores)
print("Mean Train score")
print(train_score)
print("valid_task_scores")
print(valid_task_scores)
print("Mean Validation score")
print(valid_score)
print("test_task_scores")
print(test_task_scores)
print("Mean Test score")
print(test_score)
<file_sep>"""
Common code for loading MoleculeNet datasets.
"""
import os
import logging
import deepchem as dc
from deepchem.data import Dataset, DiskDataset
from typing import List, Optional, Tuple, Type, Union
logger = logging.getLogger(__name__)
class TransformerGenerator(object):
"""Create Transformers for Datasets.
When loading molnet datasets, you cannot directly pass in Transformers
to use because many Transformers require the Dataset they will be applied to
as a constructor argument. Instead you pass in TransformerGenerator objects
which can create the Transformers once the Dataset is loaded.
"""
def __init__(self, transformer_class: Type[dc.trans.Transformer], **kwargs):
"""Construct an object for creating Transformers.
Parameters
----------
transformer_class: Type[Transformer]
the class of Transformer to create
kwargs:
any additional arguments are passed to the Transformer's constructor
"""
self.transformer_class = transformer_class
self.kwargs = kwargs
def create_transformer(self, dataset: Dataset) -> dc.trans.Transformer:
"""Construct a Transformer for a Dataset."""
return self.transformer_class(dataset=dataset, **self.kwargs)
def get_directory_name(self) -> str:
"""Get a name for directories on disk describing this Transformer."""
name = self.transformer_class.__name__
for key, value in self.kwargs.items():
if isinstance(value, list):
continue
name += '_' + key + '_' + str(value)
return name
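# Usage sketch: build the generator up front, then create the Transformer once
# the training split exists (`train_dataset` here is an assumed Dataset).
#
#   gen = TransformerGenerator(dc.trans.NormalizationTransformer, transform_y=True)
#   transformer = gen.create_transformer(train_dataset)
#   train_dataset = transformer.transform(train_dataset)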
featurizers = {
'ecfp': dc.feat.CircularFingerprint(size=1024),
'graphconv': dc.feat.ConvMolFeaturizer(),
'raw': dc.feat.RawFeaturizer(),
'onehot': dc.feat.OneHotFeaturizer(),
'smiles2img': dc.feat.SmilesToImage(img_size=80, img_spec='std'),
'weave': dc.feat.WeaveFeaturizer(),
}
splitters = {
'index': dc.splits.IndexSplitter(),
'random': dc.splits.RandomSplitter(),
'scaffold': dc.splits.ScaffoldSplitter(),
'butina': dc.splits.ButinaSplitter(),
'fingerprint': dc.splits.FingerprintSplitter(),
'task': dc.splits.TaskSplitter(),
'stratified': dc.splits.RandomStratifiedSplitter()
}
transformers = {
'balancing':
TransformerGenerator(dc.trans.BalancingTransformer),
'normalization':
TransformerGenerator(dc.trans.NormalizationTransformer,
transform_y=True),
'minmax':
TransformerGenerator(dc.trans.MinMaxTransformer, transform_y=True),
'clipping':
TransformerGenerator(dc.trans.ClippingTransformer, transform_y=True),
'log':
TransformerGenerator(dc.trans.LogTransformer, transform_y=True)
}
class _MolnetLoader(object):
"""The class provides common functionality used by many molnet loader functions.
It is an abstract class. Subclasses implement loading of particular datasets.
"""
def __init__(self, featurizer: Union[dc.feat.Featurizer, str],
splitter: Union[dc.splits.Splitter, str, None],
transformer_generators: List[Union[TransformerGenerator,
str]], tasks: List[str],
data_dir: Optional[str], save_dir: Optional[str], **kwargs):
"""Construct an object for loading a dataset.
Parameters
----------
featurizer: Featurizer or str
the featurizer to use for processing the data. Alternatively you can pass
one of the names from dc.molnet.featurizers as a shortcut.
splitter: Splitter or str
the splitter to use for splitting the data into training, validation, and
test sets. Alternatively you can pass one of the names from
dc.molnet.splitters as a shortcut. If this is None, all the data
will be included in a single dataset.
transformer_generators: list of TransformerGenerators or strings
the Transformers to apply to the data. Each one is specified by a
TransformerGenerator or, as a shortcut, one of the names from
dc.molnet.transformers.
tasks: List[str]
the names of the tasks in the dataset
data_dir: str
a directory to save the raw data in
save_dir: str
a directory to save the dataset in
"""
if 'split' in kwargs:
splitter = kwargs['split']
logger.warning("'split' is deprecated. Use 'splitter' instead.")
if isinstance(featurizer, str):
featurizer = featurizers[featurizer.lower()]
if isinstance(splitter, str):
splitter = splitters[splitter.lower()]
if data_dir is None:
data_dir = dc.utils.data_utils.get_data_dir()
if save_dir is None:
save_dir = dc.utils.data_utils.get_data_dir()
self.featurizer = featurizer
self.splitter = splitter
self.transformers = [
transformers[t.lower()] if isinstance(t, str) else t
for t in transformer_generators
]
self.tasks = list(tasks)
self.data_dir = data_dir
self.save_dir = save_dir
self.args = kwargs
def load_dataset(
self, name: str, reload: bool
) -> Tuple[List[str], Tuple[Dataset, ...], List[dc.trans.Transformer]]:
"""Load the dataset.
Parameters
----------
name: str
the name of the dataset, used to identify the directory on disk
reload: bool
if True, the first call for a particular featurizer and splitter will cache
the datasets to disk, and subsequent calls will reload the cached datasets.
"""
# Build the path to the dataset on disk.
featurizer_name = str(self.featurizer)
splitter_name = 'None' if self.splitter is None else str(self.splitter)
save_folder = os.path.join(self.save_dir, name + "-featurized",
featurizer_name, splitter_name)
if len(self.transformers) > 0:
transformer_name = '_'.join(
t.get_directory_name() for t in self.transformers)
save_folder = os.path.join(save_folder, transformer_name)
# Try to reload cached datasets.
if reload:
if self.splitter is None:
if os.path.exists(save_folder):
transformers = dc.utils.data_utils.load_transformers(
save_folder)
return self.tasks, (DiskDataset(save_folder),), transformers
else:
loaded, all_dataset, transformers = dc.utils.data_utils.load_dataset_from_disk(
save_folder)
if all_dataset is not None:
return self.tasks, all_dataset, transformers
# Create the dataset
logger.info("About to featurize %s dataset." % name)
dataset = self.create_dataset()
# Split and transform the dataset.
if self.splitter is None:
transformer_dataset: Dataset = dataset
else:
logger.info("About to split dataset with {} splitter.".format(
self.splitter.__class__.__name__))
train, valid, test = self.splitter.train_valid_test_split(dataset)
transformer_dataset = train
transformers = [
t.create_transformer(transformer_dataset) for t in self.transformers
]
logger.info("About to transform data.")
if self.splitter is None:
for transformer in transformers:
dataset = transformer.transform(dataset)
if reload and isinstance(dataset, DiskDataset):
dataset.move(save_folder)
dc.utils.data_utils.save_transformers(save_folder, transformers)
return self.tasks, (dataset,), transformers
for transformer in transformers:
train = transformer.transform(train)
valid = transformer.transform(valid)
test = transformer.transform(test)
if reload and isinstance(train, DiskDataset) and isinstance(
valid, DiskDataset) and isinstance(test, DiskDataset):
dc.utils.data_utils.save_dataset_to_disk(save_folder, train, valid,
test, transformers)
return self.tasks, (train, valid, test), transformers
def create_dataset(self) -> Dataset:
"""Subclasses must implement this to load the dataset."""
raise NotImplementedError()
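# Sketch of a minimal subclass (the CSV file name "example.csv" and its
# "smiles" column are hypothetical):
#
#   class _ExampleLoader(_MolnetLoader):
#
#       def create_dataset(self) -> Dataset:
#           dataset_file = os.path.join(self.data_dir, "example.csv")
#           loader = dc.data.CSVLoader(tasks=self.tasks,
#                                      feature_field="smiles",
#                                      featurizer=self.featurizer)
#           return loader.create_dataset(dataset_file, shard_size=8192)
#
# A molnet-style load function would then construct the loader and call
# loader.load_dataset("example", reload=True).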
<file_sep>Tutorials
=========
If you're new to DeepChem, you probably want to know the basics. What is DeepChem?
Why should you care about using it? The short answer is that DeepChem is a scientific machine learning library.
(The "Chem" indicates the historical fact that DeepChem initially focused on chemical applications,
but we aim to support all types of scientific applications more broadly).
Why would you want to use DeepChem instead of another machine learning
library? Simply put, DeepChem maintains an extensive collection of utilities
to enable scientific deep learning including classes for loading scientific
datasets, processing them, transforming them, splitting them up, and learning
from them. Behind the scenes DeepChem uses a variety of other machine
learning frameworks such as `scikit-learn`_, `TensorFlow`_, and `XGBoost`_. We are
also experimenting with adding additional models implemented in `PyTorch`_
and `JAX`_. Our focus is to facilitate scientific experimentation using
whatever tools are available at hand.
In the rest of this tutorial, we'll provide a rapid-fire overview of DeepChem's API.
DeepChem is a big library so we won't cover everything, but we should give you enough to get started.
.. contents:: Contents
:local:
Data Handling
-------------
The :code:`dc.data` module contains utilities to handle :code:`Dataset`
objects. These :code:`Dataset` objects are the heart of DeepChem.
A :code:`Dataset` is an abstraction of a dataset in machine learning. That is,
a collection of features, labels, and weights, alongside associated identifiers.
Rather than explaining further, we'll just show you.
.. doctest::
>>> import deepchem as dc
>>> import numpy as np
>>> N_samples = 50
>>> n_features = 10
>>> X = np.random.rand(N_samples, n_features)
>>> y = np.random.rand(N_samples)
>>> dataset = dc.data.NumpyDataset(X, y)
>>> dataset.X.shape
(50, 10)
>>> dataset.y.shape
(50,)
Here we've used the :code:`NumpyDataset` class which stores datasets in memory.
This works fine for smaller datasets and is very convenient for experimentation,
but is less convenient for larger datasets. For that we have the :code:`DiskDataset` class.
.. doctest::
>>> dataset = dc.data.DiskDataset.from_numpy(X, y)
>>> dataset.X.shape
(50, 10)
>>> dataset.y.shape
(50,)
In this example we haven't specified a data directory, so this :code:`DiskDataset` is written
to a temporary folder. Note that :code:`dataset.X` and :code:`dataset.y` load data
from disk under the hood, so this can get expensive for larger datasets.
Feature Engineering
-------------------
"Featurizer" is a chunk of code which transforms raw input data into a processed
form suitable for machine learning. The :code:`dc.feat` module contains an extensive collection
of featurizers for molecules, molecular complexes and inorganic crystals.
Here is an example of how to use a featurizer.
.. doctest::
>>> smiles = [
... 'O=Cc1ccc(O)c(OC)c1',
... 'CN1CCC[C@H]1c2cccnc2',
... 'C1CCCCC1',
... 'c1ccccc1',
... 'CC(=O)O',
... ]
>>> properties = [0.4, -1.5, 3.2, -0.2, 1.7]
>>> featurizer = dc.feat.CircularFingerprint(size=1024)
>>> ecfp = featurizer.featurize(smiles)
>>> ecfp.shape
(5, 1024)
>>> dataset = dc.data.NumpyDataset(X=ecfp, y=np.array(properties))
>>> len(dataset)
5
Here, we've used :code:`CircularFingerprint` to convert SMILES strings to ECFP fingerprints.
An ECFP is a bit vector encoding chemical structure information, and we can use it
as the input to a wide variety of models.
You may also have a CSV file containing SMILES strings and a property such as the HOMO-LUMO gap.
In that case, you can use a :code:`DataLoader` to load and featurize your data in one step.
.. doctest::
>>> import pandas as pd
>>> # make a dataframe object for creating a CSV file
>>> df = pd.DataFrame(list(zip(smiles, properties)), columns=["SMILES", "property"])
>>> import tempfile
>>> with dc.utils.UniversalNamedTemporaryFile(mode='w') as tmpfile:
... # dump the CSV file
... df.to_csv(tmpfile.name)
... # initialize the featurizer
... featurizer = dc.feat.CircularFingerprint(size=1024)
... # initialize the dataloader
... loader = dc.data.CSVLoader(["property"], feature_field="SMILES", featurizer=featurizer)
... # load and featurize the data from the CSV file
... dataset = loader.create_dataset(tmpfile.name)
... len(dataset)
5
Data Splitting
--------------
The :code:`dc.splits` module contains a collection of scientifically aware splitters.
Generally, we need to split the original data into training, validation, and test sets
in order to tune the model and evaluate its performance.
Here is an example of how to use a splitter.
.. doctest::
>>> splitter = dc.splits.RandomSplitter()
>>> # split 5 datapoints in the ratio of train:valid:test = 3:1:1
>>> train_dataset, valid_dataset, test_dataset = splitter.train_valid_test_split(
... dataset=dataset, frac_train=0.6, frac_valid=0.2, frac_test=0.2
... )
>>> len(train_dataset)
3
>>> len(valid_dataset)
1
>>> len(test_dataset)
1
Here, we've used the :code:`RandomSplitter` to split the data randomly
in the ratio of train:valid:test = 3:1:1. However, random splitting can
overestimate a model's performance, especially for small or imbalanced datasets,
so be careful when evaluating models. The :code:`dc.splits` module provides further
methods and algorithms for evaluating model performance more appropriately, such as
cross validation or splitting using molecular scaffolds, as sketched below.
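A scaffold split groups molecules by their Bemis-Murcko scaffolds so that
structurally similar molecules land in the same subset. The snippet below is a
minimal sketch (shown as a static code block rather than a doctest); note that
:code:`ScaffoldSplitter` reads SMILES strings from the dataset ids, so we rebuild
the dataset with ids set to the :code:`smiles` list from the Feature Engineering section.

.. code-block:: python

   dataset = dc.data.NumpyDataset(X=ecfp, y=np.array(properties), ids=smiles)
   scaffold_splitter = dc.splits.ScaffoldSplitter()
   train_dataset, valid_dataset, test_dataset = scaffold_splitter.train_valid_test_split(dataset)
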
Model Training and Evaluating
-----------------------------
The :code:`dc.models` module contains an extensive collection of models for scientific applications.
Most models inherit from :code:`dc.models.Model`, and we can train them simply by calling the :code:`fit` method.
You don't need to worry about framework-specific APIs.
Here is an example of how to use a model.
.. doctest::
>>> from sklearn.ensemble import RandomForestRegressor
>>> rf = RandomForestRegressor()
>>> model = dc.models.SklearnModel(model=rf)
>>> # model training
>>> model.fit(train_dataset)
>>> valid_preds = model.predict(valid_dataset)
>>> valid_preds.shape
(1,)
>>> test_preds = model.predict(test_dataset)
>>> test_preds.shape
(1,)
Here, we've wrapped a scikit-learn estimator in :code:`SklearnModel` and trained it.
Even if you want to train a deep learning model implemented in TensorFlow or PyTorch,
calling the :code:`fit` method is all you need.
Then, if you use a :code:`dc.metrics.Metric`, you can evaluate your model
by simply calling the :code:`evaluate` method.
.. doctest::
>>> # initialize the metric
>>> metric = dc.metrics.Metric(dc.metrics.mae_score)
>>> # evaluate the model
>>> train_score = model.evaluate(train_dataset, [metric])
>>> valid_score = model.evaluate(valid_dataset, [metric])
>>> test_score = model.evaluate(test_dataset, [metric])
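The same workflow applies to DeepChem's deep learning models. The snippet below is
a minimal sketch (shown as a static code block; the hyperparameters are arbitrary)
using :code:`MultitaskRegressor` on the fingerprint data from above.

.. code-block:: python

   model = dc.models.MultitaskRegressor(n_tasks=1, n_features=1024, layer_sizes=[500])
   model.fit(train_dataset, nb_epoch=10)
   test_score = model.evaluate(test_dataset, [metric])
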
More Tutorials
--------------
DeepChem maintains `an extensive collection of additional tutorials`_ that are meant to
be run on `Google Colab`_, an online platform that allows you to execute Jupyter notebooks.
Once you've finished this introductory tutorial, we recommend working through these more involved tutorials.
.. _`scikit-learn`: https://scikit-learn.org/stable/
.. _`TensorFlow`: https://www.tensorflow.org/
.. _`XGBoost`: https://xgboost.readthedocs.io/en/latest/
.. _`PyTorch`: https://pytorch.org/
.. _`JAX`: https://github.com/google/jax
.. _`an extensive collection of additional tutorials`: https://github.com/deepchem/deepchem/tree/master/examples/tutorials
.. _`Google Colab`: https://colab.research.google.com/
<file_sep>"""Helper operations and classes for general model building.
"""
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
import collections
import pickle
import os
import time
import warnings
import numpy as np
import pandas as pd
import tensorflow as tf
import tempfile
from deepchem.models import Model
from deepchem.metrics import from_one_hot
from deepchem.nn import model_ops
from deepchem.models.tensorflow_models import utils as tf_utils
from deepchem.trans import undo_transforms
from deepchem.utils.save import log
from deepchem.utils.evaluate import Evaluator
from deepchem.data import pad_features
from tensorflow.contrib.layers.python.layers import batch_norm
def softmax(x):
"""Simple numpy softmax implementation
"""
# (n_samples, n_classes)
if len(x.shape) == 2:
row_max = np.max(x, axis=1)
x -= row_max.reshape((x.shape[0], 1))
x = np.exp(x)
row_sum = np.sum(x, axis=1)
x /= row_sum.reshape((x.shape[0], 1))
# (n_samples, n_tasks, n_classes)
elif len(x.shape) == 3:
row_max = np.max(x, axis=2)
x -= row_max.reshape(x.shape[:2] + (1,))
x = np.exp(x)
row_sum = np.sum(x, axis=2)
x /= row_sum.reshape(x.shape[:2] + (1,))
return x
class TensorflowGraph(object):
"""Simple class that holds information needed to run Tensorflow graph."""
def __init__(self, graph, session, name_scopes, output, labels, weights,
loss):
self.graph = graph
self.session = session
self.name_scopes = name_scopes
self.output = output
self.labels = labels
self.weights = weights
self.loss = loss
@staticmethod
def get_placeholder_scope(graph, name_scopes):
"""Gets placeholder scope."""
placeholder_root = "placeholders"
return TensorflowGraph.shared_name_scope(placeholder_root, graph,
name_scopes)
@staticmethod
def shared_name_scope(name, graph, name_scopes):
"""Returns a singleton TensorFlow scope with the given name.
Used to prevent '_1'-appended scopes when sharing scopes with child classes.
Args:
name: String. Name scope for group of operations.
Returns:
tf.name_scope with the provided name.
"""
with graph.as_default():
if name not in name_scopes:
with tf.name_scope(name) as scope:
name_scopes[name] = scope
return tf.name_scope(name_scopes[name])
@staticmethod
def get_feed_dict(named_values):
feed_dict = {}
placeholder_root = "placeholders"
for name, value in named_values.items():
feed_dict['{}/{}:0'.format(placeholder_root, name)] = value
return feed_dict
class TensorflowGraphModel(Model):
"""Parent class for deepchem Tensorflow models.
Classifier:
n_classes
Has the following attributes:
placeholder_root: String placeholder prefix, used to create
placeholder_scope.
Generic base class for defining, training, and evaluating TensorflowGraphs.
Subclasses must implement the following methods:
build
add_output_ops
add_training_cost
Args:
train: If True, model is in training mode.
logdir: Directory for output files.
"""
def __init__(self,
n_tasks,
n_features,
logdir=None,
layer_sizes=[1000],
weight_init_stddevs=[.02],
bias_init_consts=[1.],
penalty=0.0,
penalty_type="l2",
dropouts=[0.5],
learning_rate=.001,
momentum=.9,
optimizer="adam",
batch_size=50,
n_classes=2,
pad_batches=False,
verbose=True,
seed=None,
**kwargs):
"""Constructs the computational graph.
This function constructs the computational graph for the model. It relies
subclassed methods (build/cost) to construct specific graphs.
Parameters
----------
n_tasks: int
Number of tasks
n_features: int
Number of features.
logdir: str
Location to save data
layer_sizes: list
List of layer sizes.
weight_init_stddevs: list
List of standard deviations for weights (sampled from zero-mean
gaussians). One for each layer.
bias_init_consts: list
List of bias initializations. One for each layer.
penalty: float
Amount of penalty (l2 or l1 applied)
penalty_type: str
Either "l2" or "l1"
dropouts: list
List of dropout amounts. One for each layer.
learning_rate: float
Learning rate for model.
momentum: float
Momentum. Only applied if optimizer=="momentum"
optimizer: str
Type of optimizer applied.
batch_size: int
Size of minibatches for training.
n_classes: int
Number of classes if this is for classification.
TODO(rbharath): Move this argument to TensorflowClassifier
verbose: bool
If True, perform logging.
seed: int
If not none, is used as random seed for tensorflow.
"""
# Save hyperparameters
self.n_tasks = n_tasks
self.n_features = n_features
self.layer_sizes = layer_sizes
self.weight_init_stddevs = weight_init_stddevs
self.bias_init_consts = bias_init_consts
self.penalty = penalty
self.penalty_type = penalty_type
self.dropouts = dropouts
self.learning_rate = learning_rate
self.momentum = momentum
self.optimizer = optimizer
self.batch_size = batch_size
self.n_classes = n_classes
self.pad_batches = pad_batches
self.verbose = verbose
self.seed = seed
if logdir is not None:
if not os.path.exists(logdir):
os.makedirs(logdir)
else:
logdir = tempfile.mkdtemp()
self.logdir = logdir
# Guard variable to make sure we don't Restore() this model
# from a disk checkpoint more than once.
self._restored_model = False
# Path to save checkpoint files, which matches the
# replicated supervisor's default path.
self._save_path = os.path.join(logdir, 'model.ckpt')
self.train_graph = self.construct_graph(training=True, seed=self.seed)
self.eval_graph = self.construct_graph(training=False, seed=self.seed)
def save(self):
"""
No-op since tf models save themselves during fit()
"""
pass
def reload(self):
"""
Loads model from disk. Thin wrapper around restore() for consistency.
"""
self.restore()
def get_num_tasks(self):
return self.n_tasks
def construct_graph(self, training, seed):
"""Returns a TensorflowGraph object."""
graph = tf.Graph()
# Lazily created by _get_shared_session().
shared_session = None
# Cache of TensorFlow scopes, to prevent '_1' appended scope names
# when subclass-overridden methods use the same scopes.
name_scopes = {}
# Setup graph
with graph.as_default():
if seed is not None:
tf.set_random_seed(seed)
output = self.build(graph, name_scopes, training)
labels = self.add_label_placeholders(graph, name_scopes)
weights = self.add_example_weight_placeholders(graph, name_scopes)
if training:
loss = self.add_training_cost(graph, name_scopes, output, labels, weights)
else:
loss = None
output = self.add_output_ops(graph, output) # add softmax heads
return TensorflowGraph(
graph=graph,
session=shared_session,
name_scopes=name_scopes,
output=output,
labels=labels,
weights=weights,
loss=loss)
def add_training_cost(self, graph, name_scopes, output, labels, weights):
with graph.as_default():
epsilon = 1e-3 # small float to avoid dividing by zero
weighted_costs = [] # weighted costs for each example
gradient_costs = [] # costs used for gradient calculation
with TensorflowGraph.shared_name_scope('costs', graph, name_scopes):
for task in range(self.n_tasks):
task_str = str(task).zfill(len(str(self.n_tasks)))
with TensorflowGraph.shared_name_scope('cost_{}'.format(task_str),
graph, name_scopes):
with tf.name_scope('weighted'):
weighted_cost = self.cost(output[task], labels[task],
weights[task])
weighted_costs.append(weighted_cost)
with tf.name_scope('gradient'):
# Note that we divide by the batch size and not the number of
# non-zero weight examples in the batch. Also, instead of using
# tf.reduce_mean (which can put ops on the CPU) we explicitly
# calculate with div/sum so it stays on the GPU.
gradient_cost = tf.math.divide(
tf.reduce_sum(weighted_cost), self.batch_size)
gradient_costs.append(gradient_cost)
# aggregated costs
with TensorflowGraph.shared_name_scope('aggregated', graph,
name_scopes):
with tf.name_scope('gradient'):
loss = tf.add_n(gradient_costs)
# weight decay
if self.penalty != 0.0:
penalty = model_ops.weight_decay(self.penalty_type, self.penalty)
loss += penalty
return loss
def fit(self,
dataset,
nb_epoch=10,
max_checkpoints_to_keep=5,
log_every_N_batches=50,
**kwargs):
"""Fit the model.
Parameters
----------
dataset: dc.data.Dataset
Dataset object holding training data
nb_epoch: 10
Number of training epochs.
max_checkpoints_to_keep: int
Maximum number of checkpoints to keep; older checkpoints will be deleted.
log_every_N_batches: int
Report every N batches. Useful for training on very large datasets,
where epochs can take long time to finish.
Raises
------
AssertionError
If model is not in training mode.
"""
############################################################## TIMING
time1 = time.time()
############################################################## TIMING
log("Training for %d epochs" % nb_epoch, self.verbose)
with self.train_graph.graph.as_default():
train_op = self.get_training_op(self.train_graph.graph,
self.train_graph.loss)
with self._get_shared_session(train=True) as sess:
sess.run(tf.global_variables_initializer())
saver = tf.train.Saver(max_to_keep=max_checkpoints_to_keep)
# Save an initial checkpoint.
saver.save(sess, self._save_path, global_step=0)
for epoch in range(nb_epoch):
avg_loss, n_batches = 0., 0
for ind, (X_b, y_b, w_b, ids_b) in enumerate(
# Turns out there are valid cases where we don't want pad-batches
# on by default.
#dataset.iterbatches(batch_size, pad_batches=True)):
dataset.iterbatches(
self.batch_size, pad_batches=self.pad_batches)):
if ind % log_every_N_batches == 0:
log("On batch %d" % ind, self.verbose)
# Run training op.
feed_dict = self.construct_feed_dict(X_b, y_b, w_b, ids_b)
fetches = self.train_graph.output + [
train_op, self.train_graph.loss
]
fetched_values = sess.run(fetches, feed_dict=feed_dict)
output = fetched_values[:len(self.train_graph.output)]
loss = fetched_values[-1]
avg_loss += loss
y_pred = np.squeeze(np.array(output))
y_b = y_b.flatten()
n_batches += 1
saver.save(sess, self._save_path, global_step=epoch)
avg_loss = float(avg_loss) / n_batches
log('Ending epoch %d: Average loss %g' % (epoch, avg_loss),
self.verbose)
# Always save a final checkpoint when complete.
saver.save(sess, self._save_path, global_step=epoch + 1)
############################################################## TIMING
time2 = time.time()
log("TIMING: model fitting took %0.3f s" % (time2 - time1), self.verbose)
############################################################## TIMING
def add_output_ops(self, graph, output):
"""Replace logits with softmax outputs."""
with graph.as_default():
softmax = []
with tf.name_scope('inference'):
for i, logits in enumerate(output):
softmax.append(tf.nn.softmax(logits, name='softmax_%d' % i))
output = softmax
return output
def build(self, graph, name_scopes, training):
"""Define the core graph.
NOTE(user): Operations defined here should be in their own name scope to
avoid any ambiguity when restoring checkpoints.
Raises:
NotImplementedError: if not overridden by concrete subclass.
"""
raise NotImplementedError('Must be overridden by concrete subclass')
def construct_feed_dict(self, X_b, y_b=None, w_b=None, ids_b=None):
"""Transform a minibatch of data into a feed_dict.
Raises:
NotImplementedError: if not overridden by concrete subclass.
"""
raise NotImplementedError('Must be overridden by concrete subclass')
def add_label_placeholders(self, graph, name_scopes):
"""Add Placeholders for labels for each task.
This method creates the following Placeholders for each task:
labels_%d: Float label tensor. For classification tasks, this tensor will
have shape batch_size x n_classes. For regression tasks, this tensor
will have shape batch_size.
Raises:
NotImplementedError: if not overridden by concrete subclass.
"""
raise NotImplementedError('Must be overridden by concrete subclass')
def add_example_weight_placeholders(self, graph, name_scopes):
"""Add Placeholders for example weights for each task.
This method creates the following Placeholders for each task:
weights_%d: Weight tensor with shape batch_size.
Placeholders are wrapped in identity ops to avoid the error caused by
feeding and fetching the same tensor.
"""
weights = []
placeholder_scope = TensorflowGraph.get_placeholder_scope(
graph, name_scopes)
with placeholder_scope:
for task in range(self.n_tasks):
weights.append(
tf.identity(
tf.placeholder(
tf.float32, shape=[None], name='weights_%d' % task)))
return weights
def cost(self, output, labels, weights):
"""Calculate single-task training cost for a batch of examples.
Args:
output: Tensor with model outputs.
labels: Tensor with true labels.
weights: Tensor with shape batch_size containing example weights.
Returns:
A tensor with shape batch_size containing the weighted cost for each
example. For use in subclasses that want to calculate additional costs.
"""
raise NotImplementedError('Must be overridden by concrete subclass')
def get_training_op(self, graph, loss):
"""Get training op for applying gradients to variables.
Subclasses that need to do anything fancy with gradients should override
this method.
Returns:
A training op.
"""
with graph.as_default():
opt = model_ops.optimizer(self.optimizer, self.learning_rate,
self.momentum)
return opt.minimize(loss, name='train')
def _get_shared_session(self, train):
# allow_soft_placement=True allows ops without a GPU implementation
# to run on the CPU instead.
if train:
if not self.train_graph.session:
config = tf.ConfigProto(allow_soft_placement=True)
self.train_graph.session = tf.Session(config=config)
return self.train_graph.session
else:
if not self.eval_graph.session:
config = tf.ConfigProto(allow_soft_placement=True)
self.eval_graph.session = tf.Session(config=config)
return self.eval_graph.session
def restore(self):
"""Restores the model from the provided training checkpoint.
Args:
checkpoint: string. Path to checkpoint file.
"""
if self._restored_model:
return
with self.eval_graph.graph.as_default():
last_checkpoint = self._find_last_checkpoint()
# TODO(rbharath): Is setting train=False right here?
saver = tf.train.Saver()
saver.restore(self._get_shared_session(train=False), last_checkpoint)
self._restored_model = True
def predict(self, dataset, transformers=[]):
"""
Uses self to make predictions on provided Dataset object.
Returns:
y_pred: numpy ndarray of shape (n_samples,)
"""
y_preds = []
n_tasks = self.get_num_tasks()
ind = 0
for (X_batch, _, _, ids_batch) in dataset.iterbatches(
self.batch_size, deterministic=True):
n_samples = len(X_batch)
y_pred_batch = self.predict_on_batch(X_batch)
# Discard any padded predictions
y_pred_batch = y_pred_batch[:n_samples]
y_pred_batch = np.reshape(y_pred_batch, (n_samples, n_tasks))
y_pred_batch = undo_transforms(y_pred_batch, transformers)
y_preds.append(y_pred_batch)
y_pred = np.vstack(y_preds)
# The iterbatches does padding with zero-weight examples on the last batch.
# Remove padded examples.
n_samples = len(dataset)
y_pred = np.reshape(y_pred, (n_samples, n_tasks))
# Special case to handle singletasks.
if n_tasks == 1:
y_pred = np.reshape(y_pred, (n_samples,))
return y_pred
def predict_proba(self, dataset, transformers=[], n_classes=2):
"""
TODO: Do transformers even make sense here?
Returns:
y_pred: numpy ndarray of shape (n_samples, n_classes*n_tasks)
"""
y_preds = []
n_tasks = self.get_num_tasks()
for (X_batch, y_batch, w_batch, ids_batch) in dataset.iterbatches(
self.batch_size, deterministic=True):
n_samples = len(X_batch)
y_pred_batch = self.predict_proba_on_batch(X_batch)
y_pred_batch = y_pred_batch[:n_samples]
y_pred_batch = np.reshape(y_pred_batch, (n_samples, n_tasks, n_classes))
y_pred_batch = undo_transforms(y_pred_batch, transformers)
y_preds.append(y_pred_batch)
y_pred = np.vstack(y_preds)
# The iterbatches does padding with zero-weight examples on the last batch.
# Remove padded examples.
n_samples = len(dataset)
y_pred = y_pred[:n_samples]
y_pred = np.reshape(y_pred, (n_samples, n_tasks, n_classes))
return y_pred
# TODO(rbharath): Verify this can be safely removed.
#def evaluate(self, dataset, metrics, transformers=[]):
# """
# Evaluates the performance of this model on specified dataset.
#
# Parameters
# ----------
# dataset: dc.data.Dataset
# Dataset object.
# metric: deepchem.metrics.Metric
# Evaluation metric
# transformers: list
# List of deepchem.transformers.Transformer
# Returns
# -------
# dict
# Maps tasks to scores under metric.
# """
# evaluator = Evaluator(self, dataset, transformers)
# scores = evaluator.compute_model_performance(metrics)
# return scores
def _find_last_checkpoint(self):
"""Finds last saved checkpoint."""
highest_num, last_checkpoint = -np.inf, None
for filename in os.listdir(self.logdir):
# checkpoints look like logdir/model.ckpt-N
# self._save_path is "logdir/model.ckpt"
if os.path.basename(self._save_path) in filename:
try:
N = int(filename.split("-")[1].split(".")[0])
if N > highest_num:
highest_num = N
last_checkpoint = "model.ckpt-" + str(N)
except ValueError:
pass
return os.path.join(self.logdir, last_checkpoint)
class TensorflowClassifier(TensorflowGraphModel):
"""Classification model.
Subclasses must set the following attributes:
output: logits op(s) used for computing classification loss and predicted
class probabilities for each task.
"""
def get_task_type(self):
return "classification"
def cost(self, logits, labels, weights):
"""Calculate single-task training cost for a batch of examples.
Args:
logits: Tensor with shape batch_size x n_classes containing logits.
labels: Tensor with shape batch_size x n_classes containing true labels
in a one-hot encoding.
weights: Tensor with shape batch_size containing example weights.
Returns:
A tensor with shape batch_size containing the weighted cost for each
example.
"""
return tf.multiply(
tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=labels),
weights)
def add_label_placeholders(self, graph, name_scopes):
"""Add Placeholders for labels for each task.
This method creates the following Placeholders for each task:
labels_%d: Label tensor with shape batch_size x n_classes.
Placeholders are wrapped in identity ops to avoid the error caused by
feeding and fetching the same tensor.
"""
placeholder_scope = TensorflowGraph.get_placeholder_scope(
graph, name_scopes)
with graph.as_default():
batch_size = self.batch_size
n_classes = self.n_classes
labels = []
with placeholder_scope:
for task in range(self.n_tasks):
labels.append(
tf.identity(
tf.placeholder(
tf.float32,
shape=[None, n_classes],
name='labels_%d' % task)))
return labels
def predict_on_batch(self, X):
"""Return model output for the provided input.
Restore(checkpoint) must have previously been called on this object.
Args:
dataset: dc.data.dataset object.
Returns:
Tuple of three numpy arrays with shape n_examples x n_tasks (x ...):
output: Model outputs.
labels: True labels.
weights: Example weights.
Note that the output and labels arrays may be more than 2D, e.g. for
classifier models that return class probabilities.
Raises:
AssertionError: If model is not in evaluation mode.
ValueError: If output and labels are not both 3D or both 2D.
"""
len_unpadded = len(X)
if self.pad_batches:
X = pad_features(self.batch_size, X)
if not self._restored_model:
self.restore()
with self.eval_graph.graph.as_default():
# run eval data through the model
n_tasks = self.n_tasks
output = []
with self._get_shared_session(train=False).as_default():
feed_dict = self.construct_feed_dict(X)
data = self._get_shared_session(train=False).run(
self.eval_graph.output, feed_dict=feed_dict)
batch_output = np.asarray(data[:n_tasks], dtype=float)
# reshape to batch_size x n_tasks x ...
if batch_output.ndim == 3:
batch_output = batch_output.transpose((1, 0, 2))
elif batch_output.ndim == 2:
batch_output = batch_output.transpose((1, 0))
else:
raise ValueError('Unrecognized rank combination for output: %s' %
(batch_output.shape,))
output.append(batch_output)
outputs = np.array(
from_one_hot(np.squeeze(np.concatenate(output)), axis=-1))
outputs = np.copy(outputs)
outputs = np.reshape(outputs, (len(X), n_tasks))
outputs = outputs[:len_unpadded]
return outputs
def predict_proba_on_batch(self, X):
"""Return model output for the provided input.
Restore(checkpoint) must have previously been called on this object.
Args:
dataset: dc.data.Dataset object.
Returns:
Tuple of three numpy arrays with shape n_examples x n_tasks (x ...):
output: Model outputs.
Note that the output arrays may be more than 2D, e.g. for
classifier models that return class probabilities.
Raises:
AssertionError: If model is not in evaluation mode.
ValueError: If output and labels are not both 3D or both 2D.
"""
if self.pad_batches:
X = pad_features(self.batch_size, X)
if not self._restored_model:
self.restore()
with self.eval_graph.graph.as_default():
# run eval data through the model
n_tasks = self.n_tasks
with self._get_shared_session(train=False).as_default():
feed_dict = self.construct_feed_dict(X)
data = self._get_shared_session(train=False).run(
self.eval_graph.output, feed_dict=feed_dict)
batch_outputs = np.asarray(data[:n_tasks], dtype=float)
# reshape to batch_size x n_tasks x ...
if batch_outputs.ndim == 3:
batch_outputs = batch_outputs.transpose((1, 0, 2))
elif batch_outputs.ndim == 2:
batch_outputs = batch_outputs.transpose((1, 0))
else:
raise ValueError('Unrecognized rank combination for output: %s ' %
(batch_outputs.shape,))
# Note that softmax is already applied in construct_graph
outputs = batch_outputs
return np.copy(outputs)
class TensorflowRegressor(TensorflowGraphModel):
"""Regression model.
Subclasses must set the following attributes:
output: Op(s) used for computing regression loss and predicted regression
outputs for each task.
"""
def get_task_type(self):
return "regressor"
def add_output_ops(self, graph, output):
"""No-op for regression models since no softmax."""
return output
def cost(self, output, labels, weights):
"""Calculate single-task training cost for a batch of examples.
Args:
output: Tensor with shape batch_size containing predicted values.
labels: Tensor with shape batch_size containing true values.
weights: Tensor with shape batch_size containing example weights.
Returns:
A tensor with shape batch_size containing the weighted cost for each
example.
"""
return tf.multiply(0.5 * tf.square(output - labels), weights)
def add_label_placeholders(self, graph, name_scopes):
"""Add Placeholders for labels for each task.
This method creates the following Placeholders for each task:
labels_%d: Label tensor with shape batch_size.
Placeholders are wrapped in identity ops to avoid the error caused by
feeding and fetching the same tensor.
"""
placeholder_scope = TensorflowGraph.get_placeholder_scope(
graph, name_scopes)
with graph.as_default():
batch_size = self.batch_size
labels = []
with placeholder_scope:
for task in range(self.n_tasks):
labels.append(
tf.identity(
tf.placeholder(
tf.float32, shape=[None], name='labels_%d' % task)))
return labels
def predict_on_batch(self, X):
"""Return model output for the provided input.
Restore(checkpoint) must have previously been called on this object.
Args:
dataset: dc.data.Dataset object.
Returns:
Tuple of three numpy arrays with shape n_examples x n_tasks (x ...):
output: Model outputs.
labels: True labels.
weights: Example weights.
Note that the output and labels arrays may be more than 2D, e.g. for
classifier models that return class probabilities.
Raises:
AssertionError: If model is not in evaluation mode.
ValueError: If output and labels are not both 3D or both 2D.
"""
len_unpadded = len(X)
if self.pad_batches:
X = pad_features(self.batch_size, X)
if not self._restored_model:
self.restore()
with self.eval_graph.graph.as_default():
# run eval data through the model
n_tasks = self.n_tasks
outputs = []
with self._get_shared_session(train=False).as_default():
n_samples = len(X)
feed_dict = self.construct_feed_dict(X)
data = self._get_shared_session(train=False).run(
self.eval_graph.output, feed_dict=feed_dict)
batch_outputs = np.asarray(data[:n_tasks], dtype=float)
# reshape to batch_size x n_tasks x ...
if batch_outputs.ndim == 3:
batch_outputs = batch_outputs.transpose((1, 0, 2))
elif batch_outputs.ndim == 2:
batch_outputs = batch_outputs.transpose((1, 0))
# Handle edge case when batch-size is 1.
elif batch_outputs.ndim == 1:
n_samples = len(X)
batch_outputs = batch_outputs.reshape((n_samples, n_tasks))
else:
raise ValueError('Unrecognized rank combination for output: %s' %
(batch_outputs.shape))
# Prune away any padding that was added
batch_outputs = batch_outputs[:n_samples]
outputs.append(batch_outputs)
outputs = np.squeeze(np.concatenate(outputs))
outputs = np.copy(outputs)
# Handle case of 0-dimensional scalar output
if len(outputs.shape) > 0:
return outputs[:len_unpadded]
else:
outputs = np.reshape(outputs, (1,))
return outputs
class TensorflowMultiTaskRegressor(TensorflowRegressor):
"""Implements an icml model as configured in a model_config.proto."""
def build(self, graph, name_scopes, training):
"""Constructs the graph architecture as specified in its config.
This method creates the following Placeholders:
mol_features: Molecule descriptor (e.g. fingerprint) tensor with shape
batch_size x n_features.
"""
n_features = self.n_features
placeholder_scope = TensorflowGraph.get_placeholder_scope(
graph, name_scopes)
with graph.as_default():
with placeholder_scope:
self.mol_features = tf.placeholder(
tf.float32, shape=[None, n_features], name='mol_features')
layer_sizes = self.layer_sizes
weight_init_stddevs = self.weight_init_stddevs
bias_init_consts = self.bias_init_consts
dropouts = self.dropouts
lengths_set = {
len(layer_sizes),
len(weight_init_stddevs),
len(bias_init_consts),
len(dropouts),
}
assert len(lengths_set) == 1, 'All layer params must have same length.'
n_layers = lengths_set.pop()
assert n_layers > 0, 'Must have some layers defined.'
prev_layer = self.mol_features
prev_layer_size = n_features
for i in range(n_layers):
layer = tf.nn.relu(
model_ops.fully_connected_layer(
tensor=prev_layer,
size=layer_sizes[i],
weight_init=tf.truncated_normal(
shape=[prev_layer_size, layer_sizes[i]],
stddev=weight_init_stddevs[i]),
bias_init=tf.constant(
value=bias_init_consts[i], shape=[layer_sizes[i]])))
layer = model_ops.dropout(layer, dropouts[i], training)
prev_layer = layer
prev_layer_size = layer_sizes[i]
output = []
for task in range(self.n_tasks):
output.append(
tf.squeeze(
model_ops.fully_connected_layer(
tensor=prev_layer,
size=layer_sizes[i],
weight_init=tf.truncated_normal(
shape=[prev_layer_size, 1],
stddev=weight_init_stddevs[i]),
bias_init=tf.constant(value=bias_init_consts[i],
shape=[1]))))
return output
def construct_feed_dict(self, X_b, y_b=None, w_b=None, ids_b=None):
"""Construct a feed dictionary from minibatch data.
TODO(rbharath): ids_b is not used here. Can we remove it?
Args:
X_b: np.ndarray of shape (batch_size, n_features)
y_b: np.ndarray of shape (batch_size, n_tasks)
w_b: np.ndarray of shape (batch_size, n_tasks)
ids_b: List of length (batch_size) with datapoint identifiers.
"""
orig_dict = {}
orig_dict["mol_features"] = X_b
for task in range(self.n_tasks):
if y_b is not None:
orig_dict["labels_%d" % task] = y_b[:, task]
else:
# Dummy placeholders
orig_dict["labels_%d" % task] = np.squeeze(np.zeros((self.batch_size,)))
if w_b is not None:
orig_dict["weights_%d" % task] = w_b[:, task]
else:
# Dummy placeholders
orig_dict["weights_%d" % task] = np.ones((self.batch_size,))
return TensorflowGraph.get_feed_dict(orig_dict)
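# Illustrative sketch (not part of the library API): the array shapes that
# construct_feed_dict expects, for a hypothetical model with batch_size=4,
# n_tasks=2 and n_features=8. All names and values below are made up.
def _example_construct_feed_dict_shapes():
  batch_size, n_tasks, n_features = 4, 2, 8
  X_b = np.zeros((batch_size, n_features))  # per-example feature vectors
  y_b = np.zeros((batch_size, n_tasks))  # per-task labels
  w_b = np.ones((batch_size, n_tasks))  # per-task example weights
  # A TensorflowMultiTaskRegressor constructed with matching n_tasks and
  # n_features would consume these via model.construct_feed_dict(X_b, y_b, w_b).
  return X_b, y_b, w_b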
<file_sep>"""
Simple Tests for Graph Topologies
"""
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
__author__ = "<NAME> and <NAME>"
__copyright__ = "Copyright 2016, Stanford University"
__license__ = "MIT"
import unittest
import tensorflow as tf
from deepchem.models.tf_new_models.graph_topology import GraphTopology
class TestGraphTopology(unittest.TestCase):
"""
Test that graph topologies work correctly.
"""
def test_shapes(self):
"""Simple test that Graph topology placeholders have correct shapes."""
n_atoms = 5
n_feat = 10
batch_size = 3
max_deg = 10
min_deg = 0
topology = GraphTopology(n_feat)
# Degrees from 1 to max_deg inclusive
# TODO(rbharath): Should this be 0 to max_deg inclusive?
deg_adj_lists_placeholders = topology.get_deg_adjacency_lists_placeholders()
assert len(deg_adj_lists_placeholders) == max_deg
for ind, deg_adj_list in enumerate(deg_adj_lists_placeholders):
deg = ind + 1
# Should have shape (?, deg)
assert deg_adj_list.get_shape()[1] == deg
# Shape of atom_features should be (?, n_feat)
atom_features = topology.get_atom_features_placeholder()
assert atom_features.get_shape()[1] == n_feat
# Shape of deg_slice placeholder should be (max_deg+1-min_deg, 2)
deg_slice = topology.get_deg_slice_placeholder()
print("deg_slice.get_shape()")
print(deg_slice.get_shape())
assert deg_slice.get_shape() == (max_deg + 1 - min_deg, 2)
<file_sep>import os
import numpy as np
import pytest
from scipy import io as scipy_io
from deepchem.data import NumpyDataset, CSVLoader
from deepchem.trans import DAGTransformer
from deepchem.molnet import load_bace_classification, load_delaney
from deepchem.feat import ConvMolFeaturizer
from deepchem.metrics import Metric, roc_auc_score, mean_absolute_error
from deepchem.utils.data_utils import download_url, get_data_dir
try:
import tensorflow as tf
from deepchem.models import GraphConvModel, DAGModel, MPNNModel, DTNNModel
has_tensorflow = True
except:
has_tensorflow = False
from flaky import flaky
@pytest.mark.tensorflow
def get_dataset(mode='classification', featurizer='GraphConv', num_tasks=2):
data_points = 20
if mode == 'classification':
tasks, all_dataset, transformers = load_bace_classification(featurizer)
else:
tasks, all_dataset, transformers = load_delaney(featurizer)
train, valid, test = all_dataset
for _ in range(1, num_tasks):
tasks.append("random_task")
w = np.ones(shape=(data_points, len(tasks)))
if mode == 'classification':
y = np.random.randint(0, 2, size=(data_points, len(tasks)))
metric = Metric(roc_auc_score, np.mean, mode="classification")
else:
y = np.random.normal(size=(data_points, len(tasks)))
metric = Metric(mean_absolute_error, mode="regression")
ds = NumpyDataset(train.X[:data_points], y, w, train.ids[:data_points])
return tasks, ds, transformers, metric
@flaky
@pytest.mark.tensorflow
def test_graph_conv_model():
tasks, dataset, transformers, metric = get_dataset('classification',
'GraphConv')
batch_size = 10
model = GraphConvModel(len(tasks),
batch_size=batch_size,
batch_normalize=False,
mode='classification')
model.fit(dataset, nb_epoch=20)
scores = model.evaluate(dataset, [metric], transformers)
assert scores['mean-roc_auc_score'] >= 0.9
@pytest.mark.tensorflow
def test_neural_fingerprint_retrieval():
tasks, dataset, _, _ = get_dataset('classification', 'GraphConv')
fp_size = 3
batch_size = 50
model = GraphConvModel(len(tasks),
batch_size=batch_size,
dense_layer_size=3,
mode='classification')
model.fit(dataset, nb_epoch=1)
neural_fingerprints = model.predict_embedding(dataset)
neural_fingerprints = np.array(neural_fingerprints)[:len(dataset)]
assert (len(dataset), fp_size * 2) == neural_fingerprints.shape
@flaky
@pytest.mark.tensorflow
def test_graph_conv_regression_model():
tasks, dataset, transformers, metric = get_dataset('regression',
'GraphConv')
batch_size = 10
model = GraphConvModel(len(tasks),
batch_size=batch_size,
batch_normalize=False,
mode='regression')
model.fit(dataset, nb_epoch=100)
scores = model.evaluate(dataset, [metric], transformers)
assert scores['mean_absolute_error'] < 0.1
@pytest.mark.tensorflow
def test_graph_conv_regression_uncertainty():
tasks, dataset, _, _ = get_dataset('regression', 'GraphConv')
batch_size = 10
model = GraphConvModel(len(tasks),
batch_size=batch_size,
batch_normalize=False,
mode='regression',
dropout=0.1,
uncertainty=True)
model.fit(dataset, nb_epoch=100)
# Predict the output and uncertainty.
pred, std = model.predict_uncertainty(dataset)
mean_error = np.mean(np.abs(dataset.y - pred))
mean_value = np.mean(np.abs(dataset.y))
mean_std = np.mean(std)
assert mean_error < 0.5 * mean_value
assert mean_std > 0.5 * mean_error
assert mean_std < mean_value
@pytest.mark.tensorflow
def test_graph_conv_model_no_task():
tasks, dataset, _, __ = get_dataset('classification', 'GraphConv')
batch_size = 10
model = GraphConvModel(len(tasks),
batch_size=batch_size,
batch_normalize=False,
mode='classification')
model.fit(dataset, nb_epoch=20)
    # Predict a dataset with no y (ensured by tasks = [])
bace_url = "https://deepchemdata.s3-us-west-1.amazonaws.com/datasets/bace.csv"
download_url(url=bace_url, name="bace_tmp.csv")
loader = CSVLoader(tasks=[],
smiles_field='mol',
featurizer=ConvMolFeaturizer())
td = loader.featurize(os.path.join(get_data_dir(), "bace_tmp.csv"))
model.predict(td)
@pytest.mark.tensorflow
def test_graph_conv_atom_features():
tasks, dataset, _, _ = get_dataset('regression', 'Raw', num_tasks=1)
atom_feature_name = 'feature'
y = []
for mol in dataset.X:
atom_features = []
for atom in mol.GetAtoms():
val = np.random.normal()
mol.SetProp("atom %08d %s" % (atom.GetIdx(), atom_feature_name),
str(val))
            atom_features.append(val)
y.append([np.sum(atom_features)])
featurizer = ConvMolFeaturizer(atom_properties=[atom_feature_name])
X = featurizer.featurize(dataset.X)
dataset = NumpyDataset(X, np.array(y))
batch_size = 50
model = GraphConvModel(len(tasks),
number_atom_features=featurizer.feature_length(),
batch_size=batch_size,
mode='regression')
model.fit(dataset, nb_epoch=1)
_ = model.predict(dataset)
@flaky
@pytest.mark.slow
@pytest.mark.tensorflow
def test_dag_model():
tasks, dataset, transformers, metric = get_dataset('classification',
'GraphConv')
max_atoms = max([mol.get_num_atoms() for mol in dataset.X])
transformer = DAGTransformer(max_atoms=max_atoms)
dataset = transformer.transform(dataset)
model = DAGModel(len(tasks),
max_atoms=max_atoms,
mode='classification',
learning_rate=0.001)
model.fit(dataset, nb_epoch=30)
scores = model.evaluate(dataset, [metric], transformers)
assert scores['mean-roc_auc_score'] >= 0.9
@pytest.mark.slow
@pytest.mark.tensorflow
def test_dag_regression_model():
np.random.seed(1234)
tf.random.set_seed(1234)
tasks, dataset, transformers, metric = get_dataset('regression',
'GraphConv')
max_atoms = max([mol.get_num_atoms() for mol in dataset.X])
transformer = DAGTransformer(max_atoms=max_atoms)
dataset = transformer.transform(dataset)
model = DAGModel(len(tasks),
max_atoms=max_atoms,
mode='regression',
learning_rate=0.003)
model.fit(dataset, nb_epoch=100)
scores = model.evaluate(dataset, [metric], transformers)
assert scores['mean_absolute_error'] < 0.15
@pytest.mark.slow
@pytest.mark.tensorflow
def test_dag_regression_uncertainty():
np.random.seed(1234)
tf.random.set_seed(1234)
tasks, dataset, _, _ = get_dataset('regression', 'GraphConv')
batch_size = 10
max_atoms = max([mol.get_num_atoms() for mol in dataset.X])
transformer = DAGTransformer(max_atoms=max_atoms)
dataset = transformer.transform(dataset)
model = DAGModel(len(tasks),
max_atoms=max_atoms,
mode='regression',
learning_rate=0.003,
batch_size=batch_size,
use_queue=False,
dropout=0.05,
uncertainty=True)
model.fit(dataset, nb_epoch=750)
# Predict the output and uncertainty.
pred, std = model.predict_uncertainty(dataset)
mean_error = np.mean(np.abs(dataset.y - pred))
mean_value = np.mean(np.abs(dataset.y))
mean_std = np.mean(std)
# The DAG models have high error with dropout
    # Despite a lot of effort tweaking it, there appears to be
# a limit to how low the error can go with dropout.
# assert mean_error < 0.5 * mean_value
assert mean_error < .7 * mean_value
assert mean_std > 0.5 * mean_error
assert mean_std < mean_value
@pytest.mark.slow
@pytest.mark.tensorflow
def test_mpnn_model():
tasks, dataset, transformers, metric = get_dataset('classification',
'Weave')
model = MPNNModel(len(tasks),
mode='classification',
n_hidden=75,
n_atom_feat=75,
n_pair_feat=14,
T=1,
M=1,
learning_rate=0.0005)
model.fit(dataset, nb_epoch=150)
scores = model.evaluate(dataset, [metric], transformers)
assert scores['mean-roc_auc_score'] >= 0.9
@pytest.mark.slow
@pytest.mark.tensorflow
def test_mpnn_regression_model():
tasks, dataset, transformers, metric = get_dataset('regression', 'Weave')
batch_size = 10
model = MPNNModel(len(tasks),
mode='regression',
n_hidden=75,
n_atom_feat=75,
n_pair_feat=14,
T=1,
M=1,
batch_size=batch_size)
model.fit(dataset, nb_epoch=60)
scores = model.evaluate(dataset, [metric], transformers)
assert scores['mean_absolute_error'] < 0.1
@pytest.mark.slow
@pytest.mark.tensorflow
def test_mpnn_regression_uncertainty():
tasks, dataset, _, _ = get_dataset('regression', 'Weave')
batch_size = 10
model = MPNNModel(len(tasks),
mode='regression',
n_hidden=75,
n_atom_feat=75,
n_pair_feat=14,
T=1,
M=1,
dropout=0.1,
batch_size=batch_size,
uncertainty=True)
model.fit(dataset, nb_epoch=40)
# Predict the output and uncertainty.
pred, std = model.predict_uncertainty(dataset)
mean_error = np.mean(np.abs(dataset.y - pred))
mean_value = np.mean(np.abs(dataset.y))
mean_std = np.mean(std)
assert mean_error < 0.5 * mean_value
assert mean_std > 0.5 * mean_error
assert mean_std < mean_value
@flaky
@pytest.mark.tensorflow
def test_dtnn_regression_model():
current_dir = os.path.dirname(os.path.abspath(__file__))
input_file = os.path.join(current_dir, "assets/example_DTNN.mat")
dataset = scipy_io.loadmat(input_file)
X = dataset['X']
y = dataset['T']
w = np.ones_like(y)
dataset = NumpyDataset(X, y, w, ids=None)
n_tasks = y.shape[1]
model = DTNNModel(n_tasks,
n_embedding=20,
n_distance=100,
learning_rate=1.0,
mode="regression")
# Fit trained model
model.fit(dataset, nb_epoch=250)
# Eval model on train
pred = model.predict(dataset)
mean_rel_error = np.mean(np.abs(1 - pred / y))
assert mean_rel_error < 0.1
@pytest.mark.tensorflow
def test_graph_predict():
model = GraphConvModel(12, batch_size=50, mode='classification')
mols = ["CCCCC", "CCCCCCCCC"]
feat = ConvMolFeaturizer()
X = feat.featurize(mols)
    assert (model.predict(NumpyDataset(X))).all()
<file_sep>"""
Testing singletask/multitask dataset shuffling
"""
import os
import deepchem as dc
import numpy as np
def test_complete_shuffle_one_shard():
"""Test that complete shuffle works with only one shard."""
X = np.random.rand(10, 10)
dataset = dc.data.DiskDataset.from_numpy(X)
shuffled = dataset.complete_shuffle()
assert len(shuffled) == len(dataset)
assert not np.array_equal(shuffled.ids, dataset.ids)
assert sorted(shuffled.ids) == sorted(dataset.ids)
assert shuffled.X.shape == dataset.X.shape
assert shuffled.y.shape == dataset.y.shape
assert shuffled.w.shape == dataset.w.shape
original_indices = dict((id, i) for i, id in enumerate(dataset.ids))
shuffled_indices = dict((id, i) for i, id in enumerate(shuffled.ids))
for id in dataset.ids:
i = original_indices[id]
j = shuffled_indices[id]
assert np.array_equal(dataset.X[i], shuffled.X[j])
assert np.array_equal(dataset.y[i], shuffled.y[j])
assert np.array_equal(dataset.w[i], shuffled.w[j])
def test_complete_shuffle_multiple_shard():
"""Test that complete shuffle works with multiple shards."""
X = np.random.rand(100, 10)
dataset = dc.data.DiskDataset.from_numpy(X)
dataset.reshard(shard_size=10)
shuffled = dataset.complete_shuffle()
assert len(shuffled) == len(dataset)
assert not np.array_equal(shuffled.ids, dataset.ids)
assert sorted(shuffled.ids) == sorted(dataset.ids)
assert shuffled.X.shape == dataset.X.shape
assert shuffled.y.shape == dataset.y.shape
assert shuffled.w.shape == dataset.w.shape
original_indices = dict((id, i) for i, id in enumerate(dataset.ids))
shuffled_indices = dict((id, i) for i, id in enumerate(shuffled.ids))
for id in dataset.ids:
i = original_indices[id]
j = shuffled_indices[id]
assert np.array_equal(dataset.X[i], shuffled.X[j])
assert np.array_equal(dataset.y[i], shuffled.y[j])
assert np.array_equal(dataset.w[i], shuffled.w[j])
def test_complete_shuffle_multiple_shard_uneven():
"""Test that complete shuffle works with multiple shards and some shards not full size."""
X = np.random.rand(57, 10)
dataset = dc.data.DiskDataset.from_numpy(X)
dataset.reshard(shard_size=10)
shuffled = dataset.complete_shuffle()
assert len(shuffled) == len(dataset)
assert not np.array_equal(shuffled.ids, dataset.ids)
assert sorted(shuffled.ids) == sorted(dataset.ids)
assert shuffled.X.shape == dataset.X.shape
assert shuffled.y.shape == dataset.y.shape
assert shuffled.w.shape == dataset.w.shape
original_indices = dict((id, i) for i, id in enumerate(dataset.ids))
shuffled_indices = dict((id, i) for i, id in enumerate(shuffled.ids))
for id in dataset.ids:
i = original_indices[id]
j = shuffled_indices[id]
assert np.array_equal(dataset.X[i], shuffled.X[j])
assert np.array_equal(dataset.y[i], shuffled.y[j])
assert np.array_equal(dataset.w[i], shuffled.w[j])
def test_complete_shuffle():
"""Test that complete shuffle."""
current_dir = os.path.dirname(os.path.realpath(__file__))
dataset_file = os.path.join(current_dir,
"../../models/tests/assets/example.csv")
featurizer = dc.feat.CircularFingerprint(size=1024)
tasks = ["log-solubility"]
loader = dc.data.CSVLoader(tasks=tasks,
feature_field="smiles",
featurizer=featurizer)
dataset = loader.create_dataset(dataset_file, shard_size=2)
X_orig, y_orig, w_orig, orig_ids = (dataset.X, dataset.y, dataset.w,
dataset.ids)
orig_len = len(dataset)
shuffled = dataset.complete_shuffle()
X_new, y_new, w_new, new_ids = (shuffled.X, shuffled.y, shuffled.w,
shuffled.ids)
assert len(shuffled) == orig_len
# The shuffling should have switched up the ordering
assert not np.array_equal(orig_ids, new_ids)
# But all the same entries should still be present
assert sorted(orig_ids) == sorted(new_ids)
# All the data should have same shape
assert X_orig.shape == X_new.shape
assert y_orig.shape == y_new.shape
assert w_orig.shape == w_new.shape
original_indices = dict((id, i) for i, id in enumerate(dataset.ids))
shuffled_indices = dict((id, i) for i, id in enumerate(shuffled.ids))
for id in dataset.ids:
i = original_indices[id]
j = shuffled_indices[id]
assert np.array_equal(dataset.X[i], shuffled.X[j])
assert np.array_equal(dataset.y[i], shuffled.y[j])
assert np.array_equal(dataset.w[i], shuffled.w[j])
def test_sparse_shuffle():
"""Test that sparse datasets can be shuffled quickly."""
current_dir = os.path.dirname(os.path.realpath(__file__))
dataset_file = os.path.join(current_dir,
"../../models/tests/assets/example.csv")
featurizer = dc.feat.CircularFingerprint(size=1024)
tasks = ["log-solubility"]
loader = dc.data.CSVLoader(tasks=tasks,
feature_field="smiles",
featurizer=featurizer)
dataset = loader.create_dataset(dataset_file, shard_size=2)
X_orig, y_orig, w_orig, orig_ids = (dataset.X, dataset.y, dataset.w,
dataset.ids)
orig_len = len(dataset)
dataset.sparse_shuffle()
X_new, y_new, w_new, new_ids = (dataset.X, dataset.y, dataset.w,
dataset.ids)
assert len(dataset) == orig_len
# The shuffling should have switched up the ordering
assert not np.array_equal(orig_ids, new_ids)
# But all the same entries should still be present
assert sorted(orig_ids) == sorted(new_ids)
# All the data should have same shape
assert X_orig.shape == X_new.shape
assert y_orig.shape == y_new.shape
assert w_orig.shape == w_new.shape
def test_shuffle_each_shard():
"""Test that shuffle_each_shard works."""
n_samples = 100
n_tasks = 10
n_features = 10
X = np.random.rand(n_samples, n_features)
y = np.random.randint(2, size=(n_samples, n_tasks))
w = np.random.randint(2, size=(n_samples, n_tasks))
ids = np.arange(n_samples)
dataset = dc.data.DiskDataset.from_numpy(X, y, w, ids)
dataset.reshard(shard_size=10)
dataset.shuffle_each_shard()
X_s, y_s, w_s, ids_s = (dataset.X, dataset.y, dataset.w, dataset.ids)
assert X_s.shape == X.shape
assert y_s.shape == y.shape
assert ids_s.shape == ids.shape
assert w_s.shape == w.shape
assert not (ids_s == ids).all()
# The ids should now store the performed permutation. Check that the
# original dataset is recoverable.
for i in range(n_samples):
np.testing.assert_array_equal(X_s[i], X[ids_s[i]])
np.testing.assert_array_equal(y_s[i], y[ids_s[i]])
np.testing.assert_array_equal(w_s[i], w[ids_s[i]])
np.testing.assert_array_equal(ids_s[i], ids[ids_s[i]])
def test_shuffle_shards():
"""Test that shuffle_shards works."""
n_samples = 100
n_tasks = 10
n_features = 10
X = np.random.rand(n_samples, n_features)
y = np.random.randint(2, size=(n_samples, n_tasks))
w = np.random.randint(2, size=(n_samples, n_tasks))
ids = np.arange(n_samples)
dataset = dc.data.DiskDataset.from_numpy(X, y, w, ids)
dataset.reshard(shard_size=10)
dataset.shuffle_shards()
X_s, y_s, w_s, ids_s = (dataset.X, dataset.y, dataset.w, dataset.ids)
assert X_s.shape == X.shape
assert y_s.shape == y.shape
assert ids_s.shape == ids.shape
assert w_s.shape == w.shape
# The ids should now store the performed permutation. Check that the
# original dataset is recoverable.
for i in range(n_samples):
np.testing.assert_array_equal(X_s[i], X[ids_s[i]])
np.testing.assert_array_equal(y_s[i], y[ids_s[i]])
np.testing.assert_array_equal(w_s[i], w[ids_s[i]])
np.testing.assert_array_equal(ids_s[i], ids[ids_s[i]])
<file_sep>"""Type annotations that are widely used in DeepChem"""
from typing import Any, Callable, List, Sequence, Tuple, TypeVar, Union
import numpy as np
T = TypeVar("T")
# An activation function for a layer: either a function or the name of a standard activation
ActivationFn = Union[Callable, str]
# A loss function for use with KerasModel or TorchModel: f(outputs, labels, weights)
LossFn = Callable[[List, List, List], Any]
# A single value of some type, or multiple values of that type
OneOrMany = Union[T, Sequence[T]]
# The shape of a NumPy array
Shape = Tuple[int, ...]
# A NumPy array, or an object that can be converted to one. Once we move to
# requiring NumPy 1.20, we should replace this with numpy.typing.ArrayLike.
ArrayLike = Union[np.ndarray, Sequence]
# type of RDKit object
RDKitMol = Any
RDKitAtom = Any
RDKitBond = Any
# type of Pymatgen object
PymatgenStructure = Any
PymatgenComposition = Any
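# Illustrative sketch (not a real DeepChem function): how an alias like
# OneOrMany is meant to appear in a signature. `_example_layer_sizes` is a
# made-up name used only for this example.
def _example_layer_sizes(sizes: OneOrMany[int]) -> Shape:
    """Accept either a single int or a sequence of ints and normalize to a tuple."""
    if isinstance(sizes, int):
        return (sizes,)
    return tuple(sizes)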
<file_sep>"""
Script that trains Sklearn multitask models on toxcast & tox21 dataset.
"""
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
import numpy as np
from sklearn.ensemble import RandomForestClassifier
from deepchem.molnet import load_toxcast
import deepchem as dc
toxcast_tasks, toxcast_datasets, transformers = load_toxcast()
(train_dataset, valid_dataset, test_dataset) = toxcast_datasets
metric = dc.metrics.Metric(dc.metrics.roc_auc_score, np.mean)
def model_builder(model_dir):
sklearn_model = RandomForestClassifier(
class_weight="balanced", n_estimators=500, n_jobs=-1)
return dc.models.SklearnModel(sklearn_model, model_dir)
model = dc.models.SingletaskToMultitask(toxcast_tasks, model_builder)
# Fit trained model
model.fit(train_dataset)
print("About to evaluate model")
train_scores = model.evaluate(train_dataset, [metric], transformers)
valid_scores = model.evaluate(valid_dataset, [metric], transformers)
print("Train scores")
print(train_scores)
print("Validation scores")
print(valid_scores)
<file_sep>import os
import deepchem as dc
import numpy as np
import pytest
try:
import torch
from deepchem.models.torch_models.chemberta import Chemberta
except ModuleNotFoundError:
pass
@pytest.mark.torch
def test_chemberta_pretraining(smiles_regression_dataset,
smiles_multitask_regression_dataset):
# Pretraining in MLM mode
from deepchem.models.torch_models.chemberta import Chemberta
tokenizer_path = 'seyonec/PubChem10M_SMILES_BPE_60k'
model = Chemberta(task='mlm', tokenizer_path=tokenizer_path)
loss = model.fit(smiles_regression_dataset, nb_epoch=1)
assert loss
# Pretraining in Multitask Regression Mode
model = Chemberta(task='mtr', tokenizer_path=tokenizer_path, n_tasks=2)
loss = model.fit(smiles_multitask_regression_dataset, nb_epoch=1)
assert loss
@pytest.mark.torch
def test_chemberta_finetuning(smiles_regression_dataset,
smiles_multitask_regression_dataset):
# test regression
tokenizer_path = 'seyonec/PubChem10M_SMILES_BPE_60k'
model = Chemberta(task='regression', tokenizer_path=tokenizer_path)
loss = model.fit(smiles_regression_dataset, nb_epoch=1)
eval_score = model.evaluate(smiles_regression_dataset,
metrics=dc.metrics.Metric(
dc.metrics.mean_absolute_error))
assert loss, eval_score
prediction = model.predict(smiles_regression_dataset)
assert prediction.shape == smiles_regression_dataset.y.shape
# test multitask regression
model = Chemberta(task='mtr', tokenizer_path=tokenizer_path, n_tasks=2)
loss = model.fit(smiles_multitask_regression_dataset, nb_epoch=1)
eval_score = model.evaluate(smiles_multitask_regression_dataset,
metrics=dc.metrics.Metric(
dc.metrics.mean_absolute_error))
assert loss, eval_score
prediction = model.predict(smiles_multitask_regression_dataset)
assert prediction.shape == smiles_multitask_regression_dataset.y.shape
# test classification
y = np.random.choice([0, 1], size=smiles_regression_dataset.y.shape)
dataset = dc.data.NumpyDataset(X=smiles_regression_dataset.X,
y=y,
w=smiles_regression_dataset.w,
ids=smiles_regression_dataset.ids)
model = Chemberta(task='classification', tokenizer_path=tokenizer_path)
loss = model.fit(dataset, nb_epoch=1)
eval_score = model.evaluate(dataset,
metrics=dc.metrics.Metric(
dc.metrics.recall_score))
assert eval_score, loss
prediction = model.predict(dataset)
# logit scores
assert prediction.shape == (dataset.y.shape[0], 2)
@pytest.mark.torch
def test_chemberta_load_from_pretrained(tmpdir, smiles_regression_dataset):
pretrain_model_dir = os.path.join(tmpdir, 'pretrain')
finetune_model_dir = os.path.join(tmpdir, 'finetune')
tokenizer_path = 'seyonec/PubChem10M_SMILES_BPE_60k'
pretrain_model = Chemberta(task='mlm',
tokenizer_path=tokenizer_path,
model_dir=pretrain_model_dir)
pretrain_model.save_checkpoint()
finetune_model = Chemberta(task='regression',
tokenizer_path=tokenizer_path,
model_dir=finetune_model_dir)
finetune_model.load_from_pretrained(pretrain_model_dir)
# check weights match
pretrain_model_state_dict = pretrain_model.model.state_dict()
finetune_model_state_dict = finetune_model.model.state_dict()
pretrain_base_model_keys = [
key for key in pretrain_model_state_dict.keys() if 'roberta' in key
]
matches = [
torch.allclose(pretrain_model_state_dict[key],
finetune_model_state_dict[key])
for key in pretrain_base_model_keys
]
assert all(matches)
@pytest.mark.torch
def test_chemberta_save_reload(tmpdir):
tokenizer_path = 'seyonec/PubChem10M_SMILES_BPE_60k'
model = Chemberta(task='regression',
tokenizer_path=tokenizer_path,
model_dir=tmpdir)
model._ensure_built()
model.save_checkpoint()
model_new = Chemberta(task='regression',
tokenizer_path=tokenizer_path,
model_dir=tmpdir)
model_new.restore()
old_state = model.model.state_dict()
new_state = model_new.model.state_dict()
matches = [
torch.allclose(old_state[key], new_state[key])
for key in old_state.keys()
]
# all keys values should match
assert all(matches)
@pytest.mark.torch
def test_chemberta_load_weights_from_hf_hub():
pretrained_model_path = 'DeepChem/ChemBERTa-77M-MLM'
tokenizer_path = 'DeepChem/ChemBERTa-77M-MLM'
model = Chemberta(task='regression', tokenizer_path=tokenizer_path)
old_model_id = id(model.model)
model.load_from_pretrained(pretrained_model_path, from_hf_checkpoint=True)
new_model_id = id(model.model)
    # The new model's `model` attribute is an entirely new model instantiated by
    # AutoModel.from_pretrained and hence it should have a different identifier
assert old_model_id != new_model_id
<file_sep>"""
Various utilities around voxel grids.
"""
import logging
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import numpy as np
from deepchem.utils.noncovalent_utils import compute_pi_stack
logger = logging.getLogger(__name__)
def convert_atom_to_voxel(coordinates: np.ndarray, atom_index: int,
box_width: float, voxel_width: float) -> np.ndarray:
"""Converts atom coordinates to an i,j,k grid index.
This function offsets molecular atom coordinates by
(box_width/2, box_width/2, box_width/2) and then divides by
voxel_width to compute the voxel indices.
Parameters
    ----------
coordinates: np.ndarray
Array with coordinates of all atoms in the molecule, shape (N, 3).
atom_index: int
Index of an atom in the molecule.
box_width: float
Size of the box in Angstroms.
voxel_width: float
Size of a voxel in Angstroms
Returns
-------
indices: np.ndarray
A 1D numpy array of length 3 with `[i, j, k]`, the voxel coordinates
of specified atom.
"""
indices = np.floor(
(coordinates[atom_index] + box_width / 2.0) / voxel_width).astype(int)
return indices
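# Illustrative sketch: with a 16 Angstrom box and 1 Angstrom voxels, an atom at
# the origin maps to the center voxel (8, 8, 8). The coordinates below are made
# up for the example; `_example_convert_atom_to_voxel` is not a library API.
def _example_convert_atom_to_voxel() -> np.ndarray:
    coords = np.array([[0.0, 0.0, 0.0], [3.2, -1.5, 7.9]])
    # Atom 0 sits at the box center, so its indices are [8, 8, 8].
    return convert_atom_to_voxel(coords,
                                 atom_index=0,
                                 box_width=16.0,
                                 voxel_width=1.0)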
def convert_atom_pair_to_voxel(coordinates_tuple: Tuple[np.ndarray, np.ndarray],
atom_index_pair: Tuple[int,
int], box_width: float,
voxel_width: float) -> np.ndarray:
"""Converts a pair of atoms to i,j,k grid indexes.
Parameters
----------
coordinates_tuple: Tuple[np.ndarray, np.ndarray]
A tuple containing two molecular coordinate arrays of shapes `(N, 3)` and `(M, 3)`.
atom_index_pair: Tuple[int, int]
A tuple of indices for the atoms in the two molecules.
box_width: float
Size of the box in Angstroms.
voxel_width: float
Size of a voxel in Angstroms
Returns
-------
indices_list: np.ndarray
A numpy array of shape `(2, 3)`, where `3` is `[i, j, k]` of the
voxel coordinates of specified atom.
"""
indices_list = []
for coordinates, atom_index in zip(coordinates_tuple, atom_index_pair):
indices_list.append(
convert_atom_to_voxel(coordinates, atom_index, box_width,
voxel_width))
return np.array(indices_list)
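# Illustrative sketch: voxel indices for one atom from each of two molecules
# (e.g. protein and ligand). The coordinates are made up for the example;
# `_example_convert_atom_pair_to_voxel` is not a library API.
def _example_convert_atom_pair_to_voxel() -> np.ndarray:
    prot_xyz = np.array([[0.0, 0.0, 0.0]])
    lig_xyz = np.array([[2.0, 2.0, 2.0]])
    # Returns an array of shape (2, 3): one [i, j, k] row per molecule.
    return convert_atom_pair_to_voxel((prot_xyz, lig_xyz), (0, 0),
                                      box_width=16.0,
                                      voxel_width=1.0)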
def voxelize(get_voxels: Callable[..., Any],
coordinates: Any,
box_width: float = 16.0,
voxel_width: float = 1.0,
hash_function: Optional[Callable[..., Any]] = None,
feature_dict: Optional[Dict[Any, Any]] = None,
feature_list: Optional[List[Union[int, Tuple[int]]]] = None,
nb_channel: int = 16,
dtype: str = 'int') -> np.ndarray:
"""Helper function to voxelize inputs.
This helper function helps convert a hash function which
specifies spatial features of a molecular complex into a voxel
tensor. This utility is used by various featurizers that generate
voxel grids.
Parameters
----------
get_voxels: Function
Function that voxelizes inputs
coordinates: Any
Contains the 3D coordinates of a molecular system. This should have
whatever type get_voxels() expects as its first argument.
box_width: float, optional (default 16.0)
Size of a box in which voxel features are calculated. Box
is centered on a ligand centroid.
voxel_width: float, optional (default 1.0)
Size of a 3D voxel in a grid in Angstroms.
hash_function: Function
Used to map feature choices to voxel channels.
feature_dict: Dict, optional (default None)
Keys are atom indices or tuples of atom indices, the values are
computed features. If `hash_function is not None`, then the values
are hashed using the hash function into `[0, nb_channels)` and
this channel at the voxel for the given key is incremented by `1`
for each dictionary entry. If `hash_function is None`, then the
value must be a vector of size `(n_channels,)` which is added to
the existing channel values at that voxel grid.
feature_list: List, optional (default None)
List of atom indices or tuples of atom indices. This can only be
used if `nb_channel==1`. Increments the voxels corresponding to
these indices by `1` for each entry.
    nb_channel: int, optional (default 16)
The number of feature channels computed per voxel. Should
be a power of 2.
dtype: str ('int' or 'float'), optional (default 'int')
The type of the numpy ndarray created to hold features.
Returns
-------
feature_tensor: np.ndarray
The voxel of the input with the shape
`(voxels_per_edge, voxels_per_edge, voxels_per_edge, nb_channel)`.
"""
# Number of voxels per one edge of box to voxelize.
voxels_per_edge = int(box_width / voxel_width)
if dtype == "int":
feature_tensor = np.zeros(
(voxels_per_edge, voxels_per_edge, voxels_per_edge, nb_channel),
dtype=np.int8)
else:
feature_tensor = np.zeros(
(voxels_per_edge, voxels_per_edge, voxels_per_edge, nb_channel),
dtype=np.float16)
if feature_dict is not None:
for key, features in feature_dict.items():
voxels = get_voxels(coordinates, key, box_width, voxel_width)
if len(voxels.shape) == 1:
voxels = np.expand_dims(voxels, axis=0)
for voxel in voxels:
if ((voxel >= 0) & (voxel < voxels_per_edge)).all():
if hash_function is not None:
feature_tensor[
voxel[0], voxel[1], voxel[2],
hash_function(features, nb_channel)] += 1.0
else:
feature_tensor[voxel[0], voxel[1], voxel[2],
0] += features
elif feature_list is not None:
for key in feature_list:
voxels = get_voxels(coordinates, key, box_width, voxel_width)
for voxel in voxels:
if ((voxel >= 0) & (voxel < voxels_per_edge)).all():
feature_tensor[voxel[0], voxel[1], voxel[2], 0] += 1.0
return feature_tensor
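# Illustrative sketch: voxelizing a single made-up per-atom scalar feature for
# two atoms with nb_channel=1 and no hash function. Coordinates and feature
# values are arbitrary; `_example_voxelize` is not a library API.
def _example_voxelize() -> np.ndarray:
    coords = np.array([[0.0, 0.0, 0.0], [3.0, 3.0, 3.0]])
    feature_dict = {0: 1.0, 1: 2.0}  # atom index -> scalar feature
    tensor = voxelize(convert_atom_to_voxel,
                      coords,
                      box_width=16.0,
                      voxel_width=1.0,
                      feature_dict=feature_dict,
                      nb_channel=1,
                      dtype='float')
    # tensor has shape (16, 16, 16, 1); voxel (8, 8, 8) holds 1.0 and
    # voxel (11, 11, 11) holds 2.0.
    return tensor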
def voxelize_pi_stack(prot_xyz, prot_rdk, lig_xyz, lig_rdk, distances,
pi_stack_dist_cutoff, pi_stack_angle_cutoff, box_width,
voxel_width):
protein_pi_t, protein_pi_parallel, ligand_pi_t, ligand_pi_parallel = (
compute_pi_stack(prot_rdk,
lig_rdk,
distances,
dist_cutoff=pi_stack_dist_cutoff,
angle_cutoff=pi_stack_angle_cutoff))
pi_parallel_tensor = voxelize(
convert_atom_to_voxel,
prot_xyz,
box_width=box_width,
voxel_width=voxel_width,
feature_dict=protein_pi_parallel,
nb_channel=1,
)
pi_parallel_tensor += voxelize(
convert_atom_to_voxel,
lig_xyz,
box_width=box_width,
voxel_width=voxel_width,
feature_dict=ligand_pi_parallel,
nb_channel=1,
)
pi_t_tensor = voxelize(
convert_atom_to_voxel,
prot_xyz,
box_width=box_width,
voxel_width=voxel_width,
feature_dict=protein_pi_t,
nb_channel=1,
)
pi_t_tensor += voxelize(
convert_atom_to_voxel,
lig_xyz,
box_width=box_width,
voxel_width=voxel_width,
feature_dict=ligand_pi_t,
nb_channel=1,
)
return [pi_parallel_tensor, pi_t_tensor]
<file_sep>import deepchem as dc
import numpy as np
import pytest
try:
import torch
has_torch = True
except ModuleNotFoundError:
has_torch = False
@pytest.mark.torch
def test_unsorted_segment_sum():
segment_ids = torch.Tensor([0, 1, 0]).to(torch.int64)
data = torch.Tensor([[1, 2, 3, 4], [5, 6, 7, 8], [4, 3, 2, 1]])
num_segments = 2
if len(segment_ids.shape) != 1:
raise AssertionError("segment_ids have be a 1-D tensor")
if data.shape[0] != segment_ids.shape[0]:
raise AssertionError(
"segment_ids should be the same size as dimension 0 of input.")
result = dc.utils.pytorch_utils.unsorted_segment_sum(
data=data, segment_ids=segment_ids, num_segments=num_segments)
assert np.allclose(
np.array(result),
np.load("deepchem/utils/test/assets/result_segment_sum.npy"),
atol=1e-04)
@pytest.mark.torch
def test_segment_sum():
data = torch.Tensor([[1, 2, 3, 4], [4, 3, 2, 1], [5, 6, 7, 8]])
segment_ids = torch.Tensor([0, 0, 1]).to(torch.int64)
if len(segment_ids.shape) != 1:
raise AssertionError("segment_ids have be a 1-D tensor")
if data.shape[0] != segment_ids.shape[0]:
raise AssertionError(
"segment_ids should be the same size as dimension 0 of input.")
result = dc.utils.pytorch_utils.segment_sum(data=data,
segment_ids=segment_ids)
assert np.allclose(
np.array(result),
np.load("deepchem/utils/test/assets/result_segment_sum.npy"),
atol=1e-04)
<file_sep>from deepchem.feat.molecule_featurizers.snap_featurizer import SNAPFeaturizer
def test_snap_featurizer():
smiles = ["C1=CC=NC=C1", "CC(=O)C", "C"]
featurizer = SNAPFeaturizer()
features = featurizer.featurize(smiles)
assert len(features) == 3
assert features[0].node_features.shape == (6, 2)
assert features[1].edge_index.shape == (2, 6)
assert features[2].edge_features.shape == (0, 2)
<file_sep>"""
Test atomic coordinates and neighbor lists.
"""
import os
import logging
import numpy as np
import unittest
from deepchem.utils import conformers
from deepchem.feat import AtomicCoordinates
from deepchem.feat import NeighborListAtomicCoordinates
from deepchem.feat import NeighborListComplexAtomicCoordinates
logger = logging.getLogger(__name__)
class TestAtomicCoordinates(unittest.TestCase):
"""
Test AtomicCoordinates.
"""
def setUp(self):
"""
Set up tests.
"""
smiles = 'CC(=O)OC1=CC=CC=C1C(=O)O'
from rdkit import Chem
mol = Chem.MolFromSmiles(smiles)
engine = conformers.ConformerGenerator(max_conformers=1)
self.mol = engine.generate_conformers(mol)
self.get_angstrom_coords = AtomicCoordinates()._featurize
assert self.mol.GetNumConformers() > 0
def test_atomic_coordinates(self):
"""
Simple test that atomic coordinates returns ndarray of right shape.
"""
N = self.mol.GetNumAtoms()
atomic_coords_featurizer = AtomicCoordinates()
coords = atomic_coords_featurizer._featurize(self.mol)
assert isinstance(coords, np.ndarray)
assert coords.shape == (N, 3)
def test_neighbor_list_shape(self):
"""
Simple test that Neighbor Lists have right shape.
"""
nblist_featurizer = NeighborListAtomicCoordinates()
N = self.mol.GetNumAtoms()
coords = self.get_angstrom_coords(self.mol)
nblist_featurizer = NeighborListAtomicCoordinates()
nblist = nblist_featurizer._featurize(self.mol)[1]
assert isinstance(nblist, dict)
assert len(nblist.keys()) == N
for (atom, neighbors) in nblist.items():
assert isinstance(atom, int)
assert isinstance(neighbors, list)
assert len(neighbors) <= N
        # Do a manual distance computation and make sure the neighbor list agrees.
for i in range(N):
for j in range(N):
dist = np.linalg.norm(coords[i] - coords[j])
logger.info("Distance(%d, %d) = %f" % (i, j, dist))
if dist < nblist_featurizer.neighbor_cutoff and i != j:
assert j in nblist[i]
else:
assert j not in nblist[i]
def test_neighbor_list_extremes(self):
"""
Test Neighbor Lists with large/small boxes.
"""
N = self.mol.GetNumAtoms()
        # Test with a tiny cutoff (0.1 angstroms). There should be no neighbors in this case.
nblist_featurizer = NeighborListAtomicCoordinates(neighbor_cutoff=.1)
nblist = nblist_featurizer._featurize(self.mol)[1]
for atom in range(N):
assert len(nblist[atom]) == 0
# Test with cutoff 100 angstroms. Everything should be neighbors now.
nblist_featurizer = NeighborListAtomicCoordinates(neighbor_cutoff=100)
nblist = nblist_featurizer._featurize(self.mol)[1]
for atom in range(N):
assert len(nblist[atom]) == N - 1
def test_neighbor_list_max_num_neighbors(self):
"""
Test that neighbor lists return only max_num_neighbors.
"""
N = self.mol.GetNumAtoms()
max_num_neighbors = 1
nblist_featurizer = NeighborListAtomicCoordinates(max_num_neighbors)
nblist = nblist_featurizer._featurize(self.mol)[1]
for atom in range(N):
assert len(nblist[atom]) <= max_num_neighbors
# Do a manual distance computation and ensure that selected neighbor is
# closest since we set max_num_neighbors = 1
coords = self.get_angstrom_coords(self.mol)
for i in range(N):
closest_dist = np.inf
closest_nbr = None
for j in range(N):
if i == j:
continue
dist = np.linalg.norm(coords[i] - coords[j])
logger.info("Distance(%d, %d) = %f" % (i, j, dist))
if dist < closest_dist:
closest_dist = dist
closest_nbr = j
logger.info("Closest neighbor to %d is %d" % (i, closest_nbr))
logger.info("Distance: %f" % closest_dist)
if closest_dist < nblist_featurizer.neighbor_cutoff:
assert nblist[i] == [closest_nbr]
else:
assert nblist[i] == []
def test_neighbor_list_periodic(self):
"""Test building a neighbor list with periodic boundary conditions."""
cutoff = 4.0
box_size = np.array([10.0, 8.0, 9.0])
N = self.mol.GetNumAtoms()
coords = self.get_angstrom_coords(self.mol)
featurizer = NeighborListAtomicCoordinates(neighbor_cutoff=cutoff,
periodic_box_size=box_size)
neighborlist = featurizer._featurize(self.mol)[1]
expected_neighbors = [set() for i in range(N)]
for i in range(N):
for j in range(i):
delta = coords[i] - coords[j]
delta -= np.round(delta / box_size) * box_size
if np.linalg.norm(delta) < cutoff:
expected_neighbors[i].add(j)
expected_neighbors[j].add(i)
for i in range(N):
assert (set(neighborlist[i]) == expected_neighbors[i])
def test_complex_featurization_simple(self):
"""Test Neighbor List computation on protein-ligand complex."""
dir_path = os.path.dirname(os.path.realpath(__file__))
ligand_file = os.path.join(dir_path, "data/3zso_ligand_hyd.pdb")
protein_file = os.path.join(dir_path, "data/3zso_protein.pdb")
max_num_neighbors = 4
complex_featurizer = NeighborListComplexAtomicCoordinates(
max_num_neighbors)
system_coords, system_neighbor_list = complex_featurizer._featurize(
(ligand_file, protein_file))
N = system_coords.shape[0]
assert len(system_neighbor_list.keys()) == N
for atom in range(N):
assert len(system_neighbor_list[atom]) <= max_num_neighbors
<file_sep>import unittest
import numpy as np
import pytest
import deepchem as dc
import deepchem.models.losses as losses
try:
import tensorflow as tf
has_tensorflow = True
except:
has_tensorflow = False
try:
import torch
has_pytorch = True
except:
has_pytorch = False
class TestLosses(unittest.TestCase):
"""Test loss functions."""
@pytest.mark.tensorflow
def test_l1_loss_tf(self):
"""Test L1Loss."""
loss = losses.L1Loss()
outputs = tf.constant([[0.1, 0.8], [0.4, 0.6]])
labels = tf.constant([[0.0, 1.0], [1.0, 0.0]])
result = loss._compute_tf_loss(outputs, labels).numpy()
expected = [[0.1, 0.2], [0.6, 0.6]]
assert np.allclose(expected, result)
@pytest.mark.torch
def test_l1_loss_pytorch(self):
"""Test L1Loss."""
loss = losses.L1Loss()
outputs = torch.tensor([[0.1, 0.8], [0.4, 0.6]])
labels = torch.tensor([[0.0, 1.0], [1.0, 0.0]])
result = loss._create_pytorch_loss()(outputs, labels).numpy()
expected = [[0.1, 0.2], [0.6, 0.6]]
assert np.allclose(expected, result)
@pytest.mark.tensorflow
def test_huber_loss_tf(self):
"""Test HuberLoss."""
loss = losses.HuberLoss()
outputs = tf.constant([[0.1, 0.8], [0.4, 0.6]])
labels = tf.constant([[1.0, -1.0], [-1.0, 1.0]])
result = np.mean(loss._compute_tf_loss(outputs, labels).numpy())
expected = 0.67125
assert np.allclose(expected, result)
@pytest.mark.torch
def test_huber_loss_pytorch(self):
"""Test HuberLoss."""
loss = losses.HuberLoss()
outputs = torch.tensor([[0.1, 0.8], [0.4, 0.6]])
labels = torch.tensor([[1.0, -1.0], [-1.0, 1.0]])
result = np.mean(loss._create_pytorch_loss()(outputs, labels).numpy())
expected = 0.67125
assert np.allclose(expected, result)
@pytest.mark.tensorflow
def test_l2_loss_tf(self):
"""Test L2Loss."""
loss = losses.L2Loss()
outputs = tf.constant([[0.1, 0.8], [0.4, 0.6]])
labels = tf.constant([[0.0, 1.0], [1.0, 0.0]])
result = loss._compute_tf_loss(outputs, labels).numpy()
expected = [[0.1**2, 0.2**2], [0.6**2, 0.6**2]]
assert np.allclose(expected, result)
@pytest.mark.torch
def test_l2_loss_pytorch(self):
"""Test L2Loss."""
loss = losses.L2Loss()
outputs = torch.tensor([[0.1, 0.8], [0.4, 0.6]])
labels = torch.tensor([[0.0, 1.0], [1.0, 0.0]])
result = loss._create_pytorch_loss()(outputs, labels).numpy()
expected = [[0.1**2, 0.2**2], [0.6**2, 0.6**2]]
assert np.allclose(expected, result)
@pytest.mark.tensorflow
def test_hinge_loss_tf(self):
"""Test HingeLoss."""
loss = losses.HingeLoss()
outputs = tf.constant([[0.1, 0.8], [0.4, 0.6]])
labels = tf.constant([[1.0, -1.0], [-1.0, 1.0]])
result = loss._compute_tf_loss(outputs, labels).numpy()
expected = [np.mean([0.9, 1.8]), np.mean([1.4, 0.4])]
assert np.allclose(expected, result)
@pytest.mark.torch
def test_hinge_loss_pytorch(self):
"""Test HingeLoss."""
loss = losses.HingeLoss()
outputs = torch.tensor([[0.1, 0.8], [0.4, 0.6]])
labels = torch.tensor([[1.0, -1.0], [-1.0, 1.0]])
result = loss._create_pytorch_loss()(outputs, labels).numpy()
expected = [np.mean([0.9, 1.8]), np.mean([1.4, 0.4])]
assert np.allclose(expected, result)
@pytest.mark.tensorflow
def test_squared_hinge_loss_tf(self):
"""Test SquaredHingeLoss."""
loss = losses.SquaredHingeLoss()
outputs = tf.constant([[0.1, 0.8], [0.4, 0.6]])
labels = tf.constant([[1.0, -1.0], [-1.0, 1.0]])
result = loss._compute_tf_loss(outputs, labels).numpy()
expected = [np.mean([0.8100, 3.2400]), np.mean([1.9600, 0.1600])]
assert np.allclose(expected, result)
@pytest.mark.torch
def test_squared_hinge_loss_pytorch(self):
"""Test SquaredHingeLoss."""
loss = losses.SquaredHingeLoss()
outputs = torch.tensor([[0.1, 0.8], [0.4, 0.6]])
labels = torch.tensor([[1.0, -1.0], [-1.0, 1.0]])
result = loss._create_pytorch_loss()(outputs, labels).numpy()
expected = [np.mean([0.8100, 3.2400]), np.mean([1.9600, 0.1600])]
assert np.allclose(expected, result)
@pytest.mark.tensorflow
def test_poisson_loss_tf(self):
"""Test PoissonLoss."""
loss = losses.PoissonLoss()
outputs = tf.constant([[0.1, 0.8], [0.4, 0.6]])
labels = tf.constant([[0.0, 1.0], [1.0, 0.0]])
result = loss._compute_tf_loss(outputs, labels).numpy()
expected = 0.75986
assert np.allclose(expected, result)
@pytest.mark.torch
def test_poisson_loss_pytorch(self):
"""Test PoissonLoss."""
loss = losses.PoissonLoss()
outputs = torch.tensor([[0.1, 0.8], [0.4, 0.6]])
labels = torch.tensor([[0.0, 1.0], [1.0, 0.0]])
result = loss._create_pytorch_loss()(outputs, labels).numpy()
expected = 0.75986
assert np.allclose(expected, result)
@pytest.mark.tensorflow
def test_binary_cross_entropy_tf(self):
"""Test BinaryCrossEntropy."""
loss = losses.BinaryCrossEntropy()
outputs = tf.constant([[0.1, 0.8], [0.4, 0.6]])
labels = tf.constant([[0.0, 1.0], [1.0, 0.0]])
result = loss._compute_tf_loss(outputs, labels).numpy()
expected = [
-np.mean([np.log(0.9), np.log(0.8)]),
-np.mean([np.log(0.4), np.log(0.4)])
]
assert np.allclose(expected, result)
@pytest.mark.torch
def test_binary_cross_entropy_pytorch(self):
"""Test BinaryCrossEntropy."""
loss = losses.BinaryCrossEntropy()
outputs = torch.tensor([[0.1, 0.8], [0.4, 0.6]])
labels = torch.tensor([[0.0, 1.0], [1.0, 0.0]])
result = loss._create_pytorch_loss()(outputs, labels).numpy()
expected = [
-np.mean([np.log(0.9), np.log(0.8)]),
-np.mean([np.log(0.4), np.log(0.4)])
]
assert np.allclose(expected, result)
@pytest.mark.tensorflow
def test_categorical_cross_entropy_tf(self):
"""Test CategoricalCrossEntropy."""
loss = losses.CategoricalCrossEntropy()
outputs = tf.constant([[0.2, 0.8], [0.4, 0.6]])
labels = tf.constant([[0.0, 1.0], [1.0, 0.0]])
result = loss._compute_tf_loss(outputs, labels).numpy()
expected = [-np.log(0.8), -np.log(0.4)]
assert np.allclose(expected, result)
@pytest.mark.torch
def test_categorical_cross_entropy_pytorch(self):
"""Test CategoricalCrossEntropy."""
loss = losses.CategoricalCrossEntropy()
outputs = torch.tensor([[0.2, 0.8], [0.4, 0.6]])
labels = torch.tensor([[0.0, 1.0], [1.0, 0.0]])
result = loss._create_pytorch_loss()(outputs, labels).numpy()
expected = [-np.log(0.8), -np.log(0.4)]
assert np.allclose(expected, result)
@pytest.mark.tensorflow
def test_sigmoid_cross_entropy_tf(self):
"""Test SigmoidCrossEntropy."""
loss = losses.SigmoidCrossEntropy()
y = [[0.1, 0.8], [0.4, 0.6]]
outputs = tf.constant(y)
labels = tf.constant([[0.0, 1.0], [1.0, 0.0]])
result = loss._compute_tf_loss(outputs, labels).numpy()
sigmoid = 1.0 / (1.0 + np.exp(-np.array(y)))
expected = [[-np.log(1 - sigmoid[0, 0]), -np.log(sigmoid[0, 1])],
[-np.log(sigmoid[1, 0]), -np.log(1 - sigmoid[1, 1])]]
assert np.allclose(expected, result)
@pytest.mark.torch
def test_sigmoid_cross_entropy_pytorch(self):
"""Test SigmoidCrossEntropy."""
loss = losses.SigmoidCrossEntropy()
y = [[0.1, 0.8], [0.4, 0.6]]
outputs = torch.tensor(y)
labels = torch.tensor([[0.0, 1.0], [1.0, 0.0]])
result = loss._create_pytorch_loss()(outputs, labels).numpy()
sigmoid = 1.0 / (1.0 + np.exp(-np.array(y)))
expected = [[-np.log(1 - sigmoid[0, 0]), -np.log(sigmoid[0, 1])],
[-np.log(sigmoid[1, 0]), -np.log(1 - sigmoid[1, 1])]]
assert np.allclose(expected, result)
@pytest.mark.tensorflow
def test_softmax_cross_entropy_tf(self):
"""Test SoftmaxCrossEntropy."""
loss = losses.SoftmaxCrossEntropy()
y = np.array([[0.1, 0.8], [0.4, 0.6]])
outputs = tf.constant(y)
labels = tf.constant([[0.0, 1.0], [1.0, 0.0]])
result = loss._compute_tf_loss(outputs, labels).numpy()
softmax = np.exp(y) / np.expand_dims(np.sum(np.exp(y), axis=1), 1)
expected = [-np.log(softmax[0, 1]), -np.log(softmax[1, 0])]
assert np.allclose(expected, result)
@pytest.mark.torch
def test_softmax_cross_entropy_pytorch(self):
"""Test SoftmaxCrossEntropy."""
loss = losses.SoftmaxCrossEntropy()
y = np.array([[0.1, 0.8], [0.4, 0.6]])
outputs = torch.tensor(y)
labels = torch.tensor([[0.0, 1.0], [1.0, 0.0]])
result = loss._create_pytorch_loss()(outputs, labels).numpy()
softmax = np.exp(y) / np.expand_dims(np.sum(np.exp(y), axis=1), 1)
expected = [-np.log(softmax[0, 1]), -np.log(softmax[1, 0])]
assert np.allclose(expected, result)
@pytest.mark.tensorflow
def test_sparse_softmax_cross_entropy_tf(self):
"""Test SparseSoftmaxCrossEntropy."""
loss = losses.SparseSoftmaxCrossEntropy()
y = np.array([[0.1, 0.8], [0.4, 0.6]])
outputs = tf.constant(y)
labels = tf.constant([1, 0])
result = loss._compute_tf_loss(outputs, labels).numpy()
softmax = np.exp(y) / np.expand_dims(np.sum(np.exp(y), axis=1), 1)
expected = [-np.log(softmax[0, 1]), -np.log(softmax[1, 0])]
assert np.allclose(expected, result)
labels = tf.constant([[1], [0]])
result = loss._compute_tf_loss(outputs, labels).numpy()
softmax = np.exp(y) / np.expand_dims(np.sum(np.exp(y), axis=1), 1)
expected = [-np.log(softmax[0, 1]), -np.log(softmax[1, 0])]
assert np.allclose(expected, result)
@pytest.mark.torch
def test_sparse_softmax_cross_entropy_pytorch(self):
"""Test SparseSoftmaxCrossEntropy."""
loss = losses.SparseSoftmaxCrossEntropy()
y = np.array([[0.1, 0.8], [0.4, 0.6]])
outputs = torch.tensor(y)
labels = torch.tensor([1, 0])
result = loss._create_pytorch_loss()(outputs, labels).numpy()
softmax = np.exp(y) / np.expand_dims(np.sum(np.exp(y), axis=1), 1)
expected = [-np.log(softmax[0, 1]), -np.log(softmax[1, 0])]
assert np.allclose(expected, result)
labels = torch.tensor([[1], [0]])
result = loss._create_pytorch_loss()(outputs, labels).numpy()
softmax = np.exp(y) / np.expand_dims(np.sum(np.exp(y), axis=1), 1)
expected = [-np.log(softmax[0, 1]), -np.log(softmax[1, 0])]
assert np.allclose(expected, result)
@pytest.mark.tensorflow
def test_VAE_ELBO_tf(self):
"""."""
loss = losses.VAE_ELBO()
logvar = tf.constant([[1.0, 1.3], [0.6, 1.2]])
mu = tf.constant([[0.2, 0.7], [1.2, 0.4]])
x = tf.constant([[0.9, 0.4, 0.8], [0.3, 0, 1]])
reconstruction_x = tf.constant([[0.8, 0.3, 0.7], [0.2, 0, 0.9]])
result = loss._compute_tf_loss(logvar, mu, x, reconstruction_x).numpy()
expected = [
0.5 * np.mean([
0.04 + 1.0 - np.log(1e-20 + 1.0) - 1,
0.49 + 1.69 - np.log(1e-20 + 1.69) - 1
]) - np.mean(
np.array([0.9, 0.4, 0.8]) * np.log([0.8, 0.3, 0.7]) +
np.array([0.1, 0.6, 0.2]) * np.log([0.2, 0.7, 0.3])),
0.5 * np.mean([
1.44 + 0.36 - np.log(1e-20 + 0.36) - 1,
0.16 + 1.44 - np.log(1e-20 + 1.44) - 1
]) - np.mean(
np.array([0.3, 0, 1]) * np.log([0.2, 1e-20, 0.9]) +
np.array([0.7, 1, 0]) * np.log([0.8, 1, 0.1]))
]
assert np.allclose(expected, result)
@pytest.mark.torch
def test_VAE_ELBO_pytorch(self):
"""."""
loss = losses.VAE_ELBO()
logvar = torch.tensor([[1.0, 1.3], [0.6, 1.2]])
mu = torch.tensor([[0.2, 0.7], [1.2, 0.4]])
x = torch.tensor([[0.9, 0.4, 0.8], [0.3, 0, 1]])
reconstruction_x = torch.tensor([[0.8, 0.3, 0.7], [0.2, 0, 0.9]])
result = loss._create_pytorch_loss()(logvar, mu, x,
reconstruction_x).numpy()
expected = [
0.5 * np.mean([
0.04 + 1.0 - np.log(1e-20 + 1.0) - 1,
0.49 + 1.69 - np.log(1e-20 + 1.69) - 1
]) - np.mean(
np.array([0.9, 0.4, 0.8]) * np.log([0.8, 0.3, 0.7]) +
np.array([0.1, 0.6, 0.2]) * np.log([0.2, 0.7, 0.3])),
0.5 * np.mean([
1.44 + 0.36 - np.log(1e-20 + 0.36) - 1,
0.16 + 1.44 - np.log(1e-20 + 1.44) - 1
]) - np.mean(
np.array([0.3, 0, 1]) * np.log([0.2, 1e-20, 0.9]) +
np.array([0.7, 1, 0]) * np.log([0.8, 1, 0.1]))
]
assert np.allclose(expected, result)
@pytest.mark.tensorflow
def test_VAE_KLDivergence_tf(self):
"""."""
loss = losses.VAE_KLDivergence()
logvar = tf.constant([[1.0, 1.3], [0.6, 1.2]])
mu = tf.constant([[0.2, 0.7], [1.2, 0.4]])
result = loss._compute_tf_loss(logvar, mu).numpy()
expected = [
0.5 * np.mean([
0.04 + 1.0 - np.log(1e-20 + 1.0) - 1,
0.49 + 1.69 - np.log(1e-20 + 1.69) - 1
]), 0.5 * np.mean([
1.44 + 0.36 - np.log(1e-20 + 0.36) - 1,
0.16 + 1.44 - np.log(1e-20 + 1.44) - 1
])
]
assert np.allclose(expected, result)
@pytest.mark.torch
def test_VAE_KLDivergence_pytorch(self):
"""."""
loss = losses.VAE_KLDivergence()
logvar = torch.tensor([[1.0, 1.3], [0.6, 1.2]])
mu = torch.tensor([[0.2, 0.7], [1.2, 0.4]])
result = loss._create_pytorch_loss()(logvar, mu).numpy()
expected = [
0.5 * np.mean([
0.04 + 1.0 - np.log(1e-20 + 1.0) - 1,
0.49 + 1.69 - np.log(1e-20 + 1.69) - 1
]), 0.5 * np.mean([
1.44 + 0.36 - np.log(1e-20 + 0.36) - 1,
0.16 + 1.44 - np.log(1e-20 + 1.44) - 1
])
]
assert np.allclose(expected, result)
@pytest.mark.tensorflow
def test_ShannonEntropy_tf(self):
"""."""
loss = losses.ShannonEntropy()
inputs = tf.constant([[0.7, 0.3], [0.9, 0.1]])
result = loss._compute_tf_loss(inputs).numpy()
expected = [
-np.mean([0.7 * np.log(0.7), 0.3 * np.log(0.3)]),
-np.mean([0.9 * np.log(0.9), 0.1 * np.log(0.1)])
]
assert np.allclose(expected, result)
@pytest.mark.torch
def test_ShannonEntropy_pytorch(self):
"""."""
loss = losses.ShannonEntropy()
inputs = torch.tensor([[0.7, 0.3], [0.9, 0.1]])
result = loss._create_pytorch_loss()(inputs).numpy()
expected = [
-np.mean([0.7 * np.log(0.7), 0.3 * np.log(0.3)]),
-np.mean([0.9 * np.log(0.9), 0.1 * np.log(0.1)])
]
assert np.allclose(expected, result)
@pytest.mark.torch
def test_GlobalMutualInformation_pytorch(self):
"""."""
torch.manual_seed(123)
g_enc = torch.tensor([[1, 2, 3, 4], [1, 2, 3, 4]])
g_enc2 = torch.tensor([[5, 6, 7, 8], [5, 6, 7, 8]])
globalloss = losses.GlobalMutualInformationLoss()
excepted_global_loss = np.array(34.306854)
global_loss = globalloss._create_pytorch_loss()(
g_enc, g_enc2).detach().numpy()
assert np.allclose(global_loss, excepted_global_loss, 1e-3)
@pytest.mark.torch
def test_LocalInformation_pytorch(self):
"""."""
torch.manual_seed(123)
dim = 4
g_enc = torch.rand(2, dim)
l_enc = torch.randn(4, dim)
batch_graph_index = torch.tensor([[0, 1], [1, 0]])
localloss = losses.LocalMutualInformationLoss()
expected_local_loss = np.array(-0.17072642)
local_loss = localloss._create_pytorch_loss()(
l_enc, g_enc, batch_graph_index).detach().numpy()
assert np.allclose(local_loss, expected_local_loss, 1e-3)
def get_regression_dataset(self):
import os
from deepchem.feat.molecule_featurizers import MolGraphConvFeaturizer
np.random.seed(123)
featurizer = MolGraphConvFeaturizer(use_edges=True)
dir = os.path.dirname(os.path.abspath(__file__))
input_file = os.path.join(dir, 'assets/example_regression.csv')
loader = dc.data.CSVLoader(tasks=["outcome"],
feature_field="smiles",
featurizer=featurizer)
dataset = loader.create_dataset(input_file)
metric = dc.metrics.Metric(dc.metrics.mean_absolute_error,
mode="regression")
return dataset, metric
@pytest.mark.torch
def test_get_positive_expectation(self):
import numpy as np
import torch
from deepchem.models.losses import get_positive_expectation
p_samples = torch.tensor([0.5, 1.0, -0.5, -1.0])
measures = ['GAN', 'JSD', 'X2', 'KL', 'RKL', 'DV', 'H2', 'W1']
expected_results = [
np.array(-0.76866937),
np.array(-0.07552214),
np.array(0.625),
np.array(1),
np.array(-1.3353533),
np.array(0),
np.array(-0.33535326),
np.array(0)
]
for measure, expected in zip(measures, expected_results):
result = get_positive_expectation(p_samples,
measure).detach().numpy()
assert np.allclose(result, expected, atol=1e-6)
@pytest.mark.torch
def test_get_negative_expectation(self):
import numpy as np
import torch
from deepchem.models.losses import get_negative_expectation
q_samples = torch.tensor([0.5, 1.0, -0.5, -1.0])
measures = ['GAN', 'JSD', 'X2', 'KL', 'RKL', 'DV', 'H2', 'W1']
expected_results = [
np.array(0.76866937),
np.array(0.07552214),
np.array(-1.5625),
np.array(1.3353533),
np.array(-1),
np.array(0.289196),
np.array(0.33535326),
np.array(0)
]
for measure, expected in zip(measures, expected_results):
result = get_negative_expectation(q_samples,
measure).detach().numpy()
assert np.allclose(result, expected, atol=1e-6)
@pytest.mark.torch
def test_grover_pretrain_loss(self):
import torch
from deepchem.models.losses import GroverPretrainLoss
loss = GroverPretrainLoss()
loss_fn = loss._create_pytorch_loss()
batch_size = 3
output_dim = 10
fg_size = 8
atom_vocab_task_target = torch.ones(batch_size).type(torch.int64)
bond_vocab_task_target = torch.ones(batch_size).type(torch.int64)
fg_task_target = torch.ones(batch_size, fg_size)
atom_vocab_task_atom_pred = torch.zeros(batch_size, output_dim)
bond_vocab_task_atom_pred = torch.zeros(batch_size, output_dim)
atom_vocab_task_bond_pred = torch.zeros(batch_size, output_dim)
bond_vocab_task_bond_pred = torch.zeros(batch_size, output_dim)
fg_task_atom_from_atom = torch.zeros(batch_size, fg_size)
fg_task_atom_from_bond = torch.zeros(batch_size, fg_size)
fg_task_bond_from_atom = torch.zeros(batch_size, fg_size)
fg_task_bond_from_bond = torch.zeros(batch_size, fg_size)
result = loss_fn(atom_vocab_task_atom_pred, atom_vocab_task_bond_pred,
bond_vocab_task_atom_pred, bond_vocab_task_bond_pred,
fg_task_atom_from_atom, fg_task_atom_from_bond,
fg_task_bond_from_atom, fg_task_bond_from_bond,
atom_vocab_task_target, bond_vocab_task_target,
fg_task_target)
expected_result = torch.tensor(2.7726)
assert torch.allclose(result, expected_result)
@pytest.mark.torch
def test_deep_graph_infomax_loss(self):
import torch
import numpy as np
from deepchem.feat.graph_data import GraphData
from torch_geometric.nn import global_mean_pool
from deepchem.models.losses import DeepGraphInfomaxLoss
x = np.array([[1, 0], [0, 1], [1, 1], [0, 0]])
edge_index = np.array([[0, 1, 2, 0, 3], [1, 0, 1, 3, 2]])
graph_index = np.array([0, 0, 1, 1])
data = GraphData(node_features=x,
edge_index=edge_index,
graph_index=graph_index).numpy_to_torch()
graph_infomax_loss = DeepGraphInfomaxLoss()._create_pytorch_loss()
num_nodes = data.num_nodes
embedding_dim = 8
node_emb = torch.randn(num_nodes, embedding_dim)
# Compute the global graph representation
summary_emb = global_mean_pool(node_emb, data.graph_index)
# Compute positive and negative scores
positive_score = torch.matmul(node_emb, summary_emb.t())
negative_score = torch.matmul(node_emb, summary_emb.roll(1, dims=0).t())
loss = graph_infomax_loss(positive_score, negative_score)
# Check if the loss is a scalar and has the correct dtype
assert loss.dim() == 0
assert loss.dtype == torch.float32
@pytest.mark.torch
def test_graph_context_pred_loss(self):
import torch
from deepchem.models.losses import GraphContextPredLoss
torch.manual_seed(1234)
mode = "cbow"
neg_samples = 2
substruct_rep = torch.randn(4, 8)
overlapped_node_rep = torch.randn(8, 8)
context_rep = torch.randn(4, 8)
neg_context_rep = torch.randn(2 * 4, 8)
overlapped_context_size = torch.tensor([2, 2, 2, 2])
graph_context_pred_loss = GraphContextPredLoss()._create_pytorch_loss(
mode, neg_samples)
loss = graph_context_pred_loss(substruct_rep, overlapped_node_rep,
context_rep, neg_context_rep,
overlapped_context_size)
assert torch.allclose(loss, torch.tensor(4.4781, dtype=torch.float64))
mode = "skipgram"
graph_context_pred_loss = GraphContextPredLoss()._create_pytorch_loss(
mode, neg_samples)
loss = graph_context_pred_loss(substruct_rep, overlapped_node_rep,
context_rep, neg_context_rep,
overlapped_context_size)
assert torch.allclose(loss, torch.tensor(2.8531, dtype=torch.float64))
@pytest.mark.torch
def test_NTXentMultiplePositives_loss(self):
        import torch
        from deepchem.models.losses import NTXentMultiplePositives
z1 = torch.randn(4, 8)
z2 = torch.randn(4 * 3, 8)
ntxent_loss = NTXentMultiplePositives(norm=True, tau=0.5)
loss_fn = ntxent_loss._create_pytorch_loss()
loss = loss_fn(z1, z2)
# Check if the loss is a scalar and non-negative
assert loss.dim() == 0
assert loss.item() >= 0
<file_sep>import numpy as np
import torch
import torch.nn as nn
from deepchem.models.losses import Loss, L2Loss, SparseSoftmaxCrossEntropy
from deepchem.models.torch_models import layers
from deepchem.models.torch_models import TorchModel
from torch_geometric.data import Data, Batch
from deepchem.feat import GraphData
from deepchem.data import Dataset
from typing import Union, List, Sequence, Optional, Iterable, Tuple
class _ModData(Data):
"""Modified version of Data class of pytorch-geometric to enable batching process to
custom increment values in certain keys.
"""
def __init__(self, required_inc, *args, **kwargs):
"""Initialize the _ModData class"""
super().__init__(*args, **kwargs)
self.required_inc = required_inc # required increment
def __inc__(self, key, value, *args, **kwargs):
"""
        Modified __inc__() to increment the 'atom_to_incoming_bonds' and 'mapping' keys
        based on the given required increment value (for example, the number of bonds in the molecule).
"""
if key in ['atom_to_incoming_bonds', 'mapping']:
return self.required_inc
else:
return super().__inc__(key, value, *args, **kwargs)
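    # Hedged illustration (toy values, not from the source): pytorch-geometric's
    # collate adds the cumulative `__inc__(key, ...)` of the preceding graphs to
    # each key, so for two graphs with `required_inc` values 5 and 7, the second
    # graph's 'atom_to_incoming_bonds' and 'mapping' entries are offset by 5 (the
    # first graph's number of bond-feature rows) rather than by its node count:
    #
    #   batch = Batch.from_data_list([_ModData(5, ...), _ModData(7, ...)])
    #   # bond indices of the second graph now point into the concatenated tensors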
class _MapperDMPNN:
"""
    This class is a helper class for the DMPNNModel class; it generates the concatenated feature vector and mappings.
`self.f_ini_atoms_bonds` is the concatenated feature vector which contains
concatenation of initial atom and bond features.
    `self.atom_to_incoming_bonds` is a mapping from atom index to the list of indices of incoming bonds.
`self.mapping` is the mapping that maps bond index to 'array of indices of the bonds'
incoming at the initial atom of the bond (excluding the reverse bonds)
Example,
Let the diagram given below represent a molecule containing 3 atoms (nodes) and 2 bonds (edges):-
| 0 --- 1
| |
| 2
Here, atoms are => A0, A1 and A2 and their respective feature vectors are f0, f1, and f2.
Let the bonds from atoms 0->1 ('B[01]') and 1->0 ('B[10]') be considered as 2 different bonds.
Hence, by considering the same for all atoms, the total number of bonds = 4.
Let:
B[01] => B0
B[10] => B1
B[02] => B2
B[20] => B3
    Hence respective bond features are fb0, fb1, fb2, and fb3.
(Note: fb0 = fb1, fb2 = fb3)
'f_ini_atoms_bonds' is the concatenated feature vector which contains
concatenation of initial atom and bond features.
'B0'
Example: 'A0' -----> A1 , concat feature = f0 + fb0
Hence,
B0 B1 B2 B3 B(-1)
f_ini_atoms_bonds = [ f0+fb0 , f1+fb1 , f0+fb2 , f2+fb3 , f(-1) ]
(Note: f(-1) is a zero array of the same size as other concatenated features.)
    `atom_to_incoming_bonds` is a mapping from atom index to the list of indices of incoming bonds.
B3 B1
Example: 'A2' ----> 'A0' <---- 'A1', for A0 => [B1, B3]
Hence,
A0 A1 A2
atom_to_incoming_bonds = [ [B1,B3] [B0,B(-1)] [B2,B(-1)] ]
(Note: Here, maximum number of incoming bonds is 2. So, -1 index is added to all those cases
where number of incoming bonds is less than maximum. In this case, its for A1 and A2.)
To get mapping, first find indices of the bonds, incoming at the initial atom of the bond.
Example: for bond B0, B1 and B3 are coming towards atom 0.
| B0 B1
| 0 ----> 1 | 0 <---- 1
| | ^
| | | B3
| | 2
B0 B1 B2 B3
mapping (with reverse bonds) = [ [B1,B3] [B0] [B1,B3] [B2] ]
To get the required mapping, reverse bond indices are replaced with -1
and extra space in the array elements is filled with -1, to get a uniform array.
The mapping is also padded with -1 at the end, so that the length of `mapping` is
equal to the length of `f_ini_atoms_bonds`.
Hence,
B0 B1 B2 B3 B(-1)
mapping = [ [B(-1),B3] [B(-1),B(-1)] [B1,B(-1)] [B(-1),B(-1)] [B(-1),B(-1)] ]
OR
mapping = [[-1, 3], [-1, -1], [1, -1], [-1, -1], [-1, -1]]
"""
def __init__(self, graph: GraphData):
"""
Parameters
----------
graph: GraphData
GraphData object.
"""
self.num_atoms: int = graph.num_nodes
self.num_atom_features: int = graph.num_node_features
self.num_bonds: int = graph.num_edges
self.num_bond_features: int = graph.num_edge_features
self.atom_features: np.ndarray = graph.node_features
self.bond_features: Optional[np.ndarray] = graph.edge_features
self.bond_index: np.ndarray = graph.edge_index
self.global_features: np.ndarray = graph.global_features # type: ignore
# mypy check is ignored for global_features as it is not a default attribute
# of GraphData. It is created during runtime using **kwargs.
# mapping from bond index to the index of the atom (where the bond is coming from)
self.bond_to_ini_atom: np.ndarray
# mapping from bond index to concat(in_atom, bond) features
self.f_ini_atoms_bonds: np.ndarray = np.empty(0)
        # mapping from atom index to list of indices of incoming bonds
self.atom_to_incoming_bonds: np.ndarray
# mapping which maps bond index to 'array of indices of the bonds' incoming at the initial atom of the bond (excluding the reverse bonds)
self.mapping: np.ndarray = np.empty(0)
if self.num_bonds == 0:
self.bond_to_ini_atom = np.empty(0)
self.f_ini_atoms_bonds = np.zeros(
(1, self.num_atom_features + self.num_bond_features))
self.atom_to_incoming_bonds = np.asarray([[-1]] * self.num_atoms,
dtype=int)
self.mapping = np.asarray([[-1]], dtype=int)
else:
self.bond_to_ini_atom = self.bond_index[0]
self._get_f_ini_atoms_bonds() # its zero padded at the end
self.atom_to_incoming_bonds = self._get_atom_to_incoming_bonds()
self._generate_mapping() # its padded with -1 at the end
@property
def values(self) -> Sequence[np.ndarray]:
"""
Returns the required mappings:
- atom features
- concat features (atom + bond)
- atom to incoming bonds mapping
- mapping
- global features
"""
return self.atom_features, self.f_ini_atoms_bonds, self.atom_to_incoming_bonds, self.mapping, self.global_features
def _get_f_ini_atoms_bonds(self):
"""Method to get `self.f_ini_atoms_bonds`"""
self.f_ini_atoms_bonds = np.hstack(
(self.atom_features[self.bond_to_ini_atom], self.bond_features))
# zero padded at the end
self.f_ini_atoms_bonds = np.pad(self.f_ini_atoms_bonds,
((0, 1), (0, 0)))
def _generate_mapping(self):
"""
Generate mapping, which maps bond index to 'array of indices of the bonds'
incoming at the initial atom of the bond (reverse bonds are not considered).
Steps:
- Get mapping based on `self.atom_to_incoming_bonds` and `self.bond_to_ini_atom`.
- Replace reverse bond indices with -1.
- Pad the mapping with -1.
"""
# get mapping which maps bond index to 'array of indices of the bonds' incoming at the initial atom of the bond
self.mapping = self.atom_to_incoming_bonds[self.bond_to_ini_atom]
self._replace_rev_bonds()
# padded with -1 at the end
self.mapping = np.pad(self.mapping, ((0, 1), (0, 0)),
constant_values=-1)
def _get_atom_to_incoming_bonds(self) -> np.ndarray:
"""Method to get atom_to_incoming_bonds mapping"""
        # mapping from bond index to the index of the atom (where the bond is going to)
bond_to_final_atom: np.ndarray = self.bond_index[1]
        # mapping from atom index to list of indices of incoming bonds
a2b: List = []
for i in range(self.num_atoms):
a2b.append(list(np.where(bond_to_final_atom == i)[0]))
# get maximum number of incoming bonds
max_num_bonds: int = max(
1, max(len(incoming_bonds) for incoming_bonds in a2b))
# Make number of incoming bonds equal to maximum number of bonds.
# This is done by appending -1 to fill remaining space at each atom indices.
a2b = [
a2b[a] + [-1] * (max_num_bonds - len(a2b[a]))
for a in range(self.num_atoms)
]
return np.asarray(a2b, dtype=int)
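    # Worked example taken from the class docstring's 3-atom molecule (directed
    # bonds B0..B3): B1 and B3 end at atom A0, B0 ends at A1 and B2 ends at A2,
    # so with -1 padding up to the maximum of two incoming bonds:
    #
    #   atom_to_incoming_bonds == [[1, 3], [0, -1], [2, -1]]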
def _replace_rev_bonds(self):
"""Method to get b2revb and replace the reverse bond indices with -1 in mapping."""
# mapping from bond index to the index of the reverse bond
b2revb = np.empty(self.num_bonds, dtype=int)
for i in range(self.num_bonds):
if i % 2 == 0:
b2revb[i] = i + 1
else:
b2revb[i] = i - 1
for count, i in enumerate(b2revb):
self.mapping[count][np.where(self.mapping[count] == i)] = -1
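    # Sketch of the reverse-bond convention used above: directed bonds are stored
    # in consecutive pairs, so bonds 2k and 2k+1 are reverses of each other, giving
    #
    #   b2revb == [1, 0, 3, 2, ...]
    #
    # and every occurrence of a reverse-bond index in `mapping` is masked with -1.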
class DMPNN(nn.Module):
"""Directed Message Passing Neural Network
In this class, we define the various encoder layers and establish a sequential model for the Directed Message Passing Neural Network (D-MPNN) [1]_.
We also define the forward call of this model in the forward function.
Example
-------
>>> import deepchem as dc
>>> from torch_geometric.data import Data, Batch
>>> # Get data
>>> input_smile = "CC"
>>> feat = dc.feat.DMPNNFeaturizer(features_generators=['morgan'])
>>> graph = feat.featurize(input_smile)
>>> mapper = _MapperDMPNN(graph[0])
>>> atom_features, f_ini_atoms_bonds, atom_to_incoming_bonds, mapping, global_features = mapper.values
>>> atom_features = torch.from_numpy(atom_features).float()
>>> f_ini_atoms_bonds = torch.from_numpy(f_ini_atoms_bonds).float()
>>> atom_to_incoming_bonds = torch.from_numpy(atom_to_incoming_bonds)
>>> mapping = torch.from_numpy(mapping)
>>> global_features = torch.from_numpy(global_features).float()
>>> data = [Data(atom_features=atom_features,\
f_ini_atoms_bonds=f_ini_atoms_bonds,\
atom_to_incoming_bonds=atom_to_incoming_bonds,\
mapping=mapping, global_features=global_features)]
>>> # Prepare batch (size 1)
>>> pyg_batch = Batch()
>>> pyg_batch = pyg_batch.from_data_list(data)
>>> # Initialize the model
>>> model = DMPNN(mode='regression', global_features_size=2048, n_tasks=2)
>>> # Get the forward call of the model for this batch.
>>> output = model(pyg_batch)
References
----------
.. [1] Analyzing Learned Molecular Representations for Property Prediction https://arxiv.org/pdf/1904.01561.pdf
"""
def __init__(self,
mode: str = 'regression',
n_classes: int = 3,
n_tasks: int = 1,
global_features_size: int = 0,
use_default_fdim: bool = True,
atom_fdim: int = 133,
bond_fdim: int = 14,
enc_hidden: int = 300,
depth: int = 3,
bias: bool = False,
enc_activation: str = 'relu',
enc_dropout_p: float = 0.0,
aggregation: str = 'mean',
aggregation_norm: Union[int, float] = 100,
ffn_hidden: int = 300,
ffn_activation: str = 'relu',
ffn_layers: int = 3,
ffn_dropout_p: float = 0.0,
ffn_dropout_at_input_no_act: bool = True):
"""Initialize the DMPNN class.
Parameters
----------
mode: str, default 'regression'
The model type - classification or regression.
n_classes: int, default 3
The number of classes to predict (used only in classification mode).
n_tasks: int, default 1
The number of tasks.
global_features_size: int, default 0
Size of the global features vector, based on the global featurizers used during featurization.
use_default_fdim: bool
If `True`, self.atom_fdim and self.bond_fdim are initialized using values from the GraphConvConstants class.
If `False`, self.atom_fdim and self.bond_fdim are initialized from the values provided.
atom_fdim: int
Dimension of atom feature vector.
bond_fdim: int
Dimension of bond feature vector.
enc_hidden: int
Size of hidden layer in the encoder layer.
depth: int
No of message passing steps.
bias: bool
If `True`, dense layers will use bias vectors.
enc_activation: str
Activation function to be used in the encoder layer.
Can choose between 'relu' for ReLU, 'leakyrelu' for LeakyReLU, 'prelu' for PReLU,
'tanh' for TanH, 'selu' for SELU, and 'elu' for ELU.
enc_dropout_p: float
Dropout probability for the encoder layer.
aggregation: str
Aggregation type to be used in the encoder layer.
Can choose between 'mean', 'sum', and 'norm'.
aggregation_norm: Union[int, float]
Value required if `aggregation` type is 'norm'.
ffn_hidden: int
Size of hidden layer in the feed-forward network layer.
ffn_activation: str
Activation function to be used in feed-forward network layer.
Can choose between 'relu' for ReLU, 'leakyrelu' for LeakyReLU, 'prelu' for PReLU,
'tanh' for TanH, 'selu' for SELU, and 'elu' for ELU.
ffn_layers: int
Number of layers in the feed-forward network layer.
ffn_dropout_p: float
Dropout probability for the feed-forward network layer.
ffn_dropout_at_input_no_act: bool
If true, dropout is applied on the input tensor. For single layer, it is not passed to an activation function.
"""
super(DMPNN, self).__init__()
self.mode: str = mode
self.n_classes: int = n_classes
self.n_tasks: int = n_tasks
# get encoder
self.encoder: nn.Module = layers.DMPNNEncoderLayer(
use_default_fdim=use_default_fdim,
atom_fdim=atom_fdim,
bond_fdim=bond_fdim,
d_hidden=enc_hidden,
depth=depth,
bias=bias,
activation=enc_activation,
dropout_p=enc_dropout_p,
aggregation=aggregation,
aggregation_norm=aggregation_norm)
# get input size for ffn
ffn_input: int = enc_hidden + global_features_size
# get output size for ffn
if self.mode == 'regression':
ffn_output: int = self.n_tasks
elif self.mode == 'classification':
ffn_output = self.n_tasks * self.n_classes
# get ffn
self.ffn: nn.Module = layers.PositionwiseFeedForward(
d_input=ffn_input,
d_hidden=ffn_hidden,
d_output=ffn_output,
activation=ffn_activation,
n_layers=ffn_layers,
dropout_p=ffn_dropout_p,
dropout_at_input_no_act=ffn_dropout_at_input_no_act)
def forward(
self,
pyg_batch: Batch) -> Union[torch.Tensor, Sequence[torch.Tensor]]:
"""
Parameters
----------
        pyg_batch: Batch
A pytorch-geometric batch containing tensors for:
- atom_features
- f_ini_atoms_bonds
- atom_to_incoming_bonds
- mapping
- global_features
The `molecules_unbatch_key` is also derived from the batch.
(List containing number of atoms in various molecules of the batch)
Returns
-------
output: Union[torch.Tensor, Sequence[torch.Tensor]]
Predictions for the graphs
"""
atom_features: torch.Tensor = pyg_batch['atom_features']
f_ini_atoms_bonds: torch.Tensor = pyg_batch['f_ini_atoms_bonds']
atom_to_incoming_bonds: torch.Tensor = pyg_batch[
'atom_to_incoming_bonds']
mapping: torch.Tensor = pyg_batch['mapping']
global_features: torch.Tensor = pyg_batch['global_features']
# Steps to get `molecules_unbatch_key`:
# 1. Get the tensor containing the indices of first atoms of each molecule
# 2. Get the tensor containing number of atoms of each molecule
# by taking the difference between consecutive indices.
# 3. Convert the tensor to a list.
molecules_unbatch_key: List = torch.diff(
pyg_batch._slice_dict['atom_features']).tolist()
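        # Hedged illustration (assumed slice values): if this batch collated two
        # molecules with 2 and 3 atoms, PyG records
        # pyg_batch._slice_dict['atom_features'] == tensor([0, 2, 5]), and
        # torch.diff gives [2, 3], the per-molecule atom counts used for unbatching.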
# num_molecules x (enc_hidden + global_features_size)
encodings: torch.Tensor = self.encoder(atom_features, f_ini_atoms_bonds,
atom_to_incoming_bonds, mapping,
global_features,
molecules_unbatch_key)
# ffn_output (`self.n_tasks` or `self.n_tasks * self.n_classes`)
output: torch.Tensor = self.ffn(encodings)
final_output: Union[torch.Tensor, Sequence[torch.Tensor]]
if self.mode == 'regression':
final_output = output
elif self.mode == 'classification':
if self.n_tasks == 1:
output = output.view(-1, self.n_classes)
final_output = nn.functional.softmax(output, dim=1), output
else:
output = output.view(-1, self.n_tasks, self.n_classes)
final_output = nn.functional.softmax(output, dim=2), output
return final_output
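    # Shape sketch (hedged; follows from the reshapes above): in classification mode
    # with n_tasks=2 and n_classes=3, a batch of B molecules yields probabilities of
    # shape (B, 2, 3) together with the raw logits, while regression mode returns a
    # single tensor of shape (B, n_tasks).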
class DMPNNModel(TorchModel):
"""Directed Message Passing Neural Network
This class implements the Directed Message Passing Neural Network (D-MPNN) [1]_.
The DMPNN model has 2 phases, message-passing phase and read-out phase.
- The goal of the message-passing phase is to generate 'hidden states of all the atoms in the molecule' using encoders.
- Next in read-out phase, the features are passed into feed-forward neural network to get the task-based prediction.
For additional information:
- `Mapper class <https://github.com/deepchem/deepchem/blob/31676cc2497d5f2de65d648c09fc86191b594501/deepchem/models/torch_models/dmpnn.py#L10-L92>`_
- `Encoder layer class <https://github.com/deepchem/deepchem/blob/31676cc2497d5f2de65d648c09fc86191b594501/deepchem/models/torch_models/layers.py#L1223-L1374>`_
- `Feed-Forward class <https://github.com/deepchem/deepchem/blob/31676cc2497d5f2de65d648c09fc86191b594501/deepchem/models/torch_models/layers.py#L689-L700>`_
Example
-------
>>> import deepchem as dc
>>> import os
>>> model_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
>>> input_file = os.path.join(model_dir, 'tests/assets/freesolv_sample_5.csv')
>>> loader = dc.data.CSVLoader(tasks=['y'], feature_field='smiles', featurizer=dc.feat.DMPNNFeaturizer())
>>> dataset = loader.create_dataset(input_file)
>>> model = DMPNNModel()
>>> out = model.fit(dataset, nb_epoch=1)
References
----------
.. [1] Analyzing Learned Molecular Representations for Property Prediction https://arxiv.org/pdf/1904.01561.pdf
"""
def __init__(self,
mode: str = 'regression',
n_classes: int = 3,
n_tasks: int = 1,
batch_size: int = 1,
global_features_size: int = 0,
use_default_fdim: bool = True,
atom_fdim: int = 133,
bond_fdim: int = 14,
enc_hidden: int = 300,
depth: int = 3,
bias: bool = False,
enc_activation: str = 'relu',
enc_dropout_p: float = 0.0,
aggregation: str = 'mean',
aggregation_norm: Union[int, float] = 100,
ffn_hidden: int = 300,
ffn_activation: str = 'relu',
ffn_layers: int = 3,
ffn_dropout_p: float = 0.0,
ffn_dropout_at_input_no_act: bool = True,
**kwargs):
"""Initialize the DMPNNModel class.
Parameters
----------
mode: str, default 'regression'
The model type - classification or regression.
n_classes: int, default 3
The number of classes to predict (used only in classification mode).
n_tasks: int, default 1
The number of tasks.
batch_size: int, default 1
The number of datapoints in a batch.
global_features_size: int, default 0
Size of the global features vector, based on the global featurizers used during featurization.
use_default_fdim: bool
If `True`, self.atom_fdim and self.bond_fdim are initialized using values from the GraphConvConstants class.
If `False`, self.atom_fdim and self.bond_fdim are initialized from the values provided.
atom_fdim: int
Dimension of atom feature vector.
bond_fdim: int
Dimension of bond feature vector.
enc_hidden: int
Size of hidden layer in the encoder layer.
depth: int
No of message passing steps.
bias: bool
If `True`, dense layers will use bias vectors.
enc_activation: str
Activation function to be used in the encoder layer.
Can choose between 'relu' for ReLU, 'leakyrelu' for LeakyReLU, 'prelu' for PReLU,
'tanh' for TanH, 'selu' for SELU, and 'elu' for ELU.
enc_dropout_p: float
Dropout probability for the encoder layer.
aggregation: str
Aggregation type to be used in the encoder layer.
Can choose between 'mean', 'sum', and 'norm'.
aggregation_norm: Union[int, float]
Value required if `aggregation` type is 'norm'.
ffn_hidden: int
Size of hidden layer in the feed-forward network layer.
ffn_activation: str
Activation function to be used in feed-forward network layer.
Can choose between 'relu' for ReLU, 'leakyrelu' for LeakyReLU, 'prelu' for PReLU,
'tanh' for TanH, 'selu' for SELU, and 'elu' for ELU.
ffn_layers: int
Number of layers in the feed-forward network layer.
ffn_dropout_p: float
Dropout probability for the feed-forward network layer.
ffn_dropout_at_input_no_act: bool
If true, dropout is applied on the input tensor. For single layer, it is not passed to an activation function.
kwargs: Dict
kwargs supported by TorchModel
"""
model: nn.Module = DMPNN(
mode=mode,
n_classes=n_classes,
n_tasks=n_tasks,
global_features_size=global_features_size,
use_default_fdim=use_default_fdim,
atom_fdim=atom_fdim,
bond_fdim=bond_fdim,
enc_hidden=enc_hidden,
depth=depth,
bias=bias,
enc_activation=enc_activation,
enc_dropout_p=enc_dropout_p,
aggregation=aggregation,
aggregation_norm=aggregation_norm,
ffn_hidden=ffn_hidden,
ffn_activation=ffn_activation,
ffn_layers=ffn_layers,
ffn_dropout_p=ffn_dropout_p,
ffn_dropout_at_input_no_act=ffn_dropout_at_input_no_act)
if mode == 'regression':
loss: Loss = L2Loss()
output_types: List[str] = ['prediction']
elif mode == 'classification':
loss = SparseSoftmaxCrossEntropy()
output_types = ['prediction', 'loss']
super(DMPNNModel, self).__init__(model,
loss=loss,
output_types=output_types,
batch_size=batch_size,
**kwargs)
def _to_pyg_graph(self, values: Sequence[np.ndarray]) -> _ModData:
"""Convert to PyTorch Geometric graph modified data instance
.. note::
This method requires PyTorch Geometric to be installed.
Parameters
----------
values: Sequence[np.ndarray]
Mappings from ``_MapperDMPNN`` helper class for a molecule
Returns
-------
torch_geometric.data.Data
Modified Graph data for PyTorch Geometric (``_ModData``)
"""
# atom feature matrix with shape [number of atoms, number of features]
atom_features: np.ndarray
# concatenated feature vector which contains concatenation of initial atom and bond features
f_ini_atoms_bonds: np.ndarray
        # mapping from atom index to list of indices of incoming bonds
atom_to_incoming_bonds: np.ndarray
# mapping that maps bond index to 'array of indices of the bonds'
# incoming at the initial atom of the bond (excluding the reverse bonds)
mapping: np.ndarray
# array of global molecular features
global_features: np.ndarray
atom_features, f_ini_atoms_bonds, atom_to_incoming_bonds, mapping, global_features = values
t_atom_features: torch.Tensor = torch.from_numpy(
atom_features).float().to(device=self.device)
t_f_ini_atoms_bonds: torch.Tensor = torch.from_numpy(
f_ini_atoms_bonds).float().to(device=self.device)
t_atom_to_incoming_bonds: torch.Tensor = torch.from_numpy(
atom_to_incoming_bonds).to(device=self.device)
t_mapping: torch.Tensor = torch.from_numpy(mapping).to(
device=self.device)
t_global_features: torch.Tensor = torch.from_numpy(
global_features).float().to(device=self.device)
return _ModData(required_inc=len(t_f_ini_atoms_bonds),
atom_features=t_atom_features,
f_ini_atoms_bonds=t_f_ini_atoms_bonds,
atom_to_incoming_bonds=t_atom_to_incoming_bonds,
mapping=t_mapping,
global_features=t_global_features)
def _prepare_batch(
self, batch: Tuple[List, List, List]
) -> Tuple[Batch, List[torch.Tensor], List[torch.Tensor]]:
"""Method to prepare pytorch-geometric batches from inputs.
Overrides the existing ``_prepare_batch`` method to customize how model batches are
generated from the inputs.
.. note::
This method requires PyTorch Geometric to be installed.
Parameters
----------
batch: Tuple[List, List, List]
batch data from ``default_generator``
Returns
-------
Tuple[Batch, List[torch.Tensor], List[torch.Tensor]]
"""
graphs_list: List
labels: List
weights: List
graphs_list, labels, weights = batch
pyg_batch: Batch = Batch()
pyg_batch = pyg_batch.from_data_list(graphs_list)
_, labels, weights = super(DMPNNModel, self)._prepare_batch(
([], labels, weights))
return pyg_batch, labels, weights
def default_generator(self,
dataset: Dataset,
epochs: int = 1,
mode: str = 'fit',
deterministic: bool = True,
pad_batches: bool = False,
**kwargs) -> Iterable[Tuple[List, List, List]]:
"""Create a generator that iterates batches for a dataset.
Overrides the existing ``default_generator`` method to customize how model inputs are
generated from the data.
Here, the ``_MapperDMPNN`` helper class is used, for each molecule in a batch, to get required input parameters:
- atom_features
- f_ini_atoms_bonds
- atom_to_incoming_bonds
- mapping
- global_features
Then data from each molecule is converted to a ``_ModData`` object and stored as list of graphs.
        The graphs are modified such that the `atom_to_incoming_bonds` and `mapping` tensors share the same number of columns across the batch (an important requirement for batching).
Parameters
----------
dataset: Dataset
the data to iterate
epochs: int
the number of times to iterate over the full dataset
mode: str
allowed values are 'fit' (called during training), 'predict' (called
during prediction), and 'uncertainty' (called during uncertainty
prediction)
deterministic: bool
whether to iterate over the dataset in order, or randomly shuffle the
data for each epoch
pad_batches: bool
whether to pad each batch up to this model's preferred batch size
Returns
-------
a generator that iterates batches, each represented as a tuple of lists:
([inputs], [outputs], [weights])
Here, [inputs] is list of graphs.
"""
for epoch in range(epochs):
for (X_b, y_b, w_b,
ids_b) in dataset.iterbatches(batch_size=self.batch_size,
deterministic=deterministic,
pad_batches=pad_batches):
pyg_graphs_list: List = []
# maximum number of incoming bonds in the batch
max_num_bonds: int = 1
for graph in X_b:
# generate concatenated feature vector and mappings
mapper: _MapperDMPNN = _MapperDMPNN(graph)
pyg_graph: _ModData = self._to_pyg_graph(mapper.values)
max_num_bonds = max(
max_num_bonds,
pyg_graph['atom_to_incoming_bonds'].shape[1])
pyg_graphs_list.append(pyg_graph)
# pad all mappings to maximum number of incoming bonds in the batch
for graph in pyg_graphs_list:
required_padding: int = max_num_bonds - graph[
'atom_to_incoming_bonds'].shape[1]
graph['atom_to_incoming_bonds'] = nn.functional.pad(
graph['atom_to_incoming_bonds'],
(0, required_padding, 0, 0),
mode='constant',
value=-1)
graph['mapping'] = nn.functional.pad(
graph['mapping'], (0, required_padding, 0, 0),
mode='constant',
value=-1)
yield (pyg_graphs_list, [y_b], [w_b])
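# Hedged sketch of the padding step above (toy shapes, not from the source): if one
# graph in a batch allows at most 3 incoming bonds per atom and another allows 2,
# the second graph's `atom_to_incoming_bonds` and `mapping` tensors get one extra
# column of -1, e.g.
#
#   padded = nn.functional.pad(t, (0, 1, 0, 0), mode='constant', value=-1)  # (N, 2) -> (N, 3)
#
# so every graph shares the same column count before Batch.from_data_list is called
# in _prepare_batch.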
<file_sep>"""
BBBC Dataset loader.
This file contains image loaders for the BBBC dataset collection (https://data.broadinstitute.org/bbbc/image_sets.html).
"""
import os
import deepchem as dc
from deepchem.molnet.load_function.molnet_loader import TransformerGenerator, _MolnetLoader
from deepchem.data import Dataset
from typing import List, Optional, Tuple, Union
BBBC1_IMAGE_URL = 'https://data.broadinstitute.org/bbbc/BBBC001/BBBC001_v1_images_tif.zip'
BBBC1_LABEL_URL = 'https://data.broadinstitute.org/bbbc/BBBC001/BBBC001_v1_counts.txt'
BBBC1_TASKS = ["cell-count"]
BBBC2_IMAGE_URL = 'https://data.broadinstitute.org/bbbc/BBBC002/BBBC002_v1_images.zip'
BBBC2_LABEL_URL = 'https://data.broadinstitute.org/bbbc/BBBC002/BBBC002_v1_counts.txt'
BBBC2_TASKS = ["cell-count"]
class _BBBC001Loader(_MolnetLoader):
def create_dataset(self) -> Dataset:
dataset_file = os.path.join(self.data_dir, "BBBC001_v1_images_tif.zip")
labels_file = os.path.join(self.data_dir, "BBBC001_v1_counts.txt")
if not os.path.exists(dataset_file):
dc.utils.data_utils.download_url(url=BBBC1_IMAGE_URL,
dest_dir=self.data_dir)
if not os.path.exists(labels_file):
dc.utils.data_utils.download_url(url=BBBC1_LABEL_URL,
dest_dir=self.data_dir)
loader = dc.data.ImageLoader()
return loader.create_dataset(dataset_file, in_memory=False)
def load_bbbc001(
splitter: Union[dc.splits.Splitter, str, None] = 'index',
transformers: List[Union[TransformerGenerator, str]] = [],
reload: bool = True,
data_dir: Optional[str] = None,
save_dir: Optional[str] = None,
**kwargs
) -> Tuple[List[str], Tuple[Dataset, ...], List[dc.trans.Transformer]]:
"""Load BBBC001 dataset
This dataset contains 6 images of human HT29 colon cancer cells. The task is
    to learn to predict the cell counts in these images. This dataset is too small
    to train algorithms on, but it can serve as a good test dataset.
https://data.broadinstitute.org/bbbc/BBBC001/
Parameters
----------
splitter: Splitter or str
the splitter to use for splitting the data into training, validation, and
test sets. Alternatively you can pass one of the names from
dc.molnet.splitters as a shortcut. If this is None, all the data
will be included in a single dataset.
transformers: list of TransformerGenerators or strings
the Transformers to apply to the data. Each one is specified by a
TransformerGenerator or, as a shortcut, one of the names from
dc.molnet.transformers.
reload: bool
if True, the first call for a particular featurizer and splitter will cache
the datasets to disk, and subsequent calls will reload the cached datasets.
data_dir: str
a directory to save the raw data in
save_dir: str
a directory to save the dataset in
"""
featurizer = dc.feat.UserDefinedFeaturizer([]) # Not actually used
loader = _BBBC001Loader(featurizer, splitter, transformers, BBBC1_TASKS,
data_dir, save_dir, **kwargs)
return loader.load_dataset('bbbc001', reload)
class _BBBC002Loader(_MolnetLoader):
def create_dataset(self) -> Dataset:
dataset_file = os.path.join(self.data_dir, "BBBC002_v1_images.zip")
        labels_file = os.path.join(self.data_dir, "BBBC002_v1_counts.txt")
if not os.path.exists(dataset_file):
dc.utils.data_utils.download_url(url=BBBC2_IMAGE_URL,
dest_dir=self.data_dir)
if not os.path.exists(labels_file):
dc.utils.data_utils.download_url(url=BBBC2_LABEL_URL,
dest_dir=self.data_dir)
loader = dc.data.ImageLoader()
return loader.create_dataset(dataset_file, in_memory=False)
def load_bbbc002(
splitter: Union[dc.splits.Splitter, str, None] = 'index',
transformers: List[Union[TransformerGenerator, str]] = [],
reload: bool = True,
data_dir: Optional[str] = None,
save_dir: Optional[str] = None,
**kwargs
) -> Tuple[List[str], Tuple[Dataset, ...], List[dc.trans.Transformer]]:
"""Load BBBC002 dataset
    This dataset contains data corresponding to 5 samples of Drosophila Kc167
cells. There are 10 fields of view for each sample, each an image of size
512x512. Ground truth labels contain cell counts for this dataset. Full
details about this dataset are present at
https://data.broadinstitute.org/bbbc/BBBC002/.
Parameters
----------
splitter: Splitter or str
the splitter to use for splitting the data into training, validation, and
test sets. Alternatively you can pass one of the names from
dc.molnet.splitters as a shortcut. If this is None, all the data
will be included in a single dataset.
transformers: list of TransformerGenerators or strings
the Transformers to apply to the data. Each one is specified by a
TransformerGenerator or, as a shortcut, one of the names from
dc.molnet.transformers.
reload: bool
if True, the first call for a particular featurizer and splitter will cache
the datasets to disk, and subsequent calls will reload the cached datasets.
data_dir: str
a directory to save the raw data in
save_dir: str
a directory to save the dataset in
"""
featurizer = dc.feat.UserDefinedFeaturizer([]) # Not actually used
loader = _BBBC002Loader(featurizer, splitter, transformers, BBBC2_TASKS,
data_dir, save_dir, **kwargs)
return loader.load_dataset('bbbc002', reload)
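# Hedged usage sketch (argument values assumed): both loaders follow the MolNet
# convention of returning the task names, the split datasets and the transformers:
#
#   tasks, (train, valid, test), transformers = load_bbbc002(splitter='index')
#   images = train.X  # images read by dc.data.ImageLoader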
<file_sep>"""
Gathers all datasets in one place for convenient imports
"""
# flake8: noqa
# TODO(rbharath): Get rid of * import
from deepchem.data.datasets import pad_features
from deepchem.data.datasets import pad_batch
from deepchem.data.datasets import Dataset
from deepchem.data.datasets import NumpyDataset
from deepchem.data.datasets import DiskDataset
from deepchem.data.datasets import ImageDataset
from deepchem.data.datasets import sparsify_features
from deepchem.data.datasets import densify_features
from deepchem.data.supports import *
from deepchem.data.data_loader import DataLoader
from deepchem.data.data_loader import CSVLoader
from deepchem.data.data_loader import UserCSVLoader
from deepchem.data.data_loader import JsonLoader
from deepchem.data.data_loader import SDFLoader
from deepchem.data.data_loader import FASTALoader
from deepchem.data.data_loader import ImageLoader
from deepchem.data.data_loader import InMemoryLoader
<file_sep>"""
Utility Functions for computing features on batch.
"""
import numpy as np
def batch_coulomb_matrix_features(X_b: np.ndarray,
                                  distance_max: float = 18,
                                  distance_min: float = -1,
n_distance: int = 100):
"""Computes the values for different Feature on given batch.
It works as a helper function to coulomb matrix.
This function takes in a batch of Molecules represented as Coulomb Matrix.
It proceeds as follows:
    - It calculates the number of atoms per molecule by counting all the non-zero elements (atomic numbers) of every\
      molecule layer of the matrix along one dimension.
    - The Gaussian distance is calculated using the Euclidean distance between the Cartesian coordinates of two atoms.\
      The distance value is then passed through a Gaussian function, which transforms it into a continuous value.
    - Then, using the number of atoms per molecule, it recovers the atomic numbers (nuclear charges) by looping over each molecule layer in the Coulomb matrix\
      and taking the `2.4` root of the diagonal of `2X` of that layer, undoing the Coulomb matrix equation.
    - Atom_membership is assigned as a common repeating integer for all the atoms of a specific molecule.
    - Distance membership encodes spatial information, assigning closer values to atoms that are in that specific molecule.\
      Each initial distance is offset by a start value that is unique to its molecule.
Models Used in:
* DTNN
Parameters
----------
X_b: np.ndarray
It is a 3d Matrix containing information of each the atom's ionic interaction with other atoms in the molecule.
distance_min: float (default -1)
minimum distance of atom pairs (in Angstrom)
    distance_max: float (default 18)
maximum distance of atom pairs (in Angstrom)
n_distance: int (default 100)
granularity of distance matrix
step size will be (distance_max-distance_min)/n_distance
Returns
-------
atom_number: np.ndarray
Atom numbers are assigned to each atom based on their atomic properties.
The atomic numbers are derived from the periodic table of elements.
For example, hydrogen -> 1, carbon -> 6, and oxygen -> 8.
gaussian_dist: np.ndarray
Gaussian distance refers to the method of representing the pairwise distances between atoms in a molecule using Gaussian functions.
The Gaussian distance is calculated using the Euclidean distance between the Cartesian coordinates of two atoms.
The distance value is then passed through a Gaussian function, which transforms it into a continuous value.
atom_mem: np.ndarray
        Atom membership maps every atom in the batch to the index of the molecule it belongs to.
        It allows the model to aggregate atom-level contributions back to their parent molecules.
    dist_mem_i: np.ndarray
        Distance membership i maps each entry of the flattened pairwise-distance matrix to the global index of
        the first atom in the pair (offset by a per-molecule start value so indices are unique across the batch).
    dist_mem_j: np.ndarray
        Distance membership j maps each entry of the flattened pairwise-distance matrix to the global index of
        the second atom in the pair, complementing distance membership i.
        Together they let the model scatter pairwise distance features back onto the corresponding atoms.
Examples
--------
>>> import os
>>> import deepchem as dc
>>> current_dir = os.path.dirname(os.path.abspath(__file__))
>>> dataset_file = os.path.join(current_dir, 'test/assets/qm9_mini.sdf')
>>> TASKS = ["alpha", "homo"]
>>> loader = dc.data.SDFLoader(tasks=TASKS,
... featurizer=dc.feat.CoulombMatrix(29),
... sanitize=True)
>>> data = loader.create_dataset(dataset_file, shard_size=100)
>>> inputs = dc.utils.batch_utils.batch_coulomb_matrix_features(data.X)
References
----------
.. [1] <NAME>, et al. "Learning invariant representations of
molecules for atomization energy prediction." Advances in neural information
processing systems. 2012.
"""
distance = []
atom_membership = []
distance_membership_i = []
distance_membership_j = []
# Calculation of Step Size and steps
step_size = (distance_max - distance_min) / n_distance
steps = np.array([distance_min + i * step_size for i in range(n_distance)])
steps = np.expand_dims(steps, 0)
# Number of atoms per molecule is calculated by counting all the non zero elements(numbers) of every molecule.
num_atoms = list(map(sum, X_b.astype(bool)[:, :, 0]))
# It loops over the molecules in the Coulomb matrix and takes the "2.4" root of the diagonal of "2X" of each molecule's representation.
atom_number = [
np.round(
np.power(2 * np.diag(X_b[i, :num_atoms[i], :num_atoms[i]]),
1 / 2.4)).astype(int) for i in range(len(num_atoms))
]
start = 0
for im, molecule in enumerate(atom_number):
distance_matrix = np.outer(
molecule, molecule) / X_b[im, :num_atoms[im], :num_atoms[im]]
np.fill_diagonal(distance_matrix, -100)
distance.append(np.expand_dims(distance_matrix.flatten(), 1))
atom_membership.append([im] * num_atoms[im])
membership = np.array([np.arange(num_atoms[im])] * num_atoms[im])
membership_i = membership.flatten(order='F')
membership_j = membership.flatten()
distance_membership_i.append(membership_i + start)
distance_membership_j.append(membership_j + start)
start = start + num_atoms[im]
atom_number = np.concatenate(atom_number).astype(np.int32)
distance = np.concatenate(distance, axis=0)
# Calculates the Gaussian Distance by passing distance by a gaussian function.
gaussian_dist = np.exp(-np.square(distance - steps) / (2 * step_size**2))
gaussian_dist = gaussian_dist.astype(np.float64)
atom_mem = np.concatenate(atom_membership).astype(np.int64)
dist_mem_i = np.concatenate(distance_membership_i).astype(np.int64)
dist_mem_j = np.concatenate(distance_membership_j).astype(np.int64)
features = [atom_number, gaussian_dist, atom_mem, dist_mem_i, dist_mem_j]
return features
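# Hedged numeric sketch of the Gaussian expansion above (toy numbers, not from the
# source): with distance_min=-1, distance_max=18 and n_distance=100, step_size is
# 0.19 and steps = [-1.0, -0.81, ..., 17.81]. A single pairwise distance d = 1.5 is
# then expanded into the 100-dimensional vector exp(-(1.5 - steps)**2 / (2 * 0.19**2)),
# which peaks at the grid point closest to 1.5, i.e. a smooth, soft one-hot encoding of d.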
<file_sep>"""
Test that Layers work as advertised.
"""
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
__author__ = "<NAME> and <NAME>"
__copyright__ = "Copyright 2016, Stanford University"
__license__ = "MIT"
import numpy as np
import unittest
import deepchem as dc
from tensorflow.python.framework import test_util
class TestLayers(test_util.TensorFlowTestCase):
"""
Test Layers.
The tests in this class only do basic sanity checks to make sure that
produced tensors have the right shape.
"""
def setUp(self):
super(TestLayers, self).setUp()
self.root = '/tmp'
<file_sep>import deepchem as dc
import numpy as np
import pytest
from deepchem.models.optimizers import Adam
try:
import tensorflow as tf
class RouletteEnvironment(dc.rl.Environment):
def __init__(self):
super(RouletteEnvironment, self).__init__([(1,)], 38)
self._state = [np.array([0])]
def step(self, action):
if action == 37:
self._terminated = True # Walk away.
return 0.0
wheel = np.random.randint(37)
if wheel == 0:
if action == 0:
return 35.0
return -1.0
if action != 0 and wheel % 2 == action % 2:
return 1.0
return -1.0
def reset(self):
self._terminated = False
# This policy just learns a constant probability for each action, and a constant for the value.
class TestPolicy(dc.rl.Policy):
def __init__(self, env):
super(TestPolicy, self).__init__(['action_prob', 'value'])
self.env = env
def create_model(self, **kwargs):
env = self.env
class TestModel(tf.keras.Model):
def __init__(self):
super(TestModel, self).__init__(**kwargs)
self.action = tf.Variable(np.ones(env.n_actions,
np.float32))
self.value = tf.Variable([0.0], tf.float32)
def call(self, inputs, **kwargs):
prob = tf.nn.softmax(
tf.reshape(self.action, (-1, env.n_actions)))
return (prob, self.value)
return TestModel()
has_tensorflow = True
except:
has_tensorflow = False
@pytest.mark.tensorflow
def test_a2c_reload():
env = RouletteEnvironment()
policy = TestPolicy(env)
a2c = dc.rl.A2C(env,
policy,
max_rollout_length=20,
optimizer=Adam(learning_rate=0.001))
a2c.fit(1000)
action_prob, value = a2c.predict([[0]])
new_a2c = dc.rl.A2C(env, policy, model_dir=a2c._model.model_dir)
new_a2c.restore()
action_prob2, value2 = new_a2c.predict([[0]])
assert np.all(action_prob == action_prob2)
assert value == value2
@pytest.mark.tensorflow
def test_ppo_reload():
env = RouletteEnvironment()
policy = TestPolicy(env)
ppo = dc.rl.PPO(env,
policy,
max_rollout_length=20,
optimization_epochs=8,
optimizer=Adam(learning_rate=0.003))
ppo.fit(1000)
action_prob, value = ppo.predict([[0]])
new_ppo = dc.rl.PPO(env, policy, model_dir=ppo._model.model_dir)
new_ppo.restore()
action_prob2, value2 = new_ppo.predict([[0]])
assert np.all(action_prob == action_prob2)
assert value == value2
<file_sep>"""
Implements a multitask graph convolutional classifier.
"""
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
__author__ = "<NAME> and <NAME>"
__copyright__ = "Copyright 2016, Stanford University"
__license__ = "MIT"
import warnings
import os
import sys
import numpy as np
import tensorflow as tf
import sklearn.metrics
import tempfile
from deepchem.data import pad_features
from deepchem.utils.save import log
from deepchem.models import Model
from deepchem.nn.copy import Input
from deepchem.nn.copy import Dense
from deepchem.nn import model_ops
# TODO(rbharath): Find a way to get rid of this import?
from deepchem.models.tf_new_models.graph_topology import merge_dicts
def get_loss_fn(final_loss):
# Obtain appropriate loss function
if final_loss == 'L2':
def loss_fn(x, t):
diff = tf.subtract(x, t)
return tf.reduce_sum(tf.square(diff), 0)
elif final_loss == 'weighted_L2':
def loss_fn(x, t, w):
diff = tf.subtract(x, t)
weighted_diff = tf.multiply(diff, w)
return tf.reduce_sum(tf.square(weighted_diff), 0)
elif final_loss == 'L1':
def loss_fn(x, t):
diff = tf.subtract(x, t)
return tf.reduce_sum(tf.abs(diff), 0)
elif final_loss == 'huber':
        # Note: `huber_d` (the Huber delta) was previously undefined in this scope;
        # assume a default of 1.0 here.
        huber_d = 1.0

        def loss_fn(x, t):
            diff = tf.subtract(x, t)
            return tf.reduce_sum(
                tf.minimum(0.5 * tf.square(diff),
                           huber_d * (tf.abs(diff) - 0.5 * huber_d)), 0)
elif final_loss == 'cross_entropy':
def loss_fn(x, t, w):
costs = tf.nn.sigmoid_cross_entropy_with_logits(logits=x, labels=t)
weighted_costs = tf.multiply(costs, w)
return tf.reduce_sum(weighted_costs)
elif final_loss == 'hinge':
def loss_fn(x, t, w):
t = tf.multiply(2.0, t) - 1
costs = tf.maximum(0.0, 1.0 - tf.multiply(t, x))
weighted_costs = tf.multiply(costs, w)
return tf.reduce_sum(weighted_costs)
return loss_fn
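# Hedged usage sketch (shapes assumed): the weighted variants take per-task logits,
# targets and weights and reduce them to a per-task loss, e.g.
#
#   loss_fn = get_loss_fn('cross_entropy')
#   task_loss = loss_fn(task_logits, one_hot_labels, task_weights)  # scalar per task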
class MultitaskGraphClassifier(Model):
def __init__(self,
model,
n_tasks,
n_feat,
logdir=None,
batch_size=50,
final_loss='cross_entropy',
learning_rate=.001,
optimizer_type="adam",
learning_rate_decay_time=1000,
beta1=.9,
beta2=.999,
pad_batches=True,
verbose=True):
warnings.warn(
"MultitaskGraphClassifier is deprecated. "
"Will be removed in DeepChem 1.4.", DeprecationWarning)
super(MultitaskGraphClassifier, self).__init__(
model_dir=logdir, verbose=verbose)
self.n_tasks = n_tasks
self.final_loss = final_loss
self.model = model
self.sess = tf.Session(graph=self.model.graph)
with self.model.graph.as_default():
# Extract model info
self.batch_size = batch_size
self.pad_batches = pad_batches
# Get graph topology for x
self.graph_topology = self.model.get_graph_topology()
self.feat_dim = n_feat
# Raw logit outputs
self.logits = self.build()
self.loss_op = self.add_training_loss(self.final_loss, self.logits)
self.outputs = self.add_softmax(self.logits)
self.learning_rate = learning_rate
self.T = learning_rate_decay_time
self.optimizer_type = optimizer_type
self.optimizer_beta1 = beta1
self.optimizer_beta2 = beta2
# Set epsilon
self.epsilon = 1e-7
self.add_optimizer()
# Initialize
self.init_fn = tf.global_variables_initializer()
self.sess.run(self.init_fn)
# Path to save checkpoint files, which matches the
# replicated supervisor's default path.
self._save_path = os.path.join(self.model_dir, 'model.ckpt')
def build(self):
# Create target inputs
self.label_placeholder = tf.placeholder(
dtype='bool', shape=(None, self.n_tasks), name="label_placeholder")
self.weight_placeholder = tf.placeholder(
        dtype='float32', shape=(None, self.n_tasks), name="weight_placeholder")
feat = self.model.return_outputs()
output = model_ops.multitask_logits(feat, self.n_tasks)
return output
def add_optimizer(self):
if self.optimizer_type == "adam":
self.optimizer = tf.train.AdamOptimizer(
self.learning_rate,
beta1=self.optimizer_beta1,
beta2=self.optimizer_beta2,
epsilon=self.epsilon)
else:
raise ValueError("Optimizer type not recognized.")
# Get train function
self.train_op = self.optimizer.minimize(self.loss_op)
def construct_feed_dict(self, X_b, y_b=None, w_b=None, training=True):
"""Get initial information about task normalization"""
# TODO(rbharath): I believe this is total amount of data
n_samples = len(X_b)
if y_b is None:
y_b = np.zeros((n_samples, self.n_tasks))
if w_b is None:
w_b = np.zeros((n_samples, self.n_tasks))
targets_dict = {self.label_placeholder: y_b, self.weight_placeholder: w_b}
# Get graph information
atoms_dict = self.graph_topology.batch_to_feed_dict(X_b)
# TODO (hraut->rhbarath): num_datapoints should be a vector, with ith element being
# the number of labeled data points in target_i. This is to normalize each task
# num_dat_dict = {self.num_datapoints_placeholder : self.}
# Get other optimizer information
# TODO(rbharath): Figure out how to handle phase appropriately
feed_dict = merge_dicts([targets_dict, atoms_dict])
return feed_dict
def add_training_loss(self, final_loss, logits):
"""Computes loss using logits."""
loss_fn = get_loss_fn(final_loss) # Get loss function
task_losses = []
# label_placeholder of shape (batch_size, n_tasks). Split into n_tasks
# tensors of shape (batch_size,)
task_labels = tf.split(
axis=1, num_or_size_splits=self.n_tasks, value=self.label_placeholder)
task_weights = tf.split(
axis=1, num_or_size_splits=self.n_tasks, value=self.weight_placeholder)
for task in range(self.n_tasks):
task_label_vector = task_labels[task]
task_weight_vector = task_weights[task]
# Convert the labels into one-hot vector encodings.
one_hot_labels = tf.cast(
tf.one_hot(tf.cast(tf.squeeze(task_label_vector), tf.int32), 2),
tf.float32)
# Since we use tf.nn.softmax_cross_entropy_with_logits note that we pass in
# un-softmaxed logits rather than softmax outputs.
task_loss = loss_fn(logits[task], one_hot_labels, task_weight_vector)
task_losses.append(task_loss)
# It's ok to divide by just the batch_size rather than the number of nonzero
# examples (effect averages out)
total_loss = tf.add_n(task_losses)
total_loss = tf.math.divide(total_loss, self.batch_size)
return total_loss
def add_softmax(self, outputs):
"""Replace logits with softmax outputs."""
softmax = []
with tf.name_scope('inference'):
for i, logits in enumerate(outputs):
softmax.append(tf.nn.softmax(logits, name='softmax_%d' % i))
return softmax
def fit(self,
dataset,
nb_epoch=10,
max_checkpoints_to_keep=5,
log_every_N_batches=50,
checkpoint_interval=10,
**kwargs):
# Perform the optimization
log("Training for %d epochs" % nb_epoch, self.verbose)
# TODO(rbharath): Disabling saving for now to try to debug.
for epoch in range(nb_epoch):
log("Starting epoch %d" % epoch, self.verbose)
for batch_num, (X_b, y_b, w_b, ids_b) in enumerate(
dataset.iterbatches(self.batch_size, pad_batches=self.pad_batches)):
if batch_num % log_every_N_batches == 0:
log("On batch %d" % batch_num, self.verbose)
self.sess.run(
self.train_op, feed_dict=self.construct_feed_dict(X_b, y_b, w_b))
def save(self):
"""
No-op since this model doesn't currently support saving...
"""
pass
def predict(self, dataset, transformers=[], **kwargs):
"""Wraps predict to set batch_size/padding."""
return super(MultitaskGraphClassifier, self).predict(
dataset, transformers, batch_size=self.batch_size)
def predict_proba(self, dataset, transformers=[], n_classes=2, **kwargs):
"""Wraps predict_proba to set batch_size/padding."""
return super(MultitaskGraphClassifier, self).predict_proba(
dataset, transformers, n_classes=n_classes, batch_size=self.batch_size)
def predict_on_batch(self, X):
"""Return model output for the provided input.
"""
if self.pad_batches:
X = pad_features(self.batch_size, X)
# run eval data through the model
n_tasks = self.n_tasks
with self.sess.as_default():
feed_dict = self.construct_feed_dict(X)
# Shape (n_samples, n_tasks)
batch_outputs = self.sess.run(self.outputs, feed_dict=feed_dict)
n_samples = len(X)
outputs = np.zeros((n_samples, self.n_tasks))
for task, output in enumerate(batch_outputs):
outputs[:, task] = np.argmax(output, axis=1)
return outputs
def predict_proba_on_batch(self, X, n_classes=2):
"""Returns class probabilities on batch"""
# run eval data through the model
if self.pad_batches:
X = pad_features(self.batch_size, X)
n_tasks = self.n_tasks
with self.sess.as_default():
feed_dict = self.construct_feed_dict(X)
batch_outputs = self.sess.run(self.outputs, feed_dict=feed_dict)
n_samples = len(X)
outputs = np.zeros((n_samples, self.n_tasks, n_classes))
for task, output in enumerate(batch_outputs):
outputs[:, task, :] = output
return outputs
def get_num_tasks(self):
"""Needed to use Model.predict() from superclass."""
return self.n_tasks
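# Hedged shape sketch for the prediction helpers above: predict_on_batch returns an
# array of shape (n_samples, n_tasks) holding argmax class labels, while
# predict_proba_on_batch returns class probabilities of shape
# (n_samples, n_tasks, n_classes).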
<file_sep>import os
import pytest
import deepchem as dc
import pandas as pd
@pytest.fixture
def grover_graph_attributes():
from deepchem.feat.graph_data import BatchGraphData
from deepchem.utils.grover import extract_grover_attributes
smiles = ['CC', 'CCC', 'CC(=O)C']
fg = dc.feat.CircularFingerprint()
featurizer = dc.feat.GroverFeaturizer(features_generator=fg)
graphs = featurizer.featurize(smiles)
batched_graph = BatchGraphData(graphs)
attributes = extract_grover_attributes(batched_graph)
return attributes
@pytest.fixture
def smiles_regression_dataset(tmpdir):
smiles = [
"CCN(CCSC)C(=O)N[C@@](C)(CC)C(F)(F)F",
"CC1(C)CN(C(=O)Nc2cc3ccccc3nn2)C[C@@]2(CCOC2)O1"
]
labels = [3.112, 2.432]
df = pd.DataFrame(list(zip(smiles, labels)), columns=["smiles", "task1"])
filepath = os.path.join(tmpdir, 'smiles.csv')
df.to_csv(filepath)
loader = dc.data.CSVLoader(["task1"],
feature_field="smiles",
featurizer=dc.feat.DummyFeaturizer())
dataset = loader.create_dataset(filepath)
return dataset
@pytest.fixture
def smiles_multitask_regression_dataset():
cwd = os.path.dirname(os.path.abspath(__file__))
input_file = os.path.join(cwd,
'../../tests/assets/multitask_regression.csv')
loader = dc.data.CSVLoader(tasks=['task0', 'task1'],
feature_field='smiles',
featurizer=dc.feat.DummyFeaturizer())
dataset = loader.create_dataset(input_file)
return dataset
<file_sep># -*- coding: utf-8 -*-
"""
Created on Mon Mar 13 22:31:24 2017
@author: <NAME>
"""
import torch
import numpy as np
from deepchem.metrics import from_one_hot
from torch_model import TorchMultitaskModel
class TorchMultitaskClassification(TorchMultitaskModel):
def __init__(self, n_tasks, n_features, n_classes=2, **kwargs):
"""Constructs the computational graph.
This function constructs the computational graph for the model. It relies
subclassed methods (build/cost) to construct specific graphs.
Parameters
----------
n_tasks: int
Number of tasks
n_features: int
Number of features.
n_classes: int
Number of classes if this is for classification.
"""
# Save hyperparameters
self.n_tasks = n_tasks
self.n_features = n_features
self.n_classes = n_classes
super(TorchMultitaskClassification, self).__init__(**kwargs)
def build(self):
"""Constructs the graph architecture as specified in its config.
This method creates the following Placeholders:
mol_features: Molecule descriptor (e.g. fingerprint) tensor with shape
batch_size x n_features.
"""
layer_sizes = self.layer_sizes
weight_init_stddevs = self.weight_init_stddevs
bias_init_consts = self.bias_init_consts
dropouts = self.dropouts
lengths_set = {
len(layer_sizes),
len(weight_init_stddevs),
len(bias_init_consts),
len(dropouts),
}
assert len(lengths_set) == 1, 'All layer params must have same length.'
n_layers = lengths_set.pop()
assert n_layers > 0, 'Must have some layers defined.'
prev_layer_size = self.n_features
self.W_list = []
self.b_list = []
for i in range(n_layers):
W_init = np.random.normal(0, weight_init_stddevs[i],
(prev_layer_size, layer_sizes[i]))
W_init = torch.cuda.FloatTensor(W_init)
self.W_list.append(torch.autograd.Variable(W_init, requires_grad=True))
b_init = np.full((layer_sizes[i],), bias_init_consts[i])
b_init = torch.cuda.FloatTensor(b_init)
self.b_list.append(torch.autograd.Variable(b_init, requires_grad=True))
prev_layer_size = layer_sizes[i]
self.task_W_list = []
self.task_b_list = []
for i in range(self.n_tasks):
W_init = np.random.normal(0, weight_init_stddevs[-1],
(prev_layer_size, self.n_classes))
W_init = torch.cuda.FloatTensor(W_init)
self.task_W_list.append(
torch.autograd.Variable(W_init, requires_grad=True))
b_init = np.full((self.n_classes,), bias_init_consts[-1])
b_init = torch.cuda.FloatTensor(b_init)
self.task_b_list.append(
torch.autograd.Variable(b_init, requires_grad=True))
self.trainables = self.W_list + self.b_list + self.task_W_list + self.task_b_list
self.regularizaed_variables = self.W_list + self.task_W_list
def forward(self, X, training=False):
for i, W in enumerate(self.W_list):
X = X.mm(W)
X += self.b_list[i].unsqueeze(0).expand_as(X)
X = torch.nn.ReLU()(X)
if training:
X = torch.nn.Dropout(p=self.dropouts[i])(X)
outputs = []
for i, W in enumerate(self.task_W_list):
output = X.mm(W)
output += self.task_b_list[i].unsqueeze(0).expand_as(output)
if not training:
output = torch.nn.functional.softmax(output)
outputs.append(output)
return outputs
def cost(self, logit, label, weight):
loss = []
for i in range(logit.size()[0]):
loss.append(
torch.nn.functional.cross_entropy(logit[i, :], label[i].long()).mul(
weight[i]))
loss = torch.cat(loss).mean()
return loss
def predict_on_batch(self, X_batch):
X_batch = torch.autograd.Variable(torch.cuda.FloatTensor(X_batch))
outputs = self.forward(X_batch, training=False)
y_pred_batch = torch.stack(outputs, 1).data.cpu().numpy()[:]
y_pred_batch = from_one_hot(y_pred_batch, 2)
return y_pred_batch
def predict_proba_on_batch(self, X_batch):
X_batch = torch.autograd.Variable(torch.cuda.FloatTensor(X_batch))
outputs = self.forward(X_batch, training=False)
y_pred_batch = torch.stack(outputs, 1).data.cpu().numpy()[:]
return y_pred_batch
<file_sep>echo "Pulling HOPV dataset from deepchem"
wget http://deepchem.io.s3-website-us-west-1.amazonaws.com/datasets/hopv.tar.gz
echo "Extracting HOPV dataset"
tar -zxvf hopv.tar.gz
<file_sep>"""
Atomic coordinate featurizer.
"""
import numpy as np
from deepchem.feat.base_classes import MolecularFeaturizer
from deepchem.utils.typing import RDKitMol
class AtomicCoordinates(MolecularFeaturizer):
"""Calculate atomic coordinates.
Examples
--------
>>> import deepchem as dc
>>> from rdkit import Chem
>>> mol = Chem.MolFromSmiles('C1C=CC=CC=1')
>>> n_atoms = len(mol.GetAtoms())
>>> n_atoms
6
>>> featurizer = dc.feat.AtomicCoordinates(use_bohr=False)
>>> features = featurizer.featurize([mol])
>>> type(features[0])
<class 'numpy.ndarray'>
>>> features[0].shape # (n_atoms, 3)
(6, 3)
Note
----
This class requires RDKit to be installed.
"""
def __init__(self, use_bohr: bool = False):
"""
Parameters
----------
use_bohr: bool, optional (default False)
Whether to use bohr or angstrom as a coordinate unit.
"""
self.use_bohr = use_bohr
def _featurize(self, datapoint: RDKitMol, **kwargs) -> np.ndarray:
"""Calculate atomic coordinates.
Parameters
----------
datapoint: rdkit.Chem.rdchem.Mol
RDKit Mol object
Returns
-------
np.ndarray
A numpy array of atomic coordinates. The shape is `(n_atoms, 3)`.
"""
try:
from rdkit import Chem
from rdkit.Chem import AllChem
except ModuleNotFoundError:
raise ImportError("This class requires RDKit to be installed.")
if 'mol' in kwargs:
datapoint = kwargs.get("mol")
raise DeprecationWarning(
'Mol is being phased out as a parameter, please pass "datapoint" instead.'
)
# Check whether num_confs >=1 or not
num_confs = len(datapoint.GetConformers())
if num_confs == 0:
datapoint = Chem.AddHs(datapoint)
AllChem.EmbedMolecule(datapoint, AllChem.ETKDG())
datapoint = Chem.RemoveHs(datapoint)
N = datapoint.GetNumAtoms()
coords = np.zeros((N, 3))
# RDKit stores atomic coordinates in Angstrom. Atomic unit of length is the
# bohr (1 bohr = 0.529177 Angstrom). Converting units makes gradient calculation
# consistent with most QM software packages.
if self.use_bohr:
coords_list = [
datapoint.GetConformer(0).GetAtomPosition(i).__idiv__(
0.52917721092) for i in range(N)
]
else:
coords_list = [
datapoint.GetConformer(0).GetAtomPosition(i) for i in range(N)
]
for atom in range(N):
coords[atom, 0] = coords_list[atom].x
coords[atom, 1] = coords_list[atom].y
coords[atom, 2] = coords_list[atom].z
return coords
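# Hedged numeric sketch of the unit handling above: RDKit conformer coordinates are
# stored in Angstrom, and with use_bohr=True each coordinate is divided by
# 0.52917721092, so an atom at x = 1.0 Angstrom is reported at roughly x = 1.8897 bohr.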
<file_sep>#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Mon Sep 10 06:12:11 2018
@author: zqwu
"""
import deepchem as dc
import numpy as np
import pandas as pd
import os
import logging
from model import DRModel, DRAccuracy, ConfusionMatrix, QuadWeightedKappa
from data import load_images_DR
train, valid, test = load_images_DR(split='random', seed=123)
# Define and build model
model = DRModel(
n_init_kernel=32,
batch_size=32,
learning_rate=1e-5,
augment=True,
model_dir='./test_model')
if not os.path.exists('./test_model'):
os.mkdir('test_model')
model.build()
#model.restore()
metrics = [
dc.metrics.Metric(DRAccuracy, mode='classification'),
dc.metrics.Metric(QuadWeightedKappa, mode='classification')
]
cm = [dc.metrics.Metric(ConfusionMatrix, mode='classification')]
logger = logging.getLogger('deepchem.models.tensorgraph.tensor_graph')
logger.setLevel(logging.DEBUG)
for i in range(10):
model.fit(train, nb_epoch=10, checkpoint_interval=3512)
model.evaluate(train, metrics)
model.evaluate(valid, metrics)
model.evaluate(valid, cm)
model.evaluate(test, metrics)
model.evaluate(test, cm)
<file_sep>import pytest
import tempfile
from os import path, remove
import numpy as np
from deepchem.feat import CGCNNFeaturizer
from deepchem.molnet import load_perovskite, load_mp_metallicity
from deepchem.metrics import Metric, mae_score, roc_auc_score
try:
import dgl # noqa
import torch # noqa
from deepchem.models import CGCNNModel
has_pytorch_and_dgl = True
except:
has_pytorch_and_dgl = False
@pytest.mark.torch
def test_cgcnn_regression():
# load datasets
current_dir = path.dirname(path.abspath(__file__))
config = {
"reload": False,
"featurizer": CGCNNFeaturizer(),
# disable transformer
"transformers": [],
"data_dir": path.join(current_dir, "assets")
}
tasks, datasets, transformers = load_perovskite(**config)
train, valid, test = datasets
n_tasks = len(tasks)
model = CGCNNModel(n_tasks=n_tasks,
mode='regression',
batch_size=4,
learning_rate=0.001)
# check train
model.fit(train, nb_epoch=20)
# check predict shape
valid_preds = model.predict_on_batch(valid.X)
assert valid_preds.shape == (2, n_tasks)
test_preds = model.predict(test)
assert test_preds.shape == (3, n_tasks)
# check overfit
regression_metric = Metric(mae_score, n_tasks=n_tasks)
scores = model.evaluate(train, [regression_metric], transformers)
assert scores[regression_metric.name] < 0.6
if path.exists(path.join(current_dir, 'perovskite.json')):
remove(path.join(current_dir, 'perovskite.json'))
@pytest.mark.torch
def test_cgcnn_classification():
# load datasets
current_dir = path.dirname(path.abspath(__file__))
config = {
"reload": False,
"featurizer": CGCNNFeaturizer(),
# disable transformer
"transformers": [],
"data_dir": path.join(current_dir, "assets")
}
tasks, datasets, transformers = load_mp_metallicity(**config)
train, valid, test = datasets
n_tasks = len(tasks)
n_classes = 2
model = CGCNNModel(n_tasks=n_tasks,
n_classes=n_classes,
mode='classification',
batch_size=4,
learning_rate=0.001)
# check train
model.fit(train, nb_epoch=20)
# check predict shape
valid_preds = model.predict_on_batch(valid.X)
assert valid_preds.shape == (2, n_classes)
test_preds = model.predict(test)
assert test_preds.shape == (3, n_classes)
# check overfit
classification_metric = Metric(roc_auc_score, n_tasks=n_tasks)
scores = model.evaluate(train, [classification_metric],
transformers,
n_classes=n_classes)
assert scores[classification_metric.name] > 0.8
if path.exists(path.join(current_dir, 'mp_is_metal.json')):
remove(path.join(current_dir, 'mp_is_metal.json'))
@pytest.mark.torch
def test_cgcnn_reload():
# load datasets
current_dir = path.dirname(path.abspath(__file__))
config = {
"reload": False,
"featurizer": CGCNNFeaturizer(),
# disable transformer
"transformers": [],
"data_dir": path.join(current_dir, "assets")
}
tasks, datasets, transformers = load_mp_metallicity(**config)
train, valid, test = datasets
n_tasks = len(tasks)
n_classes = 2
model_dir = tempfile.mkdtemp()
model = CGCNNModel(n_tasks=n_tasks,
n_classes=n_classes,
mode='classification',
model_dir=model_dir,
batch_size=4,
learning_rate=0.001)
# check train
model.fit(train, nb_epoch=20)
# check predict shape
valid_preds = model.predict_on_batch(valid.X)
assert valid_preds.shape == (2, n_classes)
test_preds = model.predict(test)
assert test_preds.shape == (3, n_classes)
# check overfit
classification_metric = Metric(roc_auc_score, n_tasks=n_tasks)
scores = model.evaluate(train, [classification_metric],
transformers,
n_classes=n_classes)
assert scores[classification_metric.name] > 0.8
# reload
reloaded_model = CGCNNModel(n_tasks=n_tasks,
n_classes=n_classes,
mode='classification',
model_dir=model_dir,
batch_size=4,
learning_rate=0.001)
reloaded_model.restore()
original_pred = model.predict(test)
reload_pred = reloaded_model.predict(test)
assert np.all(original_pred == reload_pred)
if path.exists(path.join(current_dir, 'mp_is_metal.json')):
remove(path.join(current_dir, 'mp_is_metal.json'))
<file_sep>import collections
import numpy as np
import six
import tensorflow as tf
from deepchem.data import NumpyDataset
from deepchem.feat.graph_features import ConvMolFeaturizer
from deepchem.feat.mol_graphs import ConvMol
from deepchem.metrics import to_one_hot
from deepchem.models.tensorgraph.graph_layers import WeaveGather, \
DTNNEmbedding, DTNNStep, DTNNGather, DAGLayer, \
DAGGather, DTNNExtract, MessagePassing, SetGather
from deepchem.models.tensorgraph.graph_layers import WeaveLayerFactory
from deepchem.models.tensorgraph.layers import Dense, SoftMax, \
SoftMaxCrossEntropy, GraphConv, BatchNorm, \
    GraphPool, GraphGather, WeightedError, Dropout, Stack, Flatten, GraphCNN, GraphCNNPool
from deepchem.models.tensorgraph.layers import L2Loss, Label, Weights, Feature
from deepchem.models.tensorgraph.tensor_graph import TensorGraph
from deepchem.trans import undo_transforms
class PetroskiSuchModel(TensorGraph):
"""
Model from Robust Spatial Filtering with Graph Convolutional Neural Networks
https://arxiv.org/abs/1703.00792
"""
def __init__(self,
n_tasks,
max_atoms=200,
dropout=0.0,
mode="classification",
**kwargs):
"""
Parameters
----------
n_tasks: int
Number of tasks
mode: str
Either "classification" or "regression"
"""
self.n_tasks = n_tasks
self.mode = mode
self.max_atoms = max_atoms
self.error_bars = True if 'error_bars' in kwargs and kwargs['error_bars'] else False
self.dropout = dropout
kwargs['use_queue'] = False
super(PetroskiSuchModel, self).__init__(**kwargs)
self.build_graph()
def build_graph(self):
self.vertex_features = Feature(shape=(None, self.max_atoms, 75))
self.adj_matrix = Feature(shape=(None, self.max_atoms, 1, self.max_atoms))
self.mask = Feature(shape=(None, self.max_atoms, 1))
gcnn1 = BatchNorm(
GraphCNN(
num_filters=64,
in_layers=[self.vertex_features, self.adj_matrix, self.mask]))
gcnn1 = Dropout(self.dropout, in_layers=gcnn1)
gcnn2 = BatchNorm(
GraphCNN(num_filters=64, in_layers=[gcnn1, self.adj_matrix, self.mask]))
gcnn2 = Dropout(self.dropout, in_layers=gcnn2)
gc_pool, adj_matrix = GraphCNNPool(
num_vertices=32, in_layers=[gcnn2, self.adj_matrix, self.mask])
gc_pool = BatchNorm(gc_pool)
gc_pool = Dropout(self.dropout, in_layers=gc_pool)
gcnn3 = BatchNorm(GraphCNN(num_filters=32, in_layers=[gc_pool, adj_matrix]))
gcnn3 = Dropout(self.dropout, in_layers=gcnn3)
gc_pool2, adj_matrix2 = GraphCNNPool(
num_vertices=8, in_layers=[gcnn3, adj_matrix])
gc_pool2 = BatchNorm(gc_pool2)
gc_pool2 = Dropout(self.dropout, in_layers=gc_pool2)
flattened = Flatten(in_layers=gc_pool2)
readout = Dense(
out_channels=256, activation_fn=tf.nn.relu, in_layers=flattened)
costs = []
self.my_labels = []
for task in range(self.n_tasks):
if self.mode == 'classification':
classification = Dense(
out_channels=2, activation_fn=None, in_layers=[readout])
softmax = SoftMax(in_layers=[classification])
self.add_output(softmax)
label = Label(shape=(None, 2))
self.my_labels.append(label)
cost = SoftMaxCrossEntropy(in_layers=[label, classification])
costs.append(cost)
if self.mode == 'regression':
regression = Dense(
out_channels=1, activation_fn=None, in_layers=[readout])
self.add_output(regression)
label = Label(shape=(None, 1))
self.my_labels.append(label)
cost = L2Loss(in_layers=[label, regression])
costs.append(cost)
if self.mode == "classification":
entropy = Stack(in_layers=costs, axis=-1)
elif self.mode == "regression":
entropy = Stack(in_layers=costs, axis=1)
self.my_task_weights = Weights(shape=(None, self.n_tasks))
loss = WeightedError(in_layers=[entropy, self.my_task_weights])
self.set_loss(loss)
def default_generator(self,
dataset,
epochs=1,
predict=False,
deterministic=True,
pad_batches=True):
for epoch in range(epochs):
if not predict:
print('Starting epoch %i' % epoch)
for ind, (X_b, y_b, w_b, ids_b) in enumerate(
dataset.iterbatches(
self.batch_size, pad_batches=True, deterministic=deterministic)):
d = {}
for index, label in enumerate(self.my_labels):
if self.mode == 'classification':
d[label] = to_one_hot(y_b[:, index])
if self.mode == 'regression':
d[label] = np.expand_dims(y_b[:, index], -1)
d[self.my_task_weights] = w_b
d[self.adj_matrix] = np.expand_dims(np.array([x[0] for x in X_b]), -2)
d[self.vertex_features] = np.array([x[1] for x in X_b])
mask = np.zeros(shape=(self.batch_size, self.max_atoms, 1))
for i in range(self.batch_size):
mask_size = X_b[i][2]
          mask[i, :mask_size, 0] = 1
d[self.mask] = mask
yield d
def predict_proba_on_generator(self, generator, transformers=[]):
if not self.built:
self.build()
with self._get_tf("Graph").as_default():
out_tensors = [x.out_tensor for x in self.outputs]
results = []
for feed_dict in generator:
feed_dict = {
self.layers[k.name].out_tensor: v
for k, v in six.iteritems(feed_dict)
}
        feed_dict[self._training_placeholder] = 1.0
result = np.array(self.session.run(out_tensors, feed_dict=feed_dict))
if len(result.shape) == 3:
result = np.transpose(result, axes=[1, 0, 2])
if len(transformers) > 0:
result = undo_transforms(result, transformers)
results.append(result)
return np.concatenate(results, axis=0)
def evaluate(self, dataset, metrics, transformers=[], per_task_metrics=False):
if not self.built:
self.build()
return self.evaluate_generator(
self.default_generator(dataset, predict=True),
metrics,
labels=self.my_labels,
weights=[self.my_task_weights],
per_task_metrics=per_task_metrics)
<file_sep>"""
Generate coulomb matrices for molecules.
See Montavon et al., _New Journal of Physics_ __15__ (2013) 095003.
"""
import numpy as np
from typing import Any, List, Optional
from deepchem.utils.typing import RDKitMol
from deepchem.utils.data_utils import pad_array
from deepchem.feat.base_classes import MolecularFeaturizer
class CoulombMatrix(MolecularFeaturizer):
"""Calculate Coulomb matrices for molecules.
Coulomb matrices provide a representation of the electronic structure of
a molecule. For a molecule with `N` atoms, the Coulomb matrix is a
`N X N` matrix where each element gives the strength of the
electrostatic interaction between two atoms. The method is described
in more detail in [1]_.
Examples
--------
>>> import deepchem as dc
>>> featurizers = dc.feat.CoulombMatrix(max_atoms=23)
>>> input_file = 'deepchem/feat/tests/data/water.sdf' # really backed by water.sdf.csv
>>> tasks = ["atomization_energy"]
>>> loader = dc.data.SDFLoader(tasks, featurizer=featurizers)
>>> dataset = loader.create_dataset(input_file)
References
----------
.. [1] Montavon, Grégoire, et al. "Learning invariant representations of
molecules for atomization energy prediction." Advances in neural information
processing systems. 2012.
Note
----
This class requires RDKit to be installed.
"""
def __init__(self,
max_atoms: int,
remove_hydrogens: bool = False,
randomize: bool = False,
upper_tri: bool = False,
n_samples: int = 1,
seed: Optional[int] = None):
"""Initialize this featurizer.
Parameters
----------
max_atoms: int
The maximum number of atoms expected for molecules this featurizer will
process.
remove_hydrogens: bool, optional (default False)
If True, remove hydrogens before processing them.
randomize: bool, optional (default False)
If True, use method `randomize_coulomb_matrices` to randomize Coulomb matrices.
upper_tri: bool, optional (default False)
Generate only upper triangle part of Coulomb matrices.
n_samples: int, optional (default 1)
If `randomize` is set to True, the number of random samples to draw.
seed: int, optional (default None)
Random seed to use.
"""
self.max_atoms = int(max_atoms)
self.remove_hydrogens = remove_hydrogens
self.randomize = randomize
self.upper_tri = upper_tri
self.n_samples = n_samples
if seed is not None:
seed = int(seed)
self.seed = seed
def _featurize(self, datapoint: RDKitMol, **kwargs) -> np.ndarray:
"""
Calculate Coulomb matrices for molecules. If extra randomized
matrices are generated, they are treated as if they are features
for additional conformers.
Since Coulomb matrices are symmetric, only the (flattened) upper
triangular portion is returned.
Parameters
----------
datapoint: rdkit.Chem.rdchem.Mol
RDKit Mol object
Returns
-------
np.ndarray
The coulomb matrices of the given molecule.
The default shape is `(num_confs, max_atoms, max_atoms)`.
If num_confs == 1, the shape is `(max_atoms, max_atoms)`.
"""
if 'mol' in kwargs:
datapoint = kwargs.get("mol")
raise DeprecationWarning(
'Mol is being phased out as a parameter, please pass "datapoint" instead.'
)
features = self.coulomb_matrix(datapoint)
if self.upper_tri:
features = np.asarray(
[f[np.triu_indices_from(f)] for f in features])
if features.shape[0] == 1:
# `(1, max_atoms, max_atoms)` -> `(max_atoms, max_atoms)`
features = np.squeeze(features, axis=0)
return features
def coulomb_matrix(self, mol: RDKitMol) -> np.ndarray:
"""
Generate Coulomb matrices for each conformer of the given molecule.
Parameters
----------
mol: rdkit.Chem.rdchem.Mol
RDKit Mol object
Returns
-------
np.ndarray
The coulomb matrices of the given molecule
"""
try:
from rdkit import Chem
from rdkit.Chem import AllChem
except ModuleNotFoundError:
raise ImportError("This class requires RDKit to be installed.")
# Check whether num_confs >=1 or not
num_confs = len(mol.GetConformers())
if num_confs == 0:
mol = Chem.AddHs(mol)
AllChem.EmbedMolecule(mol, AllChem.ETKDG())
if self.remove_hydrogens:
mol = Chem.RemoveHs(mol)
n_atoms = mol.GetNumAtoms()
z = [atom.GetAtomicNum() for atom in mol.GetAtoms()]
rval = []
for conf in mol.GetConformers():
d = self.get_interatomic_distances(conf)
m = np.outer(z, z) / d
m[range(n_atoms), range(n_atoms)] = 0.5 * np.array(z)**2.4
if self.randomize:
for random_m in self.randomize_coulomb_matrix(m):
random_m = pad_array(random_m, self.max_atoms)
rval.append(random_m)
else:
m = pad_array(m, self.max_atoms)
rval.append(m)
return np.asarray(rval)
def randomize_coulomb_matrix(self, m: np.ndarray) -> List[np.ndarray]:
"""Randomize a Coulomb matrix as decribed in [1]_:
1. Compute row norms for M in a vector row_norms.
2. Sample a zero-mean unit-variance noise vector e with dimension
equal to row_norms.
3. Permute the rows and columns of M with the permutation that
sorts row_norms + e.
Parameters
----------
m: np.ndarray
Coulomb matrix.
Returns
-------
List[np.ndarray]
List of the random coulomb matrix
References
----------
.. [1] Montavon et al., New Journal of Physics, 15, (2013), 095003
"""
rval = []
row_norms = np.asarray([np.linalg.norm(row) for row in m], dtype=float)
rng = np.random.RandomState(self.seed)
for i in range(self.n_samples):
e = rng.normal(size=row_norms.size)
p = np.argsort(row_norms + e)
new = m[p][:, p] # permute rows first, then columns
rval.append(new)
return rval
@staticmethod
def get_interatomic_distances(conf: Any) -> np.ndarray:
"""
Get interatomic distances for atoms in a molecular conformer.
Parameters
----------
conf: rdkit.Chem.rdchem.Conformer
Molecule conformer.
Returns
-------
np.ndarray
The distances matrix for all atoms in a molecule
"""
n_atoms = conf.GetNumAtoms()
coords = [
# Convert AtomPositions from Angstrom to bohr (atomic units)
conf.GetAtomPosition(i).__idiv__(0.52917721092)
for i in range(n_atoms)
]
d = np.zeros((n_atoms, n_atoms), dtype=float)
for i in range(n_atoms):
for j in range(i):
d[i, j] = coords[i].Distance(coords[j])
d[j, i] = d[i, j]
return d
class CoulombMatrixEig(CoulombMatrix):
"""Calculate the eigenvalues of Coulomb matrices for molecules.
This featurizer computes the eigenvalues of the Coulomb matrices for provided
molecules. Coulomb matrices are described in [1]_.
Examples
--------
>>> import deepchem as dc
>>> featurizers = dc.feat.CoulombMatrixEig(max_atoms=23)
>>> input_file = 'deepchem/feat/tests/data/water.sdf' # really backed by water.sdf.csv
>>> tasks = ["atomization_energy"]
>>> loader = dc.data.SDFLoader(tasks, featurizer=featurizers)
>>> dataset = loader.create_dataset(input_file)
References
----------
.. [1] <NAME>, et al. "Learning invariant representations of
molecules for atomization energy prediction." Advances in neural information
processing systems. 2012.
"""
def __init__(self,
max_atoms: int,
remove_hydrogens: bool = False,
randomize: bool = False,
n_samples: int = 1,
seed: Optional[int] = None):
"""Initialize this featurizer.
Parameters
----------
max_atoms: int
The maximum number of atoms expected for molecules this featurizer will
process.
remove_hydrogens: bool, optional (default False)
If True, remove hydrogens before processing them.
randomize: bool, optional (default False)
If True, use method `randomize_coulomb_matrices` to randomize Coulomb matrices.
n_samples: int, optional (default 1)
If `randomize` is set to True, the number of random samples to draw.
seed: int, optional (default None)
Random seed to use.
"""
self.max_atoms = int(max_atoms)
self.remove_hydrogens = remove_hydrogens
self.randomize = randomize
self.n_samples = n_samples
if seed is not None:
seed = int(seed)
self.seed = seed
def _featurize(self, datapoint: RDKitMol, **kwargs) -> np.ndarray:
"""
Calculate eigenvalues of Coulomb matrix for molecules. Eigenvalues
are returned sorted by absolute value in descending order and padded
by max_atoms.
Parameters
----------
datapoint: rdkit.Chem.rdchem.Mol
RDKit Mol object
Returns
-------
np.ndarray
The eigenvalues of Coulomb matrix for molecules.
The default shape is `(num_confs, max_atoms)`.
If num_confs == 1, the shape is `(max_atoms,)`.
"""
if 'mol' in kwargs:
datapoint = kwargs.get("mol")
raise DeprecationWarning(
'Mol is being phased out as a parameter, please pass "datapoint" instead.'
)
cmat = self.coulomb_matrix(datapoint)
features_list = []
for f in cmat:
w, v = np.linalg.eig(f)
w_abs = np.abs(w)
sortidx = np.argsort(w_abs)
sortidx = sortidx[::-1]
w = w[sortidx]
f = pad_array(w, self.max_atoms)
features_list.append(f)
features = np.asarray(features_list)
if features.shape[0] == 1:
# `(1, max_atoms)` -> `(max_atoms,)`
features = np.squeeze(features, axis=0)
return features
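

# A minimal, hedged usage sketch (not part of the library API): featurize a
# single RDKit molecule directly with CoulombMatrixEig. Requires RDKit; a
# conformer is embedded automatically when none is present, so only the
# padded output shape is checked here. The SMILES string is illustrative.
if __name__ == "__main__":
  from rdkit import Chem

  featurizer = CoulombMatrixEig(max_atoms=23)
  mol = Chem.MolFromSmiles("CCO")
  feats = featurizer.featurize([mol])
  # One molecule; eigenvalues are padded out to max_atoms entries.
  assert feats[0].shape == (23,)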
<file_sep>from functools import partial
from math import sqrt
from typing import Callable, Dict, List, Union
import dgl
import torch
from torch import nn
from deepchem.feat.molecule_featurizers.conformer_featurizer import (
full_atom_feature_dims,
full_bond_feature_dims,
)
from deepchem.models.torch_models.layers import MultilayerPerceptron
from deepchem.utils.graph_utils import (
aggregate_max,
aggregate_mean,
aggregate_min,
aggregate_moment,
aggregate_std,
aggregate_sum,
aggregate_var,
scale_amplification,
scale_attenuation,
scale_identity,
)
PNA_AGGREGATORS = {
"mean": aggregate_mean,
"sum": aggregate_sum,
"max": aggregate_max,
"min": aggregate_min,
"std": aggregate_std,
"var": aggregate_var,
"moment3": partial(aggregate_moment, n=3),
"moment4": partial(aggregate_moment, n=4),
"moment5": partial(aggregate_moment, n=5),
}
PNA_SCALERS = {
"identity": scale_identity,
"amplification": scale_amplification,
"attenuation": scale_attenuation,
}
class AtomEncoder(torch.nn.Module):
"""
Encodes atom features into embeddings based on the Open Graph Benchmark feature set in conformer_featurizer.
Parameters
----------
emb_dim : int
The dimension that the returned embedding will have.
padding : bool, optional (default=False)
If true then the last index will be used for padding.
Examples
--------
>>> from deepchem.feat.molecule_featurizers.conformer_featurizer import full_atom_feature_dims
>>> atom_encoder = AtomEncoder(emb_dim=32)
>>> num_rows = 10
>>> atom_features = torch.stack([
... torch.randint(low=0, high=dim, size=(num_rows,))
... for dim in full_atom_feature_dims
... ], dim=1)
>>> atom_embeddings = atom_encoder(atom_features)
"""
def __init__(self, emb_dim, padding=False):
super(AtomEncoder, self).__init__()
self.atom_embedding_list = torch.nn.ModuleList()
self.padding = padding
for dim in full_atom_feature_dims:
if padding:
emb = torch.nn.Embedding(dim + 1, emb_dim, padding_idx=0)
else:
emb = torch.nn.Embedding(dim, emb_dim)
torch.nn.init.xavier_uniform_(emb.weight.data)
self.atom_embedding_list.append(emb)
def reset_parameters(self):
"""
Reset the parameters of the atom embeddings.
This method resets the weights of the atom embeddings by initializing
them with a uniform distribution between -sqrt(3) and sqrt(3).
"""
for embedder in self.atom_embedding_list:
embedder.weight.data.uniform_(-sqrt(3), sqrt(3))
def forward(self, x):
"""
Compute the atom embeddings for the given atom features.
Parameters
----------
x : torch.Tensor, shape (batch_size, num_atoms, num_features)
The input atom features tensor.
Returns
-------
x_embedding : torch.Tensor, shape (batch_size, num_atoms, emb_dim)
The computed atom embeddings.
"""
x_embedding = 0
for i in range(x.shape[1]):
if self.padding:
x_embedding += self.atom_embedding_list[i](x[:, i].long() + 1)
else:
x_embedding += self.atom_embedding_list[i](x[:, i].long())
return x_embedding
class BondEncoder(torch.nn.Module):
"""
Encodes bond features into embeddings based on the Open Graph Benchmark feature set in conformer_featurizer.
Parameters
----------
emb_dim : int
The dimension that the returned embedding will have.
padding : bool, optional (default=False)
If true then the last index will be used for padding.
Examples
--------
>>> from deepchem.feat.molecule_featurizers.conformer_featurizer import full_bond_feature_dims
>>> bond_encoder = BondEncoder(emb_dim=32)
>>> num_rows = 10
>>> bond_features = torch.stack([
... torch.randint(low=0, high=dim, size=(num_rows,))
... for dim in full_bond_feature_dims
... ], dim=1)
>>> bond_embeddings = bond_encoder(bond_features)
"""
def __init__(self, emb_dim, padding=False):
super(BondEncoder, self).__init__()
self.bond_embedding_list = torch.nn.ModuleList()
self.padding = padding
for dim in full_bond_feature_dims:
if padding:
emb = torch.nn.Embedding(dim + 1, emb_dim, padding_idx=0)
else:
emb = torch.nn.Embedding(dim, emb_dim)
torch.nn.init.xavier_uniform_(emb.weight.data)
self.bond_embedding_list.append(emb)
def forward(self, edge_attr):
"""
Compute the bond embeddings for the given bond features.
Parameters
----------
edge_attr : torch.Tensor, shape (batch_size, num_edges, num_features)
The input bond features tensor.
Returns
-------
bond_embedding : torch.Tensor, shape (batch_size, num_edges, emb_dim)
The computed bond embeddings.
"""
bond_embedding = 0
for i in range(edge_attr.shape[1]):
if self.padding:
bond_embedding += self.bond_embedding_list[i](
edge_attr[:, i].long() + 1)
else:
bond_embedding += self.bond_embedding_list[i](
edge_attr[:, i].long())
return bond_embedding
class PNALayer(nn.Module):
"""
Principal Neighbourhood Aggregation Layer (PNA) from [1].
Parameters
----------
in_dim : int
Input dimension of the node features.
out_dim : int
Output dimension of the node features.
in_dim_edges : int
Input dimension of the edge features.
aggregators : List[str]
List of aggregator functions to use. Options are "mean", "sum", "max", "min", "std", "var", "moment3", "moment4", "moment5".
scalers : List[str]
List of scaler functions to use. Options are "identity", "amplification", "attenuation".
activation : Union[Callable, str], optional, default="relu"
Activation function to use.
last_activation : Union[Callable, str], optional, default="none"
Last activation function to use.
dropout : float, optional, default=0.0
Dropout rate.
residual : bool, optional, default=True
Whether to use residual connections.
pairwise_distances : bool, optional, default=False
Whether to use pairwise distances.
batch_norm : bool, optional, default=True
Whether to use batch normalization.
batch_norm_momentum : float, optional, default=0.1
Momentum for the batch normalization layers.
avg_d : Dict[str, float], optional, default={"log": 1.0}
Dictionary containing the average degree of the graph.
posttrans_layers : int, optional, default=2
Number of post-transformation layers.
pretrans_layers : int, optional, default=1
Number of pre-transformation layers.
References
----------
.. [1] <NAME>., <NAME>., <NAME>., <NAME>. & <NAME>. Principal Neighbourhood Aggregation for Graph Nets. Preprint at https://doi.org/10.48550/arXiv.2004.05718 (2020).
Examples
--------
>>> import dgl
>>> import numpy as np
>>> import torch
>>> from deepchem.models.torch_models.pna_gnn import PNALayer
>>> in_dim = 32
>>> out_dim = 64
>>> in_dim_edges = 16
>>> aggregators = ["mean", "max"]
>>> scalers = ["identity", "amplification", "attenuation"]
>>> pna_layer = PNALayer(in_dim=in_dim,
... out_dim=out_dim,
... in_dim_edges=in_dim_edges,
... aggregators=aggregators,
... scalers=scalers)
>>> num_nodes = 10
>>> num_edges = 20
>>> node_features = torch.randn(num_nodes, in_dim)
>>> edge_features = torch.randn(num_edges, in_dim_edges)
>>> g = dgl.graph((np.random.randint(0, num_nodes, num_edges),
... np.random.randint(0, num_nodes, num_edges)))
>>> g.ndata['feat'] = node_features
>>> g.edata['feat'] = edge_features
>>> g.ndata['feat'] = pna_layer(g)
"""
def __init__(
self,
in_dim: int,
out_dim: int,
in_dim_edges: int,
aggregators: List[str],
scalers: List[str],
activation: Union[Callable, str] = "relu",
dropout: float = 0.0,
residual: bool = True,
pairwise_distances: bool = False,
batch_norm: bool = True,
batch_norm_momentum=0.1,
avg_d: Dict[str, float] = {"log": 1.0},
posttrans_layers: int = 2,
pretrans_layers: int = 1,
):
super(PNALayer, self).__init__()
self.aggregators = [PNA_AGGREGATORS[aggr] for aggr in aggregators]
self.scalers = [PNA_SCALERS[scale] for scale in scalers]
self.edge_features = in_dim_edges > 0
self.activation = activation
self.avg_d = avg_d
self.pairwise_distances = pairwise_distances
self.residual = residual
if in_dim != out_dim:
self.residual = False
self.pretrans = MultilayerPerceptron(
d_input=(2 * in_dim + in_dim_edges +
1) if self.pairwise_distances else
(2 * in_dim + in_dim_edges),
d_output=in_dim,
d_hidden=(in_dim,) * (pretrans_layers - 1),
batch_norm=batch_norm,
batch_norm_momentum=batch_norm_momentum,
dropout=dropout)
self.posttrans = MultilayerPerceptron(
d_input=(len(self.aggregators) * len(self.scalers) + 1) * in_dim,
d_hidden=(out_dim,) * (posttrans_layers - 1),
d_output=out_dim,
batch_norm=batch_norm,
batch_norm_momentum=batch_norm_momentum,
dropout=dropout)
def forward(self, g):
"""
Forward pass of the PNA layer.
Parameters
----------
g : dgl.DGLGraph
Input graph
Returns
-------
h : torch.Tensor
Node feature tensor
"""
h = g.ndata['feat']
h_in = h
# pretransformation
g.apply_edges(self.pretrans_edges)
# aggregation
g.update_all(self.message_func, self.reduce_func)
h = torch.cat([h, g.ndata['feat']], dim=-1)
# post-transformation
h = self.posttrans(h)
if self.residual:
h = h + h_in
return h
def message_func(self, edges) -> Dict[str, torch.Tensor]:
"""
The message function to generate messages along the edges.
Parameters
----------
edges : dgl.EdgeBatch
Batch of edges.
Returns
-------
Dict[str, torch.Tensor]
Dictionary containing the edge features.
"""
return {"e": edges.data["e"]}
def reduce_func(self, nodes) -> Dict[str, torch.Tensor]:
"""
The reduce function to aggregate the messages.
Apply the aggregators and scalers, and concatenate the results.
Parameters
----------
nodes : dgl.NodeBatch
Batch of nodes.
Returns
-------
Dict[str, torch.Tensor]
Dictionary containing the aggregated node features.
"""
h_in = nodes.data['feat']
h = nodes.mailbox["e"]
D = h.shape[-2]
h_to_cat = [
aggr(h=h, h_in=h_in) # type: ignore
for aggr in self.aggregators
]
h = torch.cat(h_to_cat, dim=-1)
if len(self.scalers) > 1:
h = torch.cat(
[
scale(h, D=D, avg_d=self.avg_d) # type: ignore
for scale in self.scalers
],
dim=-1)
return {'feat': h}
def pretrans_edges(self, edges) -> Dict[str, torch.Tensor]:
"""
Return a mapping to the concatenation of the features from
the source node, the destination node, and the edge between them (if applicable).
Parameters
----------
edges : dgl.EdgeBatch
Batch of edges.
Returns
-------
Dict[str, torch.Tensor]
Dictionary containing the concatenated features.
"""
if self.edge_features and self.pairwise_distances:
squared_distance = torch.sum((edges.src['x'] - edges.dst['x'])**2,
dim=-1)[:, None]
z2 = torch.cat([
edges.src['feat'], edges.dst['feat'], edges.data['feat'],
squared_distance
],
dim=-1)
elif not self.edge_features and self.pairwise_distances:
squared_distance = torch.sum((edges.src['x'] - edges.dst['x'])**2,
dim=-1)[:, None]
z2 = torch.cat(
[edges.src['feat'], edges.dst['feat'], squared_distance],
dim=-1)
elif self.edge_features and not self.pairwise_distances:
z2 = torch.cat(
[edges.src['feat'], edges.dst['feat'], edges.data['feat']],
dim=-1)
else:
z2 = torch.cat([edges.src['feat'], edges.dst['feat']], dim=-1)
return {"e": self.pretrans(z2)}
class PNAGNN(nn.Module):
"""
Principal Neighbourhood Aggregation Graph Neural Network [1]. This defines the message passing layers of the PNA model.
Parameters
----------
hidden_dim : int
Dimension of the hidden layers.
aggregators : List[str]
List of aggregator functions to use.
scalers : List[str]
List of scaler functions to use. Options are "identity", "amplification", "attenuation".
residual : bool, optional, default=True
Whether to use residual connections.
pairwise_distances : bool, optional, default=False
Whether to use pairwise distances.
activation : Union[Callable, str], optional, default="relu"
Activation function to use.
batch_norm : bool, optional, default=True
Whether to use batch normalization in the layers before the aggregator.
batch_norm_momentum : float, optional, default=0.1
Momentum for the batch normalization layers.
propagation_depth : int, optional, default=5
Number of propagation layers.
dropout : float, optional, default=0.0
Dropout rate.
posttrans_layers : int, optional, default=1
Number of post-transformation layers.
pretrans_layers : int, optional, default=1
Number of pre-transformation layers.
References
----------
.. [1] <NAME>., <NAME>., <NAME>., <NAME>. & <NAME>. Principal Neighbourhood Aggregation for Graph Nets. Preprint at https://doi.org/10.48550/arXiv.2004.05718 (2020).
Examples
--------
>>> import numpy as np
>>> from deepchem.feat.molecule_featurizers.conformer_featurizer import RDKitConformerFeaturizer
>>> from deepchem.feat.graph_data import BatchGraphData
>>> from deepchem.models.torch_models.pna_gnn import PNAGNN
    >>> smiles = ['C1=CC=NC=C1', 'CC(=O)C', 'C']
    >>> featurizer = RDKitConformerFeaturizer(num_conformers=2, rmsd_cutoff=1)
>>> data = featurizer.featurize(smiles)
>>> features = BatchGraphData(np.concatenate(data))
>>> features = features.to_dgl_graph()
>>> model = PNAGNN(hidden_dim=16,
... aggregators=['mean', 'sum'],
... scalers=['identity'])
>>> output = model(features)
"""
def __init__(self,
hidden_dim,
aggregators: List[str],
scalers: List[str],
residual: bool = True,
pairwise_distances: bool = False,
activation: Union[Callable, str] = "relu",
batch_norm: bool = True,
batch_norm_momentum=0.1,
propagation_depth: int = 5,
dropout: float = 0.0,
posttrans_layers: int = 1,
pretrans_layers: int = 1,
**kwargs):
super(PNAGNN, self).__init__()
self.mp_layers = nn.ModuleList()
for _ in range(propagation_depth):
self.mp_layers.append(
PNALayer(in_dim=hidden_dim,
out_dim=int(hidden_dim),
in_dim_edges=hidden_dim,
aggregators=aggregators,
scalers=scalers,
pairwise_distances=pairwise_distances,
residual=residual,
dropout=dropout,
activation=activation,
avg_d={"log": 1.0},
posttrans_layers=posttrans_layers,
pretrans_layers=pretrans_layers,
batch_norm=batch_norm,
batch_norm_momentum=batch_norm_momentum),)
self.atom_encoder = AtomEncoder(emb_dim=hidden_dim)
self.bond_encoder = BondEncoder(emb_dim=hidden_dim)
def forward(self, input_graph: dgl.DGLGraph) -> dgl.DGLGraph:
"""
Forward pass of the PNAGNN model.
Parameters
----------
input_graph : dgl.DGLGraph
Input graph with node and edge features.
Returns
-------
graph : dgl.DGLGraph
Output graph with updated node features after applying the message passing layers.
"""
graph = input_graph.clone()
graph.ndata['feat'] = self.atom_encoder(graph.ndata['x'])
graph.edata['feat'] = self.bond_encoder(graph.edata['edge_attr'])
for mp_layer in self.mp_layers:
graph.ndata['feat'] = mp_layer(graph)
return graph
class PNA(nn.Module):
"""
Message passing neural network for graph representation learning [1]_.
Parameters
----------
hidden_dim : int
Hidden dimension size.
target_dim : int
Dimensionality of the output, for example for binary classification target_dim = 1.
aggregators : List[str]
Type of message passing functions. Options are 'mean','sum','max','min','std','var','moment3','moment4','moment5'.
scalers : List[str]
Type of normalization layers in the message passing network. Options are 'identity','amplification','attenuation'.
readout_aggregators : List[str]
Type of aggregators in the readout network.
    readout_hidden_dim : int, default 1
        The dimension of the hidden layer in the readout network. The default value of 1 is treated as "not provided", in which case the readout hidden layer falls back to the same dimensionality as the final PNA layer, i.e. the hidden dimension size.
    readout_layers : int, default 2
        The number of linear layers in the readout network.
residual : bool, default True
Whether to use residual connections.
pairwise_distances : bool, default False
Whether to use pairwise distances.
activation : Union[Callable, str]
Activation function to use.
batch_norm : bool, default True
        Whether to use batch normalization in the layers before the aggregator.
batch_norm_momentum : float, default 0.1
Momentum for the batch normalization layers.
    propagation_depth : int, default 5
Number of propagation layers.
dropout : float, default 0.0
Dropout probability in the message passing layers.
posttrans_layers : int, default 1
Number of post-transformation layers.
pretrans_layers : int, default 1
Number of pre-transformation layers.
References
----------
.. [1] <NAME>., <NAME>., <NAME>., <NAME>. & <NAME>. Principal Neighbourhood Aggregation for Graph Nets. Preprint at https://doi.org/10.48550/arXiv.2004.05718 (2020).
Examples
--------
>>> import numpy as np
>>> from deepchem.feat.graph_data import BatchGraphData
>>> from deepchem.models.torch_models.pna_gnn import PNA
>>> from deepchem.feat.molecule_featurizers.conformer_featurizer import RDKitConformerFeaturizer
>>> smiles = ["C1=CC=CN=C1", "C1CCC1"]
>>> featurizer = RDKitConformerFeaturizer(num_conformers=2)
>>> data = featurizer.featurize(smiles)
>>> features = BatchGraphData(np.concatenate(data))
>>> features = features.to_dgl_graph()
>>> target_dim = 1
>>> model = PNA(hidden_dim=16, target_dim=target_dim)
>>> output = model(features)
>>> print(output.shape)
torch.Size([1, 1])
"""
def __init__(self,
hidden_dim: int,
target_dim: int,
aggregators: List[str] = ['mean'],
scalers: List[str] = ['identity'],
readout_aggregators: List[str] = ['mean'],
readout_hidden_dim: int = 1,
readout_layers: int = 2,
residual: bool = True,
pairwise_distances: bool = False,
activation: Union[Callable, str] = "relu",
batch_norm: bool = True,
batch_norm_momentum: float = 0.1,
propagation_depth: int = 5,
dropout: float = 0.0,
posttrans_layers: int = 1,
pretrans_layers: int = 1,
**kwargs):
super(PNA, self).__init__()
self.node_gnn = PNAGNN(hidden_dim=hidden_dim,
aggregators=aggregators,
scalers=scalers,
residual=residual,
pairwise_distances=pairwise_distances,
activation=activation,
batch_norm=batch_norm,
batch_norm_momentum=batch_norm_momentum,
propagation_depth=propagation_depth,
dropout=dropout,
posttrans_layers=posttrans_layers,
pretrans_layers=pretrans_layers)
if readout_hidden_dim == 1:
readout_hidden_dim = hidden_dim
self.readout_aggregators = readout_aggregators
self.output = MultilayerPerceptron(
d_input=hidden_dim * len(self.readout_aggregators),
d_hidden=(readout_hidden_dim,) * readout_layers,
batch_norm=False,
d_output=target_dim)
def forward(self, graph: dgl.DGLGraph):
graph = self.node_gnn(graph)
readouts_to_cat = [
dgl.readout_nodes(graph, 'feat', op=aggr)
for aggr in self.readout_aggregators
]
readout = torch.cat(readouts_to_cat, dim=-1)
return self.output(readout)
<file_sep>Metrics
=======
Metrics are one of the most important parts of machine learning. Unlike
traditional software, in which algorithms either work or don't work,
machine learning models work in degrees. That is, there's a continuous
range of "goodness" for a model. "Metrics" are functions which measure
how well a model works. There are many different choices of metrics
depending on the type of model at hand.
Metric Utilities
----------------
Metric utility functions allow for some common manipulations such as
switching to/from one-hot representations.
.. autofunction:: deepchem.metrics.to_one_hot
.. autofunction:: deepchem.metrics.from_one_hot
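A minimal sketch of round-tripping class labels through a one-hot encoding
(the label values below are illustrative):

.. code-block:: python

    import numpy as np
    import deepchem as dc

    y = np.array([0, 1, 1, 0])
    y_hot = dc.metrics.to_one_hot(y, n_classes=2)    # shape (4, 2)
    y_back = dc.metrics.from_one_hot(y_hot, axis=1)  # array([0, 1, 1, 0])
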
Metric Shape Handling
---------------------
One of the trickiest parts of handling metrics correctly is making sure the
shapes of input weights, predictions, and labels are processed correctly. This
is challenging in particular since DeepChem supports multitask, multiclass
models which means that shapes must be handled with care to prevent errors.
DeepChem maintains the following utility functions which attempt to
facilitate shape handling for you.
.. autofunction:: deepchem.metrics.normalize_weight_shape
.. autofunction:: deepchem.metrics.normalize_labels_shape
.. autofunction:: deepchem.metrics.normalize_prediction_shape
.. autofunction:: deepchem.metrics.handle_classification_mode
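For example, :code:`normalize_weight_shape` broadcasts a per-sample weight
vector up to the :code:`(n_samples, n_tasks)` matrix that multitask
evaluation expects. A minimal sketch (the array values are illustrative):

.. code-block:: python

    import numpy as np
    import deepchem as dc

    w = np.ones(4)  # one weight per sample
    w_norm = dc.metrics.normalize_weight_shape(w, 4, 3)  # n_samples=4, n_tasks=3
    assert w_norm.shape == (4, 3)
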
Metric Functions
----------------
DeepChem has a variety of different metrics which are useful for measuring model performance. A number (but not all) of these metrics are directly sourced from :code:`sklearn`.
.. autofunction:: deepchem.metrics.matthews_corrcoef
.. autofunction:: deepchem.metrics.recall_score
.. autofunction:: deepchem.metrics.r2_score
.. autofunction:: deepchem.metrics.mean_squared_error
.. autofunction:: deepchem.metrics.mean_absolute_error
.. autofunction:: deepchem.metrics.precision_score
.. autofunction:: deepchem.metrics.precision_recall_curve
.. autofunction:: deepchem.metrics.auc
.. autofunction:: deepchem.metrics.jaccard_score
.. autofunction:: deepchem.metrics.f1_score
.. autofunction:: deepchem.metrics.roc_auc_score
.. autofunction:: deepchem.metrics.accuracy_score
.. autofunction:: deepchem.metrics.balanced_accuracy_score
.. autofunction:: deepchem.metrics.top_k_accuracy_score
.. autofunction:: deepchem.metrics.pearson_r2_score
.. autofunction:: deepchem.metrics.jaccard_index
.. autofunction:: deepchem.metrics.pixel_error
.. autofunction:: deepchem.metrics.prc_auc_score
.. autofunction:: deepchem.metrics.rms_score
.. autofunction:: deepchem.metrics.mae_score
.. autofunction:: deepchem.metrics.kappa_score
.. autofunction:: deepchem.metrics.bedroc_score
.. autofunction:: deepchem.metrics.concordance_index
.. autofunction:: deepchem.metrics.genomic_metrics.get_motif_scores
.. autofunction:: deepchem.metrics.genomic_metrics.get_pssm_scores
.. autofunction:: deepchem.metrics.genomic_metrics.in_silico_mutagenesis
Metric Class
------------
The :code:`dc.metrics.Metric` class is a wrapper around metric
functions which interoperates with DeepChem :code:`dc.models.Model`.
.. autoclass:: deepchem.metrics.Metric
:members:
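A minimal sketch of wrapping a metric function and computing a score directly
(the arrays below are illustrative; in practice the wrapped metric is usually
passed to :code:`model.evaluate`):

.. code-block:: python

    import numpy as np
    import deepchem as dc

    metric = dc.metrics.Metric(dc.metrics.roc_auc_score)
    y_true = np.array([0, 0, 1, 1])
    y_pred = np.array([[0.9, 0.1], [0.6, 0.4], [0.4, 0.6], [0.2, 0.8]])
    score = metric.compute_metric(y_true, y_pred)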
<file_sep>chembl_tasks = [
'CHEMBL1075051', 'CHEMBL1075104', 'CHEMBL1075145', 'CHEMBL1075189',
'CHEMBL1075228', 'CHEMBL1075284', 'CHEMBL1075319', 'CHEMBL1163101',
'CHEMBL1163116', 'CHEMBL1163125', 'CHEMBL1255149', 'CHEMBL1255150',
'CHEMBL1293255', 'CHEMBL1293289', 'CHEMBL1293292', 'CHEMBL1741186',
'CHEMBL1741195', 'CHEMBL1744525', 'CHEMBL1764940', 'CHEMBL1781',
'CHEMBL1781862', 'CHEMBL1782', 'CHEMBL1784', 'CHEMBL1790', 'CHEMBL1792',
'CHEMBL1795101', 'CHEMBL1795126', 'CHEMBL1800', 'CHEMBL1801', 'CHEMBL1804',
'CHEMBL1806', 'CHEMBL1811', 'CHEMBL1821', 'CHEMBL1822', 'CHEMBL1824',
'CHEMBL1825', 'CHEMBL1827', 'CHEMBL1829', 'CHEMBL1833', 'CHEMBL1836',
'CHEMBL1844', 'CHEMBL1849', 'CHEMBL1850', 'CHEMBL1853', 'CHEMBL1855',
'CHEMBL1856', 'CHEMBL1860', 'CHEMBL1862', 'CHEMBL1865', 'CHEMBL1867',
'CHEMBL1868', 'CHEMBL1871', 'CHEMBL1873', 'CHEMBL1875', 'CHEMBL1878',
'CHEMBL1881', 'CHEMBL1889', 'CHEMBL1892', 'CHEMBL1898', 'CHEMBL1899',
'CHEMBL1900', 'CHEMBL1901', 'CHEMBL1902', 'CHEMBL1906', 'CHEMBL1907',
'CHEMBL1908', 'CHEMBL1913', 'CHEMBL1914', 'CHEMBL1916', 'CHEMBL1917',
'CHEMBL1919', 'CHEMBL1921', 'CHEMBL1921666', 'CHEMBL1926', 'CHEMBL1936',
'CHEMBL1937', 'CHEMBL1941', 'CHEMBL1942', 'CHEMBL1944', 'CHEMBL1945',
'CHEMBL1946', 'CHEMBL1947', 'CHEMBL1949', 'CHEMBL1951', 'CHEMBL1952',
'CHEMBL1955', 'CHEMBL1957', 'CHEMBL1966', 'CHEMBL1968', 'CHEMBL1974',
'CHEMBL1977', 'CHEMBL1978', 'CHEMBL1980', 'CHEMBL1981', 'CHEMBL1983',
'CHEMBL1985', 'CHEMBL1991', 'CHEMBL1994', 'CHEMBL1995', 'CHEMBL1997',
'CHEMBL2000', 'CHEMBL2001', 'CHEMBL2002', 'CHEMBL2007', 'CHEMBL2014',
'CHEMBL2016', 'CHEMBL202', 'CHEMBL2027', 'CHEMBL2028', 'CHEMBL203',
'CHEMBL2034', 'CHEMBL2035', 'CHEMBL2039', 'CHEMBL204', 'CHEMBL2041',
'CHEMBL2047', 'CHEMBL2049', 'CHEMBL205', 'CHEMBL2056', 'CHEMBL206',
'CHEMBL2061', 'CHEMBL2069', 'CHEMBL208', 'CHEMBL2083', 'CHEMBL2085',
'CHEMBL209', 'CHEMBL210', 'CHEMBL2107', 'CHEMBL2108', 'CHEMBL211',
'CHEMBL213', 'CHEMBL214', 'CHEMBL2146302', 'CHEMBL2147', 'CHEMBL2148',
'CHEMBL215', 'CHEMBL216', 'CHEMBL217', 'CHEMBL2179', 'CHEMBL218',
'CHEMBL2185', 'CHEMBL219', 'CHEMBL220', 'CHEMBL2207', 'CHEMBL2208',
'CHEMBL221', 'CHEMBL222', 'CHEMBL223', 'CHEMBL224', 'CHEMBL2243',
'CHEMBL225', 'CHEMBL226', 'CHEMBL2265', 'CHEMBL227', 'CHEMBL2274',
'CHEMBL2276', 'CHEMBL228', 'CHEMBL2285', 'CHEMBL2288', 'CHEMBL229',
'CHEMBL2292', 'CHEMBL230', 'CHEMBL2304402', 'CHEMBL2304404', 'CHEMBL231',
'CHEMBL2318', 'CHEMBL232', 'CHEMBL2326', 'CHEMBL2327', 'CHEMBL2329',
'CHEMBL233', 'CHEMBL2334', 'CHEMBL2335', 'CHEMBL2337', 'CHEMBL234',
'CHEMBL2345', 'CHEMBL235', 'CHEMBL236', 'CHEMBL2361', 'CHEMBL2363',
'CHEMBL2366456', 'CHEMBL2366505', 'CHEMBL2366512', 'CHEMBL2366516',
'CHEMBL2366517', 'CHEMBL237', 'CHEMBL2373', 'CHEMBL238', 'CHEMBL239',
'CHEMBL2391', 'CHEMBL2397', 'CHEMBL240', 'CHEMBL2409', 'CHEMBL241',
'CHEMBL2413', 'CHEMBL2414', 'CHEMBL242', 'CHEMBL2425', 'CHEMBL243',
'CHEMBL2431', 'CHEMBL2434', 'CHEMBL244', 'CHEMBL2447', 'CHEMBL245',
'CHEMBL246', 'CHEMBL2461', 'CHEMBL247', 'CHEMBL2470', 'CHEMBL2474',
'CHEMBL248', 'CHEMBL2487', 'CHEMBL2488', 'CHEMBL2489', 'CHEMBL249',
'CHEMBL2492', 'CHEMBL2499', 'CHEMBL251', 'CHEMBL252', 'CHEMBL2525',
'CHEMBL2527', 'CHEMBL253', 'CHEMBL2534', 'CHEMBL2536', 'CHEMBL254',
'CHEMBL255', 'CHEMBL256', 'CHEMBL2563', 'CHEMBL2564', 'CHEMBL2567',
'CHEMBL2568', 'CHEMBL2575', 'CHEMBL258', 'CHEMBL2581', 'CHEMBL259',
'CHEMBL2590', 'CHEMBL2599', 'CHEMBL260', 'CHEMBL261', 'CHEMBL2611',
'CHEMBL2617', 'CHEMBL262', 'CHEMBL2622', 'CHEMBL2637', 'CHEMBL264',
'CHEMBL265', 'CHEMBL2652', 'CHEMBL2664', 'CHEMBL267', 'CHEMBL268',
'CHEMBL269', 'CHEMBL2693', 'CHEMBL2695', 'CHEMBL270', 'CHEMBL2716',
'CHEMBL2722', 'CHEMBL273', 'CHEMBL2730', 'CHEMBL2736', 'CHEMBL274',
'CHEMBL2742', 'CHEMBL2749', 'CHEMBL275', 'CHEMBL2756', 'CHEMBL276',
'CHEMBL2778', 'CHEMBL278', 'CHEMBL2781', 'CHEMBL2782', 'CHEMBL2789',
'CHEMBL279', 'CHEMBL280', 'CHEMBL2803', 'CHEMBL2808', 'CHEMBL2815',
'CHEMBL2820', 'CHEMBL2828', 'CHEMBL283', 'CHEMBL2830', 'CHEMBL2835',
'CHEMBL284', 'CHEMBL2842', 'CHEMBL285', 'CHEMBL2851', 'CHEMBL2858',
'CHEMBL286', 'CHEMBL2868', 'CHEMBL287', 'CHEMBL2871', 'CHEMBL288',
'CHEMBL2882', 'CHEMBL2885', 'CHEMBL2902', 'CHEMBL2903', 'CHEMBL2916',
'CHEMBL2949', 'CHEMBL2954', 'CHEMBL2959', 'CHEMBL2971', 'CHEMBL2973',
'CHEMBL2978', 'CHEMBL298', 'CHEMBL299', 'CHEMBL2993', 'CHEMBL2996',
'CHEMBL2998', 'CHEMBL301', 'CHEMBL3012', 'CHEMBL3018', 'CHEMBL302',
'CHEMBL3024', 'CHEMBL3025', 'CHEMBL3037', 'CHEMBL304', 'CHEMBL3045',
'CHEMBL3048', 'CHEMBL3060', 'CHEMBL3066', 'CHEMBL3067', 'CHEMBL3072',
'CHEMBL308', 'CHEMBL3081', 'CHEMBL3085613', 'CHEMBL309', 'CHEMBL3100',
'CHEMBL3105', 'CHEMBL3106', 'CHEMBL311', 'CHEMBL3114', 'CHEMBL3116',
'CHEMBL312', 'CHEMBL313', 'CHEMBL3130', 'CHEMBL3138', 'CHEMBL3142',
'CHEMBL3145', 'CHEMBL3155', 'CHEMBL3157', 'CHEMBL3166', 'CHEMBL318',
'CHEMBL3180', 'CHEMBL3181', 'CHEMBL319', 'CHEMBL3192', 'CHEMBL3199',
'CHEMBL3202', 'CHEMBL321', 'CHEMBL322', 'CHEMBL3222', 'CHEMBL3223',
'CHEMBL3227', 'CHEMBL3229', 'CHEMBL3230', 'CHEMBL3231', 'CHEMBL324',
'CHEMBL3242', 'CHEMBL3247', 'CHEMBL325', 'CHEMBL3254', 'CHEMBL326',
'CHEMBL3267', 'CHEMBL3286', 'CHEMBL330', 'CHEMBL3305', 'CHEMBL331',
'CHEMBL3310', 'CHEMBL3314', 'CHEMBL3318', 'CHEMBL332', 'CHEMBL333',
'CHEMBL3332', 'CHEMBL335', 'CHEMBL3351', 'CHEMBL3358', 'CHEMBL3360',
'CHEMBL3361', 'CHEMBL3371', 'CHEMBL3374', 'CHEMBL338', 'CHEMBL339',
'CHEMBL3399910', 'CHEMBL340', 'CHEMBL3403', 'CHEMBL3419', 'CHEMBL3426',
'CHEMBL3437', 'CHEMBL3438', 'CHEMBL344', 'CHEMBL3464', 'CHEMBL3468',
'CHEMBL3471', 'CHEMBL3473', 'CHEMBL3474', 'CHEMBL3476', 'CHEMBL3486',
'CHEMBL3501', 'CHEMBL3510', 'CHEMBL3513', 'CHEMBL3522', 'CHEMBL3524',
'CHEMBL3535', 'CHEMBL3553', 'CHEMBL3559', 'CHEMBL3563', 'CHEMBL3568',
'CHEMBL3571', 'CHEMBL3572', 'CHEMBL3582', 'CHEMBL3587', 'CHEMBL3589',
'CHEMBL3590', 'CHEMBL3594', 'CHEMBL3602', 'CHEMBL3614', 'CHEMBL3623',
'CHEMBL3629', 'CHEMBL3638338', 'CHEMBL3649', 'CHEMBL3650', 'CHEMBL3687',
'CHEMBL3691', 'CHEMBL3699', 'CHEMBL3706', 'CHEMBL3710', 'CHEMBL3717',
'CHEMBL3729', 'CHEMBL3746', 'CHEMBL3759', 'CHEMBL3764', 'CHEMBL3766',
'CHEMBL3768', 'CHEMBL3769', 'CHEMBL3772', 'CHEMBL3775', 'CHEMBL3776',
'CHEMBL3778', 'CHEMBL3785', 'CHEMBL3788', 'CHEMBL3795', 'CHEMBL3798',
'CHEMBL3802', 'CHEMBL3807', 'CHEMBL3815', 'CHEMBL3816', 'CHEMBL3820',
'CHEMBL3833', 'CHEMBL3836', 'CHEMBL3837', 'CHEMBL3864', 'CHEMBL3868',
'CHEMBL3869', 'CHEMBL3880', 'CHEMBL3884', 'CHEMBL3891', 'CHEMBL3892',
'CHEMBL3910', 'CHEMBL3912', 'CHEMBL3920', 'CHEMBL3922', 'CHEMBL3942',
'CHEMBL3943', 'CHEMBL3948', 'CHEMBL3952', 'CHEMBL3959', 'CHEMBL3969',
'CHEMBL3974', 'CHEMBL3975', 'CHEMBL3976', 'CHEMBL3979', 'CHEMBL3983',
'CHEMBL3991', 'CHEMBL3996', 'CHEMBL4005', 'CHEMBL4015', 'CHEMBL4016',
'CHEMBL4018', 'CHEMBL4026', 'CHEMBL4029', 'CHEMBL4040', 'CHEMBL4051',
'CHEMBL4068', 'CHEMBL4072', 'CHEMBL4073', 'CHEMBL4074', 'CHEMBL4077',
'CHEMBL4078', 'CHEMBL4080', 'CHEMBL4093', 'CHEMBL4102', 'CHEMBL4111',
'CHEMBL4123', 'CHEMBL4124', 'CHEMBL4128', 'CHEMBL4132', 'CHEMBL4140',
'CHEMBL4142', 'CHEMBL4145', 'CHEMBL4150', 'CHEMBL4153', 'CHEMBL4161',
'CHEMBL4179', 'CHEMBL4188', 'CHEMBL4191', 'CHEMBL4198', 'CHEMBL4203',
'CHEMBL4204', 'CHEMBL4224', 'CHEMBL4234', 'CHEMBL4235', 'CHEMBL4247',
'CHEMBL4261', 'CHEMBL4282', 'CHEMBL4296', 'CHEMBL4302', 'CHEMBL4303',
'CHEMBL4306', 'CHEMBL4308', 'CHEMBL4315', 'CHEMBL4321', 'CHEMBL4333',
'CHEMBL4336', 'CHEMBL4338', 'CHEMBL4354', 'CHEMBL4358', 'CHEMBL4361',
'CHEMBL4372', 'CHEMBL4393', 'CHEMBL4394', 'CHEMBL4409', 'CHEMBL4414',
'CHEMBL4422', 'CHEMBL4427', 'CHEMBL4429', 'CHEMBL4430', 'CHEMBL4439',
'CHEMBL4441', 'CHEMBL4462', 'CHEMBL4465', 'CHEMBL4471', 'CHEMBL4477',
'CHEMBL4478', 'CHEMBL4481', 'CHEMBL4482', 'CHEMBL4501', 'CHEMBL4506',
'CHEMBL4508', 'CHEMBL4523', 'CHEMBL4550', 'CHEMBL4552', 'CHEMBL4561',
'CHEMBL4581', 'CHEMBL4586', 'CHEMBL4588', 'CHEMBL4599', 'CHEMBL4600',
'CHEMBL4608', 'CHEMBL4616', 'CHEMBL4617', 'CHEMBL4618', 'CHEMBL4625',
'CHEMBL4630', 'CHEMBL4633', 'CHEMBL4641', 'CHEMBL4644', 'CHEMBL4649',
'CHEMBL4652', 'CHEMBL4653', 'CHEMBL4657', 'CHEMBL4660', 'CHEMBL4662',
'CHEMBL4681', 'CHEMBL4683', 'CHEMBL4685', 'CHEMBL4687', 'CHEMBL4696',
'CHEMBL4698', 'CHEMBL4699', 'CHEMBL4722', 'CHEMBL4761', 'CHEMBL4768',
'CHEMBL4777', 'CHEMBL4779', 'CHEMBL4780', 'CHEMBL4789', 'CHEMBL4792',
'CHEMBL4793', 'CHEMBL4794', 'CHEMBL4801', 'CHEMBL4802', 'CHEMBL4803',
'CHEMBL4804', 'CHEMBL4805', 'CHEMBL4816', 'CHEMBL4822', 'CHEMBL4828',
'CHEMBL4829', 'CHEMBL4835', 'CHEMBL4860', 'CHEMBL4893', 'CHEMBL4895',
'CHEMBL4899', 'CHEMBL4908', 'CHEMBL4919', 'CHEMBL4975', 'CHEMBL4979',
'CHEMBL4980', 'CHEMBL5011', 'CHEMBL5017', 'CHEMBL5023', 'CHEMBL5024',
'CHEMBL5036', 'CHEMBL5067', 'CHEMBL5071', 'CHEMBL5076', 'CHEMBL5077',
'CHEMBL5080', 'CHEMBL5102', 'CHEMBL5103', 'CHEMBL5112', 'CHEMBL5113',
'CHEMBL5122', 'CHEMBL5131', 'CHEMBL5136', 'CHEMBL5137', 'CHEMBL5141',
'CHEMBL5145', 'CHEMBL5147', 'CHEMBL5160', 'CHEMBL5192', 'CHEMBL5203',
'CHEMBL5205', 'CHEMBL5247', 'CHEMBL5251', 'CHEMBL5282', 'CHEMBL5314',
'CHEMBL5328', 'CHEMBL5331', 'CHEMBL5353', 'CHEMBL5373', 'CHEMBL5375',
'CHEMBL5387', 'CHEMBL5393', 'CHEMBL5407', 'CHEMBL5409', 'CHEMBL5413',
'CHEMBL5414', 'CHEMBL5424', 'CHEMBL5441', 'CHEMBL5443', 'CHEMBL5445',
'CHEMBL5451', 'CHEMBL5457', 'CHEMBL5462', 'CHEMBL5471', 'CHEMBL5485',
'CHEMBL5491', 'CHEMBL5508', 'CHEMBL5522', 'CHEMBL5543', 'CHEMBL5555',
'CHEMBL5570', 'CHEMBL5582', 'CHEMBL5631', 'CHEMBL5645', 'CHEMBL5652',
'CHEMBL5658', 'CHEMBL5669', 'CHEMBL5697', 'CHEMBL5704', 'CHEMBL5736',
'CHEMBL5747', 'CHEMBL5763', 'CHEMBL5769', 'CHEMBL5800', 'CHEMBL5847',
'CHEMBL5879', 'CHEMBL5932', 'CHEMBL5966', 'CHEMBL5971', 'CHEMBL6007',
'CHEMBL6009', 'CHEMBL6080', 'CHEMBL6084', 'CHEMBL6136', 'CHEMBL6137',
'CHEMBL6140', 'CHEMBL6141', 'CHEMBL6145', 'CHEMBL6154', 'CHEMBL6164',
'CHEMBL6166', 'CHEMBL6184'
]
<file_sep>from unittest import TestCase
import numpy as np
import deepchem.rl.envs.tictactoe
class TestTicTacToeEnvironment(TestCase):
def test_constructor(self):
board = deepchem.rl.envs.tictactoe.TicTacToeEnvironment()
assert len(board.state) == 1
assert board.state[0].shape == (3, 3, 2)
assert np.sum(board.state[0]) == 1 or np.sum(board.state[0]) == 0
def test_step(self):
board = deepchem.rl.envs.tictactoe.TicTacToeEnvironment()
X = deepchem.rl.envs.tictactoe.TicTacToeEnvironment.X
board._state = [np.zeros(shape=(3, 3, 2), dtype=np.float32)]
board.step(0)
assert np.all(board.state[0][0][0] == X)
def test_winner(self):
board = deepchem.rl.envs.tictactoe.TicTacToeEnvironment()
X = deepchem.rl.envs.tictactoe.TicTacToeEnvironment.X
board.state[0][0][0] = X
board.state[0][0][1] = X
assert not board.check_winner(X)
board.state[0][0][2] = X
assert board.check_winner(X)
def test_game_over(self):
board = deepchem.rl.envs.tictactoe.TicTacToeEnvironment()
X = deepchem.rl.envs.tictactoe.TicTacToeEnvironment.X
board.state[0][0][0] = X
board.state[0][0][1] = X
assert not board.check_winner(X)
board.state[0][0][2] = X
assert board.check_winner(X)
def test_display(self):
board = deepchem.rl.envs.tictactoe.TicTacToeEnvironment()
s = board.display()
assert s.find("X") == -1
def test_get_O_move(self):
board = deepchem.rl.envs.tictactoe.TicTacToeEnvironment()
empty = deepchem.rl.envs.tictactoe.TicTacToeEnvironment.EMPTY
move = board.get_O_move()
assert np.all(board.state[0][move[0]][move[1]] == empty)
<file_sep>import numpy as np
import tensorflow as tf
from deepchem.data import NumpyDataset
from deepchem.feat import CircularFingerprint
from deepchem.models import KerasModel
from deepchem.models.losses import HingeLoss
from tensorflow.keras.layers import Input, Dense, Dropout, Lambda
class ScScoreModel(KerasModel):
"""
https://pubs.acs.org/doi/abs/10.1021/acs.jcim.7b00622
Several definitions of molecular complexity exist to facilitate prioritization
of lead compounds, to identify diversity-inducing and complexifying reactions,
and to guide retrosynthetic searches. In this work, we focus on synthetic
complexity and reformalize its definition to correlate with the expected number
of reaction steps required to produce a target molecule, with implicit knowledge
about what compounds are reasonable starting materials. We train a neural
network model on 12 million reactions from the Reaxys database to impose a
pairwise inequality constraint enforcing the premise of this definition: that on
average, the products of published chemical reactions should be more
synthetically complex than their corresponding reactants. The learned metric
(SCScore) exhibits highly desirable nonlinear behavior, particularly in
recognizing increases in synthetic complexity throughout a number of linear
synthetic routes.
Our model here actually uses hinge loss instead of the shifted relu loss in
https://github.com/connorcoley/scscore.
This could cause differentiation issues with compounds that are "close"
to each other in "complexity".
"""
def __init__(self,
n_features,
layer_sizes=[300, 300, 300],
dropouts=0.0,
**kwargs):
"""
Parameters
----------
n_features: int
number of features per molecule
layer_sizes: list of int
size of each hidden layer
    dropouts: float
      dropout rate to apply to each hidden layer
    kwargs
      All remaining keyword arguments are passed through to KerasModel
"""
self.n_features = n_features
self.layer_sizes = layer_sizes
self.dropout = dropouts
m1_features = Input(shape=(self.n_features,))
m2_features = Input(shape=(self.n_features,))
prev_layer1 = m1_features
prev_layer2 = m2_features
for layer_size in self.layer_sizes:
layer = Dense(layer_size, activation=tf.nn.relu)
prev_layer1 = layer(prev_layer1)
prev_layer2 = layer(prev_layer2)
if self.dropout > 0.0:
prev_layer1 = Dropout(rate=self.dropout)(prev_layer1)
prev_layer2 = Dropout(rate=self.dropout)(prev_layer2)
readout_layer = Dense(1)
readout_m1 = readout_layer(prev_layer1)
readout_m2 = readout_layer(prev_layer2)
outputs = [
Lambda(lambda x: tf.sigmoid(x) * 4 + 1)(readout_m1),
Lambda(lambda x: tf.sigmoid(x) * 4 + 1)(readout_m2),
Lambda(lambda x: x[0] - x[1])([readout_m1, readout_m2])
]
output_types = ['prediction', 'prediction', 'loss']
model = tf.keras.Model(inputs=[m1_features, m2_features],
outputs=outputs)
super(ScScoreModel, self).__init__(model,
HingeLoss(),
output_types=output_types,
**kwargs)
def default_generator(self,
dataset,
epochs=1,
mode='fit',
deterministic=True,
pad_batches=True):
for epoch in range(epochs):
for (X_b, y_b, w_b,
ids_b) in dataset.iterbatches(batch_size=self.batch_size,
deterministic=deterministic,
pad_batches=pad_batches):
yield ([X_b[:, 0], X_b[:, 1]], [y_b], [w_b])
def predict_mols(self, mols):
featurizer = CircularFingerprint(size=self.n_features,
radius=2,
chiral=True)
features = np.expand_dims(featurizer.featurize(mols), axis=1)
features = np.concatenate([features, features], axis=1)
ds = NumpyDataset(features, None, None, None)
return self.predict(ds)[0][:, 0]
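

# A minimal, hedged usage sketch (not part of the library API): score a few
# molecules with an untrained ScScoreModel. The SMILES strings and
# n_features=1024 below are illustrative assumptions; without training the
# returned scores are meaningless, so only the call pattern is demonstrated.
if __name__ == "__main__":
    from rdkit import Chem

    mols = [
        Chem.MolFromSmiles(s)
        for s in ("CCO", "c1ccccc1", "CC(=O)Nc1ccc(O)cc1")
    ]
    model = ScScoreModel(n_features=1024)
    scores = model.predict_mols(mols)
    print(scores)  # one synthetic-complexity score per molecule, in [1, 5]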
<file_sep>Splitters
=========
DeepChem :code:`dc.splits.Splitter` objects are a tool to meaningfully
split DeepChem datasets for machine learning testing. The core idea is
that when evaluating a machine learning model, it's useful to create
training, validation, and test splits of your source data. The training
split is used to train models, the validation split is used to benchmark
different model architectures, and the test split is ideally held out until
the very end, when it's used to gauge a final estimate of the model's
performance.
The :code:`dc.splits` module contains a collection of scientifically
aware splitters. In many cases, we want to evaluate scientific deep
learning models more rigorously than standard deep models since we're
looking for the ability to generalize to new domains. Some of the
implemented splitters here may help.
.. contents:: Contents
:local:
General Splitters
-----------------
RandomSplitter
^^^^^^^^^^^^^^
.. autoclass:: deepchem.splits.RandomSplitter
:members:
:inherited-members:
:exclude-members: __init__
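A minimal usage sketch (the synthetic :code:`NumpyDataset` below is an
illustrative stand-in for a real dataset):

.. code-block:: python

    import numpy as np
    import deepchem as dc

    X = np.random.rand(20, 5)
    y = np.random.rand(20, 1)
    dataset = dc.data.NumpyDataset(X, y)

    splitter = dc.splits.RandomSplitter()
    train, valid, test = splitter.train_valid_test_split(
        dataset, frac_train=0.8, frac_valid=0.1, frac_test=0.1)
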
RandomGroupSplitter
^^^^^^^^^^^^^^^^^^^
.. autoclass:: deepchem.splits.RandomGroupSplitter
:members:
:inherited-members:
RandomStratifiedSplitter
^^^^^^^^^^^^^^^^^^^^^^^^
.. autoclass:: deepchem.splits.RandomStratifiedSplitter
:members:
:inherited-members:
:exclude-members: __init__
SingletaskStratifiedSplitter
^^^^^^^^^^^^^^^^^^^^^^^^^^^^
.. autoclass:: deepchem.splits.SingletaskStratifiedSplitter
:members:
:inherited-members:
IndexSplitter
^^^^^^^^^^^^^
.. autoclass:: deepchem.splits.IndexSplitter
:members:
:inherited-members:
:exclude-members: __init__
SpecifiedSplitter
^^^^^^^^^^^^^^^^^
.. autoclass:: deepchem.splits.SpecifiedSplitter
:members:
:inherited-members:
TaskSplitter
^^^^^^^^^^^^
.. autoclass:: deepchem.splits.TaskSplitter
:members:
:inherited-members:
Molecule Splitters
------------------
ScaffoldSplitter
^^^^^^^^^^^^^^^^
.. autoclass:: deepchem.splits.ScaffoldSplitter
:members:
:inherited-members:
:exclude-members: __init__
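A minimal usage sketch (:code:`ScaffoldSplitter` requires RDKit and expects the
dataset :code:`ids` to be SMILES strings; the molecules below are illustrative):

.. code-block:: python

    import numpy as np
    import deepchem as dc

    smiles = ["CCO", "CCCCO", "c1ccccc1", "c1ccccc1C", "c1ccncc1", "CC(=O)O"]
    dataset = dc.data.NumpyDataset(X=np.zeros((len(smiles), 1)), ids=smiles)

    splitter = dc.splits.ScaffoldSplitter()
    train, valid, test = splitter.train_valid_test_split(dataset)
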
MolecularWeightSplitter
^^^^^^^^^^^^^^^^^^^^^^^
.. autoclass:: deepchem.splits.MolecularWeightSplitter
:members:
:inherited-members:
:exclude-members: __init__
MaxMinSplitter
^^^^^^^^^^^^^^
.. autoclass:: deepchem.splits.MaxMinSplitter
:members:
:inherited-members:
:exclude-members: __init__
ButinaSplitter
^^^^^^^^^^^^^^
.. autoclass:: deepchem.splits.ButinaSplitter
:members:
:inherited-members:
FingerprintSplitter
^^^^^^^^^^^^^^^^^^^
.. autoclass:: deepchem.splits.FingerprintSplitter
:members:
:inherited-members:
:exclude-members: __init__
Base Splitter (for developers)
------------------------------
The :code:`dc.splits.Splitter` class is the abstract parent class for
all splitters. This class should never be directly instantiated.
.. autoclass:: deepchem.splits.Splitter
:members:
<file_sep># -*- coding: utf-8 -*-
import tensorflow as tf
import numpy as np
from collections.abc import Sequence as SequenceCollection
from typing import Optional, Callable, Dict, List, Tuple
from tensorflow.keras import activations, initializers, backend
from tensorflow.keras.layers import Dropout, BatchNormalization, Dense, Activation
class InteratomicL2Distances(tf.keras.layers.Layer):
"""Compute (squared) L2 Distances between atoms given neighbors.
This class computes pairwise distances between its inputs.
Examples
--------
>>> import numpy as np
>>> import deepchem as dc
>>> atoms = 5
>>> neighbors = 2
>>> coords = np.random.rand(atoms, 3)
>>> neighbor_list = np.random.randint(0, atoms, size=(atoms, neighbors))
>>> layer = InteratomicL2Distances(atoms, neighbors, 3)
>>> result = np.array(layer([coords, neighbor_list]))
>>> result.shape
(5, 2)
"""
def __init__(self, N_atoms: int, M_nbrs: int, ndim: int, **kwargs):
"""Constructor for this layer.
Parameters
----------
N_atoms: int
Number of atoms in the system total.
M_nbrs: int
Number of neighbors to consider when computing distances.
n_dim: int
Number of descriptors for each atom.
"""
super(InteratomicL2Distances, self).__init__(**kwargs)
self.N_atoms = N_atoms
self.M_nbrs = M_nbrs
self.ndim = ndim
def get_config(self) -> Dict:
"""Returns config dictionary for this layer."""
config = super(InteratomicL2Distances, self).get_config()
config['N_atoms'] = self.N_atoms
config['M_nbrs'] = self.M_nbrs
config['ndim'] = self.ndim
return config
def call(self, inputs: List):
"""Invokes this layer.
Parameters
----------
inputs: list
Should be of form `inputs=[coords, nbr_list]` where `coords` is a
tensor of shape `(None, N, 3)` and `nbr_list` is a list.
Returns
-------
Tensor of shape `(N_atoms, M_nbrs)` with interatomic distances.
"""
if len(inputs) != 2:
raise ValueError("InteratomicDistances requires coords,nbr_list")
coords, nbr_list = (inputs[0], inputs[1])
N_atoms, M_nbrs, ndim = self.N_atoms, self.M_nbrs, self.ndim
# Shape (N_atoms, M_nbrs, ndim)
nbr_coords = tf.gather(coords, nbr_list)
# Shape (N_atoms, M_nbrs, ndim)
tiled_coords = tf.tile(tf.reshape(coords, (N_atoms, 1, ndim)),
(1, M_nbrs, 1))
# Shape (N_atoms, M_nbrs)
return tf.reduce_sum((tiled_coords - nbr_coords)**2, axis=2)
class GraphConv(tf.keras.layers.Layer):
"""Graph Convolutional Layers
This layer implements the graph convolution introduced in [1]_. The graph
convolution combines per-node feature vectors in a nonlinear fashion with
the feature vectors for neighboring nodes. This "blends" information in
local neighborhoods of a graph.
References
----------
.. [1] Duvenaud, <NAME>., et al. "Convolutional networks on graphs for learning molecular fingerprints."
Advances in neural information processing systems. 2015. https://arxiv.org/abs/1509.09292
"""
def __init__(self,
out_channel: int,
min_deg: int = 0,
max_deg: int = 10,
activation_fn: Optional[Callable] = None,
**kwargs):
"""Initialize a graph convolutional layer.
Parameters
----------
out_channel: int
The number of output channels per graph node.
min_deg: int, optional (default 0)
The minimum allowed degree for each graph node.
max_deg: int, optional (default 10)
The maximum allowed degree for each graph node. Note that this
is set to 10 to handle complex molecules (some organometallic
compounds have strange structures). If you're using this for
non-molecular applications, you may need to set this much higher
depending on your dataset.
activation_fn: function
A nonlinear activation function to apply. If you're not sure,
`tf.nn.relu` is probably a good default for your application.
"""
super(GraphConv, self).__init__(**kwargs)
self.out_channel = out_channel
self.min_degree = min_deg
self.max_degree = max_deg
self.activation_fn = activation_fn
def build(self, input_shape):
# Generate the nb_affine weights and biases
num_deg = 2 * self.max_degree + (1 - self.min_degree)
self.W_list = [
self.add_weight(name='kernel' + str(k),
shape=(int(input_shape[0][-1]), self.out_channel),
initializer='glorot_uniform',
trainable=True) for k in range(num_deg)
]
self.b_list = [
self.add_weight(name='bias' + str(k),
shape=(self.out_channel,),
initializer='zeros',
trainable=True) for k in range(num_deg)
]
self.built = True
def get_config(self):
config = super(GraphConv, self).get_config()
config['out_channel'] = self.out_channel
config['min_deg'] = self.min_degree
config['max_deg'] = self.max_degree
config['activation_fn'] = self.activation_fn
return config
def call(self, inputs):
# Extract atom_features
atom_features = inputs[0]
# Extract graph topology
deg_slice = inputs[1]
deg_adj_lists = inputs[3:]
W = iter(self.W_list)
b = iter(self.b_list)
# Sum all neighbors using adjacency matrix
deg_summed = self.sum_neigh(atom_features, deg_adj_lists)
# Get collection of modified atom features
new_rel_atoms_collection = (self.max_degree + 1 -
self.min_degree) * [None]
split_features = tf.split(atom_features, deg_slice[:, 1])
for deg in range(1, self.max_degree + 1):
# Obtain relevant atoms for this degree
rel_atoms = deg_summed[deg - 1]
# Get self atoms
self_atoms = split_features[deg - self.min_degree]
# Apply hidden affine to relevant atoms and append
rel_out = tf.matmul(rel_atoms, next(W)) + next(b)
self_out = tf.matmul(self_atoms, next(W)) + next(b)
out = rel_out + self_out
new_rel_atoms_collection[deg - self.min_degree] = out
# Determine the min_deg=0 case
if self.min_degree == 0:
self_atoms = split_features[0]
# Only use the self layer
out = tf.matmul(self_atoms, next(W)) + next(b)
new_rel_atoms_collection[0] = out
# Combine all atoms back into the list
atom_features = tf.concat(axis=0, values=new_rel_atoms_collection)
if self.activation_fn is not None:
atom_features = self.activation_fn(atom_features)
return atom_features
def sum_neigh(self, atoms, deg_adj_lists):
"""Store the summed atoms by degree"""
deg_summed = self.max_degree * [None]
# Tensorflow correctly processes empty lists when using concat
for deg in range(1, self.max_degree + 1):
gathered_atoms = tf.gather(atoms, deg_adj_lists[deg - 1])
# Sum along neighbors as well as self, and store
summed_atoms = tf.reduce_sum(gathered_atoms, 1)
deg_summed[deg - 1] = summed_atoms
return deg_summed
class GraphPool(tf.keras.layers.Layer):
"""A GraphPool gathers data from local neighborhoods of a graph.
This layer does a max-pooling over the feature vectors of atoms in a
neighborhood. You can think of this layer as analogous to a max-pooling
layer for 2D convolutions but which operates on graphs instead. This
technique is described in [1]_.
References
----------
.. [1] Duvenaud, <NAME>., et al. "Convolutional networks on graphs for
learning molecular fingerprints." Advances in neural information processing
systems. 2015. https://arxiv.org/abs/1509.09292
"""
def __init__(self, min_degree=0, max_degree=10, **kwargs):
"""Initialize this layer
Parameters
----------
min_degree: int, optional (default 0)
The minimum allowed degree for each graph node.
max_degree: int, optional (default 10)
The maximum allowed degree for each graph node. Note that this
is set to 10 to handle complex molecules (some organometallic
compounds have strange structures). If you're using this for
non-molecular applications, you may need to set this much higher
depending on your dataset.
"""
super(GraphPool, self).__init__(**kwargs)
self.min_degree = min_degree
self.max_degree = max_degree
def get_config(self):
config = super(GraphPool, self).get_config()
config['min_degree'] = self.min_degree
config['max_degree'] = self.max_degree
return config
def call(self, inputs):
atom_features = inputs[0]
deg_slice = inputs[1]
deg_adj_lists = inputs[3:]
# Perform the mol gather
# atom_features = graph_pool(atom_features, deg_adj_lists, deg_slice,
# self.max_degree, self.min_degree)
deg_maxed = (self.max_degree + 1 - self.min_degree) * [None]
# Tensorflow correctly processes empty lists when using concat
split_features = tf.split(atom_features, deg_slice[:, 1])
for deg in range(1, self.max_degree + 1):
# Get self atoms
self_atoms = split_features[deg - self.min_degree]
if deg_adj_lists[deg - 1].shape[0] == 0:
# There are no neighbors of this degree, so just create an empty tensor directly.
maxed_atoms = tf.zeros((0, self_atoms.shape[-1]))
else:
# Expand dims
self_atoms = tf.expand_dims(self_atoms, 1)
# always deg-1 for deg_adj_lists
gathered_atoms = tf.gather(atom_features,
deg_adj_lists[deg - 1])
gathered_atoms = tf.concat(axis=1,
values=[self_atoms, gathered_atoms])
maxed_atoms = tf.reduce_max(gathered_atoms, 1)
deg_maxed[deg - self.min_degree] = maxed_atoms
if self.min_degree == 0:
self_atoms = split_features[0]
deg_maxed[0] = self_atoms
return tf.concat(axis=0, values=deg_maxed)
class GraphGather(tf.keras.layers.Layer):
"""A GraphGather layer pools node-level feature vectors to create a graph feature vector.
Many graph convolutional networks manipulate feature vectors per
graph-node. For a molecule for example, each node might represent an
atom, and the network would manipulate atomic feature vectors that
summarize the local chemistry of the atom. However, at the end of
the application, we will likely want to work with a molecule level
feature representation. The `GraphGather` layer creates a graph level
feature vector by combining all the node-level feature vectors.
One subtlety about this layer is that it depends on the
`batch_size`. This is done for internal implementation reasons. The
`GraphConv`, and `GraphPool` layers pool all nodes from all graphs
in a batch that's being processed. The `GraphGather` reassembles
these jumbled node feature vectors into per-graph feature vectors.
References
----------
.. [1] Duvenaud, <NAME>., et al. "Convolutional networks on graphs for
learning molecular fingerprints." Advances in neural information processing
systems. 2015. https://arxiv.org/abs/1509.09292
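Examples
--------
A minimal, purely illustrative sketch (shapes and values are not from the
original source): pool five atom feature vectors into two per-graph vectors.

>>> import tensorflow as tf
>>> atom_features = tf.random.uniform((5, 4))
>>> degree_slice = tf.zeros((1, 2), dtype=tf.int32)  # placeholder, unused here
>>> membership = tf.constant([0, 0, 0, 1, 1])
>>> layer = GraphGather(batch_size=2)
>>> out = layer([atom_features, degree_slice, membership])
>>> out.shape
TensorShape([2, 8])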
"""
def __init__(self, batch_size, activation_fn=None, **kwargs):
"""Initialize this layer.
Parameters
---------
batch_size: int
The batch size for this layer. Note that the layer's behavior
changes depending on the batch size.
activation_fn: function
A nonlinear activation function to apply. If you're not sure,
`tf.nn.relu` is probably a good default for your application.
"""
super(GraphGather, self).__init__(**kwargs)
self.batch_size = batch_size
self.activation_fn = activation_fn
def get_config(self):
config = super(GraphGather, self).get_config()
config['batch_size'] = self.batch_size
config['activation_fn'] = self.activation_fn
return config
def call(self, inputs):
"""Invoking this layer.
Parameters
----------
inputs: list
This list should consist of `inputs = [atom_features, deg_slice,
membership, deg_adj_list placeholders...]`. These are all
tensors that are created/process by `GraphConv` and `GraphPool`
"""
atom_features = inputs[0]
# Extract graph topology
membership = inputs[2]
assert self.batch_size > 1, "graph_gather requires batches larger than 1"
sparse_reps = tf.math.unsorted_segment_sum(atom_features, membership,
self.batch_size)
max_reps = tf.math.unsorted_segment_max(atom_features, membership,
self.batch_size)
mol_features = tf.concat(axis=1, values=[sparse_reps, max_reps])
if self.activation_fn is not None:
mol_features = self.activation_fn(mol_features)
return mol_features
class MolGANConvolutionLayer(tf.keras.layers.Layer):
"""
Graph convolution layer used in MolGAN model.
MolGAN is a WGAN type model for generation of small molecules.
Not used directly, higher level layers like MolGANMultiConvolutionLayer use it.
This layer performs basic convolution on one-hot encoded matrices containing
atom and bond information. This layer also accepts three inputs for the case
when convolution is performed more than once and the results of the previous convolution
need to be used. It was done this way to avoid creating another layer that
accepts three inputs rather than two. The last input is the so-called
hidden_layer and it holds the results of the convolution, while the first two are the unchanged
input tensors.
Example
-------
See: MolGANMultiConvolutionLayer for using in layers.
>>> from tensorflow.keras import Model
>>> from tensorflow.keras.layers import Input
>>> vertices = 9
>>> nodes = 5
>>> edges = 5
>>> units = 128
>>> layer1 = MolGANConvolutionLayer(units=units,edges=edges, name='layer1')
>>> layer2 = MolGANConvolutionLayer(units=units,edges=edges, name='layer2')
>>> adjacency_tensor= Input(shape=(vertices, vertices, edges))
>>> node_tensor = Input(shape=(vertices,nodes))
>>> hidden1 = layer1([adjacency_tensor,node_tensor])
>>> output = layer2(hidden1)
>>> model = Model(inputs=[adjacency_tensor,node_tensor], outputs=[output])
References
----------
.. [1] <NAME> et al. "MolGAN: An implicit generative model
for small molecular graphs", https://arxiv.org/abs/1805.11973
"""
def __init__(self,
units: int,
activation: Callable = activations.tanh,
dropout_rate: float = 0.0,
edges: int = 5,
name: str = "",
**kwargs):
"""
Initialize this layer.
Parameters
---------
units: int
Dimension of dense layers used for convolution
activation: function, optional (default=Tanh)
activation function used across model, default is Tanh
dropout_rate: float, optional (default=0.0)
Dropout rate used by dropout layer
edges: int, optional (default=5)
How many dense layers to use in convolution.
Typically equal to number of bond types used in the model.
name: string, optional (default="")
Name of the layer
"""
super(MolGANConvolutionLayer, self).__init__(name=name, **kwargs)
self.activation = activation
self.dropout_rate = dropout_rate
self.units = units
self.edges = edges
self.dense1 = [Dense(units=self.units) for _ in range(edges - 1)]
self.dense2 = Dense(units=self.units)
self.dropout = Dropout(self.dropout_rate)
self.activation_layer = Activation(self.activation)
def call(self, inputs, training=False):
"""
Invoke this layer
Parameters
----------
inputs: list
List of two input matrices, adjacency tensor and node features tensors
in one-hot encoding format.
training: bool
Should this layer be run in training mode.
Typically decided by main model, influences things like dropout.
Returns
--------
tuple(tf.Tensor,tf.Tensor,tf.Tensor)
First and second are original input tensors
Third is the result of convolution
"""
ic = len(inputs)
assert ic > 1, "MolGANConvolutionLayer requires at least two inputs: [adjacency_tensor, node_features_tensor]"
adjacency_tensor = inputs[0]
node_tensor = inputs[1]
# means that this is second loop of convolution
if ic > 2:
hidden_tensor = inputs[2]
annotations = tf.concat((hidden_tensor, node_tensor), -1)
else:
annotations = node_tensor
output = tf.stack([dense(annotations) for dense in self.dense1], 1)
adj = tf.transpose(adjacency_tensor[:, :, :, 1:], (0, 3, 1, 2))
output = tf.matmul(adj, output)
output = tf.reduce_sum(output, 1) + self.dense2(node_tensor)
output = self.activation_layer(output)
output = self.dropout(output)
return adjacency_tensor, node_tensor, output
def get_config(self) -> Dict:
"""
Returns config dictionary for this layer.
"""
config = super(MolGANConvolutionLayer, self).get_config()
config["activation"] = self.activation
config["dropout_rate"] = self.dropout_rate
config["units"] = self.units
config["edges"] = self.edges
return config
class MolGANAggregationLayer(tf.keras.layers.Layer):
"""
Graph Aggregation layer used in MolGAN model.
MolGAN is a WGAN type model for generation of small molecules.
Performs aggregation on the tensor resulting from convolution layers.
Given its simple nature it might be removed in the future and moved to
MolGANEncoderLayer.
Example
-------
>>> from tensorflow.keras import Model
>>> from tensorflow.keras.layers import Input
>>> vertices = 9
>>> nodes = 5
>>> edges = 5
>>> units = 128
>>> layer_1 = MolGANConvolutionLayer(units=units,edges=edges, name='layer1')
>>> layer_2 = MolGANConvolutionLayer(units=units,edges=edges, name='layer2')
>>> layer_3 = MolGANAggregationLayer(units=128, name='layer3')
>>> adjacency_tensor= Input(shape=(vertices, vertices, edges))
>>> node_tensor = Input(shape=(vertices,nodes))
>>> hidden_1 = layer_1([adjacency_tensor,node_tensor])
>>> hidden_2 = layer_2(hidden_1)
>>> output = layer_3(hidden_2[2])
>>> model = Model(inputs=[adjacency_tensor,node_tensor], outputs=[output])
References
----------
.. [1] <NAME> et al. "MolGAN: An implicit generative model
for small molecular graphs", https://arxiv.org/abs/1805.11973
"""
def __init__(self,
units: int = 128,
activation: Callable = activations.tanh,
dropout_rate: float = 0.0,
name: str = "",
**kwargs):
"""
Initialize the layer
Parameters
---------
units: int, optional (default=128)
Dimension of dense layers used for aggregation
activation: function, optional (default=Tanh)
activation function used across model, default is Tanh
dropout_rate: float, optional (default=0.0)
Used by dropout layer
name: string, optional (default="")
Name of the layer
"""
super(MolGANAggregationLayer, self).__init__(name=name, **kwargs)
self.units = units
self.activation = activation
self.dropout_rate = dropout_rate
self.d1 = Dense(units=units, activation="sigmoid")
self.d2 = Dense(units=units, activation=activation)
self.dropout_layer = Dropout(dropout_rate)
self.activation_layer = Activation(activation)
def call(self, inputs, training=False):
"""
Invoke this layer
Parameters
----------
inputs: List
Single tensor resulting from graph convolution layer
training: bool
Should this layer be run in training mode.
Typically decided by main model, influences things like dropout.
Returns
--------
aggregation tensor: tf.Tensor
Result of aggregation function on input convolution tensor.
"""
i = self.d1(inputs)
j = self.d2(inputs)
output = tf.reduce_sum(i * j, 1)
output = self.activation_layer(output)
output = self.dropout_layer(output)
return output
def get_config(self) -> Dict:
"""
Returns config dictionary for this layer.
"""
config = super(MolGANAggregationLayer, self).get_config()
config["units"] = self.units
config["activation"] = self.activation
config["dropout_rate"] = self.dropout_rate
config["edges"] = self.edges
return config
class MolGANMultiConvolutionLayer(tf.keras.layers.Layer):
"""
Multiple pass convolution layer used in MolGAN model.
MolGAN is a WGAN type model for generation of small molecules.
It takes the outputs of the previous convolution layer and uses
them as inputs for the next one.
It simplifies the overall framework, but might be moved to
MolGANEncoderLayer in the future in order to reduce the number of layers.
Example
--------
>>> from tensorflow.keras import Model
>>> from tensorflow.keras.layers import Input
>>> vertices = 9
>>> nodes = 5
>>> edges = 5
>>> units = 128
>>> layer_1 = MolGANMultiConvolutionLayer(units=(128,64), name='layer1')
>>> layer_2 = MolGANAggregationLayer(units=128, name='layer2')
>>> adjacency_tensor= Input(shape=(vertices, vertices, edges))
>>> node_tensor = Input(shape=(vertices,nodes))
>>> hidden = layer_1([adjacency_tensor,node_tensor])
>>> output = layer_2(hidden)
>>> model = Model(inputs=[adjacency_tensor,node_tensor], outputs=[output])
References
----------
.. [1] <NAME> et al. "MolGAN: An implicit generative model
for small molecular graphs", https://arxiv.org/abs/1805.11973
"""
def __init__(self,
units: Tuple = (128, 64),
activation: Callable = activations.tanh,
dropout_rate: float = 0.0,
edges: int = 5,
name: str = "",
**kwargs):
"""
Initialize the layer
Parameters
---------
units: Tuple, optional (default=(128,64)), min_length=2
List of dimensions used by consecutive convolution layers.
The more values the more convolution layers invoked.
activation: function, optional (default=tanh)
activation function used across model, default is Tanh
dropout_rate: float, optional (default=0.0)
Used by dropout layer
edges: int, optional (default=5)
Controls how many dense layers are used for a single convolution unit.
Typically matches number of bond types used in the molecule.
name: string, optional (default="")
Name of the layer
"""
super(MolGANMultiConvolutionLayer, self).__init__(name=name, **kwargs)
assert len(units) > 1, "Layer requires at least two values"
self.units = units
self.activation = activation
self.dropout_rate = dropout_rate
self.edges = edges
self.first_convolution = MolGANConvolutionLayer(self.units[0],
self.activation,
self.dropout_rate,
self.edges)
self.gcl = [
MolGANConvolutionLayer(u, self.activation, self.dropout_rate,
self.edges) for u in self.units[1:]
]
def call(self, inputs, training=False):
"""
Invoke this layer
Parameters
----------
inputs: list
List of two input matrices, adjacency tensor and node features tensors
in one-hot encoding format.
training: bool
Should this layer be run in training mode.
Typically decided by main model, influences things like dropout.
Returns
--------
convolution tensor: tf.Tensor
Result of input tensors going through convolution a number of times.
"""
adjacency_tensor = inputs[0]
node_tensor = inputs[1]
tensors = self.first_convolution([adjacency_tensor, node_tensor])
for layer in self.gcl:
tensors = layer(tensors)
_, _, hidden_tensor = tensors
return hidden_tensor
def get_config(self) -> Dict:
"""
Returns config dictionary for this layer.
"""
config = super(MolGANMultiConvolutionLayer, self).get_config()
config["units"] = self.units
config["activation"] = self.activation
config["dropout_rate"] = self.dropout_rate
config["edges"] = self.edges
return config
class MolGANEncoderLayer(tf.keras.layers.Layer):
"""
Main learning layer used by MolGAN model.
MolGAN is a WGAN type model for generation of small molecules.
Its role is to further simplify the model.
This layer can be manually built by stacking graph convolution layers
followed by graph aggregation.
Example
--------
>>> from tensorflow.keras import Model
>>> from tensorflow.keras.layers import Input, Dropout,Dense
>>> vertices = 9
>>> edges = 5
>>> nodes = 5
>>> dropout_rate = .0
>>> adjacency_tensor= Input(shape=(vertices, vertices, edges))
>>> node_tensor = Input(shape=(vertices, nodes))
>>> graph = MolGANEncoderLayer(units = [(128,64),128], dropout_rate= dropout_rate, edges=edges)([adjacency_tensor,node_tensor])
>>> dense = Dense(units=128, activation='tanh')(graph)
>>> dense = Dropout(dropout_rate)(dense)
>>> dense = Dense(units=64, activation='tanh')(dense)
>>> dense = Dropout(dropout_rate)(dense)
>>> output = Dense(units=1)(dense)
>>> model = Model(inputs=[adjacency_tensor,node_tensor], outputs=[output])
References
----------
.. [1] <NAME> et al. "MolGAN: An implicit generative model
for small molecular graphs", https://arxiv.org/abs/1805.11973
"""
def __init__(self,
units: List = [(128, 64), 128],
activation: Callable = activations.tanh,
dropout_rate: float = 0.0,
edges: int = 5,
name: str = "",
**kwargs):
"""
Initialize the layer.
Parameters
---------
units: List, optional (default=[(128, 64), 128])
List of units for MolGANMultiConvolutionLayer and MolGANAggregationLayer
i.e. [(128,64),128] means two convolution layers dims = [128,64]
followed by aggregation layer dims=128
activation: function, optional (default=Tanh)
activation function used across model, default is Tanh
dropout_rate: float, optional (default=0.0)
Used by dropout layer
edges: int, optional (default=5)
Controls how many dense layers are used for a single convolution unit.
Typically matches number of bond types used in the molecule.
name: string, optional (default="")
Name of the layer
"""
super(MolGANEncoderLayer, self).__init__(name=name, **kwargs)
assert len(units) == 2
self.graph_convolution_units, self.auxiliary_units = units
self.activation = activation
self.dropout_rate = dropout_rate
self.edges = edges
self.multi_graph_convolution_layer = MolGANMultiConvolutionLayer(
self.graph_convolution_units, self.activation, self.dropout_rate,
self.edges)
self.graph_aggregation_layer = MolGANAggregationLayer(
self.auxiliary_units, self.activation, self.dropout_rate)
def call(self, inputs, training=False):
"""
Invoke this layer
Parameters
----------
inputs: list
List of two input matrices, adjacency tensor and node features tensors
in one-hot encoding format.
training: bool
Should this layer be run in training mode.
Typically decided by main model, influences things like dropout.
Returns
--------
encoder tensor: tf.Tensor
Tensor that has been through a number of convolutions followed
by aggregation.
"""
output = self.multi_graph_convolution_layer(inputs)
node_tensor = inputs[1]
if len(inputs) > 2:
hidden_tensor = inputs[2]
annotations = tf.concat((output, hidden_tensor, node_tensor), -1)
else:
_, node_tensor = inputs
annotations = tf.concat((output, node_tensor), -1)
output = self.graph_aggregation_layer(annotations)
return output
def get_config(self) -> Dict:
"""
Returns config dictionary for this layer.
"""
config = super(MolGANEncoderLayer, self).get_config()
config["graph_convolution_units"] = self.graph_convolution_units
config["auxiliary_units"] = self.auxiliary_units
config["activation"] = self.activation
config["dropout_rate"] = self.dropout_rate
config["edges"] = self.edges
return config
class LSTMStep(tf.keras.layers.Layer):
"""Layer that performs a single step LSTM update.
This layer performs a single step LSTM update. Note that it is *not*
a full LSTM recurrent network. The LSTMStep layer is useful as a
primitive for designing layers such as the AttnLSTMEmbedding or the
IterRefLSTMEmbedding below.
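Examples
--------
A minimal, purely illustrative sketch (dimensions chosen arbitrarily): run a
single LSTM update on a batch of two inputs.

>>> import tensorflow as tf
>>> output_dim, input_dim = 4, 8
>>> layer = LSTMStep(output_dim, input_dim)
>>> x = tf.zeros((2, input_dim))
>>> h_tm1 = tf.zeros((2, output_dim))
>>> c_tm1 = tf.zeros((2, output_dim))
>>> h, state = layer([x, h_tm1, c_tm1])
>>> h.shape
TensorShape([2, 4])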
"""
def __init__(self,
output_dim,
input_dim,
init_fn='glorot_uniform',
inner_init_fn='orthogonal',
activation_fn='tanh',
inner_activation_fn='hard_sigmoid',
**kwargs):
"""
Parameters
----------
output_dim: int
Dimensionality of output vectors.
input_dim: int
Dimensionality of input vectors.
init_fn: str
TensorFlow initialization to use for W.
inner_init_fn: str
TensorFlow initialization to use for U.
activation_fn: str
TensorFlow activation to use for output.
inner_activation_fn: str
TensorFlow activation to use for inner steps.
"""
super(LSTMStep, self).__init__(**kwargs)
self.init = init_fn
self.inner_init = inner_init_fn
self.output_dim = output_dim
# No other forget biases supported right now.
self.activation = activation_fn
self.inner_activation = inner_activation_fn
self.activation_fn = activations.get(activation_fn)
self.inner_activation_fn = activations.get(inner_activation_fn)
self.input_dim = input_dim
def get_config(self):
config = super(LSTMStep, self).get_config()
config['output_dim'] = self.output_dim
config['input_dim'] = self.input_dim
config['init_fn'] = self.init
config['inner_init_fn'] = self.inner_init
config['activation_fn'] = self.activation
config['inner_activation_fn'] = self.inner_activation
return config
def get_initial_states(self, input_shape):
return [backend.zeros(input_shape), backend.zeros(input_shape)]
def build(self, input_shape):
"""Constructs learnable weights for this layer."""
init = initializers.get(self.init)
inner_init = initializers.get(self.inner_init)
self.W = init((self.input_dim, 4 * self.output_dim))
self.U = inner_init((self.output_dim, 4 * self.output_dim))
self.b = tf.Variable(np.hstack(
(np.zeros(self.output_dim), np.ones(self.output_dim),
np.zeros(self.output_dim), np.zeros(self.output_dim))),
dtype=tf.float32)
self.built = True
def call(self, inputs):
"""Execute this layer on input tensors.
Parameters
----------
inputs: list
List of three tensors (x, h_tm1, c_tm1). h_tm1 means "h, t-1".
Returns
-------
list
Returns h, [h, c]
"""
x, h_tm1, c_tm1 = inputs
# Taken from Keras code [citation needed]
z = backend.dot(x, self.W) + backend.dot(h_tm1, self.U) + self.b
z0 = z[:, :self.output_dim]
z1 = z[:, self.output_dim:2 * self.output_dim]
z2 = z[:, 2 * self.output_dim:3 * self.output_dim]
z3 = z[:, 3 * self.output_dim:]
i = self.inner_activation_fn(z0)
f = self.inner_activation_fn(z1)
c = f * c_tm1 + i * self.activation_fn(z2)
o = self.inner_activation_fn(z3)
h = o * self.activation_fn(c)
return h, [h, c]
def cosine_dist(x, y):
"""Computes the inner product (cosine similarity) between two tensors.
This assumes that the two input tensors contain rows of vectors where
each column represents a different feature. The output tensor will have
elements that represent the inner product between pairs of normalized vectors
in the rows of `x` and `y`. The two tensors need to have the same number of
columns, because one cannot take the dot product between vectors of different
lengths. For example, in sentence similarity and sentence classification tasks,
the number of columns is the embedding size. In these tasks, the rows of the
input tensors would be different test vectors or sentences. The input tensors
themselves could be different batches. Using vectors or tensors of all 0s
should be avoided.
Methods
-------
The vectors in the input tensors are first l2-normalized such that each vector
has length or magnitude of 1. The inner product (dot product) is then taken
between corresponding pairs of row vectors in the input tensors and returned.
Examples
--------
The cosine similarity between two equivalent vectors will be 1. The cosine
similarity between two equivalent tensors (tensors where all the elements are
the same) will be a tensor of 1s. In this scenario, if the input tensors `x` and
`y` are each of shape `(n,p)`, where each element in `x` and `y` is the same, then
the output tensor would be a tensor of shape `(n,n)` with 1 in every entry.
>>> import numpy as np
>>> import tensorflow as tf
>>> import deepchem.models.layers as layers
>>> x = tf.ones((6, 4), dtype=tf.dtypes.float32, name=None)
>>> y_same = tf.ones((6, 4), dtype=tf.dtypes.float32, name=None)
>>> cos_sim_same = layers.cosine_dist(x,y_same)
`x` and `y_same` are the same tensor (equivalent at every element, in this
case 1). As such, the pairwise inner product of the rows in `x` and `y` will
always be 1. The output tensor will be of shape (6,6).
>>> diff = cos_sim_same - tf.ones((6, 6), dtype=tf.dtypes.float32, name=None)
>>> np.allclose(0.0, tf.reduce_sum(diff).numpy(), atol=1e-05)
True
>>> cos_sim_same.shape
TensorShape([6, 6])
The cosine similarity between two orthogonal vectors will be 0 (by definition).
If every row in `x` is orthogonal to every row in `y`, then the output will be a
tensor of 0s. In the following example, each row in the tensor `x1` is orthogonal
to each row in `x2` because they are halves of an identity matrix.
>>> identity_tensor = tf.eye(512, dtype=tf.dtypes.float32)
>>> x1 = identity_tensor[0:256,:]
>>> x2 = identity_tensor[256:512,:]
>>> cos_sim_orth = layers.cosine_dist(x1,x2)
Each row in `x1` is orthogonal to each row in `x2`. As such, the pairwise inner
product of the rows in `x1`and `x2` will always be 0. Furthermore, because the
shape of the input tensors are both of shape `(256,512)`, the output tensor will
be of shape `(256,256)`.
>>> np.allclose(0.0, tf.reduce_sum(cos_sim_orth).numpy(), atol=1e-05)
True
>>> cos_sim_orth.shape
TensorShape([256, 256])
Parameters
----------
x: tf.Tensor
Input Tensor of shape `(n, p)`.
The shape of this input tensor should be `n` rows by `p` columns.
Note that `n` need not equal `m` (the number of rows in `y`).
y: tf.Tensor
Input Tensor of shape `(m, p)`
The shape of this input tensor should be `m` rows by `p` columns.
Note that `m` need not equal `n` (the number of rows in `x`).
Returns
-------
tf.Tensor
Returns a tensor of shape `(n, m)`, that is, `n` rows by `m` columns.
Each `i,j`-th entry of this output tensor is the inner product between
the l2-normalized `i`-th row of the input tensor `x` and the
the l2-normalized `j`-th row of the output tensor `y`.
"""
x_norm = tf.math.l2_normalize(x, axis=1)
y_norm = tf.math.l2_normalize(y, axis=1)
return backend.dot(x_norm, tf.transpose(y_norm))
class AttnLSTMEmbedding(tf.keras.layers.Layer):
"""Implements AttnLSTM as in matching networks paper.
The AttnLSTM embedding adjusts two sets of vectors, the "test" and
"support" sets. The "support" consists of a set of evidence vectors.
Think of these as the small training set for low-data machine
learning. The "test" consists of the queries we wish to answer with
the small amounts of available data. The AttnLSTMEmbedding allows us to
modify the embedding of the "test" set depending on the contents of
the "support". The AttnLSTMEmbedding is thus a type of learnable
metric that allows a network to modify its internal notion of
distance.
See references [1]_ [2]_ for more details.
References
----------
.. [1] Vinyals, Oriol, et al. "Matching networks for one shot learning."
Advances in neural information processing systems. 2016.
.. [2] Vinyals, Oriol, <NAME>, and <NAME>. "Order matters:
Sequence to sequence for sets." arXiv preprint arXiv:1511.06391 (2015).
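Examples
--------
A minimal, purely illustrative sketch (sizes chosen arbitrarily): embed a
test set of 2 vectors against a support set of 3 vectors.

>>> import tensorflow as tf
>>> n_test, n_support, n_feat, max_depth = 2, 3, 4, 1
>>> layer = AttnLSTMEmbedding(n_test, n_support, n_feat, max_depth)
>>> test = tf.random.uniform((n_test, n_feat))
>>> support = tf.random.uniform((n_support, n_feat))
>>> test_out, support_out = layer([test, support])
>>> test_out.shape
TensorShape([2, 4])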
"""
def __init__(self, n_test, n_support, n_feat, max_depth, **kwargs):
"""
Parameters
----------
n_support: int
Size of support set.
n_test: int
Size of test set.
n_feat: int
Number of features per atom
max_depth: int
Number of "processing steps" used by sequence-to-sequence for sets model.
"""
super(AttnLSTMEmbedding, self).__init__(**kwargs)
self.max_depth = max_depth
self.n_test = n_test
self.n_support = n_support
self.n_feat = n_feat
def get_config(self):
config = super(AttnLSTMEmbedding, self).get_config()
config['n_test'] = self.n_test
config['n_support'] = self.n_support
config['n_feat'] = self.n_feat
config['max_depth'] = self.max_depth
return config
def build(self, input_shape):
n_feat = self.n_feat
self.lstm = LSTMStep(n_feat, 2 * n_feat)
self.q_init = backend.zeros([self.n_test, n_feat])
self.states_init = self.lstm.get_initial_states([self.n_test, n_feat])
self.built = True
def call(self, inputs):
"""Execute this layer on input tensors.
Parameters
----------
inputs: list
List of two tensors (X, Xp). X should be of shape (n_test,
n_feat) and Xp should be of shape (n_support, n_feat) where
n_test is the size of the test set, n_support that of the support
set, and n_feat is the number of per-atom features.
Returns
-------
list
Returns two tensors of same shape as input. Namely the output
shape will be [(n_test, n_feat), (n_support, n_feat)]
"""
if len(inputs) != 2:
raise ValueError(
"AttnLSTMEmbedding layer must have exactly two parents")
# x is test set, xp is support set.
x, xp = inputs
# Get initializations
q = self.q_init
states = self.states_init
for d in range(self.max_depth):
# Process using attention
# Eqn (4), appendix A.1 of Matching Networks paper
e = cosine_dist(x + q, xp)
a = tf.nn.softmax(e)
r = backend.dot(a, xp)
# Generate new attention states
y = backend.concatenate([q, r], axis=1)
q, states = self.lstm([y] + states)
return [x + q, xp]
class IterRefLSTMEmbedding(tf.keras.layers.Layer):
"""Implements the Iterative Refinement LSTM.
Much like AttnLSTMEmbedding, the IterRefLSTMEmbedding is another type
of learnable metric which adjusts "test" and "support." Recall that
"support" is the small amount of data available in a low data machine
learning problem, and that "test" is the query. The AttnLSTMEmbedding
only modifies the "test" based on the contents of the support.
However, the IterRefLSTM modifies both the "support" and "test" based
on each other. This allows the learnable metric to be more malleable
than that from AttnLSTMEmbedding.
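Examples
--------
A minimal, purely illustrative sketch (sizes chosen arbitrarily): jointly
embed a test set of 2 vectors and a support set of 3 vectors.

>>> import tensorflow as tf
>>> n_test, n_support, n_feat, max_depth = 2, 3, 4, 1
>>> layer = IterRefLSTMEmbedding(n_test, n_support, n_feat, max_depth)
>>> test = tf.random.uniform((n_test, n_feat))
>>> support = tf.random.uniform((n_support, n_feat))
>>> test_out, support_out = layer([test, support])
>>> support_out.shape
TensorShape([3, 4])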
"""
def __init__(self, n_test, n_support, n_feat, max_depth, **kwargs):
"""
Unlike the AttnLSTM model which only modifies the test vectors
additively, this model allows for an additive update to be
performed to both test and support using information from each
other.
Parameters
----------
n_support: int
Size of support set.
n_test: int
Size of test set.
n_feat: int
Number of input atom features
max_depth: int
Number of LSTM Embedding layers.
"""
super(IterRefLSTMEmbedding, self).__init__(**kwargs)
self.max_depth = max_depth
self.n_test = n_test
self.n_support = n_support
self.n_feat = n_feat
def get_config(self):
config = super(IterRefLSTMEmbedding, self).get_config()
config['n_test'] = self.n_test
config['n_support'] = self.n_support
config['n_feat'] = self.n_feat
config['max_depth'] = self.max_depth
return config
def build(self, input_shape):
n_feat = self.n_feat
# Support set lstm
self.support_lstm = LSTMStep(n_feat, 2 * n_feat)
self.q_init = backend.zeros([self.n_support, n_feat])
self.support_states_init = self.support_lstm.get_initial_states(
[self.n_support, n_feat])
# Test lstm
self.test_lstm = LSTMStep(n_feat, 2 * n_feat)
self.p_init = backend.zeros([self.n_test, n_feat])
self.test_states_init = self.test_lstm.get_initial_states(
[self.n_test, n_feat])
self.built = True
def call(self, inputs):
"""Execute this layer on input tensors.
Parameters
----------
inputs: list
List of two tensors (X, Xp). X should be of shape (n_test,
n_feat) and Xp should be of shape (n_support, n_feat) where
n_test is the size of the test set, n_support that of the
support set, and n_feat is the number of per-atom features.
Returns
-------
Returns two tensors of same shape as input. Namely the output
shape will be [(n_test, n_feat), (n_support, n_feat)]
"""
if len(inputs) != 2:
raise ValueError(
"IterRefLSTMEmbedding layer must have exactly two parents")
x, xp = inputs
# Get initializations
p = self.p_init
q = self.q_init
# Rename support
z = xp
states = self.support_states_init
x_states = self.test_states_init
for d in range(self.max_depth):
# Process support xp using attention
e = cosine_dist(z + q, xp)
a = tf.nn.softmax(e)
# Get linear combination of support set
r = backend.dot(a, xp)
# Process test x using attention
x_e = cosine_dist(x + p, z)
x_a = tf.nn.softmax(x_e)
s = backend.dot(x_a, z)
# Generate new support attention states
qr = backend.concatenate([q, r], axis=1)
q, states = self.support_lstm([qr] + states)
# Generate new test attention states
ps = backend.concatenate([p, s], axis=1)
p, x_states = self.test_lstm([ps] + x_states)
# Redefine
z = r
return [x + p, xp + q]
class SwitchedDropout(tf.keras.layers.Layer):
"""Apply dropout based on an input.
This is required for uncertainty prediction. The standard Keras
Dropout layer only performs dropout during training, but we
sometimes need to do it during prediction. The second input to this
layer should be a scalar equal to 0 or 1, indicating whether to
perform dropout.
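Examples
--------
A minimal, purely illustrative sketch: the second input switches dropout off
(0) or on (1).

>>> import tensorflow as tf
>>> layer = SwitchedDropout(rate=0.5)
>>> x = tf.ones((4, 3))
>>> switch = tf.constant(0.0)  # 0 disables dropout
>>> out = layer([x, switch])
>>> bool(tf.reduce_all(out == x))
True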
"""
def __init__(self, rate, **kwargs):
self.rate = rate
super(SwitchedDropout, self).__init__(**kwargs)
def get_config(self):
config = super(SwitchedDropout, self).get_config()
config['rate'] = self.rate
return config
def call(self, inputs):
rate = self.rate * tf.squeeze(inputs[1])
return tf.nn.dropout(inputs[0], rate=rate)
class WeightedLinearCombo(tf.keras.layers.Layer):
"""Computes a weighted linear combination of input layers, with the weights defined by trainable variables."""
def __init__(self, std=0.3, **kwargs):
"""Initialize this layer.
Parameters
----------
std: float, optional (default 0.3)
The standard deviation to use when randomly initializing weights.
"""
super(WeightedLinearCombo, self).__init__(**kwargs)
self.std = std
def get_config(self):
config = super(WeightedLinearCombo, self).get_config()
config['std'] = self.std
return config
def build(self, input_shape):
init = tf.keras.initializers.RandomNormal(stddev=self.std)
self.input_weights = [
self.add_weight('weight_%d' % (i + 1), (1,),
initializer=init,
trainable=True) for i in range(len(input_shape))
]
self.built = True
def call(self, inputs):
out_tensor = None
for in_tensor, w in zip(inputs, self.input_weights):
if out_tensor is None:
out_tensor = w * in_tensor
else:
out_tensor += w * in_tensor
return out_tensor
class CombineMeanStd(tf.keras.layers.Layer):
"""Generate Gaussian nose."""
def __init__(self, training_only=False, noise_epsilon=1.0, **kwargs):
"""Create a CombineMeanStd layer.
This layer should have two inputs with the same shape, and its
output also has the same shape. Each element of the output is a
Gaussian distributed random number whose mean is the corresponding
element of the first input, and whose standard deviation is the
corresponding element of the second input.
Parameters
----------
training_only: bool
if True, noise is only generated during training. During
prediction, the output is simply equal to the first input (that
is, the mean of the distribution used during training).
noise_epsilon: float
The noise is scaled by this factor
"""
super(CombineMeanStd, self).__init__(**kwargs)
self.training_only = training_only
self.noise_epsilon = noise_epsilon
def get_config(self):
config = super(CombineMeanStd, self).get_config()
config['training_only'] = self.training_only
config['noise_epsilon'] = self.noise_epsilon
return config
def call(self, inputs, training=True):
if len(inputs) != 2:
raise ValueError("Must have two in_layers")
mean_parent, std_parent = inputs[0], inputs[1]
noise_scale = tf.cast(training or not self.training_only, tf.float32)
from tensorflow.python.ops import array_ops
sample_noise = tf.random.normal(array_ops.shape(mean_parent),
0,
self.noise_epsilon,
dtype=tf.float32)
return mean_parent + noise_scale * std_parent * sample_noise
class Stack(tf.keras.layers.Layer):
"""Stack the inputs along a new axis."""
def __init__(self, axis=1, **kwargs):
super(Stack, self).__init__(**kwargs)
self.axis = axis
def get_config(self):
config = super(Stack, self).get_config()
config['axis'] = self.axis
return config
def call(self, inputs):
return tf.stack(inputs, axis=self.axis)
class Variable(tf.keras.layers.Layer):
"""Output a trainable value.
Due to a quirk of Keras, you must pass an input value when invoking
this layer. It doesn't matter what value you pass. Keras assumes
every layer that is not an Input will have at least one parent, and
violating this assumption causes errors during evaluation.
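Examples
--------
A minimal, purely illustrative sketch: the layer ignores its input and
returns the trainable value.

>>> import tensorflow as tf
>>> layer = Variable([1.0, 2.0])
>>> value = layer(tf.constant(0.0))  # dummy input, ignored
>>> value.shape
TensorShape([2])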
"""
def __init__(self, initial_value, **kwargs):
"""Construct a variable layer.
Parameters
----------
initial_value: array or Tensor
the initial value the layer should output
"""
super(Variable, self).__init__(**kwargs)
self.initial_value = initial_value
def get_config(self):
config = super(Variable, self).get_config()
config['initial_value'] = self.initial_value
return config
def build(self, input_shape):
self.var = tf.Variable(self.initial_value, dtype=self.dtype)
self.built = True
def call(self, inputs):
return self.var
class VinaFreeEnergy(tf.keras.layers.Layer):
"""Computes free-energy as defined by Autodock Vina.
TODO(rbharath): Make this layer support batching.
"""
def __init__(self,
N_atoms,
M_nbrs,
ndim,
nbr_cutoff,
start,
stop,
stddev=.3,
Nrot=1,
**kwargs):
super(VinaFreeEnergy, self).__init__(**kwargs)
self.stddev = stddev
# Number of rotatable bonds
# TODO(rbharath): Vina actually sets this per-molecule. See if it makes
# a difference.
self.Nrot = Nrot
self.N_atoms = N_atoms
self.M_nbrs = M_nbrs
self.ndim = ndim
self.nbr_cutoff = nbr_cutoff
self.start = start
self.stop = stop
def get_config(self):
config = super(VinaFreeEnergy, self).get_config()
config['N_atoms'] = self.N_atoms
config['M_nbrs'] = self.M_nbrs
config['ndim'] = self.ndim
config['nbr_cutoff'] = self.nbr_cutoff
config['start'] = self.start
config['stop'] = self.stop
config['stddev'] = self.stddev
config['Nrot'] = self.Nrot
return config
def build(self, input_shape):
self.weighted_combo = WeightedLinearCombo()
self.w = tf.Variable(tf.random.normal((1,), stddev=self.stddev))
self.built = True
def cutoff(self, d, x):
out_tensor = tf.where(d < 8, x, tf.zeros_like(x))
return out_tensor
def nonlinearity(self, c, w):
"""Computes non-linearity used in Vina."""
out_tensor = c / (1 + w * self.Nrot)
return w, out_tensor
def repulsion(self, d):
"""Computes Autodock Vina's repulsion interaction term."""
out_tensor = tf.where(d < 0, d**2, tf.zeros_like(d))
return out_tensor
def hydrophobic(self, d):
"""Computes Autodock Vina's hydrophobic interaction term."""
out_tensor = tf.where(d < 0.5, tf.ones_like(d),
tf.where(d < 1.5, 1.5 - d, tf.zeros_like(d)))
return out_tensor
def hydrogen_bond(self, d):
"""Computes Autodock Vina's hydrogen bond interaction term."""
out_tensor = tf.where(
d < -0.7, tf.ones_like(d),
tf.where(d < 0, (1.0 / 0.7) * (0 - d), tf.zeros_like(d)))
return out_tensor
def gaussian_first(self, d):
"""Computes Autodock Vina's first Gaussian interaction term."""
out_tensor = tf.exp(-(d / 0.5)**2)
return out_tensor
def gaussian_second(self, d):
"""Computes Autodock Vina's second Gaussian interaction term."""
out_tensor = tf.exp(-((d - 3) / 2)**2)
return out_tensor
def call(self, inputs):
"""
Parameters
----------
X: tf.Tensor of shape (N, d)
Coordinates/features.
Z: tf.Tensor of shape (N)
Atomic numbers of neighbor atoms.
Returns
-------
layer: tf.Tensor of shape (B)
The free energy of each complex in batch
"""
X = inputs[0]
# TODO(rbharath): This layer shouldn't be neighbor-listing. Make
# neighbors lists an argument instead of a part of this layer.
nbr_list = NeighborList(self.N_atoms, self.M_nbrs, self.ndim,
self.nbr_cutoff, self.start, self.stop)(X)
# Shape (N, M)
dists = InteratomicL2Distances(self.N_atoms, self.M_nbrs,
self.ndim)([X, nbr_list])
repulsion = self.repulsion(dists)
hydrophobic = self.hydrophobic(dists)
hbond = self.hydrogen_bond(dists)
gauss_1 = self.gaussian_first(dists)
gauss_2 = self.gaussian_second(dists)
# Shape (N, M)
interactions = self.weighted_combo(
[repulsion, hydrophobic, hbond, gauss_1, gauss_2])
# Shape (N, M)
thresholded = self.cutoff(dists, interactions)
weight, free_energies = self.nonlinearity(thresholded, self.w)
return tf.reduce_sum(free_energies)
class NeighborList(tf.keras.layers.Layer):
"""Computes a neighbor-list in Tensorflow.
Neighbor-lists (also called Verlet Lists) are a tool for grouping
atoms which are close to each other spatially. This layer computes a
Neighbor List from a provided tensor of atomic coordinates. You can
think of this as a general "k-means" layer, but optimized for the
case `k==3`.
TODO(rbharath): Make this layer support batching.
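Examples
--------
A minimal, purely illustrative sketch: 5 atoms in a 3D box spanning
[0, 12) Angstroms, gridded into 27 cells, with 2 neighbors kept per atom.

>>> import tensorflow as tf
>>> N_atoms, M_nbrs, ndim = 5, 2, 3
>>> coords = tf.random.uniform((N_atoms, ndim), minval=0.0, maxval=12.0)
>>> layer = NeighborList(N_atoms, M_nbrs, ndim, nbr_cutoff=4.0, start=0.0, stop=12.0)
>>> nbr_list = layer(coords)
>>> nbr_list.shape
TensorShape([5, 2])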
"""
def __init__(self, N_atoms, M_nbrs, ndim, nbr_cutoff, start, stop,
**kwargs):
"""
Parameters
----------
N_atoms: int
Maximum number of atoms this layer will neighbor-list.
M_nbrs: int
Maximum number of spatial neighbors possible for atom.
ndim: int
Dimensionality of space atoms live in. (Typically 3D, but sometimes will
want to use higher dimensional descriptors for atoms).
nbr_cutoff: float
Length in Angstroms (?) at which atom boxes are gridded.
"""
super(NeighborList, self).__init__(**kwargs)
self.N_atoms = N_atoms
self.M_nbrs = M_nbrs
self.ndim = ndim
# Number of grid cells
n_cells = int(((stop - start) / nbr_cutoff)**ndim)
self.n_cells = n_cells
self.nbr_cutoff = nbr_cutoff
self.start = start
self.stop = stop
def get_config(self):
config = super(NeighborList, self).get_config()
config['N_atoms'] = self.N_atoms
config['M_nbrs'] = self.M_nbrs
config['ndim'] = self.ndim
config['nbr_cutoff'] = self.nbr_cutoff
config['start'] = self.start
config['stop'] = self.stop
return config
def call(self, inputs):
if isinstance(inputs, SequenceCollection):
if len(inputs) != 1:
raise ValueError("NeighborList can only have one input")
inputs = inputs[0]
if len(inputs.get_shape()) != 2:
# TODO(rbharath): Support batching
raise ValueError("Parent tensor must be (num_atoms, ndum)")
return self.compute_nbr_list(inputs)
def compute_nbr_list(self, coords):
"""Get closest neighbors for atoms.
Needs to handle padding for atoms with no neighbors.
Parameters
----------
coords: tf.Tensor
Shape (N_atoms, ndim)
Returns
-------
nbr_list: tf.Tensor
Shape (N_atoms, M_nbrs) of atom indices
"""
# Shape (n_cells, ndim)
cells = self.get_cells()
# List of length N_atoms, each element of different length uniques_i
nbrs = self.get_atoms_in_nbrs(coords, cells)
padding = tf.fill((self.M_nbrs,), -1)
padded_nbrs = [
tf.concat([unique_nbrs, padding], 0) for unique_nbrs in nbrs
]
# List of length N_atoms, each element of different length uniques_i
# List of length N_atoms, each a tensor of shape
# (uniques_i, ndim)
nbr_coords = [tf.gather(coords, atom_nbrs) for atom_nbrs in nbrs]
# Add phantom atoms that exist far outside the box
coord_padding = tf.cast(
tf.fill((self.M_nbrs, self.ndim), 2 * self.stop), tf.float32)
padded_nbr_coords = [
tf.concat([nbr_coord, coord_padding], 0) for nbr_coord in nbr_coords
]
# List of length N_atoms, each of shape (1, ndim)
atom_coords = tf.split(coords, self.N_atoms)
# TODO(rbharath): How does distance need to be modified here to
# account for periodic boundary conditions?
# List of length N_atoms each of shape (M_nbrs)
padded_dists = [
tf.reduce_sum((atom_coord - padded_nbr_coord)**2, axis=1)
for (atom_coord,
padded_nbr_coord) in zip(atom_coords, padded_nbr_coords)
]
padded_closest_nbrs = [
tf.nn.top_k(-padded_dist, k=self.M_nbrs)[1]
for padded_dist in padded_dists
]
# N_atoms elts of size (M_nbrs,) each
padded_neighbor_list = [
tf.gather(padded_atom_nbrs, padded_closest_nbr)
for (padded_atom_nbrs,
padded_closest_nbr) in zip(padded_nbrs, padded_closest_nbrs)
]
neighbor_list = tf.stack(padded_neighbor_list)
return neighbor_list
def get_atoms_in_nbrs(self, coords, cells):
"""Get the atoms in neighboring cells for each cells.
Returns
-------
atoms_in_nbrs = (N_atoms, n_nbr_cells, M_nbrs)
"""
# Shape (N_atoms, 1)
cells_for_atoms = self.get_cells_for_atoms(coords, cells)
# Find M_nbrs atoms closest to each cell
# Shape (n_cells, M_nbrs)
closest_atoms = self.get_closest_atoms(coords, cells)
# Associate each cell with its neighbor cells. Assumes periodic boundary
# conditions, so does wraparound. O(constant)
# Shape (n_cells, n_nbr_cells)
neighbor_cells = self.get_neighbor_cells(cells)
# Shape (N_atoms, n_nbr_cells)
neighbor_cells = tf.squeeze(tf.gather(neighbor_cells, cells_for_atoms))
# Shape (N_atoms, n_nbr_cells, M_nbrs)
atoms_in_nbrs = tf.gather(closest_atoms, neighbor_cells)
# Shape (N_atoms, n_nbr_cells*M_nbrs)
atoms_in_nbrs = tf.reshape(atoms_in_nbrs, [self.N_atoms, -1])
# List of length N_atoms, each element length uniques_i
nbrs_per_atom = tf.split(atoms_in_nbrs, self.N_atoms)
uniques = [
tf.unique(tf.squeeze(atom_nbrs))[0] for atom_nbrs in nbrs_per_atom
]
# TODO(rbharath): FRAGILE! Uses fact that identity seems to be the first
# element removed to remove self from list of neighbors. Need to verify
# this holds more broadly or come up with robust alternative.
uniques = [unique[1:] for unique in uniques]
return uniques
def get_closest_atoms(self, coords, cells):
"""For each cell, find M_nbrs closest atoms.
Let N_atoms be the number of atoms.
Parameters
----------
coords: tf.Tensor
(N_atoms, ndim) shape.
cells: tf.Tensor
(n_cells, ndim) shape.
Returns
-------
closest_inds: tf.Tensor
Of shape (n_cells, M_nbrs)
"""
N_atoms, n_cells, ndim, M_nbrs = (self.N_atoms, self.n_cells, self.ndim,
self.M_nbrs)
# Tile both cells and coords to form arrays of size (N_atoms*n_cells, ndim)
tiled_cells = tf.reshape(tf.tile(cells, (1, N_atoms)),
(N_atoms * n_cells, ndim))
# Shape (N_atoms*n_cells, ndim) after tile
tiled_coords = tf.tile(coords, (n_cells, 1))
# Shape (N_atoms*n_cells)
coords_vec = tf.reduce_sum((tiled_coords - tiled_cells)**2, axis=1)
# Shape (n_cells, N_atoms)
coords_norm = tf.reshape(coords_vec, (n_cells, N_atoms))
# Find k atoms closest to this cell. Notice negative sign since
# tf.nn.top_k returns *largest* not smallest.
# Tensor of shape (n_cells, M_nbrs)
closest_inds = tf.nn.top_k(-coords_norm, k=M_nbrs)[1]
return closest_inds
def get_cells_for_atoms(self, coords, cells):
"""Compute the cells each atom belongs to.
Parameters
----------
coords: tf.Tensor
Shape (N_atoms, ndim)
cells: tf.Tensor
(n_cells, ndim) shape.
Returns
-------
cells_for_atoms: tf.Tensor
Shape (N_atoms, 1)
"""
N_atoms, n_cells, ndim = self.N_atoms, self.n_cells, self.ndim
n_cells = int(n_cells)
# Tile both cells and coords to form arrays of size (N_atoms*n_cells, ndim)
tiled_cells = tf.tile(cells, (N_atoms, 1))
# Shape (N_atoms*n_cells, 1) after tile
tiled_coords = tf.reshape(tf.tile(coords, (1, n_cells)),
(n_cells * N_atoms, ndim))
coords_vec = tf.reduce_sum((tiled_coords - tiled_cells)**2, axis=1)
coords_norm = tf.reshape(coords_vec, (N_atoms, n_cells))
closest_inds = tf.nn.top_k(-coords_norm, k=1)[1]
return closest_inds
def _get_num_nbrs(self):
"""Get number of neighbors in current dimensionality space."""
ndim = self.ndim
if ndim == 1:
n_nbr_cells = 3
elif ndim == 2:
# 9 neighbors in 2-space
n_nbr_cells = 9
# TODO(rbharath): Shoddy handling of higher dimensions...
elif ndim >= 3:
# Number of neighbor cells for a cube in 3-space is 27:
n_nbr_cells = 27  # the cell itself plus its 26 surrounding cells (like the 26 exterior cubies of a Rubik's cube)
return n_nbr_cells
def get_neighbor_cells(self, cells):
"""Compute neighbors of cells in grid.
# TODO(rbharath): Do we need to handle periodic boundary conditions
properly here?
# TODO(rbharath): This doesn't handle boundaries well. We hard-code
# looking for n_nbr_cells neighbors, which isn't right for boundary cells in
# the cube.
Parameters
----------
cells: tf.Tensor
(n_cells, ndim) shape.
Returns
-------
nbr_cells: tf.Tensor
(n_cells, n_nbr_cells)
"""
ndim, n_cells = self.ndim, self.n_cells
n_nbr_cells = self._get_num_nbrs()
# Tile cells to form arrays of size (n_cells*n_cells, ndim)
# Two tilings (a, b, c, a, b, c, ...) vs. (a, a, a, b, b, b, etc.)
# Tile (a, a, a, b, b, b, etc.)
tiled_centers = tf.reshape(tf.tile(cells, (1, n_cells)),
(n_cells * n_cells, ndim))
# Tile (a, b, c, a, b, c, ...)
tiled_cells = tf.tile(cells, (n_cells, 1))
coords_vec = tf.reduce_sum((tiled_centers - tiled_cells)**2, axis=1)
coords_norm = tf.reshape(coords_vec, (n_cells, n_cells))
closest_inds = tf.nn.top_k(-coords_norm, k=n_nbr_cells)[1]
return closest_inds
def get_cells(self):
"""Returns the locations of all grid points in box.
Suppose start is -10 Angstrom, stop is 10 Angstrom, nbr_cutoff is 1.
Then would return a list of length 20^3 whose entries would be
[(-10, -10, -10), (-10, -10, -9), ..., (9, 9, 9)]
Returns
-------
cells: tf.Tensor
(n_cells, ndim) shape.
"""
start, stop, nbr_cutoff = self.start, self.stop, self.nbr_cutoff
mesh_args = [
tf.range(start, stop, nbr_cutoff) for _ in range(self.ndim)
]
return tf.cast(
tf.reshape(tf.transpose(tf.stack(tf.meshgrid(*mesh_args))),
(self.n_cells, self.ndim)), tf.float32)
class AtomicConvolution(tf.keras.layers.Layer):
"""Implements the atomic convolutional transform introduced in
<NAME>, et al. "Atomic convolutional networks for predicting
protein-ligand binding affinity." arXiv preprint arXiv:1703.10603
(2017).
At a high level, this transform performs a graph convolution
on the nearest neighbors graph in 3D space.
"""
def __init__(self,
atom_types=None,
radial_params=list(),
boxsize=None,
**kwargs):
"""Atomic convolution layer
N = max_num_atoms, M = max_num_neighbors, B = batch_size, d = num_features
l = num_radial_filters * num_atom_types
Parameters
----------
atom_types: list or None
Of length a, where a is number of atom types for filtering.
radial_params: list
Of length l, where l is number of radial filters learned.
boxsize: float or None
Simulation box length [Angstrom].
"""
super(AtomicConvolution, self).__init__(**kwargs)
self.boxsize = boxsize
self.radial_params = radial_params
self.atom_types = atom_types
def get_config(self):
config = super(AtomicConvolution, self).get_config()
config['atom_types'] = self.atom_types
config['radial_params'] = self.radial_params
config['boxsize'] = self.boxsize
return config
def build(self, input_shape):
vars = []
for i in range(3):
val = np.array([p[i] for p in self.radial_params]).reshape(
(-1, 1, 1, 1))
vars.append(tf.Variable(val, dtype=tf.float32))
self.rc = vars[0]
self.rs = vars[1]
self.re = vars[2]
self.built = True
def call(self, inputs):
"""
Parameters
----------
X: tf.Tensor of shape (B, N, d)
Coordinates/features.
Nbrs: tf.Tensor of shape (B, N, M)
Neighbor list.
Nbrs_Z: tf.Tensor of shape (B, N, M)
Atomic numbers of neighbor atoms.
Returns
-------
layer: tf.Tensor of shape (B, N, l)
A new tensor representing the output of the atomic conv layer
"""
X = inputs[0]
Nbrs = tf.cast(inputs[1], tf.int32)
Nbrs_Z = inputs[2]
# N: Maximum number of atoms
# M: Maximum number of neighbors
# d: Number of coordinates/features/filters
# B: Batch Size
N = X.get_shape()[-2]
d = X.get_shape()[-1]
M = Nbrs.get_shape()[-1]
B = X.get_shape()[0]
# Compute the distances and radial symmetry functions.
D = self.distance_tensor(X, Nbrs, self.boxsize, B, N, M, d)
R = self.distance_matrix(D)
R = tf.expand_dims(R, 0)
rsf = self.radial_symmetry_function(R, self.rc, self.rs, self.re)
if not self.atom_types:
cond = tf.cast(tf.not_equal(Nbrs_Z, 0), tf.float32)
cond = tf.reshape(cond, (1, -1, N, M))
layer = tf.reduce_sum(cond * rsf, 3)
else:
sym = []
for j in range(len(self.atom_types)):
cond = tf.cast(tf.equal(Nbrs_Z, self.atom_types[j]), tf.float32)
cond = tf.reshape(cond, (1, -1, N, M))
sym.append(tf.reduce_sum(cond * rsf, 3))
layer = tf.concat(sym, 0)
layer = tf.transpose(layer, [1, 2, 0]) # (l, B, N) -> (B, N, l)
m, v = tf.nn.moments(layer, axes=[0])
return tf.nn.batch_normalization(layer, m, v, None, None, 1e-3)
def radial_symmetry_function(self, R, rc, rs, e):
"""Calculates radial symmetry function.
B = batch_size, N = max_num_atoms, M = max_num_neighbors, d = num_filters
Parameters
----------
R: tf.Tensor of shape (B, N, M)
Distance matrix.
rc: float
Interaction cutoff [Angstrom].
rs: float
Gaussian distance matrix mean.
e: float
Gaussian distance matrix width.
Returns
-------
retval: tf.Tensor of shape (B, N, M)
Radial symmetry function (before summation)
"""
K = self.gaussian_distance_matrix(R, rs, e)
FC = self.radial_cutoff(R, rc)
return tf.multiply(K, FC)
def radial_cutoff(self, R, rc):
"""Calculates radial cutoff matrix.
B = batch_size, N = max_num_atoms, M = max_num_neighbors
Parameters
----------
R [B, N, M]: tf.Tensor
Distance matrix.
rc: tf.Variable
Interaction cutoff [Angstrom].
Returns
-------
FC [B, N, M]: tf.Tensor
Radial cutoff matrix.
"""
T = 0.5 * (tf.cos(np.pi * R / (rc)) + 1)
E = tf.zeros_like(T)
cond = tf.less_equal(R, rc)
FC = tf.where(cond, T, E)
return FC
def gaussian_distance_matrix(self, R, rs, e):
"""Calculates gaussian distance matrix.
B = batch_size, N = max_num_atoms, M = max_num_neighbors
Parameters
----------
R [B, N, M]: tf.Tensor
Distance matrix.
rs: tf.Variable
Gaussian distance matrix mean.
e: tf.Variable
Gaussian distance matrix width (e = .5/std**2).
Returns
-------
retval [B, N, M]: tf.Tensor
Gaussian distance matrix.
"""
return tf.exp(-e * (R - rs)**2)
def distance_tensor(self, X, Nbrs, boxsize, B, N, M, d):
"""Calculates distance tensor for batch of molecules.
B = batch_size, N = max_num_atoms, M = max_num_neighbors, d = num_features
Parameters
----------
X: tf.Tensor of shape (B, N, d)
Coordinates/features tensor.
Nbrs: tf.Tensor of shape (B, N, M)
Neighbor list tensor.
boxsize: float or None
Simulation box length [Angstrom].
Returns
-------
D: tf.Tensor of shape (B, N, M, d)
Coordinates/features distance tensor.
"""
flat_neighbors = tf.reshape(Nbrs, [-1, N * M])
neighbor_coords = tf.gather(X, flat_neighbors, batch_dims=-1, axis=1)
neighbor_coords = tf.reshape(neighbor_coords, [-1, N, M, d])
D = neighbor_coords - tf.expand_dims(X, 2)
if boxsize is not None:
boxsize = tf.reshape(boxsize, [1, 1, 1, d])
D -= tf.round(D / boxsize) * boxsize
return D
def distance_matrix(self, D):
"""Calcuates the distance matrix from the distance tensor
B = batch_size, N = max_num_atoms, M = max_num_neighbors, d = num_features
Parameters
----------
D: tf.Tensor of shape (B, N, M, d)
Distance tensor.
Returns
-------
R: tf.Tensor of shape (B, N, M)
Distance matrix.
"""
R = tf.reduce_sum(tf.multiply(D, D), 3)
R = tf.sqrt(R)
return R
class AlphaShareLayer(tf.keras.layers.Layer):
"""
Part of a sluice network. Adds alpha parameters to control
sharing between the main and auxiliary tasks
Factory method AlphaShare should be used for construction
Parameters
----------
in_layers: list of Layers or tensors
tensors in list must be the same size and list must include two or more tensors
Returns
-------
out_tensor: a tensor with shape [len(in_layers), x, y] where x, y are the original layer dimensions
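Examples
--------
A minimal usage sketch with two small random inputs; the shapes are
arbitrary and chosen only for illustration (any pair of equally sized
2D tensors with an even number of columns behaves the same way):
>>> import numpy as np
>>> x1 = np.random.rand(3, 4).astype(np.float32)
>>> x2 = np.random.rand(3, 4).astype(np.float32)
>>> out1, out2 = AlphaShareLayer()([x1, x2])
>>> np.array(out1).shape
(3, 4)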
"""
def __init__(self, **kwargs):
super(AlphaShareLayer, self).__init__(**kwargs)
def get_config(self):
config = super(AlphaShareLayer, self).get_config()
return config
def build(self, input_shape):
n_alphas = 2 * len(input_shape)
self.alphas = tf.Variable(tf.random.normal([n_alphas, n_alphas]),
name='alphas')
self.built = True
def call(self, inputs):
# check that there isn't just one or zero inputs
if len(inputs) <= 1:
raise ValueError("AlphaShare must have more than one input")
self.num_outputs = len(inputs)
# create subspaces
subspaces = []
original_cols = int(inputs[0].get_shape()[-1])
subspace_size = int(original_cols / 2)
for input_tensor in inputs:
subspaces.append(tf.reshape(input_tensor[:, :subspace_size], [-1]))
subspaces.append(tf.reshape(input_tensor[:, subspace_size:], [-1]))
n_alphas = len(subspaces)
subspaces = tf.reshape(tf.stack(subspaces), [n_alphas, -1])
subspaces = tf.matmul(self.alphas, subspaces)
# concatenate subspaces, reshape to size of original input, then stack
# such that out_tensor has shape (2,?,original_cols)
count = 0
out_tensors = []
tmp_tensor = []
for row in range(n_alphas):
tmp_tensor.append(tf.reshape(subspaces[row,], [-1, subspace_size]))
count += 1
if (count == 2):
out_tensors.append(tf.concat(tmp_tensor, 1))
tmp_tensor = []
count = 0
return out_tensors
class SluiceLoss(tf.keras.layers.Layer):
"""
Calculates the loss in a Sluice Network
Every input into an AlphaShare should be used in SluiceLoss
"""
def __init__(self, **kwargs):
super(SluiceLoss, self).__init__(**kwargs)
def get_config(self):
config = super(SluiceLoss, self).get_config()
return config
def call(self, inputs):
temp = []
subspaces = []
# creates subspaces the same way it was done in AlphaShare
for input_tensor in inputs:
subspace_size = int(input_tensor.get_shape()[-1] / 2)
subspaces.append(input_tensor[:, :subspace_size])
subspaces.append(input_tensor[:, subspace_size:])
product = tf.matmul(tf.transpose(subspaces[0]), subspaces[1])
subspaces = []
# calculate squared Frobenius norm
temp.append(tf.reduce_sum(tf.pow(product, 2)))
return tf.reduce_sum(temp)
class BetaShare(tf.keras.layers.Layer):
"""
Part of a sluice network. Adds beta params to control which layer
outputs are used for prediction
Parameters
----------
in_layers: list of Layers or tensors
tensors in list must be the same size and list must include two or
more tensors
Returns
-------
output_layers: list of Layers or tensors with same size as in_layers
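Examples
--------
A minimal usage sketch with two equally sized random inputs; the shapes
are arbitrary and chosen only for illustration:
>>> import numpy as np
>>> x1 = np.random.rand(3, 4).astype(np.float32)
>>> x2 = np.random.rand(3, 4).astype(np.float32)
>>> out = BetaShare()([x1, x2])
>>> np.array(out).shape
(3, 4)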
"""
def __init__(self, **kwargs):
super(BetaShare, self).__init__(**kwargs)
def get_config(self):
config = super(BetaShare, self).get_config()
return config
def build(self, input_shape):
n_betas = len(input_shape)
self.betas = tf.Variable(tf.random.normal([1, n_betas]), name='betas')
self.built = True
def call(self, inputs):
"""
Size of input layers must all be the same
"""
subspaces = []
original_cols = int(inputs[0].get_shape()[-1])
for input_tensor in inputs:
subspaces.append(tf.reshape(input_tensor, [-1]))
n_betas = len(inputs)
subspaces = tf.reshape(tf.stack(subspaces), [n_betas, -1])
out_tensor = tf.matmul(self.betas, subspaces)
return tf.reshape(out_tensor, [-1, original_cols])
class ANIFeat(tf.keras.layers.Layer):
"""Performs transform from 3D coordinates to ANI symmetry functions"""
def __init__(self,
max_atoms=23,
radial_cutoff=4.6,
angular_cutoff=3.1,
radial_length=32,
angular_length=8,
atom_cases=[1, 6, 7, 8, 16],
atomic_number_differentiated=True,
coordinates_in_bohr=True,
**kwargs):
"""
Only X can be transformed
"""
super(ANIFeat, self).__init__(**kwargs)
self.max_atoms = max_atoms
self.radial_cutoff = radial_cutoff
self.angular_cutoff = angular_cutoff
self.radial_length = radial_length
self.angular_length = angular_length
self.atom_cases = atom_cases
self.atomic_number_differentiated = atomic_number_differentiated
self.coordinates_in_bohr = coordinates_in_bohr
def get_config(self):
config = super(ANIFeat, self).get_config()
config['max_atoms'] = self.max_atoms
config['radial_cutoff'] = self.radial_cutoff
config['angular_cutoff'] = self.angular_cutoff
config['radial_length'] = self.radial_length
config['angular_length'] = self.angular_length
config['atom_cases'] = self.atom_cases
config[
'atomic_number_differentiated'] = self.atomic_number_differentiated
config['coordinates_in_bohr'] = self.coordinates_in_bohr
return config
def call(self, inputs):
"""In layers should be of shape dtype tf.float32, (None, self.max_atoms, 4)"""
atom_numbers = tf.cast(inputs[:, :, 0], tf.int32)
flags = tf.sign(atom_numbers)
flags = tf.cast(
tf.expand_dims(flags, 1) * tf.expand_dims(flags, 2), tf.float32)
coordinates = inputs[:, :, 1:]
if self.coordinates_in_bohr:
coordinates = coordinates * 0.52917721092
d = self.distance_matrix(coordinates, flags)
d_radial_cutoff = self.distance_cutoff(d, self.radial_cutoff, flags)
d_angular_cutoff = self.distance_cutoff(d, self.angular_cutoff, flags)
radial_sym = self.radial_symmetry(d_radial_cutoff, d, atom_numbers)
angular_sym = self.angular_symmetry(d_angular_cutoff, d, atom_numbers,
coordinates)
return tf.concat([
tf.cast(tf.expand_dims(atom_numbers, 2), tf.float32), radial_sym,
angular_sym
],
axis=2)
def distance_matrix(self, coordinates, flags):
""" Generate distance matrix """
# (TODO YTZ:) faster, less memory intensive way
# r = tf.reduce_sum(tf.square(coordinates), 2)
# r = tf.expand_dims(r, -1)
# inner = 2*tf.matmul(coordinates, tf.transpose(coordinates, perm=[0,2,1]))
# # inner = 2*tf.matmul(coordinates, coordinates, transpose_b=True)
# d = r - inner + tf.transpose(r, perm=[0,2,1])
# d = tf.nn.relu(d) # fix numerical instabilities about diagonal
# d = tf.sqrt(d) # does this have negative elements? may be unstable for diagonals
max_atoms = self.max_atoms
tensor1 = tf.stack([coordinates] * max_atoms, axis=1)
tensor2 = tf.stack([coordinates] * max_atoms, axis=2)
# Calculate pairwise distance
d = tf.sqrt(
tf.reduce_sum(tf.math.squared_difference(tensor1, tensor2), axis=3)
+ 1e-7)
d = d * flags
return d
def distance_cutoff(self, d, cutoff, flags):
""" Generate distance matrix with trainable cutoff """
# Cutoff with threshold Rc
d_flag = flags * tf.sign(cutoff - d)
d_flag = tf.nn.relu(d_flag)
d_flag = d_flag * tf.expand_dims((1 - tf.eye(self.max_atoms)), 0)
d = 0.5 * (tf.cos(np.pi * d / cutoff) + 1)
return d * d_flag
# return d
def radial_symmetry(self, d_cutoff, d, atom_numbers):
""" Radial Symmetry Function """
embedding = tf.eye(np.max(self.atom_cases) + 1)
atom_numbers_embedded = tf.nn.embedding_lookup(embedding, atom_numbers)
Rs = np.linspace(0., self.radial_cutoff, self.radial_length)
ita = np.ones_like(Rs) * 3 / (Rs[1] - Rs[0])**2
Rs = tf.cast(np.reshape(Rs, (1, 1, 1, -1)), tf.float32)
ita = tf.cast(np.reshape(ita, (1, 1, 1, -1)), tf.float32)
length = ita.get_shape().as_list()[-1]
d_cutoff = tf.stack([d_cutoff] * length, axis=3)
d = tf.stack([d] * length, axis=3)
out = tf.exp(-ita * tf.square(d - Rs)) * d_cutoff
if self.atomic_number_differentiated:
out_tensors = []
for atom_type in self.atom_cases:
selected_atoms = tf.expand_dims(tf.expand_dims(
atom_numbers_embedded[:, :, atom_type], axis=1),
axis=3)
out_tensors.append(tf.reduce_sum(out * selected_atoms, axis=2))
return tf.concat(out_tensors, axis=2)
else:
return tf.reduce_sum(out, axis=2)
def angular_symmetry(self, d_cutoff, d, atom_numbers, coordinates):
""" Angular Symmetry Function """
max_atoms = self.max_atoms
embedding = tf.eye(np.max(self.atom_cases) + 1)
atom_numbers_embedded = tf.nn.embedding_lookup(embedding, atom_numbers)
Rs = np.linspace(0., self.angular_cutoff, self.angular_length)
ita = 3 / (Rs[1] - Rs[0])**2
thetas = np.linspace(0., np.pi, self.angular_length)
zeta = float(self.angular_length**2)
ita, zeta, Rs, thetas = np.meshgrid(ita, zeta, Rs, thetas)
zeta = tf.cast(np.reshape(zeta, (1, 1, 1, 1, -1)), tf.float32)
ita = tf.cast(np.reshape(ita, (1, 1, 1, 1, -1)), tf.float32)
Rs = tf.cast(np.reshape(Rs, (1, 1, 1, 1, -1)), tf.float32)
thetas = tf.cast(np.reshape(thetas, (1, 1, 1, 1, -1)), tf.float32)
length = zeta.get_shape().as_list()[-1]
# tf.stack issues again...
vector_distances = tf.stack([coordinates] * max_atoms, 1) - tf.stack(
[coordinates] * max_atoms, 2)
R_ij = tf.stack([d] * max_atoms, axis=3)
R_ik = tf.stack([d] * max_atoms, axis=2)
f_R_ij = tf.stack([d_cutoff] * max_atoms, axis=3)
f_R_ik = tf.stack([d_cutoff] * max_atoms, axis=2)
# Define angle theta = arccos(R_ij(Vector) dot R_ik(Vector)/R_ij(distance)/R_ik(distance))
vector_mul = tf.reduce_sum(tf.stack([vector_distances] * max_atoms, axis=3) * \
tf.stack([vector_distances] * max_atoms, axis=2), axis=4)
vector_mul = vector_mul * tf.sign(f_R_ij) * tf.sign(f_R_ik)
theta = tf.acos(tf.math.divide(vector_mul, R_ij * R_ik + 1e-5))
R_ij = tf.stack([R_ij] * length, axis=4)
R_ik = tf.stack([R_ik] * length, axis=4)
f_R_ij = tf.stack([f_R_ij] * length, axis=4)
f_R_ik = tf.stack([f_R_ik] * length, axis=4)
theta = tf.stack([theta] * length, axis=4)
out_tensor = tf.pow((1. + tf.cos(theta - thetas)) / 2., zeta) * \
tf.exp(-ita * tf.square((R_ij + R_ik) / 2. - Rs)) * f_R_ij * f_R_ik * 2
if self.atomic_number_differentiated:
out_tensors = []
for id_j, atom_type_j in enumerate(self.atom_cases):
for atom_type_k in self.atom_cases[id_j:]:
selected_atoms = tf.stack([atom_numbers_embedded[:, :, atom_type_j]] * max_atoms, axis=2) * \
tf.stack([atom_numbers_embedded[:, :, atom_type_k]] * max_atoms, axis=1)
selected_atoms = tf.expand_dims(tf.expand_dims(
selected_atoms, axis=1),
axis=4)
out_tensors.append(
tf.reduce_sum(out_tensor * selected_atoms, axis=(2, 3)))
return tf.concat(out_tensors, axis=2)
else:
return tf.reduce_sum(out_tensor, axis=(2, 3))
def get_num_feats(self):
n_feat = self.outputs.get_shape().as_list()[-1]
return n_feat
class GraphEmbedPoolLayer(tf.keras.layers.Layer):
r"""
GraphCNNPool Layer from Robust Spatial Filtering with Graph Convolutional Neural Networks
https://arxiv.org/abs/1703.00792
This is a learnable pool operation. It constructs a new adjacency
matrix for a graph of specified number of nodes.
This differs from our other pool operations which set vertices to a
function value without altering the adjacency matrix.
.. math:: V_{emb} = SpatialGraphCNN({V_{in}})
.. math:: V_{out} = \sigma(V_{emb})^{T} * V_{in}
.. math:: A_{out} = V_{emb}^{T} * A_{in} * V_{emb}
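Examples
--------
A minimal usage sketch that pools a 4-vertex graph with one edge type
down to 2 vertices; all sizes are made up for illustration:
>>> import numpy as np
>>> V = np.random.rand(1, 4, 3).astype(np.float32)
>>> A = np.random.rand(1, 4, 1, 4).astype(np.float32)
>>> V_out, A_out = GraphEmbedPoolLayer(num_vertices=2)([V, A])
>>> np.array(V_out).shape
(1, 2, 3)
>>> np.array(A_out).shape
(1, 2, 1, 2)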
"""
def __init__(self, num_vertices, **kwargs):
self.num_vertices = num_vertices
super(GraphEmbedPoolLayer, self).__init__(**kwargs)
def get_config(self):
config = super(GraphEmbedPoolLayer, self).get_config()
config['num_vertices'] = self.num_vertices
return config
def build(self, input_shape):
no_features = int(input_shape[0][-1])
self.W = tf.Variable(
tf.random.truncated_normal([no_features, self.num_vertices],
stddev=1.0 / np.sqrt(no_features)),
name='weights',
dtype=tf.float32)
self.b = tf.Variable(tf.constant(0.1), name='bias', dtype=tf.float32)
self.built = True
def call(self, inputs):
"""
Parameters
----------
num_filters: int
Number of filters to have in the output
in_layers: list of Layers or tensors
[V, A, mask]
V are the vertex features must be of shape (batch, vertex, channel)
A are the adjacency matrixes for each graph
Shape (batch, from_vertex, adj_matrix, to_vertex)
mask is optional, to be used when not every graph has the
same number of vertices
Returns
-------
Returns a tuple of `tf.Tensor` objects `(V_out, A_out)`: pooled vertex
features of shape `(batch, self.num_vertices, channel)` and a new
adjacency tensor of shape `(batch, self.num_vertices, num_edge_types, self.num_vertices)`.
"""
if len(inputs) == 3:
V, A, mask = inputs
else:
V, A = inputs
mask = None
factors = self.embedding_factors(V)
if mask is not None:
factors = tf.multiply(factors, mask)
factors = self.softmax_factors(factors)
result = tf.matmul(factors, V, transpose_a=True)
result_A = tf.reshape(A, (tf.shape(A)[0], -1, tf.shape(A)[-1]))
result_A = tf.matmul(result_A, factors)
result_A = tf.reshape(result_A, (tf.shape(A)[0], tf.shape(A)[-1], -1))
result_A = tf.matmul(factors, result_A, transpose_a=True)
result_A = tf.reshape(result_A, (tf.shape(A)[0], self.num_vertices,
A.get_shape()[2], self.num_vertices))
return result, result_A
def embedding_factors(self, V):
no_features = V.get_shape()[-1]
V_reshape = tf.reshape(V, (-1, no_features))
s = tf.slice(tf.shape(V), [0], [len(V.get_shape()) - 1])
s = tf.concat([s, tf.stack([self.num_vertices])], 0)
result = tf.reshape(tf.matmul(V_reshape, self.W) + self.b, s)
return result
def softmax_factors(self, V, axis=1):
max_value = tf.reduce_max(V, axis=axis, keepdims=True)
exp = tf.exp(tf.subtract(V, max_value))
prob = tf.math.divide(exp, tf.reduce_sum(exp, axis=axis, keepdims=True))
return prob
class GraphCNN(tf.keras.layers.Layer):
r"""
GraphCNN Layer from Robust Spatial Filtering with Graph Convolutional Neural Networks
https://arxiv.org/abs/1703.00792
Spatial-domain convolutions can be defined as
H = h_0I + h_1A + h_2A^2 + ... + h_kA^k, H ∈ R**(N×N)
We approximate it by
H ≈ h_0I + h_1A
We can define a convolution as applying several of these linear filters
over edges of different types (think up, down, left, right, diagonal in images)
Where each edge type has its own adjacency matrix
H ≈ h_0I + h_1A_1 + h_2A_2 + . . . h_(L−1)A_(L−1)
V_out = \sum_{c=1}^{C} H^{c} V^{c} + b
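Examples
--------
A minimal usage sketch with random vertex features `V` of shape
(batch, vertex, channel) and a single edge type in the adjacency
tensor `A` of shape (batch, vertex, n_edge_types, vertex); all sizes
are made up for illustration:
>>> import numpy as np
>>> V = np.random.rand(1, 4, 3).astype(np.float32)
>>> A = np.random.rand(1, 4, 1, 4).astype(np.float32)
>>> out = GraphCNN(num_filters=6)([V, A])
>>> np.array(out).shape
(1, 4, 6)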
"""
def __init__(self, num_filters, **kwargs):
"""
Parameters
----------
num_filters: int
Number of filters to have in the output
in_layers: list of Layers or tensors
[V, A, mask]
V are the vertex features must be of shape (batch, vertex, channel)
A are the adjacency matrixes for each graph
Shape (batch, from_vertex, adj_matrix, to_vertex)
mask is optional, to be used when not every graph has the
same number of vertices
Returns: tf.tensor
Returns a tf.tensor with a graph convolution applied
The shape will be (batch, vertex, self.num_filters)
"""
super(GraphCNN, self).__init__(**kwargs)
self.num_filters = num_filters
def get_config(self):
config = super(GraphCNN, self).get_config()
config['num_filters'] = self.num_filters
return config
def build(self, input_shape):
no_features = int(input_shape[0][2])
no_A = int(input_shape[1][2])
self.W = tf.Variable(tf.random.truncated_normal(
[no_features * no_A, self.num_filters],
stddev=np.sqrt(1.0 / (no_features * (no_A + 1) * 1.0))),
name='weights',
dtype=tf.float32)
self.W_I = tf.Variable(tf.random.truncated_normal(
[no_features, self.num_filters],
stddev=np.sqrt(1.0 / (no_features * (no_A + 1) * 1.0))),
name='weights_I',
dtype=tf.float32)
self.b = tf.Variable(tf.constant(0.1), name='bias', dtype=tf.float32)
self.built = True
def call(self, inputs):
if len(inputs) == 3:
V, A, mask = inputs
else:
V, A = inputs
no_A = A.get_shape()[2]
no_features = V.get_shape()[2]
n = self.graphConvolution(V, A)
A_shape = tf.shape(A)
n = tf.reshape(n, [-1, A_shape[1], no_A * no_features])
return self.batch_mat_mult(n, self.W) + self.batch_mat_mult(
V, self.W_I) + self.b
def graphConvolution(self, V, A):
no_A = A.get_shape()[2]
no_features = V.get_shape()[2]
A_shape = tf.shape(A)
A_reshape = tf.reshape(A, tf.stack([-1, A_shape[1] * no_A, A_shape[1]]))
n = tf.matmul(A_reshape, V)
return tf.reshape(n, [-1, A_shape[1], no_A, no_features])
def batch_mat_mult(self, A, B):
A_shape = tf.shape(A)
A_reshape = tf.reshape(A, [-1, A_shape[-1]])
# So the Tensor has known dimensions
if B.get_shape()[1] is None:
axis_2 = -1
else:
axis_2 = B.get_shape()[1]
result = tf.matmul(A_reshape, B)
return tf.reshape(result, tf.stack([A_shape[0], A_shape[1], axis_2]))
class Highway(tf.keras.layers.Layer):
""" Create a highway layer. y = H(x) * T(x) + x * (1 - T(x))
H(x) = activation_fn(matmul(W_H, x) + b_H) is the non-linear transformed output
T(x) = sigmoid(matmul(W_T, x) + b_T) is the transform gate
Implementation based on paper
Srivastava, <NAME>, <NAME>, and <NAME>. "Highway networks." arXiv preprint arXiv:1505.00387 (2015).
This layer expects its input to be a two dimensional tensor
of shape (batch size, # input features). Outputs will be in
the same shape.
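Examples
--------
A minimal usage sketch with a random 2D input; the batch size and width
are arbitrary and chosen only for illustration:
>>> import numpy as np
>>> x = np.random.rand(10, 5).astype(np.float32)
>>> y = Highway()(x)
>>> np.array(y).shape
(10, 5)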
"""
def __init__(self,
activation_fn='relu',
biases_initializer='zeros',
weights_initializer=None,
**kwargs):
"""
Parameters
----------
activation_fn: object
the Tensorflow activation function to apply to the output
biases_initializer: callable object
the initializer for bias values. This may be None, in which case the layer
will not include biases.
weights_initializer: callable object
the initializer for weight values
"""
super(Highway, self).__init__(**kwargs)
self.activation_fn = activation_fn
self.biases_initializer = biases_initializer
self.weights_initializer = weights_initializer
def get_config(self):
config = super(Highway, self).get_config()
config['activation_fn'] = self.activation_fn
config['biases_initializer'] = self.biases_initializer
config['weights_initializer'] = self.weights_initializer
return config
def build(self, input_shape):
if isinstance(input_shape, SequenceCollection):
input_shape = input_shape[0]
out_channels = input_shape[1]
if self.weights_initializer is None:
weights_initializer = tf.keras.initializers.VarianceScaling
else:
weights_initializer = self.weights_initializer
self.dense_H = tf.keras.layers.Dense(
out_channels,
activation=self.activation_fn,
bias_initializer=self.biases_initializer,
kernel_initializer=weights_initializer)
self.dense_T = tf.keras.layers.Dense(
out_channels,
activation=tf.nn.sigmoid,
bias_initializer=tf.constant_initializer(-1),
kernel_initializer=weights_initializer)
self.built = True
def call(self, inputs):
if isinstance(inputs, SequenceCollection):
parent = inputs[0]
else:
parent = inputs
dense_H = self.dense_H(parent)
dense_T = self.dense_T(parent)
return tf.multiply(dense_H, dense_T) + tf.multiply(parent, 1 - dense_T)
class WeaveLayer(tf.keras.layers.Layer):
"""This class implements the core Weave convolution from the
Google graph convolution paper [1]_
This model contains atom features and bond features
separately. Here, bond features are also called pair features.
There are 4 types of transformation, atom->atom, atom->pair,
pair->atom, pair->pair that this model implements.
Examples
--------
This layer expects 4 inputs in a list of the form `[atom_features,
pair_features, pair_split, atom_to_pair]`. We'll walk through the structure
of these inputs. Let's start with some basic definitions.
>>> import deepchem as dc
>>> import numpy as np
Suppose you have a batch of molecules
>>> smiles = ["CCC", "C"]
Note that there are 4 atoms in total in this system. This layer expects its
input molecules to be batched together.
>>> total_n_atoms = 4
Let's suppose that we have a featurizer that computes `n_atom_feat` features
per atom.
>>> n_atom_feat = 75
Then conceptually, `atom_feat` is the array of shape `(total_n_atoms,
n_atom_feat)` of atomic features. For simplicity, let's just go with a
random such matrix.
>>> atom_feat = np.random.rand(total_n_atoms, n_atom_feat)
Let's suppose we have `n_pair_feat` pairwise features
>>> n_pair_feat = 14
For each molecule, we compute a matrix of shape `(n_atoms*n_atoms,
n_pair_feat)` of pairwise features for each pair of atoms in the molecule.
Let's construct this conceptually for our example.
>>> pair_feat = [np.random.rand(3*3, n_pair_feat), np.random.rand(1*1, n_pair_feat)]
>>> pair_feat = np.concatenate(pair_feat, axis=0)
>>> pair_feat.shape
(10, 14)
`pair_split` is an index into `pair_feat` which tells us which atom each row belongs to. In our case, we have
>>> pair_split = np.array([0, 0, 0, 1, 1, 1, 2, 2, 2, 3])
That is, the first 9 entries belong to "CCC" and the last entry to "C". The
final entry `atom_to_pair` goes in a little more in-depth than `pair_split`
and tells us the precise pair each pair feature belongs to. In our case
>>> atom_to_pair = np.array([[0, 0],
... [0, 1],
... [0, 2],
... [1, 0],
... [1, 1],
... [1, 2],
... [2, 0],
... [2, 1],
... [2, 2],
... [3, 3]])
Let's now define the actual layer
>>> layer = WeaveLayer()
And invoke it
>>> [A, P] = layer([atom_feat, pair_feat, pair_split, atom_to_pair])
The weave layer produces new atom/pair features. Let's check their shapes
>>> A = np.array(A)
>>> A.shape
(4, 50)
>>> P = np.array(P)
>>> P.shape
(10, 50)
The 4 is `total_num_atoms` and the 10 is the total number of pairs. Where
does `50` come from? It's from the default arguments `n_atom_output_feat` and
`n_pair_output_feat`.
References
----------
.. [1] Kearnes, Steven, et al. "Molecular graph convolutions: moving beyond
fingerprints." Journal of computer-aided molecular design 30.8 (2016):
595-608.
"""
def __init__(self,
n_atom_input_feat: int = 75,
n_pair_input_feat: int = 14,
n_atom_output_feat: int = 50,
n_pair_output_feat: int = 50,
n_hidden_AA: int = 50,
n_hidden_PA: int = 50,
n_hidden_AP: int = 50,
n_hidden_PP: int = 50,
update_pair: bool = True,
init: str = 'glorot_uniform',
activation: str = 'relu',
batch_normalize: bool = True,
batch_normalize_kwargs: Dict = {"renorm": True},
**kwargs):
"""
Parameters
----------
n_atom_input_feat: int, optional (default 75)
Number of features for each atom in input.
n_pair_input_feat: int, optional (default 14)
Number of features for each pair of atoms in input.
n_atom_output_feat: int, optional (default 50)
Number of features for each atom in output.
n_pair_output_feat: int, optional (default 50)
Number of features for each pair of atoms in output.
n_hidden_AA: int, optional (default 50)
Number of units(convolution depths) in corresponding hidden layer
n_hidden_PA: int, optional (default 50)
Number of units(convolution depths) in corresponding hidden layer
n_hidden_AP: int, optional (default 50)
Number of units(convolution depths) in corresponding hidden layer
n_hidden_PP: int, optional (default 50)
Number of units(convolution depths) in corresponding hidden layer
update_pair: bool, optional (default True)
Whether to calculate for pair features,
could be turned off for last layer
init: str, optional (default 'glorot_uniform')
Weight initialization for filters.
activation: str, optional (default 'relu')
Activation function applied
batch_normalize: bool, optional (default True)
If this is turned on, apply batch normalization before applying
activation functions on convolutional layers.
batch_normalize_kwargs: Dict, optional (default `{renorm=True}`)
Batch normalization is a complex layer which has many potential
arguments which change behavior. This layer accepts user-defined
parameters which are passed to all `BatchNormalization` layers in
`WeaveModel`, `WeaveLayer`, and `WeaveGather`.
"""
super(WeaveLayer, self).__init__(**kwargs)
self.init = init # Set weight initialization
self.activation = activation # Get activations
self.activation_fn = activations.get(activation)
self.update_pair = update_pair # last weave layer does not need to update
self.n_hidden_AA = n_hidden_AA
self.n_hidden_PA = n_hidden_PA
self.n_hidden_AP = n_hidden_AP
self.n_hidden_PP = n_hidden_PP
self.n_hidden_A = n_hidden_AA + n_hidden_PA
self.n_hidden_P = n_hidden_AP + n_hidden_PP
self.batch_normalize = batch_normalize
self.batch_normalize_kwargs = batch_normalize_kwargs
self.n_atom_input_feat = n_atom_input_feat
self.n_pair_input_feat = n_pair_input_feat
self.n_atom_output_feat = n_atom_output_feat
self.n_pair_output_feat = n_pair_output_feat
self.W_AP, self.b_AP, self.W_PP, self.b_PP, self.W_P, self.b_P = None, None, None, None, None, None
def get_config(self) -> Dict:
"""Returns config dictionary for this layer."""
config = super(WeaveLayer, self).get_config()
config['n_atom_input_feat'] = self.n_atom_input_feat
config['n_pair_input_feat'] = self.n_pair_input_feat
config['n_atom_output_feat'] = self.n_atom_output_feat
config['n_pair_output_feat'] = self.n_pair_output_feat
config['n_hidden_AA'] = self.n_hidden_AA
config['n_hidden_PA'] = self.n_hidden_PA
config['n_hidden_AP'] = self.n_hidden_AP
config['n_hidden_PP'] = self.n_hidden_PP
config['batch_normalize'] = self.batch_normalize
config['batch_normalize_kwargs'] = self.batch_normalize_kwargs
config['update_pair'] = self.update_pair
config['init'] = self.init
config['activation'] = self.activation
return config
def build(self, input_shape):
""" Construct internal trainable weights.
Parameters
----------
input_shape: tuple
Ignored since we don't need the input shape to create internal weights.
"""
def init(input_shape):
return self.add_weight(name='kernel',
shape=(input_shape[0], input_shape[1]),
initializer=self.init,
trainable=True)
self.W_AA = init([self.n_atom_input_feat, self.n_hidden_AA])
self.b_AA = backend.zeros(shape=[
self.n_hidden_AA,
])
self.AA_bn = BatchNormalization(**self.batch_normalize_kwargs)
self.W_PA = init([self.n_pair_input_feat, self.n_hidden_PA])
self.b_PA = backend.zeros(shape=[
self.n_hidden_PA,
])
self.PA_bn = BatchNormalization(**self.batch_normalize_kwargs)
self.W_A = init([self.n_hidden_A, self.n_atom_output_feat])
self.b_A = backend.zeros(shape=[
self.n_atom_output_feat,
])
self.A_bn = BatchNormalization(**self.batch_normalize_kwargs)
if self.update_pair:
self.W_AP = init([self.n_atom_input_feat * 2, self.n_hidden_AP])
self.b_AP = backend.zeros(shape=[
self.n_hidden_AP,
])
self.AP_bn = BatchNormalization(**self.batch_normalize_kwargs)
self.W_PP = init([self.n_pair_input_feat, self.n_hidden_PP])
self.b_PP = backend.zeros(shape=[
self.n_hidden_PP,
])
self.PP_bn = BatchNormalization(**self.batch_normalize_kwargs)
self.W_P = init([self.n_hidden_P, self.n_pair_output_feat])
self.b_P = backend.zeros(shape=[
self.n_pair_output_feat,
])
self.P_bn = BatchNormalization(**self.batch_normalize_kwargs)
self.built = True
def call(self, inputs: List) -> List:
"""Creates weave tensors.
Parameters
----------
inputs: List
Should contain 4 tensors [atom_features, pair_features, pair_split,
atom_to_pair]
"""
atom_features = inputs[0]
pair_features = inputs[1]
pair_split = inputs[2]
atom_to_pair = inputs[3]
activation = self.activation_fn
AA = tf.matmul(atom_features, self.W_AA) + self.b_AA
if self.batch_normalize:
AA = self.AA_bn(AA)
AA = activation(AA)
PA = tf.matmul(pair_features, self.W_PA) + self.b_PA
if self.batch_normalize:
PA = self.PA_bn(PA)
PA = activation(PA)
PA = tf.math.segment_sum(PA, pair_split)
A = tf.matmul(tf.concat([AA, PA], 1), self.W_A) + self.b_A
if self.batch_normalize:
A = self.A_bn(A)
A = activation(A)
if self.update_pair:
# Note that AP_ij and AP_ji share the same self.AP_bn batch
# normalization
AP_ij = tf.matmul(
tf.reshape(tf.gather(atom_features, atom_to_pair),
[-1, 2 * self.n_atom_input_feat]),
self.W_AP) + self.b_AP
if self.batch_normalize:
AP_ij = self.AP_bn(AP_ij)
AP_ij = activation(AP_ij)
AP_ji = tf.matmul(
tf.reshape(
tf.gather(atom_features, tf.reverse(atom_to_pair, [1])),
[-1, 2 * self.n_atom_input_feat]), self.W_AP) + self.b_AP
if self.batch_normalize:
AP_ji = self.AP_bn(AP_ji)
AP_ji = activation(AP_ji)
PP = tf.matmul(pair_features, self.W_PP) + self.b_PP
if self.batch_normalize:
PP = self.PP_bn(PP)
PP = activation(PP)
P = tf.matmul(tf.concat([AP_ij + AP_ji, PP], 1),
self.W_P) + self.b_P
if self.batch_normalize:
P = self.P_bn(P)
P = activation(P)
else:
P = pair_features
return [A, P]
class WeaveGather(tf.keras.layers.Layer):
"""Implements the weave-gathering section of weave convolutions.
Implements the gathering layer from [1]_. The weave gathering layer gathers
per-atom features to create a molecule-level fingerprint in a weave
convolutional network. This layer can also perform Gaussian histogram
expansion as detailed in [1]_. Note that the gathering function here is
simply addition as in [1]_.
Examples
--------
This layer expects 2 inputs in a list of the form `[atom_features,
pair_features]`. We'll walk through the structure
of these inputs. Let's start with some basic definitions.
>>> import deepchem as dc
>>> import numpy as np
Suppose you have a batch of molecules
>>> smiles = ["CCC", "C"]
Note that there are 4 atoms in total in this system. This layer expects its
input molecules to be batched together.
>>> total_n_atoms = 4
Let's suppose that we have `n_atom_feat` features per atom.
>>> n_atom_feat = 75
Then conceptually, `atom_feat` is the array of shape `(total_n_atoms,
n_atom_feat)` of atomic features. For simplicity, let's just go with a
random such matrix.
>>> atom_feat = np.random.rand(total_n_atoms, n_atom_feat)
We then need to provide a mapping of indices to the atoms they belong to. In
our case this would be
>>> atom_split = np.array([0, 0, 0, 1])
Let's now define the actual layer
>>> gather = WeaveGather(batch_size=2, n_input=n_atom_feat)
>>> output_molecules = gather([atom_feat, atom_split])
>>> len(output_molecules)
2
References
----------
.. [1] <NAME>, et al. "Molecular graph convolutions: moving beyond
fingerprints." Journal of computer-aided molecular design 30.8 (2016):
595-608.
Note
----
This class requires `tensorflow_probability` to be installed.
"""
def __init__(self,
batch_size: int,
n_input: int = 128,
gaussian_expand: bool = True,
compress_post_gaussian_expansion: bool = False,
init: str = 'glorot_uniform',
activation: str = 'tanh',
**kwargs):
"""
Parameters
----------
batch_size: int
number of molecules in a batch
n_input: int, optional (default 128)
number of features for each input molecule
gaussian_expand: boolean, optional (default True)
Whether to expand each dimension of atomic features by gaussian histogram
compress_post_gaussian_expansion: bool, optional (default False)
If True, compress the results of the Gaussian expansion back to the
original dimensions of the input by using a linear layer with specified
activation function. Note that this compression was not in the original
paper, but was present in the original DeepChem implementation so is
left present for backwards compatibility.
init: str, optional (default 'glorot_uniform')
Weight initialization for filters if `compress_post_gaussian_expansion`
is True.
activation: str, optional (default 'tanh')
Activation function applied for filters if
`compress_post_gaussian_expansion` is True. Should be recognizable by
`tf.keras.activations`.
"""
try:
import tensorflow_probability as tfp # noqa: F401
except ModuleNotFoundError:
raise ImportError(
"This class requires tensorflow-probability to be installed.")
super(WeaveGather, self).__init__(**kwargs)
self.n_input = n_input
self.batch_size = batch_size
self.gaussian_expand = gaussian_expand
self.compress_post_gaussian_expansion = compress_post_gaussian_expansion
self.init = init # Set weight initialization
self.activation = activation # Get activations
self.activation_fn = activations.get(activation)
def get_config(self):
config = super(WeaveGather, self).get_config()
config['batch_size'] = self.batch_size
config['n_input'] = self.n_input
config['gaussian_expand'] = self.gaussian_expand
config['init'] = self.init
config['activation'] = self.activation
config[
'compress_post_gaussian_expansion'] = self.compress_post_gaussian_expansion
return config
def build(self, input_shape):
if self.compress_post_gaussian_expansion:
def init(input_shape):
return self.add_weight(name='kernel',
shape=(input_shape[0], input_shape[1]),
initializer=self.init,
trainable=True)
self.W = init([self.n_input * 11, self.n_input])
self.b = backend.zeros(shape=[self.n_input])
self.built = True
def call(self, inputs: List) -> List:
"""Creates weave tensors.
Parameters
----------
inputs: List
Should contain 2 tensors [atom_features, atom_split]
Returns
-------
output_molecules: List
Each entry in this list is of shape `(self.n_input,)`
"""
outputs = inputs[0]
atom_split = inputs[1]
if self.gaussian_expand:
outputs = self.gaussian_histogram(outputs)
output_molecules = tf.math.segment_sum(outputs, atom_split)
if self.compress_post_gaussian_expansion:
output_molecules = tf.matmul(output_molecules, self.W) + self.b
output_molecules = self.activation_fn(output_molecules)
return output_molecules
def gaussian_histogram(self, x):
"""Expands input into a set of gaussian histogram bins.
Parameters
----------
x: tf.Tensor
Of shape `(N, n_feat)`
Examples
--------
This method uses 11 bins spanning portions of a Gaussian with zero mean
and unit standard deviation.
>>> gaussian_memberships = [(-1.645, 0.283), (-1.080, 0.170),
... (-0.739, 0.134), (-0.468, 0.118),
... (-0.228, 0.114), (0., 0.114),
... (0.228, 0.114), (0.468, 0.118),
... (0.739, 0.134), (1.080, 0.170),
... (1.645, 0.283)]
We construct a Gaussian at `gaussian_memberships[i][0]` with standard
deviation `gaussian_memberships[i][1]`. Each feature in `x` is assigned
the probability of falling in each Gaussian, and probabilities are
normalized across the 11 different Gaussians.
Returns
-------
outputs: tf.Tensor
Of shape `(N, 11*n_feat)`
"""
import tensorflow_probability as tfp
gaussian_memberships = [(-1.645, 0.283), (-1.080, 0.170),
(-0.739, 0.134), (-0.468, 0.118),
(-0.228, 0.114), (0., 0.114), (0.228, 0.114),
(0.468, 0.118), (0.739, 0.134), (1.080, 0.170),
(1.645, 0.283)]
dist = [
tfp.distributions.Normal(p[0], p[1]) for p in gaussian_memberships
]
dist_max = [dist[i].prob(gaussian_memberships[i][0]) for i in range(11)]
outputs = [dist[i].prob(x) / dist_max[i] for i in range(11)]
outputs = tf.stack(outputs, axis=2)
outputs = outputs / tf.reduce_sum(outputs, axis=2, keepdims=True)
outputs = tf.reshape(outputs, [-1, self.n_input * 11])
return outputs
class DTNNEmbedding(tf.keras.layers.Layer):
def __init__(self,
n_embedding=30,
periodic_table_length=30,
init='glorot_uniform',
**kwargs):
"""
Parameters
----------
n_embedding: int, optional
Number of features for each atom
periodic_table_length: int, optional
Length of the periodic-table embedding lookup (e.g. 83 = Bi)
init: str, optional
Weight initialization for filters.
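Examples
--------
A minimal usage sketch that looks up embeddings for a few atomic
numbers; the sizes and values are arbitrary and chosen only for
illustration:
>>> import numpy as np
>>> layer = DTNNEmbedding(n_embedding=2, periodic_table_length=20)
>>> emb = layer(np.array([1, 6, 8]))
>>> np.array(emb).shape
(3, 2)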
"""
super(DTNNEmbedding, self).__init__(**kwargs)
self.n_embedding = n_embedding
self.periodic_table_length = periodic_table_length
self.init = init # Set weight initialization
def get_config(self):
config = super(DTNNEmbedding, self).get_config()
config['n_embedding'] = self.n_embedding
config['periodic_table_length'] = self.periodic_table_length
config['init'] = self.init
return config
def build(self, input_shape):
def init(input_shape):
return self.add_weight(name='kernel',
shape=(input_shape[0], input_shape[1]),
initializer=self.init,
trainable=True)
self.embedding_list = init(
[self.periodic_table_length, self.n_embedding])
self.built = True
def call(self, inputs):
"""
parent layers: atom_number
"""
atom_number = inputs
return tf.nn.embedding_lookup(self.embedding_list, atom_number)
class DTNNStep(tf.keras.layers.Layer):
def __init__(self,
n_embedding=30,
n_distance=100,
n_hidden=60,
init='glorot_uniform',
activation='tanh',
**kwargs):
"""
Parameters
----------
n_embedding: int, optional
Number of features for each atom
n_distance: int, optional
granularity of distance matrix
n_hidden: int, optional
Number of nodes in hidden layer
init: str, optional
Weight initialization for filters.
activation: str, optional
Activation function applied
"""
super(DTNNStep, self).__init__(**kwargs)
self.n_embedding = n_embedding
self.n_distance = n_distance
self.n_hidden = n_hidden
self.init = init # Set weight initialization
self.activation = activation # Get activations
self.activation_fn = activations.get(activation)
def get_config(self):
config = super(DTNNStep, self).get_config()
config['n_embedding'] = self.n_embedding
config['n_distance'] = self.n_distance
config['n_hidden'] = self.n_hidden
config['activation'] = self.activation
config['init'] = self.init
return config
def build(self, input_shape):
def init(input_shape):
return self.add_weight(name='kernel',
shape=(input_shape[0], input_shape[1]),
initializer=self.init,
trainable=True)
self.W_cf = init([self.n_embedding, self.n_hidden])
self.W_df = init([self.n_distance, self.n_hidden])
self.W_fc = init([self.n_hidden, self.n_embedding])
self.b_cf = backend.zeros(shape=[
self.n_hidden,
])
self.b_df = backend.zeros(shape=[
self.n_hidden,
])
self.built = True
def call(self, inputs):
"""
parent layers: atom_features, distance, distance_membership_i, distance_membership_j
"""
atom_features = inputs[0]
distance = inputs[1]
distance_membership_i = inputs[2]
distance_membership_j = inputs[3]
distance_hidden = tf.matmul(distance, self.W_df) + self.b_df
atom_features_hidden = tf.matmul(atom_features, self.W_cf) + self.b_cf
outputs = tf.multiply(
distance_hidden,
tf.gather(atom_features_hidden, distance_membership_j))
# for atom i in a molecule m, this step multiplies together distance info of atom pair(i,j)
# and embeddings of atom j(both gone through a hidden layer)
outputs = tf.matmul(outputs, self.W_fc)
outputs = self.activation_fn(outputs)
output_ii = tf.multiply(self.b_df, atom_features_hidden)
output_ii = tf.matmul(output_ii, self.W_fc)
output_ii = self.activation_fn(output_ii)
# for atom i, sum the influence from all other atom j in the molecule
return tf.math.segment_sum(
outputs, distance_membership_i) - output_ii + atom_features
class DTNNGather(tf.keras.layers.Layer):
def __init__(self,
n_embedding=30,
n_outputs=100,
layer_sizes=[100],
output_activation=True,
init='glorot_uniform',
activation='tanh',
**kwargs):
"""
Parameters
----------
n_embedding: int, optional
Number of features for each atom
n_outputs: int, optional
Number of features for each molecule(output)
layer_sizes: list of int, optional(default=[100])
Structure of hidden layer(s)
init: str, optional
Weight initialization for filters.
activation: str, optional
Activation function applied
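Examples
--------
A minimal usage sketch that gathers per-atom features into two
molecules; all sizes are arbitrary and chosen only for illustration:
>>> import numpy as np
>>> atom_features = np.random.rand(5, 30).astype(np.float32)
>>> atom_membership = np.array([0, 0, 1, 1, 1])
>>> layer = DTNNGather(n_embedding=30, n_outputs=8, layer_sizes=[16])
>>> out = layer([atom_features, atom_membership])
>>> np.array(out).shape
(2, 8)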
"""
super(DTNNGather, self).__init__(**kwargs)
self.n_embedding = n_embedding
self.n_outputs = n_outputs
self.layer_sizes = layer_sizes
self.output_activation = output_activation
self.init = init # Set weight initialization
self.activation = activation # Get activations
self.activation_fn = activations.get(activation)
def get_config(self):
config = super(DTNNGather, self).get_config()
config['n_embedding'] = self.n_embedding
config['n_outputs'] = self.n_outputs
config['layer_sizes'] = self.layer_sizes
config['output_activation'] = self.output_activation
config['init'] = self.init
config['activation'] = self.activation
return config
def build(self, input_shape):
self.W_list = []
self.b_list = []
def init(input_shape):
return self.add_weight(name='kernel',
shape=(input_shape[0], input_shape[1]),
initializer=self.init,
trainable=True)
prev_layer_size = self.n_embedding
for i, layer_size in enumerate(self.layer_sizes):
self.W_list.append(init([prev_layer_size, layer_size]))
self.b_list.append(backend.zeros(shape=[
layer_size,
]))
prev_layer_size = layer_size
self.W_list.append(init([prev_layer_size, self.n_outputs]))
self.b_list.append(backend.zeros(shape=[
self.n_outputs,
]))
self.built = True
def call(self, inputs):
"""
parent layers: atom_features, atom_membership
"""
output = inputs[0]
atom_membership = inputs[1]
for i, W in enumerate(self.W_list[:-1]):
output = tf.matmul(output, W) + self.b_list[i]
output = self.activation_fn(output)
output = tf.matmul(output, self.W_list[-1]) + self.b_list[-1]
if self.output_activation:
output = self.activation_fn(output)
return tf.math.segment_sum(output, atom_membership)
def _DAGgraph_step(batch_inputs, W_list, b_list, activation_fn, dropouts,
training):
outputs = batch_inputs
for idw, (dropout, W) in enumerate(zip(dropouts, W_list)):
outputs = tf.nn.bias_add(tf.matmul(outputs, W), b_list[idw])
outputs = activation_fn(outputs)
if dropout is not None:
outputs = dropout(outputs, training=training)
return outputs
class DAGLayer(tf.keras.layers.Layer):
"""DAG computation layer.
This layer generates a directed acyclic graph for each atom
in a molecule. This layer is based on the algorithm from the
following paper:
Lusci, Alessandro, <NAME>, and <NAME>. "Deep architectures and deep learning in chemoinformatics: the prediction of aqueous solubility for drug-like molecules." Journal of chemical information and modeling 53.7 (2013): 1563-1575.
This layer performs a sort of inward sweep. Recall that for
each atom, a DAG is generated that "points inward" to that
atom from the undirected molecule graph. Picture this as
"picking up" the atom as the vertex and using the natural
tree structure that forms from gravity. The layer "sweeps
inwards" from the leaf nodes of the DAG upwards to the
atom. This is batched so the transformation is done for
each atom.
"""
def __init__(self,
n_graph_feat=30,
n_atom_feat=75,
max_atoms=50,
layer_sizes=[100],
init='glorot_uniform',
activation='relu',
dropout=None,
batch_size=64,
**kwargs):
"""
Parameters
----------
n_graph_feat: int, optional
Number of features for each node (and the whole graph).
n_atom_feat: int, optional
Number of features listed per atom.
max_atoms: int, optional
Maximum number of atoms in molecules.
layer_sizes: list of int, optional(default=[100])
List of hidden layer size(s):
length of this list represents the number of hidden layers,
and each element is the width of corresponding hidden layer.
init: str, optional
Weight initialization for filters.
activation: str, optional
Activation function applied.
dropout: float, optional
Dropout probability in hidden layer(s).
batch_size: int, optional
number of molecules in a batch.
"""
super(DAGLayer, self).__init__(**kwargs)
self.init = init # Set weight initialization
self.activation = activation # Get activations
self.activation_fn = activations.get(activation)
self.layer_sizes = layer_sizes
self.dropout = dropout
self.max_atoms = max_atoms
self.batch_size = batch_size
self.n_inputs = n_atom_feat + (self.max_atoms - 1) * n_graph_feat
# number of inputs each step
self.n_graph_feat = n_graph_feat
self.n_outputs = n_graph_feat
self.n_atom_feat = n_atom_feat
def get_config(self):
config = super(DAGLayer, self).get_config()
config['n_graph_feat'] = self.n_graph_feat
config['n_atom_feat'] = self.n_atom_feat
config['max_atoms'] = self.max_atoms
config['layer_sizes'] = self.layer_sizes
config['init'] = self.init
config['activation'] = self.activation
config['dropout'] = self.dropout
config['batch_size'] = self.batch_size
return config
def build(self, input_shape):
""""Construct internal trainable weights."""
self.W_list = []
self.b_list = []
self.dropouts = []
prev_layer_size = self.n_inputs
for layer_size in self.layer_sizes:
self.W_list.append(
self.add_weight(name='kernel',
shape=(prev_layer_size, layer_size),
initializer=self.init,
trainable=True))
self.b_list.append(
self.add_weight(name='bias',
shape=(layer_size,),
initializer='zeros',
trainable=True))
if self.dropout is not None and self.dropout > 0.0:
self.dropouts.append(Dropout(rate=self.dropout))
else:
self.dropouts.append(None)
prev_layer_size = layer_size
self.W_list.append(
self.add_weight(name='kernel',
shape=(prev_layer_size, self.n_outputs),
initializer=self.init,
trainable=True))
self.b_list.append(
self.add_weight(name='bias',
shape=(self.n_outputs,),
initializer='zeros',
trainable=True))
if self.dropout is not None and self.dropout > 0.0:
self.dropouts.append(Dropout(rate=self.dropout))
else:
self.dropouts.append(None)
self.built = True
def call(self, inputs, training=True):
"""
parent layers: atom_features, parents, calculation_orders, calculation_masks, n_atoms
"""
atom_features = inputs[0]
# each atom corresponds to a graph, which is represented by the `max_atoms*max_atoms` int32 matrix of index
# each graph includes `max_atoms` steps (corresponding to rows) of calculating graph features
parents = tf.cast(inputs[1], dtype=tf.int32)
# target atoms for each step: (batch_size*max_atoms) * max_atoms
calculation_orders = inputs[2]
calculation_masks = inputs[3]
n_atoms = tf.squeeze(inputs[4])
graph_features = tf.zeros((self.max_atoms * self.batch_size,
self.max_atoms + 1, self.n_graph_feat))
for count in range(self.max_atoms):
# `count`-th step
# extracting atom features of target atoms: (batch_size*max_atoms) * n_atom_features
mask = calculation_masks[:, count]
current_round = tf.boolean_mask(calculation_orders[:, count], mask)
batch_atom_features = tf.gather(atom_features, current_round)
# generating index for graph features used in the inputs
stack1 = tf.reshape(
tf.stack([tf.boolean_mask(tf.range(n_atoms), mask)] *
(self.max_atoms - 1),
axis=1), [-1])
stack2 = tf.reshape(tf.boolean_mask(parents[:, count, 1:], mask),
[-1])
index = tf.stack([stack1, stack2], axis=1)
# extracting graph features for parents of the target atoms, then flatten
# shape: (batch_size*max_atoms) * [(max_atoms-1)*n_graph_features]
batch_graph_features = tf.reshape(
tf.gather_nd(graph_features, index),
[-1, (self.max_atoms - 1) * self.n_graph_feat])
# concat into the input tensor: (batch_size*max_atoms) * n_inputs
batch_inputs = tf.concat(
axis=1, values=[batch_atom_features, batch_graph_features])
# DAGgraph_step maps from batch_inputs to a batch of graph_features
# of shape: (batch_size*max_atoms) * n_graph_features
# representing the graph features of target atoms in each graph
batch_outputs = _DAGgraph_step(batch_inputs, self.W_list,
self.b_list, self.activation_fn,
self.dropouts, training)
# index for target atoms
target_index = tf.stack([tf.range(n_atoms), parents[:, count, 0]],
axis=1)
target_index = tf.boolean_mask(target_index, mask)
graph_features = tf.tensor_scatter_nd_update(
graph_features, target_index, batch_outputs)
return batch_outputs
class DAGGather(tf.keras.layers.Layer):
def __init__(self,
n_graph_feat=30,
n_outputs=30,
max_atoms=50,
layer_sizes=[100],
init='glorot_uniform',
activation='relu',
dropout=None,
**kwargs):
"""DAG vector gathering layer
Parameters
----------
n_graph_feat: int, optional
Number of features for each atom.
n_outputs: int, optional
Number of features for each molecule.
max_atoms: int, optional
Maximum number of atoms in molecules.
layer_sizes: list of int, optional
List of hidden layer size(s):
length of this list represents the number of hidden layers,
and each element is the width of corresponding hidden layer.
init: str, optional
Weight initialization for filters.
activation: str, optional
Activation function applied.
dropout: float, optional
Dropout probability in the hidden layer(s).
"""
super(DAGGather, self).__init__(**kwargs)
self.init = init # Set weight initialization
self.activation = activation # Get activations
self.activation_fn = activations.get(activation)
self.layer_sizes = layer_sizes
self.dropout = dropout
self.max_atoms = max_atoms
self.n_graph_feat = n_graph_feat
self.n_outputs = n_outputs
def get_config(self):
config = super(DAGGather, self).get_config()
config['n_graph_feat'] = self.n_graph_feat
config['n_outputs'] = self.n_outputs
config['max_atoms'] = self.max_atoms
config['layer_sizes'] = self.layer_sizes
config['init'] = self.init
config['activation'] = self.activation
config['dropout'] = self.dropout
return config
def build(self, input_shape):
self.W_list = []
self.b_list = []
self.dropouts = []
prev_layer_size = self.n_graph_feat
for layer_size in self.layer_sizes:
self.W_list.append(
self.add_weight(name='kernel',
shape=(prev_layer_size, layer_size),
initializer=self.init,
trainable=True))
self.b_list.append(
self.add_weight(name='bias',
shape=(layer_size,),
initializer='zeros',
trainable=True))
if self.dropout is not None and self.dropout > 0.0:
self.dropouts.append(Dropout(rate=self.dropout))
else:
self.dropouts.append(None)
prev_layer_size = layer_size
self.W_list.append(
self.add_weight(name='kernel',
shape=(prev_layer_size, self.n_outputs),
initializer=self.init,
trainable=True))
self.b_list.append(
self.add_weight(name='bias',
shape=(self.n_outputs,),
initializer='zeros',
trainable=True))
if self.dropout is not None and self.dropout > 0.0:
self.dropouts.append(Dropout(rate=self.dropout))
else:
self.dropouts.append(None)
self.built = True
def call(self, inputs, training=True):
"""
parent layers: atom_features, membership
"""
atom_features = inputs[0]
membership = inputs[1]
# Extract atom_features
graph_features = tf.math.segment_sum(atom_features, membership)
# sum all graph outputs
return _DAGgraph_step(graph_features, self.W_list, self.b_list,
self.activation_fn, self.dropouts, training)
class MessagePassing(tf.keras.layers.Layer):
""" General class for MPNN
default structures built according to https://arxiv.org/abs/1511.06391 """
def __init__(self,
T,
message_fn='enn',
update_fn='gru',
n_hidden=100,
**kwargs):
"""
Parameters
----------
T: int
Number of message passing steps
message_fn: str, optional
message function in the model
update_fn: str, optional
update function in the model
n_hidden: int, optional
number of hidden units in the passing phase
"""
super(MessagePassing, self).__init__(**kwargs)
self.T = T
self.message_fn = message_fn
self.update_fn = update_fn
self.n_hidden = n_hidden
def get_config(self):
config = super(MessagePassing, self).get_config()
config['T'] = self.T
config['message_fn'] = self.message_fn
config['update_fn'] = self.update_fn
config['n_hidden'] = self.n_hidden
return config
def build(self, input_shape):
n_pair_features = int(input_shape[1][-1])
if self.message_fn == 'enn':
# Default message function: edge network, update function: GRU
# more options to be implemented
self.message_function = EdgeNetwork(n_pair_features, self.n_hidden)
if self.update_fn == 'gru':
self.update_function = GatedRecurrentUnit(self.n_hidden)
self.built = True
def call(self, inputs):
""" Perform T steps of message passing """
atom_features, pair_features, atom_to_pair = inputs
n_atom_features = atom_features.get_shape().as_list()[-1]
if n_atom_features < self.n_hidden:
pad_length = self.n_hidden - n_atom_features
out = tf.pad(atom_features, ((0, 0), (0, pad_length)),
mode='CONSTANT')
elif n_atom_features > self.n_hidden:
raise ValueError("Too large initial feature vector")
else:
out = atom_features
for i in range(self.T):
message = self.message_function([pair_features, out, atom_to_pair])
out = self.update_function([out, message])
return out
class EdgeNetwork(tf.keras.layers.Layer):
""" Submodule for Message Passing """
def __init__(self,
n_pair_features=8,
n_hidden=100,
init='glorot_uniform',
**kwargs):
super(EdgeNetwork, self).__init__(**kwargs)
self.n_pair_features = n_pair_features
self.n_hidden = n_hidden
self.init = init
def get_config(self):
config = super(EdgeNetwork, self).get_config()
config['n_pair_features'] = self.n_pair_features
config['n_hidden'] = self.n_hidden
config['init'] = self.init
return config
def build(self, input_shape):
def init(input_shape):
return self.add_weight(name='kernel',
shape=(input_shape[0], input_shape[1]),
initializer=self.init,
trainable=True)
n_pair_features = self.n_pair_features
n_hidden = self.n_hidden
self.W = init([n_pair_features, n_hidden * n_hidden])
self.b = backend.zeros(shape=(n_hidden * n_hidden,))
self.built = True
def call(self, inputs):
pair_features, atom_features, atom_to_pair = inputs
A = tf.nn.bias_add(tf.matmul(pair_features, self.W), self.b)
A = tf.reshape(A, (-1, self.n_hidden, self.n_hidden))
out = tf.expand_dims(tf.gather(atom_features, atom_to_pair[:, 1]), 2)
out = tf.squeeze(tf.matmul(A, out), axis=2)
return tf.math.segment_sum(out, atom_to_pair[:, 0])
class GatedRecurrentUnit(tf.keras.layers.Layer):
""" Submodule for Message Passing """
def __init__(self, n_hidden=100, init='glorot_uniform', **kwargs):
super(GatedRecurrentUnit, self).__init__(**kwargs)
self.n_hidden = n_hidden
self.init = init
def get_config(self):
config = super(GatedRecurrentUnit, self).get_config()
config['n_hidden'] = self.n_hidden
config['init'] = self.init
return config
def build(self, input_shape):
n_hidden = self.n_hidden
def init(input_shape):
return self.add_weight(name='kernel',
shape=(input_shape[0], input_shape[1]),
initializer=self.init,
trainable=True)
self.Wz = init([n_hidden, n_hidden])
self.Wr = init([n_hidden, n_hidden])
self.Wh = init([n_hidden, n_hidden])
self.Uz = init([n_hidden, n_hidden])
self.Ur = init([n_hidden, n_hidden])
self.Uh = init([n_hidden, n_hidden])
self.bz = backend.zeros(shape=(n_hidden,))
self.br = backend.zeros(shape=(n_hidden,))
self.bh = backend.zeros(shape=(n_hidden,))
self.built = True
def call(self, inputs):
z = tf.nn.sigmoid(
tf.matmul(inputs[1], self.Wz) + tf.matmul(inputs[0], self.Uz) +
self.bz)
r = tf.nn.sigmoid(
tf.matmul(inputs[1], self.Wr) + tf.matmul(inputs[0], self.Ur) +
self.br)
h = (1 - z) * tf.nn.tanh(
tf.matmul(inputs[1], self.Wh) + tf.matmul(inputs[0] * r, self.Uh) +
self.bh) + z * inputs[0]
return h
class SetGather(tf.keras.layers.Layer):
"""set2set gather layer for graph-based model
Models using this layer must set `pad_batches=True`.
"""
def __init__(self,
M,
batch_size,
n_hidden=100,
init='orthogonal',
**kwargs):
"""
Parameters
----------
M: int
Number of LSTM steps
batch_size: int
Number of samples in a batch(all batches must have same size)
n_hidden: int, optional
number of hidden units in the passing phase
"""
super(SetGather, self).__init__(**kwargs)
self.M = M
self.batch_size = batch_size
self.n_hidden = n_hidden
self.init = init
def get_config(self):
config = super(SetGather, self).get_config()
config['M'] = self.M
config['batch_size'] = self.batch_size
config['n_hidden'] = self.n_hidden
config['init'] = self.init
return config
def build(self, input_shape):
def init(input_shape):
return self.add_weight(name='kernel',
shape=(input_shape[0], input_shape[1]),
initializer=self.init,
trainable=True)
self.U = init((2 * self.n_hidden, 4 * self.n_hidden))
self.b = tf.Variable(np.concatenate(
(np.zeros(self.n_hidden), np.ones(self.n_hidden),
np.zeros(self.n_hidden), np.zeros(self.n_hidden))),
dtype=tf.float32)
self.built = True
def call(self, inputs):
"""Perform M steps of set2set gather,
Detailed descriptions in: https://arxiv.org/abs/1511.06391
"""
atom_features, atom_split = inputs
c = tf.zeros((self.batch_size, self.n_hidden))
h = tf.zeros((self.batch_size, self.n_hidden))
for i in range(self.M):
q_expanded = tf.gather(h, atom_split)
e = tf.reduce_sum(atom_features * q_expanded, 1)
e_mols = tf.dynamic_partition(e, atom_split, self.batch_size)
# Add another value(~-Inf) to prevent error in softmax
e_mols = [
tf.concat([e_mol, tf.constant([-1000.])], 0) for e_mol in e_mols
]
a = tf.concat([tf.nn.softmax(e_mol)[:-1] for e_mol in e_mols], 0)
r = tf.math.segment_sum(
tf.reshape(a, [-1, 1]) * atom_features, atom_split)
# Model using this layer must set pad_batches=True
q_star = tf.concat([h, r], axis=1)
h, c = self.LSTMStep(q_star, c)
return q_star
def LSTMStep(self, h, c, x=None):
# Perform one step of LSTM
z = tf.nn.bias_add(tf.matmul(h, self.U), self.b)
i = tf.nn.sigmoid(z[:, :self.n_hidden])
f = tf.nn.sigmoid(z[:, self.n_hidden:2 * self.n_hidden])
o = tf.nn.sigmoid(z[:, 2 * self.n_hidden:3 * self.n_hidden])
z3 = z[:, 3 * self.n_hidden:]
c_out = f * c + i * tf.nn.tanh(z3)
h_out = o * tf.nn.tanh(c_out)
return h_out, c_out
<file_sep>import rdkit
from rdkit import Chem
from rdkit.Chem import rdMolDescriptors
import pandas as pd
import gzip
import os
def main():
    sdf_root_path = "/media/data/pubchem/SDF"
    for path, dirs, filenames in os.walk(sdf_root_path):
        for filename in filenames:
            filepath = os.path.join(sdf_root_path, filename)
            # This SDF file fails to parse with RDKit on Ubuntu 16.04
            if "Compound_102125001_102150000" in filename:
                continue
            with gzip.open(filepath, 'rb') as myfile:
                suppl = Chem.ForwardSDMolSupplier(myfile)
                for mol in suppl:
                    if not mol:
                        continue
                    try:
                        # Collect the Morgan fingerprint (radius 1) bit IDs set by this molecule
                        # and print them on a single space-separated line.
                        info = {}
                        rdMolDescriptors.GetMorganFingerprint(mol, 1, bitInfo=info)
                        for k in list(info.keys()):
                            print(k, end=' ')
                        print()
                    except Exception:
                        pass
if __name__ == "__main__":
    main()
<file_sep># Message Passing Neural Networks
MPNNs aim to generalize molecular machine learning models that operate on graph-valued inputs. Graph-Convolutions [https://arxiv.org/abs/1509.09292] and Weaves [https://arxiv.org/abs/1603.00856] (among others) can be recast into this framework [https://arxiv.org/abs/1704.01212].
The premise is that the featurization of arbitrary chemical multigraphs can be broken down into a message function, vertex-update function, and a readout function that is invariant to graph isomorphisms. All functions must be subdifferentiable to preserve gradient-flow and ideally are learnable as well.
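In the notation of [https://arxiv.org/abs/1704.01212], with node states `h_v`, edge features `e_vw`, and neighborhood `N(v)`, the three pieces take the form:
```
m_v^{t+1} = \sum_{w \in N(v)} M_t(h_v^t, h_w^t, e_vw)    # message
h_v^{t+1} = U_t(h_v^t, m_v^{t+1})                        # vertex update
\hat{y}   = R({ h_v^T : v \in G })                       # readout
```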
Models of this style introduce an additional parameter **T**, which is the number of iterations for the message-passing stage. Values greater than 4 don't seem to improve performance.
## MPNN-S Variant
MPNNs do provide a nice mathematical framework that can capture modern molecular machine learning algorithms we work with today. One criticism of this algorithm class is that training is slow, due to the sheer number of training iterations required for convergence - at batch size 20 on QM9, the MPNN authors trained for 540 epochs.
This can be improved significantly by using batch normalization, or more interestingly, the new SELU activation [https://arxiv.org/pdf/1706.02515.pdf]. In order to use SELUs straight through the system, we dropped the GRU unit [https://arxiv.org/pdf/1412.3555.pdf] the authors used in favor of a SELU activated fully-connected neural network for each time step **T**. This modified approach now achieves peak performance in as little as 60 epochs on most molecular machine learning datasets.
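As a rough sketch of that substitution (illustrative only, not the exact code in `mpnn.py`; the hidden width, number of steps, and the random stand-ins for aggregated messages are placeholders):
```python
import tensorflow as tf

T = 3           # message-passing steps (placeholder)
n_hidden = 100  # hidden width (placeholder)

# One SELU-activated dense update per step, replacing the GRU state update.
updates = [
    tf.keras.layers.Dense(n_hidden, activation="selu",
                          kernel_initializer="lecun_normal")
    for _ in range(T)
]

h = tf.random.normal((32, n_hidden))          # toy per-atom hidden states
for t in range(T):
    m = tf.random.normal((32, n_hidden))      # stand-in for aggregated messages
    h = updates[t](tf.concat([h, m], axis=1))  # SELU dense update at step t
print(h.shape)  # (32, 100)
```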
MPNN-S sets new records on the Delaney, PPB, and Clearance datasets:
| Dataset | Num Examples | MP-DNN Val R2 [Scaffold Split] | GraphConv Val R2 [Scaffold Split] |
| ------ | ------ | ------ | ------ |
| Delaney | 1102 | **.820** | .606 |
| PPB | 1600 | **.427** | .381 |
| Clearance | 838 | **.32** | .28 |
## Run Code
```sh
$ python mpnn.py
```
License
----
MIT
<file_sep>"""
Topological fingerprints.
"""
from typing import Dict
import numpy as np
from deepchem.utils.typing import RDKitMol
from deepchem.feat.base_classes import MolecularFeaturizer
class CircularFingerprint(MolecularFeaturizer):
"""Circular (Morgan) fingerprints.
Extended Connectivity Circular Fingerprints compute a bag-of-words style
representation of a molecule by breaking it into local neighborhoods and
hashing into a bit vector of the specified size. It is used specifically
for structure-activity modelling. See [1]_ for more details.
References
----------
.. [1] Rogers, David, and <NAME>. "Extended-connectivity fingerprints."
Journal of chemical information and modeling 50.5 (2010): 742-754.
Note
----
This class requires RDKit to be installed.
Examples
--------
>>> import deepchem as dc
>>> from rdkit import Chem
>>> smiles = ['C1=CC=CC=C1']
>>> # Example 1: (size = 2048, radius = 4)
>>> featurizer = dc.feat.CircularFingerprint(size=2048, radius=4)
>>> features = featurizer.featurize(smiles)
>>> type(features[0])
<class 'numpy.ndarray'>
>>> features[0].shape
(2048,)
    >>> # Example 2: (size = 2048, radius = 8, sparse = True, smiles = True)
>>> featurizer = dc.feat.CircularFingerprint(size=2048, radius=8,
... sparse=True, smiles=True)
>>> features = featurizer.featurize(smiles)
>>> type(features[0]) # dict containing fingerprints
<class 'dict'>
"""
def __init__(self,
radius: int = 2,
size: int = 2048,
chiral: bool = False,
bonds: bool = True,
features: bool = False,
sparse: bool = False,
smiles: bool = False,
is_counts_based: bool = False):
"""
Parameters
----------
radius: int, optional (default 2)
Fingerprint radius.
size: int, optional (default 2048)
Length of generated bit vector.
chiral: bool, optional (default False)
Whether to consider chirality in fingerprint generation.
bonds: bool, optional (default True)
Whether to consider bond order in fingerprint generation.
features: bool, optional (default False)
Whether to use feature information instead of atom information; see
RDKit docs for more info.
sparse: bool, optional (default False)
Whether to return a dict for each molecule containing the sparse
fingerprint.
smiles: bool, optional (default False)
Whether to calculate SMILES strings for fragment IDs (only applicable
when calculating sparse fingerprints).
is_counts_based: bool, optional (default False)
            Whether to generate a counts-based fingerprint.
"""
self.radius = radius
self.size = size
self.chiral = chiral
self.bonds = bonds
self.features = features
self.sparse = sparse
self.smiles = smiles
self.is_counts_based = is_counts_based
def _featurize(self, datapoint: RDKitMol, **kwargs) -> np.ndarray:
"""Calculate circular fingerprint.
Parameters
----------
datapoint: rdkit.Chem.rdchem.Mol
RDKit Mol object
Returns
-------
np.ndarray
A numpy array of circular fingerprint.
"""
try:
from rdkit import Chem, DataStructs
from rdkit.Chem import rdMolDescriptors
except ModuleNotFoundError:
raise ImportError("This class requires RDKit to be installed.")
if 'mol' in kwargs:
datapoint = kwargs.get("mol")
raise DeprecationWarning(
'Mol is being phased out as a parameter, please pass "datapoint" instead.'
)
if self.sparse:
info: Dict = {}
fp = rdMolDescriptors.GetMorganFingerprint(
datapoint,
self.radius,
useChirality=self.chiral,
useBondTypes=self.bonds,
useFeatures=self.features,
bitInfo=info)
fp = fp.GetNonzeroElements() # convert to a dict
# generate SMILES for fragments
if self.smiles:
fp_smiles = {}
for fragment_id, count in fp.items():
root, radius = info[fragment_id][0]
env = Chem.FindAtomEnvironmentOfRadiusN(
datapoint, radius, root)
frag = Chem.PathToSubmol(datapoint, env)
smiles = Chem.MolToSmiles(frag)
fp_smiles[fragment_id] = {'smiles': smiles, 'count': count}
fp = fp_smiles
else:
if self.is_counts_based:
fp_sparse = rdMolDescriptors.GetHashedMorganFingerprint(
datapoint,
self.radius,
nBits=self.size,
useChirality=self.chiral,
useBondTypes=self.bonds,
useFeatures=self.features)
fp = np.zeros(
(self.size,), dtype=float
) # initialise numpy array of zeros (shape: (required size,))
DataStructs.ConvertToNumpyArray(fp_sparse, fp)
else:
fp = rdMolDescriptors.GetMorganFingerprintAsBitVect(
datapoint,
self.radius,
nBits=self.size,
useChirality=self.chiral,
useBondTypes=self.bonds,
useFeatures=self.features)
fp = np.asarray(fp, dtype=float)
return fp
def __hash__(self):
return hash((self.radius, self.size, self.chiral, self.bonds,
self.features, self.sparse, self.smiles))
def __eq__(self, other):
if not isinstance(self, other.__class__):
return False
return self.radius == other.radius and \
self.size == other.size and \
self.chiral == other.chiral and \
self.bonds == other.bonds and \
self.features == other.features and \
self.sparse == other.sparse and \
self.smiles == other.smiles
<file_sep>import tempfile
import os
import deepchem as dc
import numpy as np
def test_flattening_with_csv_load_withtask():
fin = tempfile.NamedTemporaryFile(mode='w', delete=False)
fin.write("smiles,endpoint\nc1ccccc1,1")
fin.close()
loader = dc.data.CSVLoader(
["endpoint"],
feature_field="smiles",
featurizer=dc.feat.ConvMolFeaturizer(per_atom_fragmentation=True))
frag_dataset = loader.create_dataset(fin.name)
transformer = dc.trans.FlatteningTransformer(dataset=frag_dataset)
frag_dataset = transformer.transform(frag_dataset)
assert len(frag_dataset) == 6
assert np.shape(frag_dataset.y) == (6, 1
) # y should be expanded up to X shape
assert np.shape(frag_dataset.w) == (6, 1
) # w should be expanded up to X shape
def test_flattening_with_csv_load_notask():
fin = tempfile.NamedTemporaryFile(mode='w', delete=False)
fin.write("smiles,endpoint\nc1ccccc1,1")
fin.close()
loader = dc.data.CSVLoader(
[],
feature_field="smiles",
featurizer=dc.feat.ConvMolFeaturizer(per_atom_fragmentation=True))
frag_dataset = loader.create_dataset(fin.name)
transformer = dc.trans.FlatteningTransformer(dataset=frag_dataset)
frag_dataset = transformer.transform(frag_dataset)
assert len(frag_dataset) == 6
def test_flattening_with_sdf_load_withtask():
cur_dir = os.path.dirname(os.path.realpath(__file__))
featurizer = dc.feat.ConvMolFeaturizer(per_atom_fragmentation=True)
loader = dc.data.SDFLoader(["LogP(RRCK)"],
featurizer=featurizer,
sanitize=True)
dataset = loader.create_dataset(
os.path.join(cur_dir, "membrane_permeability.sdf"))
transformer = dc.trans.FlatteningTransformer(dataset=dataset)
frag_dataset = transformer.transform(dataset)
assert len(frag_dataset) == 98
assert np.shape(frag_dataset.y) == (98, 1
) # y should be expanded up to X shape
assert np.shape(frag_dataset.w) == (98, 1
) # w should be expanded up to X shape
def test_flattening_with_sdf_load_notask():
cur_dir = os.path.dirname(os.path.realpath(__file__))
featurizer = dc.feat.ConvMolFeaturizer(per_atom_fragmentation=True)
loader = dc.data.SDFLoader([], featurizer=featurizer, sanitize=True)
dataset = loader.create_dataset(
os.path.join(cur_dir, "membrane_permeability.sdf"))
transformer = dc.trans.FlatteningTransformer(dataset=dataset)
frag_dataset = transformer.transform(dataset)
assert len(frag_dataset) == 98
<file_sep>from collections.abc import Sequence as SequenceCollection
import deepchem as dc
import numpy as np
import tensorflow as tf
from typing import List, Union, Tuple, Iterable, Dict, Optional
from deepchem.utils.typing import OneOrMany, LossFn, ActivationFn
from deepchem.data import Dataset, pad_features
from deepchem.feat.mol_graphs import ConvMol
from deepchem.metrics import to_one_hot
from deepchem.models import KerasModel, layers
from deepchem.models.losses import L2Loss, SoftmaxCrossEntropy, Loss
from tensorflow.keras.layers import Input, Dense, Reshape, Softmax, Dropout, Activation, BatchNormalization
class TrimGraphOutput(tf.keras.layers.Layer):
"""Trim the output to the correct number of samples.
GraphGather always outputs fixed size batches. This layer trims the output
to the number of samples that were in the actual input tensors.
"""
def __init__(self, **kwargs):
super(TrimGraphOutput, self).__init__(**kwargs)
def call(self, inputs):
n_samples = tf.squeeze(inputs[1])
return inputs[0][0:n_samples]
class WeaveModel(KerasModel):
"""Implements Google-style Weave Graph Convolutions
This model implements the Weave style graph convolutions
from [1]_.
The biggest difference between WeaveModel style convolutions
and GraphConvModel style convolutions is that Weave
convolutions model bond features explicitly. This has the
side effect that it needs to construct a NxN matrix
explicitly to model bond interactions. This may cause
scaling issues, but may possibly allow for better modeling
of subtle bond effects.
Note that [1]_ introduces a whole variety of different architectures for
Weave models. The default settings in this class correspond to the W2N2
    variant from [1]_, which is the most commonly used variant.
Examples
--------
Here's an example of how to fit a `WeaveModel` on a tiny sample dataset.
>>> import numpy as np
>>> import deepchem as dc
>>> featurizer = dc.feat.WeaveFeaturizer()
>>> X = featurizer(["C", "CC"])
>>> y = np.array([1, 0])
>>> dataset = dc.data.NumpyDataset(X, y)
>>> model = dc.models.WeaveModel(n_tasks=1, n_weave=2, fully_connected_layer_sizes=[2000, 1000], mode="classification")
>>> loss = model.fit(dataset)
Note
----
In general, the use of batch normalization can cause issues with NaNs. If
you're having trouble with NaNs while using this model, consider setting
`batch_normalize_kwargs={"trainable": False}` or turning off batch
normalization entirely with `batch_normalize=False`.
References
----------
.. [1] Kearnes, Steven, et al. "Molecular graph convolutions: moving beyond
fingerprints." Journal of computer-aided molecular design 30.8 (2016):
595-608.
"""
def __init__(self,
n_tasks: int,
n_atom_feat: OneOrMany[int] = 75,
n_pair_feat: OneOrMany[int] = 14,
n_hidden: int = 50,
n_graph_feat: int = 128,
n_weave: int = 2,
fully_connected_layer_sizes: List[int] = [2000, 100],
conv_weight_init_stddevs: OneOrMany[float] = 0.03,
weight_init_stddevs: OneOrMany[float] = 0.01,
bias_init_consts: OneOrMany[float] = 0.0,
weight_decay_penalty: float = 0.0,
weight_decay_penalty_type: str = "l2",
dropouts: OneOrMany[float] = 0.25,
final_conv_activation_fn: Optional[ActivationFn] = tf.nn.tanh,
activation_fns: OneOrMany[ActivationFn] = tf.nn.relu,
batch_normalize: bool = True,
batch_normalize_kwargs: Dict = {
"renorm": True,
"fused": False
},
gaussian_expand: bool = True,
compress_post_gaussian_expansion: bool = False,
mode: str = "classification",
n_classes: int = 2,
batch_size: int = 100,
**kwargs):
"""
Parameters
----------
n_tasks: int
Number of tasks
n_atom_feat: int, optional (default 75)
Number of features per atom. Note this is 75 by default and should be 78
if chirality is used by `WeaveFeaturizer`.
n_pair_feat: int, optional (default 14)
Number of features per pair of atoms.
n_hidden: int, optional (default 50)
Number of units(convolution depths) in corresponding hidden layer
n_graph_feat: int, optional (default 128)
Number of output features for each molecule(graph)
n_weave: int, optional (default 2)
The number of weave layers in this model.
fully_connected_layer_sizes: list (default `[2000, 100]`)
The size of each dense layer in the network. The length of
this list determines the number of layers.
conv_weight_init_stddevs: list or float (default 0.03)
The standard deviation of the distribution to use for weight
            initialization of each convolutional layer. The length of this list
should equal `n_weave`. Alternatively, this may be a single value instead
of a list, in which case the same value is used for each layer.
weight_init_stddevs: list or float (default 0.01)
The standard deviation of the distribution to use for weight
initialization of each fully connected layer. The length of this list
should equal len(layer_sizes). Alternatively this may be a single value
instead of a list, in which case the same value is used for every layer.
bias_init_consts: list or float (default 0.0)
The value to initialize the biases in each fully connected layer. The
length of this list should equal len(layer_sizes).
Alternatively this may be a single value instead of a list, in
which case the same value is used for every layer.
weight_decay_penalty: float (default 0.0)
The magnitude of the weight decay penalty to use
weight_decay_penalty_type: str (default "l2")
The type of penalty to use for weight decay, either 'l1' or 'l2'
dropouts: list or float (default 0.25)
            The dropout probability to use for each fully connected layer. The length of this list
should equal len(layer_sizes). Alternatively this may be a single value
instead of a list, in which case the same value is used for every layer.
final_conv_activation_fn: Optional[ActivationFn] (default `tf.nn.tanh`)
            The Tensorflow activation function to apply to the final
            convolution at the end of the weave convolutions. If `None`, then no
            activation is applied (hence linear).
activation_fns: list or object (default `tf.nn.relu`)
The Tensorflow activation function to apply to each fully connected layer. The length
of this list should equal len(layer_sizes). Alternatively this may be a
single value instead of a list, in which case the same value is used for
every layer.
batch_normalize: bool, optional (default True)
If this is turned on, apply batch normalization before applying
activation functions on convolutional and fully connected layers.
batch_normalize_kwargs: Dict, optional (default `{"renorm"=True, "fused": False}`)
Batch normalization is a complex layer which has many potential
argumentswhich change behavior. This layer accepts user-defined
parameters which are passed to all `BatchNormalization` layers in
`WeaveModel`, `WeaveLayer`, and `WeaveGather`.
gaussian_expand: boolean, optional (default True)
Whether to expand each dimension of atomic features by gaussian
histogram
compress_post_gaussian_expansion: bool, optional (default False)
If True, compress the results of the Gaussian expansion back to the
original dimensions of the input.
mode: str (default "classification")
Either "classification" or "regression" for type of model.
n_classes: int (default 2)
Number of classes to predict (only used in classification mode)
batch_size: int (default 100)
Batch size used by this model for training.
"""
if mode not in ['classification', 'regression']:
raise ValueError(
"mode must be either 'classification' or 'regression'")
if not isinstance(n_atom_feat, SequenceCollection):
n_atom_feat = [n_atom_feat] * n_weave
if not isinstance(n_pair_feat, SequenceCollection):
n_pair_feat = [n_pair_feat] * n_weave
n_layers = len(fully_connected_layer_sizes)
if not isinstance(conv_weight_init_stddevs, SequenceCollection):
conv_weight_init_stddevs = [conv_weight_init_stddevs] * n_weave
if not isinstance(weight_init_stddevs, SequenceCollection):
weight_init_stddevs = [weight_init_stddevs] * n_layers
if not isinstance(bias_init_consts, SequenceCollection):
bias_init_consts = [bias_init_consts] * n_layers
if not isinstance(dropouts, SequenceCollection):
dropouts = [dropouts] * n_layers
if not isinstance(activation_fns, SequenceCollection):
activation_fns = [activation_fns] * n_layers
if weight_decay_penalty != 0.0:
if weight_decay_penalty_type == 'l1':
regularizer = tf.keras.regularizers.l1(weight_decay_penalty)
else:
regularizer = tf.keras.regularizers.l2(weight_decay_penalty)
else:
regularizer = None
self.n_tasks = n_tasks
self.n_atom_feat = n_atom_feat
self.n_pair_feat = n_pair_feat
self.n_hidden = n_hidden
self.n_graph_feat = n_graph_feat
self.mode = mode
self.n_classes = n_classes
# Build the model.
atom_features = Input(shape=(self.n_atom_feat[0],))
pair_features = Input(shape=(self.n_pair_feat[0],))
pair_split = Input(shape=tuple(), dtype=tf.int32)
atom_split = Input(shape=tuple(), dtype=tf.int32)
atom_to_pair = Input(shape=(2,), dtype=tf.int32)
inputs = [atom_features, pair_features, pair_split, atom_to_pair]
for ind in range(n_weave):
n_atom = self.n_atom_feat[ind]
n_pair = self.n_pair_feat[ind]
if ind < n_weave - 1:
n_atom_next = self.n_atom_feat[ind + 1]
n_pair_next = self.n_pair_feat[ind + 1]
else:
n_atom_next = n_hidden
n_pair_next = n_hidden
weave_layer_ind_A, weave_layer_ind_P = layers.WeaveLayer(
n_atom_input_feat=n_atom,
n_pair_input_feat=n_pair,
n_atom_output_feat=n_atom_next,
n_pair_output_feat=n_pair_next,
init=tf.keras.initializers.TruncatedNormal(
stddev=conv_weight_init_stddevs[ind]),
batch_normalize=batch_normalize)(inputs)
inputs = [
weave_layer_ind_A, weave_layer_ind_P, pair_split, atom_to_pair
]
# Final atom-layer convolution. Note this differs slightly from the paper
# since we use a tanh activation as default. This seems necessary for numerical
# stability.
dense1 = Dense(self.n_graph_feat,
activation=final_conv_activation_fn)(weave_layer_ind_A)
if batch_normalize:
dense1 = BatchNormalization(**batch_normalize_kwargs)(dense1)
weave_gather = layers.WeaveGather(
batch_size,
n_input=self.n_graph_feat,
gaussian_expand=gaussian_expand,
compress_post_gaussian_expansion=compress_post_gaussian_expansion)(
[dense1, atom_split])
if n_layers > 0:
# Now fully connected layers
input_layer = weave_gather
for layer_size, weight_stddev, bias_const, dropout, activation_fn in zip(
fully_connected_layer_sizes, weight_init_stddevs,
bias_init_consts, dropouts, activation_fns):
layer = Dense(
layer_size,
kernel_initializer=tf.keras.initializers.TruncatedNormal(
stddev=weight_stddev),
bias_initializer=tf.constant_initializer(value=bias_const),
kernel_regularizer=regularizer)(input_layer)
if dropout > 0.0:
layer = Dropout(rate=dropout)(layer)
if batch_normalize:
# Should this allow for training?
layer = BatchNormalization(**batch_normalize_kwargs)(layer)
layer = Activation(activation_fn)(layer)
input_layer = layer
output = input_layer
else:
output = weave_gather
n_tasks = self.n_tasks
if self.mode == 'classification':
n_classes = self.n_classes
logits = Reshape(
(n_tasks, n_classes))(Dense(n_tasks * n_classes)(output))
output = Softmax()(logits)
outputs = [output, logits]
output_types = ['prediction', 'loss']
loss: Loss = SoftmaxCrossEntropy()
else:
output = Dense(n_tasks)(output)
outputs = [output]
output_types = ['prediction']
loss = L2Loss()
model = tf.keras.Model(inputs=[
atom_features, pair_features, pair_split, atom_split, atom_to_pair
],
outputs=outputs)
super(WeaveModel, self).__init__(model,
loss,
output_types=output_types,
batch_size=batch_size,
**kwargs)
def compute_features_on_batch(self, X_b):
"""Compute tensors that will be input into the model from featurized representation.
The featurized input to `WeaveModel` is instances of `WeaveMol` created by
`WeaveFeaturizer`. This method converts input `WeaveMol` objects into
tensors used by the Keras implementation to compute `WeaveModel` outputs.
Parameters
----------
X_b: np.ndarray
A numpy array with dtype=object where elements are `WeaveMol` objects.
Returns
-------
atom_feat: np.ndarray
Of shape `(N_atoms, N_atom_feat)`.
pair_feat: np.ndarray
Of shape `(N_pairs, N_pair_feat)`. Note that `N_pairs` will depend on
the number of pairs being considered. If `max_pair_distance` is
`None`, then this will be `N_atoms**2`. Else it will be the number
            of pairs within the specified graph distance.
pair_split: np.ndarray
Of shape `(N_pairs,)`. The i-th entry in this array will tell you the
originating atom for this pair (the "source"). Note that pairs are
symmetric so for a pair `(a, b)`, both `a` and `b` will separately be
sources at different points in this array.
atom_split: np.ndarray
            Of shape `(N_atoms,)`. The i-th entry in this array is the index of the
            molecule that the i-th atom belongs to.
atom_to_pair: np.ndarray
Of shape `(N_pairs, 2)`. The i-th row in this array will be the array
`[a, b]` if `(a, b)` is a pair to be considered. (Note by symmetry, this
            implies some other row will contain `[b, a]`.)
"""
atom_feat = []
pair_feat = []
atom_split = []
atom_to_pair = []
pair_split = []
start = 0
for im, mol in enumerate(X_b):
n_atoms = mol.get_num_atoms()
# pair_edges is of shape (2, N)
pair_edges = mol.get_pair_edges()
# number of atoms in each molecule
atom_split.extend([im] * n_atoms)
# index of pair features
C0, C1 = np.meshgrid(np.arange(n_atoms), np.arange(n_atoms))
atom_to_pair.append(pair_edges.T + start)
# Get starting pair atoms
pair_starts = pair_edges.T[:, 0]
# number of pairs for each atom
pair_split.extend(pair_starts + start)
start = start + n_atoms
# atom features
atom_feat.append(mol.get_atom_features())
# pair features
pair_feat.append(mol.get_pair_features())
return (np.concatenate(atom_feat, axis=0),
np.concatenate(pair_feat, axis=0), np.array(pair_split),
np.array(atom_split), np.concatenate(atom_to_pair, axis=0))
def default_generator(
self,
dataset: Dataset,
epochs: int = 1,
mode: str = 'fit',
deterministic: bool = True,
pad_batches: bool = True) -> Iterable[Tuple[List, List, List]]:
"""Convert a dataset into the tensors needed for learning.
Parameters
----------
dataset: `dc.data.Dataset`
Dataset to convert
epochs: int, optional (Default 1)
Number of times to walk over `dataset`
mode: str, optional (Default 'fit')
Ignored in this implementation.
deterministic: bool, optional (Default True)
Whether the dataset should be walked in a deterministic fashion
pad_batches: bool, optional (Default True)
If true, each returned batch will have size `self.batch_size`.
Returns
-------
Iterator which walks over the batches
"""
for epoch in range(epochs):
for (X_b, y_b, w_b,
ids_b) in dataset.iterbatches(batch_size=self.batch_size,
deterministic=deterministic,
pad_batches=pad_batches):
if y_b is not None:
if self.mode == 'classification':
y_b = to_one_hot(y_b.flatten(), self.n_classes).reshape(
-1, self.n_tasks, self.n_classes)
inputs = self.compute_features_on_batch(X_b)
yield (inputs, [y_b], [w_b])
class DTNNModel(KerasModel):
"""Deep Tensor Neural Networks
This class implements deep tensor neural networks as first defined in [1]_
References
----------
.. [1] Schütt, <NAME>., et al. "Quantum-chemical insights from deep
tensor neural networks." Nature communications 8.1 (2017): 1-8.
"""
def __init__(self,
n_tasks,
n_embedding=30,
n_hidden=100,
n_distance=100,
distance_min=-1,
distance_max=18,
output_activation=True,
mode="regression",
dropout=0.0,
**kwargs):
"""
Parameters
----------
n_tasks: int
Number of tasks
n_embedding: int, optional
Number of features per atom.
n_hidden: int, optional
Number of features for each molecule after DTNNStep
n_distance: int, optional
granularity of distance matrix
step size will be (distance_max-distance_min)/n_distance
distance_min: float, optional
            minimum distance of atom pairs, default = -1 Angstrom
        distance_max: float, optional
            maximum distance of atom pairs, default = 18 Angstrom
mode: str
Only "regression" is currently supported.
dropout: float
the dropout probablity to use.
"""
if mode not in ['regression']:
raise ValueError("Only 'regression' mode is currently supported")
self.n_tasks = n_tasks
self.n_embedding = n_embedding
self.n_hidden = n_hidden
self.n_distance = n_distance
self.distance_min = distance_min
self.distance_max = distance_max
self.step_size = (distance_max - distance_min) / n_distance
self.steps = np.array(
[distance_min + i * self.step_size for i in range(n_distance)])
self.steps = np.expand_dims(self.steps, 0)
self.output_activation = output_activation
self.mode = mode
self.dropout = dropout
# Build the model.
atom_number = Input(shape=tuple(), dtype=tf.int32)
distance = Input(shape=(self.n_distance,))
atom_membership = Input(shape=tuple(), dtype=tf.int32)
distance_membership_i = Input(shape=tuple(), dtype=tf.int32)
distance_membership_j = Input(shape=tuple(), dtype=tf.int32)
dtnn_embedding = layers.DTNNEmbedding(
n_embedding=self.n_embedding)(atom_number)
if self.dropout > 0.0:
dtnn_embedding = Dropout(rate=self.dropout)(dtnn_embedding)
dtnn_layer1 = layers.DTNNStep(n_embedding=self.n_embedding,
n_distance=self.n_distance)([
dtnn_embedding, distance,
distance_membership_i,
distance_membership_j
])
if self.dropout > 0.0:
dtnn_layer1 = Dropout(rate=self.dropout)(dtnn_layer1)
dtnn_layer2 = layers.DTNNStep(n_embedding=self.n_embedding,
n_distance=self.n_distance)([
dtnn_layer1, distance,
distance_membership_i,
distance_membership_j
])
if self.dropout > 0.0:
dtnn_layer2 = Dropout(rate=self.dropout)(dtnn_layer2)
dtnn_gather = layers.DTNNGather(
n_embedding=self.n_embedding,
layer_sizes=[self.n_hidden],
n_outputs=self.n_tasks,
output_activation=self.output_activation)(
[dtnn_layer2, atom_membership])
if self.dropout > 0.0:
dtnn_gather = Dropout(rate=self.dropout)(dtnn_gather)
n_tasks = self.n_tasks
output = Dense(n_tasks)(dtnn_gather)
model = tf.keras.Model(inputs=[
atom_number, distance, atom_membership, distance_membership_i,
distance_membership_j
],
outputs=[output])
super(DTNNModel, self).__init__(model, L2Loss(), **kwargs)
def compute_features_on_batch(self, X_b):
"""Computes the values for different Feature Layers on given batch
A tf.py_func wrapper is written around this when creating the
input_fn for tf.Estimator
"""
distance = []
atom_membership = []
distance_membership_i = []
distance_membership_j = []
num_atoms = list(map(sum, X_b.astype(bool)[:, :, 0]))
atom_number = [
np.round(
np.power(2 * np.diag(X_b[i, :num_atoms[i], :num_atoms[i]]),
1 / 2.4)).astype(int) for i in range(len(num_atoms))
]
start = 0
for im, molecule in enumerate(atom_number):
distance_matrix = np.outer(
molecule, molecule) / X_b[im, :num_atoms[im], :num_atoms[im]]
np.fill_diagonal(distance_matrix, -100)
distance.append(np.expand_dims(distance_matrix.flatten(), 1))
atom_membership.append([im] * num_atoms[im])
membership = np.array([np.arange(num_atoms[im])] * num_atoms[im])
membership_i = membership.flatten(order='F')
membership_j = membership.flatten()
distance_membership_i.append(membership_i + start)
distance_membership_j.append(membership_j + start)
start = start + num_atoms[im]
atom_number = np.concatenate(atom_number).astype(np.int32)
distance = np.concatenate(distance, axis=0)
gaussian_dist = np.exp(-np.square(distance - self.steps) /
(2 * self.step_size**2))
gaussian_dist = gaussian_dist.astype(np.float32)
atom_mem = np.concatenate(atom_membership).astype(np.int32)
dist_mem_i = np.concatenate(distance_membership_i).astype(np.int32)
dist_mem_j = np.concatenate(distance_membership_j).astype(np.int32)
features = [
atom_number, gaussian_dist, atom_mem, dist_mem_i, dist_mem_j
]
return features
def default_generator(self,
dataset,
epochs=1,
mode='fit',
deterministic=True,
pad_batches=True):
for epoch in range(epochs):
for (X_b, y_b, w_b,
ids_b) in dataset.iterbatches(batch_size=self.batch_size,
deterministic=deterministic,
pad_batches=pad_batches):
yield (self.compute_features_on_batch(X_b), [y_b], [w_b])
class DAGModel(KerasModel):
"""Directed Acyclic Graph models for molecular property prediction.
This model is based on the following paper:
Lusci, Alessandro, <NAME>, and <NAME>. "Deep architectures and deep learning in chemoinformatics: the prediction of aqueous solubility for drug-like molecules." Journal of chemical information and modeling 53.7 (2013): 1563-1575.
The basic idea for this paper is that a molecule is usually
viewed as an undirected graph. However, you can convert it to
a series of directed graphs. The idea is that for each atom,
you make a DAG using that atom as the vertex of the DAG and
edges pointing "inwards" to it. This transformation is
implemented in
`dc.trans.transformers.DAGTransformer.UG_to_DAG`.
This model accepts ConvMols as input, just as GraphConvModel
does, but these ConvMol objects must be transformed by
dc.trans.DAGTransformer.
As a note, performance of this model can be a little
sensitive to initialization. It might be worth training a few
different instantiations to get a stable set of parameters.
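    Examples
    --------
    A minimal illustrative workflow on a tiny toy dataset (the values below
    are placeholders, not recommended settings):
    >>> import numpy as np
    >>> import deepchem as dc
    >>> featurizer = dc.feat.ConvMolFeaturizer()
    >>> X = featurizer(["C", "CC"])
    >>> y = np.array([1, 0])
    >>> dataset = dc.data.NumpyDataset(X, y)
    >>> max_atoms = max(mol.get_num_atoms() for mol in X)
    >>> transformer = dc.trans.DAGTransformer(max_atoms=max_atoms)
    >>> dataset = transformer.transform(dataset)
    >>> model = dc.models.DAGModel(n_tasks=1, max_atoms=max_atoms, mode="classification")
    >>> loss = model.fit(dataset, nb_epoch=1)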
"""
def __init__(self,
n_tasks,
max_atoms=50,
n_atom_feat=75,
n_graph_feat=30,
n_outputs=30,
layer_sizes=[100],
layer_sizes_gather=[100],
dropout=None,
mode="classification",
n_classes=2,
uncertainty=False,
batch_size=100,
**kwargs):
"""
Parameters
----------
n_tasks: int
Number of tasks.
max_atoms: int, optional
Maximum number of atoms in a molecule, should be defined based on dataset.
n_atom_feat: int, optional
Number of features per atom.
n_graph_feat: int, optional
Number of features for atom in the graph.
n_outputs: int, optional
Number of features for each molecule.
layer_sizes: list of int, optional
List of hidden layer size(s) in the propagation step:
length of this list represents the number of hidden layers,
and each element is the width of corresponding hidden layer.
layer_sizes_gather: list of int, optional
List of hidden layer size(s) in the gather step.
dropout: None or float, optional
Dropout probability, applied after each propagation step and gather step.
mode: str, optional
Either "classification" or "regression" for type of model.
n_classes: int
the number of classes to predict (only used in classification mode)
uncertainty: bool
if True, include extra outputs and loss terms to enable the uncertainty
in outputs to be predicted
"""
if mode not in ['classification', 'regression']:
raise ValueError(
"mode must be either 'classification' or 'regression'")
self.n_tasks = n_tasks
self.max_atoms = max_atoms
self.n_atom_feat = n_atom_feat
self.n_graph_feat = n_graph_feat
self.n_outputs = n_outputs
self.layer_sizes = layer_sizes
self.layer_sizes_gather = layer_sizes_gather
self.dropout = dropout
self.mode = mode
self.n_classes = n_classes
self.uncertainty = uncertainty
if uncertainty:
if mode != "regression":
raise ValueError(
"Uncertainty is only supported in regression mode")
if dropout is None or dropout == 0.0:
raise ValueError(
'Dropout must be included to predict uncertainty')
# Build the model.
atom_features = Input(shape=(self.n_atom_feat,))
parents = Input(shape=(self.max_atoms, self.max_atoms), dtype=tf.int32)
calculation_orders = Input(shape=(self.max_atoms,), dtype=tf.int32)
calculation_masks = Input(shape=(self.max_atoms,), dtype=tf.bool)
membership = Input(shape=tuple(), dtype=tf.int32)
n_atoms = Input(shape=tuple(), dtype=tf.int32)
dag_layer1 = layers.DAGLayer(n_graph_feat=self.n_graph_feat,
n_atom_feat=self.n_atom_feat,
max_atoms=self.max_atoms,
layer_sizes=self.layer_sizes,
dropout=self.dropout,
batch_size=batch_size)([
atom_features, parents,
calculation_orders, calculation_masks,
n_atoms
])
dag_gather = layers.DAGGather(
n_graph_feat=self.n_graph_feat,
n_outputs=self.n_outputs,
max_atoms=self.max_atoms,
layer_sizes=self.layer_sizes_gather,
dropout=self.dropout)([dag_layer1, membership])
n_tasks = self.n_tasks
if self.mode == 'classification':
n_classes = self.n_classes
logits = Reshape(
(n_tasks, n_classes))(Dense(n_tasks * n_classes)(dag_gather))
output = Softmax()(logits)
outputs = [output, logits]
output_types = ['prediction', 'loss']
loss = SoftmaxCrossEntropy()
else:
output = Dense(n_tasks)(dag_gather)
if self.uncertainty:
log_var = Dense(n_tasks)(dag_gather)
var = Activation(tf.exp)(log_var)
outputs = [output, var, output, log_var]
output_types = ['prediction', 'variance', 'loss', 'loss']
def loss(outputs, labels, weights):
output, labels = dc.models.losses._make_tf_shapes_consistent(
outputs[0], labels[0])
output, labels = dc.models.losses._ensure_float(
output, labels)
losses = tf.square(output - labels) / tf.exp(
outputs[1]) + outputs[1]
w = weights[0]
if len(w.shape) < len(losses.shape):
if tf.is_tensor(w):
shape = tuple(w.shape.as_list())
else:
shape = w.shape
shape = tuple(-1 if x is None else x for x in shape)
w = tf.reshape(
w,
shape + (1,) * (len(losses.shape) - len(w.shape)))
return tf.reduce_mean(losses * w) + sum(self.model.losses)
else:
outputs = [output]
output_types = ['prediction']
loss = L2Loss()
model = tf.keras.Model(
inputs=[
atom_features,
parents,
calculation_orders,
calculation_masks,
membership,
n_atoms, # dropout_switch
],
outputs=outputs)
super(DAGModel, self).__init__(model,
loss,
output_types=output_types,
batch_size=batch_size,
**kwargs)
def default_generator(self,
dataset,
epochs=1,
mode='fit',
deterministic=True,
pad_batches=True):
"""Convert a dataset into the tensors needed for learning"""
for epoch in range(epochs):
for (X_b, y_b, w_b,
ids_b) in dataset.iterbatches(batch_size=self.batch_size,
deterministic=deterministic,
pad_batches=pad_batches):
if y_b is not None and self.mode == 'classification':
y_b = to_one_hot(y_b.flatten(), self.n_classes).reshape(
-1, self.n_tasks, self.n_classes)
atoms_per_mol = [mol.get_num_atoms() for mol in X_b]
n_atoms = sum(atoms_per_mol)
start_index = [0] + list(np.cumsum(atoms_per_mol)[:-1])
atoms_all = []
# calculation orders for a batch of molecules
parents_all = []
calculation_orders = []
calculation_masks = []
membership = []
for idm, mol in enumerate(X_b):
# padding atom features vector of each molecule with 0
atoms_all.append(mol.get_atom_features())
parents = mol.parents
parents_all.extend(parents)
calculation_index = np.array(parents)[:, :, 0]
mask = np.array(calculation_index - self.max_atoms,
dtype=bool)
calculation_orders.append(calculation_index +
start_index[idm])
calculation_masks.append(mask)
membership.extend([idm] * atoms_per_mol[idm])
if mode == 'predict':
dropout = np.array(0.0)
else:
dropout = np.array(1.0)
yield ([
np.concatenate(atoms_all, axis=0),
np.stack(parents_all, axis=0),
np.concatenate(calculation_orders, axis=0),
np.concatenate(calculation_masks, axis=0),
np.array(membership),
np.array(n_atoms), dropout
], [y_b], [w_b])
class _GraphConvKerasModel(tf.keras.Model):
def __init__(self,
n_tasks,
graph_conv_layers,
dense_layer_size=128,
dropout=0.0,
mode="classification",
number_atom_features=75,
n_classes=2,
batch_normalize=True,
uncertainty=False,
batch_size=100):
"""An internal keras model class.
The graph convolutions use a nonstandard control flow so the
standard Keras functional API can't support them. We instead
use the imperative "subclassing" API to implement the graph
convolutions.
All arguments have the same meaning as in GraphConvModel.
"""
super(_GraphConvKerasModel, self).__init__()
if mode not in ['classification', 'regression']:
raise ValueError(
"mode must be either 'classification' or 'regression'")
self.mode = mode
self.uncertainty = uncertainty
if not isinstance(dropout, SequenceCollection):
dropout = [dropout] * (len(graph_conv_layers) + 1)
if len(dropout) != len(graph_conv_layers) + 1:
raise ValueError('Wrong number of dropout probabilities provided')
if uncertainty:
if mode != "regression":
raise ValueError(
"Uncertainty is only supported in regression mode")
if any(d == 0.0 for d in dropout):
raise ValueError(
'Dropout must be included in every layer to predict uncertainty'
)
self.graph_convs = [
layers.GraphConv(layer_size, activation_fn=tf.nn.relu)
for layer_size in graph_conv_layers
]
self.batch_norms = [
BatchNormalization(fused=False) if batch_normalize else None
for _ in range(len(graph_conv_layers) + 1)
]
self.dropouts = [
Dropout(rate=rate) if rate > 0.0 else None for rate in dropout
]
self.graph_pools = [layers.GraphPool() for _ in graph_conv_layers]
self.dense = Dense(dense_layer_size, activation=tf.nn.relu)
self.graph_gather = layers.GraphGather(batch_size=batch_size,
activation_fn=tf.nn.tanh)
self.trim = TrimGraphOutput()
if self.mode == 'classification':
self.reshape_dense = Dense(n_tasks * n_classes)
self.reshape = Reshape((n_tasks, n_classes))
self.softmax = Softmax()
else:
self.regression_dense = Dense(n_tasks)
if self.uncertainty:
self.uncertainty_dense = Dense(n_tasks)
self.uncertainty_trim = TrimGraphOutput()
self.uncertainty_activation = Activation(tf.exp)
def call(self, inputs, training=False):
atom_features = inputs[0]
degree_slice = tf.cast(inputs[1], dtype=tf.int32)
membership = tf.cast(inputs[2], dtype=tf.int32)
n_samples = tf.cast(inputs[3], dtype=tf.int32)
deg_adjs = [tf.cast(deg_adj, dtype=tf.int32) for deg_adj in inputs[4:]]
in_layer = atom_features
for i in range(len(self.graph_convs)):
gc_in = [in_layer, degree_slice, membership] + deg_adjs
gc1 = self.graph_convs[i](gc_in)
if self.batch_norms[i] is not None:
gc1 = self.batch_norms[i](gc1, training=training)
if training and self.dropouts[i] is not None:
gc1 = self.dropouts[i](gc1, training=training)
gp_in = [gc1, degree_slice, membership] + deg_adjs
in_layer = self.graph_pools[i](gp_in)
dense = self.dense(in_layer)
if self.batch_norms[-1] is not None:
dense = self.batch_norms[-1](dense, training=training)
if training and self.dropouts[-1] is not None:
dense = self.dropouts[-1](dense, training=training)
neural_fingerprint = self.graph_gather(
[dense, degree_slice, membership] + deg_adjs)
if self.mode == 'classification':
logits = self.reshape(self.reshape_dense(neural_fingerprint))
logits = self.trim([logits, n_samples])
output = self.softmax(logits)
outputs = [output, logits, neural_fingerprint]
else:
output = self.regression_dense(neural_fingerprint)
output = self.trim([output, n_samples])
if self.uncertainty:
log_var = self.uncertainty_dense(neural_fingerprint)
log_var = self.uncertainty_trim([log_var, n_samples])
var = self.uncertainty_activation(log_var)
outputs = [output, var, output, log_var, neural_fingerprint]
else:
outputs = [output, neural_fingerprint]
return outputs
class GraphConvModel(KerasModel):
"""Graph Convolutional Models.
This class implements the graph convolutional model from the
following paper [1]_. These graph convolutions start with a per-atom set of
descriptors for each atom in a molecule, then combine and recombine these
    descriptors over convolutional layers, following [1]_.
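    Examples
    --------
    A minimal illustrative fit on a tiny toy dataset (placeholder values,
    not tuned settings):
    >>> import numpy as np
    >>> import deepchem as dc
    >>> featurizer = dc.feat.ConvMolFeaturizer()
    >>> X = featurizer(["C", "CC"])
    >>> y = np.array([1, 0])
    >>> dataset = dc.data.NumpyDataset(X, y)
    >>> model = dc.models.GraphConvModel(n_tasks=1, mode="classification")
    >>> loss = model.fit(dataset, nb_epoch=1)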
References
----------
.. [1] Duvenaud, <NAME>., et al. "Convolutional networks on graphs for
learning molecular fingerprints." Advances in neural information processing
systems. 2015.
"""
def __init__(self,
n_tasks: int,
graph_conv_layers: List[int] = [64, 64],
dense_layer_size: int = 128,
dropout: float = 0.0,
mode: str = "classification",
number_atom_features: int = 75,
n_classes: int = 2,
batch_size: int = 100,
batch_normalize: bool = True,
uncertainty: bool = False,
**kwargs):
"""The wrapper class for graph convolutions.
Note that since the underlying _GraphConvKerasModel class is
specified using imperative subclassing style, this model
        cannot make predictions for arbitrary outputs.
Parameters
----------
n_tasks: int
Number of tasks
graph_conv_layers: list of int
Width of channels for the Graph Convolution Layers
dense_layer_size: int
Width of channels for Atom Level Dense Layer after GraphPool
dropout: list or float
            the dropout probability to use for each layer. The length of this list
should equal len(graph_conv_layers)+1 (one value for each convolution
layer, and one for the dense layer). Alternatively this may be a single
value instead of a list, in which case the same value is used for every
layer.
mode: str
Either "classification" or "regression"
number_atom_features: int
75 is the default number of atom features created, but
this can vary if various options are passed to the
function atom_features in graph_features
n_classes: int
the number of classes to predict (only used in classification mode)
        batch_normalize: bool (default True)
            if True, apply batch normalization to the model
uncertainty: bool
if True, include extra outputs and loss terms to enable the uncertainty
in outputs to be predicted
"""
self.mode = mode
self.n_tasks = n_tasks
self.n_classes = n_classes
self.batch_size = batch_size
self.uncertainty = uncertainty
model = _GraphConvKerasModel(n_tasks,
graph_conv_layers=graph_conv_layers,
dense_layer_size=dense_layer_size,
dropout=dropout,
mode=mode,
number_atom_features=number_atom_features,
n_classes=n_classes,
batch_normalize=batch_normalize,
uncertainty=uncertainty,
batch_size=batch_size)
if mode == "classification":
output_types = ['prediction', 'loss', 'embedding']
loss: Union[Loss, LossFn] = SoftmaxCrossEntropy()
else:
if self.uncertainty:
output_types = [
'prediction', 'variance', 'loss', 'loss', 'embedding'
]
def loss(outputs, labels, weights):
output, labels = dc.models.losses._make_tf_shapes_consistent(
outputs[0], labels[0])
output, labels = dc.models.losses._ensure_float(
output, labels)
losses = tf.square(output - labels) / tf.exp(
outputs[1]) + outputs[1]
w = weights[0]
if len(w.shape) < len(losses.shape):
if tf.is_tensor(w):
shape = tuple(w.shape.as_list())
else:
shape = w.shape
shape = tuple(-1 if x is None else x for x in shape)
w = tf.reshape(
w,
shape + (1,) * (len(losses.shape) - len(w.shape)))
return tf.reduce_mean(losses * w) + sum(self.model.losses)
else:
output_types = ['prediction', 'embedding']
loss = L2Loss()
super(GraphConvModel, self).__init__(model,
loss,
output_types=output_types,
batch_size=batch_size,
**kwargs)
def default_generator(self,
dataset,
epochs=1,
mode='fit',
deterministic=True,
pad_batches=True):
for epoch in range(epochs):
for (X_b, y_b, w_b,
ids_b) in dataset.iterbatches(batch_size=self.batch_size,
deterministic=deterministic,
pad_batches=pad_batches):
if y_b is not None and self.mode == 'classification' and not (
mode == 'predict'):
y_b = to_one_hot(y_b.flatten(), self.n_classes).reshape(
-1, self.n_tasks, self.n_classes)
multiConvMol = ConvMol.agglomerate_mols(X_b)
n_samples = np.array(X_b.shape[0])
inputs = [
multiConvMol.get_atom_features(), multiConvMol.deg_slice,
np.array(multiConvMol.membership), n_samples
]
for i in range(1, len(multiConvMol.get_deg_adjacency_lists())):
inputs.append(multiConvMol.get_deg_adjacency_lists()[i])
yield (inputs, [y_b], [w_b])
class MPNNModel(KerasModel):
""" Message Passing Neural Network,
Message Passing Neural Networks [1]_ treat graph convolutional
operations as an instantiation of a more general message
    passing scheme. Recall that message passing in a graph is when
nodes in a graph send each other "messages" and update their
internal state as a consequence of these messages.
Ordering structures in this model are built according to [2]_
References
----------
.. [1] <NAME>, <NAME>, <NAME>, <NAME>, <NAME>.
"Neural Message Passing for Quantum Chemistry." ICML 2017.
.. [2] Vinyals, Oriol, <NAME>, and <NAME>. "Order matters:
Sequence to sequence for sets." arXiv preprint arXiv:1511.06391 (2015).
"""
def __init__(self,
n_tasks,
n_atom_feat=70,
n_pair_feat=8,
n_hidden=100,
T=5,
M=10,
mode="regression",
dropout=0.0,
n_classes=2,
uncertainty=False,
batch_size=100,
**kwargs):
"""
Parameters
----------
n_tasks: int
Number of tasks
n_atom_feat: int, optional
Number of features per atom.
n_pair_feat: int, optional
Number of features per pair of atoms.
n_hidden: int, optional
Number of units(convolution depths) in corresponding hidden layer
        T: int, optional
            Number of message passing steps.
        M: int, optional
            Number of set2set (readout) steps.
dropout: float
            the dropout probability to use.
n_classes: int
the number of classes to predict (only used in classification mode)
uncertainty: bool
if True, include extra outputs and loss terms to enable the uncertainty
in outputs to be predicted
"""
if mode not in ['classification', 'regression']:
raise ValueError(
"mode must be either 'classification' or 'regression'")
self.n_tasks = n_tasks
self.n_atom_feat = n_atom_feat
self.n_pair_feat = n_pair_feat
self.n_hidden = n_hidden
self.T = T
self.M = M
self.mode = mode
self.n_classes = n_classes
self.uncertainty = uncertainty
if uncertainty:
if mode != "regression":
raise ValueError(
"Uncertainty is only supported in regression mode")
if dropout == 0.0:
raise ValueError(
'Dropout must be included to predict uncertainty')
# Build the model.
atom_features = Input(shape=(self.n_atom_feat,))
pair_features = Input(shape=(self.n_pair_feat,))
atom_split = Input(shape=tuple(), dtype=tf.int32)
atom_to_pair = Input(shape=(2,), dtype=tf.int32)
n_samples = Input(shape=tuple(), dtype=tf.int32)
message_passing = layers.MessagePassing(self.T,
message_fn='enn',
update_fn='gru',
n_hidden=self.n_hidden)([
atom_features,
pair_features, atom_to_pair
])
atom_embeddings = Dense(self.n_hidden)(message_passing)
mol_embeddings = layers.SetGather(self.M,
batch_size,
n_hidden=self.n_hidden)(
[atom_embeddings, atom_split])
dense1 = Dense(2 * self.n_hidden, activation=tf.nn.relu)(mol_embeddings)
n_tasks = self.n_tasks
if self.mode == 'classification':
n_classes = self.n_classes
logits = Reshape(
(n_tasks, n_classes))(Dense(n_tasks * n_classes)(dense1))
logits = TrimGraphOutput()([logits, n_samples])
output = Softmax()(logits)
outputs = [output, logits]
output_types = ['prediction', 'loss']
loss = SoftmaxCrossEntropy()
else:
output = Dense(n_tasks)(dense1)
output = TrimGraphOutput()([output, n_samples])
if self.uncertainty:
log_var = Dense(n_tasks)(dense1)
log_var = TrimGraphOutput()([log_var, n_samples])
var = Activation(tf.exp)(log_var)
outputs = [output, var, output, log_var]
output_types = ['prediction', 'variance', 'loss', 'loss']
def loss(outputs, labels, weights):
output, labels = dc.models.losses._make_tf_shapes_consistent(
outputs[0], labels[0])
output, labels = dc.models.losses._ensure_float(
output, labels)
losses = tf.square(output - labels) / tf.exp(
outputs[1]) + outputs[1]
w = weights[0]
if len(w.shape) < len(losses.shape):
if tf.is_tensor(w):
shape = tuple(w.shape.as_list())
else:
shape = w.shape
shape = tuple(-1 if x is None else x for x in shape)
w = tf.reshape(
w,
shape + (1,) * (len(losses.shape) - len(w.shape)))
return tf.reduce_mean(losses * w) + sum(self.model.losses)
else:
outputs = [output]
output_types = ['prediction']
loss = L2Loss()
model = tf.keras.Model(inputs=[
atom_features, pair_features, atom_split, atom_to_pair, n_samples
],
outputs=outputs)
super(MPNNModel, self).__init__(model,
loss,
output_types=output_types,
batch_size=batch_size,
**kwargs)
def default_generator(self,
dataset,
epochs=1,
mode='fit',
deterministic=True,
pad_batches=True):
for epoch in range(epochs):
for (X_b, y_b, w_b,
ids_b) in dataset.iterbatches(batch_size=self.batch_size,
deterministic=deterministic,
pad_batches=pad_batches):
n_samples = np.array(X_b.shape[0])
X_b = pad_features(self.batch_size, X_b)
if y_b is not None and self.mode == 'classification':
y_b = to_one_hot(y_b.flatten(), self.n_classes).reshape(
-1, self.n_tasks, self.n_classes)
atom_feat = []
pair_feat = []
atom_split = []
atom_to_pair = []
pair_split = []
start = 0
for im, mol in enumerate(X_b):
n_atoms = mol.get_num_atoms()
# number of atoms in each molecule
atom_split.extend([im] * n_atoms)
# index of pair features
C0, C1 = np.meshgrid(np.arange(n_atoms), np.arange(n_atoms))
atom_to_pair.append(
np.transpose(
np.array(
[C1.flatten() + start,
C0.flatten() + start])))
# number of pairs for each atom
pair_split.extend(C1.flatten() + start)
start = start + n_atoms
# atom features
atom_feat.append(mol.get_atom_features())
# pair features
pair_feat.append(
np.reshape(mol.get_pair_features(),
(n_atoms * n_atoms, self.n_pair_feat)))
inputs = [
np.concatenate(atom_feat, axis=0),
np.concatenate(pair_feat, axis=0),
np.array(atom_split),
np.concatenate(atom_to_pair, axis=0), n_samples
]
yield (inputs, [y_b], [w_b])
#################### Deprecation warnings for renamed TensorGraph models #################### # noqa: E266
import warnings # noqa: E402
TENSORGRAPH_DEPRECATION = "{} is deprecated and has been renamed to {} and will be removed in DeepChem 3.0."
class GraphConvTensorGraph(GraphConvModel):
def __init__(self, *args, **kwargs):
warnings.warn(
TENSORGRAPH_DEPRECATION.format("GraphConvTensorGraph",
"GraphConvModel"), FutureWarning)
super(GraphConvTensorGraph, self).__init__(*args, **kwargs)
class WeaveTensorGraph(WeaveModel):
def __init__(self, *args, **kwargs):
warnings.warn(
TENSORGRAPH_DEPRECATION.format("WeaveTensorGraph", "WeaveModel"),
FutureWarning)
        super(WeaveTensorGraph, self).__init__(*args, **kwargs)
class DTNNTensorGraph(DTNNModel):
def __init__(self, *args, **kwargs):
warnings.warn(
TENSORGRAPH_DEPRECATION.format("DTNNTensorGraph", "DTNNModel"),
FutureWarning)
        super(DTNNTensorGraph, self).__init__(*args, **kwargs)
class DAGTensorGraph(DAGModel):
def __init__(self, *args, **kwargs):
warnings.warn(
TENSORGRAPH_DEPRECATION.format("DAGTensorGraph", "DAGModel"),
FutureWarning)
        super(DAGTensorGraph, self).__init__(*args, **kwargs)
class MPNNTensorGraph(MPNNModel):
def __init__(self, *args, **kwargs):
warnings.warn(
TENSORGRAPH_DEPRECATION.format("MPNNTensorGraph", "MPNNModel"),
FutureWarning)
        super(MPNNTensorGraph, self).__init__(*args, **kwargs)
<file_sep>import os
import pytest
import deepchem as dc
import numpy as np
import math
import unittest
try:
import torch
import torch.nn.functional as F
has_pytorch = True
except:
has_pytorch = False
try:
import wandb # noqa: F401
has_wandb = True
except:
has_wandb = False
@pytest.mark.torch
def test_overfit_subclass_model():
"""Test fitting a TorchModel defined by subclassing Module."""
n_data_points = 10
n_features = 2
np.random.seed(1234)
X = np.random.rand(n_data_points, n_features)
y = (X[:, 0] > X[:, 1]).astype(np.float32)
dataset = dc.data.NumpyDataset(X, y)
class ExampleModel(torch.nn.Module):
def __init__(self, layer_sizes):
super(ExampleModel, self).__init__()
self.layers = torch.nn.ModuleList()
in_size = n_features
for out_size in layer_sizes:
self.layers.append(torch.nn.Linear(in_size, out_size))
in_size = out_size
def forward(self, x):
for i, layer in enumerate(self.layers):
x = layer(x)
if i < len(self.layers) - 1:
x = F.relu(x)
return torch.sigmoid(x), x
pytorch_model = ExampleModel([10, 1])
model = dc.models.TorchModel(pytorch_model,
dc.models.losses.SigmoidCrossEntropy(),
output_types=['prediction', 'loss'],
learning_rate=0.005)
model.fit(dataset, nb_epoch=1000)
prediction = np.squeeze(model.predict_on_batch(X))
assert np.array_equal(y, np.round(prediction))
metric = dc.metrics.Metric(dc.metrics.roc_auc_score)
scores = model.evaluate(dataset, [metric])
assert scores[metric.name] > 0.9
@pytest.mark.torch
def test_overfit_sequential_model():
"""Test fitting a TorchModel defined as a sequential model."""
n_data_points = 10
n_features = 2
X = np.random.rand(n_data_points, n_features)
y = (X[:, 0] > X[:, 1]).astype(np.float32)
dataset = dc.data.NumpyDataset(X, y)
pytorch_model = torch.nn.Sequential(torch.nn.Linear(2, 10), torch.nn.ReLU(),
torch.nn.Linear(10, 1),
torch.nn.Sigmoid())
model = dc.models.TorchModel(pytorch_model,
dc.models.losses.BinaryCrossEntropy(),
learning_rate=0.005)
model.fit(dataset, nb_epoch=1000)
prediction = np.squeeze(model.predict_on_batch(X))
assert np.array_equal(y, np.round(prediction))
metric = dc.metrics.Metric(dc.metrics.roc_auc_score)
generator = model.default_generator(dataset, pad_batches=False)
scores = model.evaluate_generator(generator, [metric])
assert scores[metric.name] > 0.9
@pytest.mark.torch
def test_fit_use_all_losses():
"""Test fitting a TorchModel and getting a loss curve back."""
n_data_points = 10
n_features = 2
X = np.random.rand(n_data_points, n_features)
y = (X[:, 0] > X[:, 1]).astype(np.float32)
dataset = dc.data.NumpyDataset(X, y)
pytorch_model = torch.nn.Sequential(torch.nn.Linear(2, 10), torch.nn.ReLU(),
torch.nn.Linear(10, 1),
torch.nn.Sigmoid())
model = dc.models.TorchModel(pytorch_model,
dc.models.losses.BinaryCrossEntropy(),
learning_rate=0.005,
log_frequency=10)
losses = []
model.fit(dataset, nb_epoch=1000, all_losses=losses)
# Each epoch is a single step for this model
assert len(losses) == 100
assert np.count_nonzero(np.array(losses)) == 100
@pytest.mark.torch
def test_fit_on_batch():
"""Test fitting a TorchModel to individual batches."""
n_data_points = 10
n_features = 2
X = np.random.rand(n_data_points, n_features)
y = (X[:, 0] > X[:, 1]).astype(np.float32)
dataset = dc.data.NumpyDataset(X, y)
pytorch_model = torch.nn.Sequential(torch.nn.Linear(2, 10), torch.nn.ReLU(),
torch.nn.Linear(10, 1),
torch.nn.Sigmoid())
model = dc.models.TorchModel(pytorch_model,
dc.models.losses.BinaryCrossEntropy(),
learning_rate=0.005)
i = 0
for X, y, w, ids in dataset.iterbatches(model.batch_size, 500):
i += 1
model.fit_on_batch(X, y, w, checkpoint=False)
prediction = np.squeeze(model.predict_on_batch(X))
assert np.array_equal(y, np.round(prediction))
metric = dc.metrics.Metric(dc.metrics.roc_auc_score)
generator = model.default_generator(dataset, pad_batches=False)
scores = model.evaluate_generator(generator, [metric])
assert scores[metric.name] > 0.9
@pytest.mark.torch
def test_checkpointing():
"""Test loading and saving checkpoints with TorchModel."""
# Create two models using the same model directory.
pytorch_model1 = torch.nn.Sequential(torch.nn.Linear(5, 10))
pytorch_model2 = torch.nn.Sequential(torch.nn.Linear(5, 10))
model1 = dc.models.TorchModel(pytorch_model1, dc.models.losses.L2Loss())
model2 = dc.models.TorchModel(pytorch_model2,
dc.models.losses.L2Loss(),
model_dir=model1.model_dir)
# Check that they produce different results.
X = np.random.rand(5, 5)
y1 = model1.predict_on_batch(X)
y2 = model2.predict_on_batch(X)
assert not np.array_equal(y1, y2)
# Save a checkpoint from the first model and load it into the second one,
# and make sure they now match.
model1.save_checkpoint()
model2.restore()
y3 = model1.predict_on_batch(X)
y4 = model2.predict_on_batch(X)
assert np.array_equal(y1, y3)
assert np.array_equal(y1, y4)
@pytest.mark.torch
def test_fit_restore():
"""Test specifying restore=True when calling fit()."""
n_data_points = 10
n_features = 2
X = np.random.rand(n_data_points, n_features)
y = (X[:, 0] > X[:, 1]).astype(np.float32)
dataset = dc.data.NumpyDataset(X, y)
# Train a model to overfit the dataset.
pytorch_model = torch.nn.Sequential(torch.nn.Linear(2, 10), torch.nn.ReLU(),
torch.nn.Linear(10, 1),
torch.nn.Sigmoid())
model = dc.models.TorchModel(pytorch_model,
dc.models.losses.BinaryCrossEntropy(),
learning_rate=0.005)
model.fit(dataset, nb_epoch=1000)
prediction = np.squeeze(model.predict_on_batch(X))
assert np.array_equal(y, np.round(prediction))
# Create an identical model, do a single step of fitting with restore=True,
# and make sure it got restored correctly.
pytorch_model2 = torch.nn.Sequential(torch.nn.Linear(2,
10), torch.nn.ReLU(),
torch.nn.Linear(10, 1),
torch.nn.Sigmoid())
model2 = dc.models.TorchModel(pytorch_model2,
dc.models.losses.BinaryCrossEntropy(),
model_dir=model.model_dir)
model2.fit(dataset, nb_epoch=1, restore=True)
prediction = np.squeeze(model2.predict_on_batch(X))
assert np.array_equal(y, np.round(prediction))
@pytest.mark.torch
def test_uncertainty():
"""Test estimating uncertainty a TorchModel."""
n_samples = 30
n_features = 1
noise = 0.1
X = np.random.rand(n_samples, n_features)
y = (10 * X + np.random.normal(scale=noise, size=(n_samples, n_features)))
dataset = dc.data.NumpyDataset(X, y)
# Build a model that predicts uncertainty.
class PyTorchUncertainty(torch.nn.Module):
def __init__(self):
super(PyTorchUncertainty, self).__init__()
self.hidden = torch.nn.Linear(n_features, 200)
self.output = torch.nn.Linear(200, n_features)
self.log_var = torch.nn.Linear(200, n_features)
def forward(self, inputs):
x, use_dropout = inputs
x = self.hidden(x)
if use_dropout:
x = F.dropout(x, 0.1)
output = self.output(x)
log_var = self.log_var(x)
var = torch.exp(log_var)
return (output, var, output, log_var)
def loss(outputs, labels, weights):
diff = labels[0] - outputs[0]
log_var = outputs[1]
var = torch.exp(log_var)
return torch.mean(diff * diff / var + log_var)
class UncertaintyModel(dc.models.TorchModel):
def default_generator(self,
dataset,
epochs=1,
mode='fit',
deterministic=True,
pad_batches=True):
for epoch in range(epochs):
for (X_b, y_b, w_b,
ids_b) in dataset.iterbatches(batch_size=self.batch_size,
deterministic=deterministic,
pad_batches=pad_batches):
if mode == 'predict':
dropout = np.array(False)
else:
dropout = np.array(True)
yield ([X_b, dropout], [y_b], [w_b])
pytorch_model = PyTorchUncertainty()
model = UncertaintyModel(
pytorch_model,
loss,
output_types=['prediction', 'variance', 'loss', 'loss'],
learning_rate=0.003)
# Fit the model and see if its predictions are correct.
model.fit(dataset, nb_epoch=2500)
pred, std = model.predict_uncertainty(dataset)
assert np.mean(np.abs(y - pred)) < 1.0
assert noise < np.mean(std) < 1.0
@pytest.mark.torch
def test_saliency_mapping():
"""Test computing a saliency map."""
n_tasks = 3
n_features = 5
pytorch_model = torch.nn.Sequential(torch.nn.Linear(n_features, 20),
torch.nn.Tanh(),
torch.nn.Linear(20, n_tasks))
model = dc.models.TorchModel(pytorch_model, dc.models.losses.L2Loss())
x = np.random.random(n_features)
s = model.compute_saliency(x)
assert s.shape[0] == n_tasks
assert s.shape[1] == n_features
# Take a tiny step in the direction of s and see if the output changes by
# the expected amount.
delta = 0.01
for task in range(n_tasks):
norm = np.sqrt(np.sum(s[task]**2))
step = 0.5 * delta / norm
pred1 = model.predict_on_batch((x + s[task] * step).reshape(
(1, n_features))).flatten()
pred2 = model.predict_on_batch((x - s[task] * step).reshape(
(1, n_features))).flatten()
assert np.allclose(pred1[task], (pred2 + norm * delta)[task], atol=1e-6)
@pytest.mark.torch
def test_saliency_shapes():
"""Test computing saliency maps for multiple outputs with multiple dimensions."""
class SaliencyModel(torch.nn.Module):
def __init__(self):
super(SaliencyModel, self).__init__()
self.layer1 = torch.nn.Linear(6, 4)
self.layer2 = torch.nn.Linear(6, 5)
def forward(self, x):
x = torch.flatten(x)
output1 = self.layer1(x).reshape(1, 4, 1)
output2 = self.layer2(x).reshape(1, 1, 5)
return output1, output2
pytorch_model = SaliencyModel()
model = dc.models.TorchModel(pytorch_model, dc.models.losses.L2Loss())
x = np.random.random((2, 3))
s = model.compute_saliency(x)
assert len(s) == 2
assert s[0].shape == (4, 1, 2, 3)
assert s[1].shape == (1, 5, 2, 3)
@pytest.mark.torch
def test_tensorboard():
"""Test logging to Tensorboard."""
n_data_points = 20
n_features = 2
X = np.random.rand(n_data_points, n_features)
y = [[0.0, 1.0] for x in range(n_data_points)]
dataset = dc.data.NumpyDataset(X, y)
pytorch_model = torch.nn.Sequential(torch.nn.Linear(n_features, 2),
torch.nn.Softmax(dim=1))
model = dc.models.TorchModel(pytorch_model,
dc.models.losses.CategoricalCrossEntropy(),
tensorboard=True,
log_frequency=1)
model.fit(dataset, nb_epoch=10)
files_in_dir = os.listdir(model.model_dir)
event_file = list(filter(lambda x: x.startswith("events"), files_in_dir))
assert len(event_file) > 0
event_file = os.path.join(model.model_dir, event_file[0])
file_size = os.stat(event_file).st_size
assert file_size > 0
@pytest.mark.torch
@unittest.skipIf((not has_pytorch) or (not has_wandb),
'PyTorch and/or Wandb is not installed')
def test_wandblogger():
"""Test logging to Weights & Biases."""
# Load dataset and Models
tasks, datasets, transformers = dc.molnet.load_delaney(featurizer='ECFP',
splitter='random')
train_dataset, valid_dataset, test_dataset = datasets
metric = dc.metrics.Metric(dc.metrics.pearson_r2_score)
wandblogger = dc.models.WandbLogger(anonymous="allow",
save_run_history=True)
pytorch_model = torch.nn.Sequential(torch.nn.Linear(1024, 1000),
torch.nn.Dropout(p=0.5),
torch.nn.Linear(1000, 1))
model = dc.models.TorchModel(pytorch_model,
dc.models.losses.L2Loss(),
wandb_logger=wandblogger)
vc_train = dc.models.ValidationCallback(train_dataset, 1, [metric])
vc_valid = dc.models.ValidationCallback(valid_dataset, 1, [metric])
model.fit(train_dataset, nb_epoch=10, callbacks=[vc_train, vc_valid])
# call model.fit again to test multiple fit() calls
model.fit(train_dataset, nb_epoch=10, callbacks=[vc_train, vc_valid])
wandblogger.finish()
run_data = wandblogger.run_history
valid_score = model.evaluate(valid_dataset, [metric], transformers)
assert math.isclose(valid_score["pearson_r2_score"],
run_data['eval/pearson_r2_score_(1)'],
abs_tol=0.0005)
@pytest.mark.torch
def test_fit_variables():
"""Test training a subset of the variables in a model."""
class VarModel(torch.nn.Module):
def __init__(self, **kwargs):
super(VarModel, self).__init__(**kwargs)
self.var1 = torch.nn.Parameter(torch.Tensor([0.5]))
self.var2 = torch.nn.Parameter(torch.Tensor([0.5]))
def forward(self, inputs):
return [self.var1, self.var2]
def loss(outputs, labels, weights):
return (outputs[0] * outputs[1] - labels[0])**2
pytorch_model = VarModel()
model = dc.models.TorchModel(pytorch_model, loss, learning_rate=0.02)
x = np.ones((1, 1))
vars = model.predict_on_batch(x)
assert np.allclose(vars[0], 0.5)
assert np.allclose(vars[1], 0.5)
model.fit_generator([(x, x, x)] * 300)
vars = model.predict_on_batch(x)
assert np.allclose(vars[0], 1.0)
assert np.allclose(vars[1], 1.0)
model.fit_generator([(x, 2 * x, x)] * 300, variables=[pytorch_model.var1])
vars = model.predict_on_batch(x)
assert np.allclose(vars[0], 2.0)
assert np.allclose(vars[1], 1.0)
model.fit_generator([(x, x, x)] * 300, variables=[pytorch_model.var2])
vars = model.predict_on_batch(x)
assert np.allclose(vars[0], 2.0)
assert np.allclose(vars[1], 0.5)
@pytest.mark.torch
def test_fit_loss():
"""Test specifying a different loss function when calling fit()."""
class VarModel(torch.nn.Module):
def __init__(self):
super(VarModel, self).__init__()
self.var1 = torch.nn.Parameter(torch.Tensor([0.5]))
self.var2 = torch.nn.Parameter(torch.Tensor([0.5]))
def forward(self, inputs):
return [self.var1, self.var2]
def loss1(outputs, labels, weights):
return (outputs[0] * outputs[1] - labels[0])**2
def loss2(outputs, labels, weights):
return (outputs[0] + outputs[1] - labels[0])**2
pytorch_model = VarModel()
model = dc.models.TorchModel(pytorch_model, loss1, learning_rate=0.01)
x = np.ones((1, 1))
vars = model.predict_on_batch(x)
assert np.allclose(vars[0], 0.5)
assert np.allclose(vars[1], 0.5)
model.fit_generator([(x, x, x)] * 300)
vars = model.predict_on_batch(x)
assert np.allclose(vars[0], 1.0)
assert np.allclose(vars[1], 1.0)
model.fit_generator([(x, 3 * x, x)] * 300, loss=loss2)
vars = model.predict_on_batch(x)
assert np.allclose(vars[0] + vars[1], 3.0)
<file_sep>"""
NCI dataset loader.
Original Author - <NAME>
Author - <NAME>
"""
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
import os
import sys
import numpy as np
import shutil
import deepchem as dc
def load_nci(featurizer='ECFP', shard_size=1000, split='random'):
current_dir = os.path.dirname(os.path.realpath(__file__))
# Load nci dataset
print("About to load NCI dataset.")
dataset_path = os.path.join(
current_dir, "../../datasets/nci_unique.csv")
# Featurize nci dataset
print("About to featurize nci dataset.")
if featurizer == 'ECFP':
featurizer = dc.feat.CircularFingerprint(size=1024)
elif featurizer == 'GraphConv':
featurizer = dc.feat.ConvMolFeaturizer()
all_nci_tasks = (['CCRF-CEM', 'HL-60(TB)', 'K-562', 'MOLT-4', 'RPMI-8226',
'SR', 'A549/ATCC', 'EKVX', 'HOP-62', 'HOP-92', 'NCI-H226',
'NCI-H23', 'NCI-H322M', 'NCI-H460', 'NCI-H522', 'COLO 205',
'HCC-2998', 'HCT-116', 'HCT-15', 'HT29', 'KM12', 'SW-620',
'SF-268', 'SF-295', 'SF-539', 'SNB-19', 'SNB-75', 'U251',
'LOX IMVI', 'MALME-3M', 'M14', 'MDA-MB-435', 'SK-MEL-2',
'SK-MEL-28', 'SK-MEL-5', 'UACC-257', 'UACC-62', 'IGR-OV1',
'OVCAR-3', 'OVCAR-4', 'OVCAR-5', 'OVCAR-8', 'NCI/ADR-RES',
'SK-OV-3', '786-0', 'A498', 'ACHN', 'CAKI-1', 'RXF 393',
'SN12C', 'TK-10', 'UO-31', 'PC-3', 'DU-145', 'MCF7',
'MDA-MB-231/ATCC', 'MDA-MB-468', 'HS 578T', 'BT-549',
'T-47D'])
loader = dc.data.CSVLoader(
tasks=all_nci_tasks, smiles_field="smiles", featurizer=featurizer)
dataset = loader.featurize(dataset_path, shard_size=shard_size)
# Initialize transformers
print("About to transform data")
transformers = [
dc.trans.NormalizationTransformer(transform_y=True, dataset=dataset)]
for transformer in transformers:
dataset = transformer.transform(dataset)
splitters = {'index': dc.splits.IndexSplitter(),
'random': dc.splits.RandomSplitter(),
'scaffold': dc.splits.ScaffoldSplitter()}
splitter = splitters[split]
print("Performing new split.")
train, valid, test = splitter.train_valid_test_split(dataset)
return all_nci_tasks, (train, valid, test), transformers
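if __name__ == "__main__":
    # Minimal usage sketch (not part of the original loader). It assumes the CSV at
    # ../../datasets/nci_unique.csv exists relative to this file and that the
    # featurization dependencies (e.g. RDKit) are installed.
    tasks, (train, valid, test), transformers = load_nci(featurizer='ECFP',
                                                         split='random')
    print("Number of tasks:", len(tasks))
    print("Train/valid/test sizes:", len(train), len(valid), len(test))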
<file_sep>import unittest
import os
import platform
from deepchem.utils import sequence_utils as seq_utils
IS_WINDOWS = platform.system() == 'Windows'
@unittest.skipIf(IS_WINDOWS,
"Skip test on Windows") # hhsuite does not run on windows
class TestSeq(unittest.TestCase):
"""
Tests sequence handling utilities.
"""
def setUp(self):
current_dir = os.path.dirname(os.path.realpath(__file__))
self.dataset_file = os.path.join(current_dir, 'assets/example.fasta')
self.database_name = 'example_db'
self.data_dir = os.path.join(current_dir, 'assets')
def test_hhsearch(self):
seq_utils.hhsearch(self.dataset_file,
database=self.database_name,
data_dir=self.data_dir)
results_file = os.path.join(self.data_dir, 'results.a3m')
hhr_file = os.path.join(self.data_dir, 'example.hhr')
with open(results_file, 'r') as f:
resultsline = next(f)
with open(hhr_file, 'r') as g:
hhrline = next(g)
assert hhrline[0:5] == 'Query'
assert resultsline[0:5] == '>seq0'
os.remove(results_file)
os.remove(hhr_file)
def test_hhblits(self):
seq_utils.hhblits(self.dataset_file,
database=self.database_name,
data_dir=self.data_dir)
results_file = os.path.join(self.data_dir, 'results.a3m')
hhr_file = os.path.join(self.data_dir, 'example.hhr')
with open(results_file, 'r') as f:
resultsline = next(f)
with open(hhr_file, 'r') as g:
hhrline = next(g)
assert hhrline[0:5] == 'Query'
assert resultsline[0:5] == '>seq0'
os.remove(results_file)
os.remove(hhr_file)
def test_MSA_to_dataset(self):
seq_utils.hhsearch(self.dataset_file,
database=self.database_name,
data_dir=self.data_dir)
results_file = os.path.join(self.data_dir, 'results.a3m')
msa_path = results_file
dataset = seq_utils.MSA_to_dataset(msa_path)
print(dataset.ids[0])
print(dataset.X)
assert dataset.ids[0] == 'seq0'
assert dataset.ids[1] == 'seq1'
bool_arr = dataset.X[0] == ['X', 'Y']
assert bool_arr.all()
os.remove(results_file)
<file_sep>"""
PDBBind dataset loader.
"""
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
import os
import shutil
import numpy as np
import pandas as pd
import deepchem as dc
from atomicnet_coordinates import ComplexNeighborListFragmentAtomicCoordinates
def load_pdbbind_labels(labels_file):
"""Loads pdbbind labels as dataframe
Parameters
----------
labels_file: str
Location of PDBbind datafile.
Returns
-------
contents_df: pd.DataFrame
Dataframe containing contents of PDBbind datafile.
"""
contents = []
with open(labels_file) as f:
for line in f:
if line.startswith("#"):
continue
else:
splitline = line.split()
if len(splitline) == 8:
contents.append(splitline)
else:
print("Incorrect data format")
print(splitline)
contents_df = pd.DataFrame(
contents,
columns=("PDB code", "resolution", "release year", "-logKd/Ki", "Kd/Ki",
"ignore-this-field", "reference", "ligand name"))
return contents_df
def compute_pdbbind_coordinate_features(complex_featurizer, pdb_subdir,
pdb_code):
"""Compute features for a given complex
Parameters
----------
complex_featurizer: dc.feat.ComplexFeaturizer
Complex featurizer.
pdb_subdir: str
Location of complex PDB files.
pdb_code: str
Complex PDB code.
Returns
-------
feature: Tuple
Complex features.
"""
protein_file = os.path.join(pdb_subdir, "%s_pocket.pdb" % pdb_code)
ligand_file = os.path.join(pdb_subdir, "%s_ligand.pdb" % pdb_code)
feature = complex_featurizer._featurize_complex(
str(ligand_file), str(protein_file))
return feature
def load_pdbbind_fragment_coordinates(frag1_num_atoms,
frag2_num_atoms,
complex_num_atoms,
max_num_neighbors,
neighbor_cutoff,
pdbbind_dir,
base_dir,
datafile="INDEX_core_data.2013"):
"""Featurize PDBBind dataset.
Parameters
----------
frag1_num_atoms: int
Maximum number of atoms in fragment 1.
frag2_num_atoms: int
Maximum number of atoms in fragment 2.
complex_num_atoms: int
Maximum number of atoms in complex.
max_num_neighbors: int
Maximum number of neighbors per atom.
neighbor_cutoff: float
Interaction cutoff [Angstrom].
pdbbind_dir: str
Location of PDBbind datafile.
base_dir: str
Location for storing featurized dataset.
datafile: str
Name of PDBbind datafile, optional (Default "INDEX_core_data.2013").
reload: bool
If False, any existing `base_dir` is deleted and features are recomputed, optional (Default False).
Returns
-------
tasks: list
PDBbind tasks.
dataset: dc.data.DiskDataset
PDBbind featurized dataset.
transformers: list
dc.trans.Transformer objects.
"""
# Create some directories for analysis
# The base_dir holds the results of all analysis
if not reload:
if os.path.exists(base_dir):
shutil.rmtree(base_dir)
if not os.path.exists(base_dir):
os.makedirs(base_dir)
current_dir = os.path.dirname(os.path.realpath(__file__))
#Make directories to store the raw and featurized datasets.
data_dir = os.path.join(base_dir, "dataset")
# Load PDBBind dataset
labels_file = os.path.join(pdbbind_dir, datafile)
tasks = ["-logKd/Ki"]
print("About to load contents.")
contents_df = load_pdbbind_labels(labels_file)
ids = contents_df["PDB code"].values
y = np.array([float(val) for val in contents_df["-logKd/Ki"].values])
# Define featurizers
featurizer = ComplexNeighborListFragmentAtomicCoordinates(
frag1_num_atoms, frag2_num_atoms, complex_num_atoms, max_num_neighbors,
neighbor_cutoff)
w = np.ones_like(y)
#Currently featurizes with shard_size=1
#Dataset can be reshard: dataset = dataset.reshard(48) for example
def shard_generator():
for ind, pdb_code in enumerate(ids):
print("Processing %s" % str(pdb_code))
pdb_subdir = os.path.join(pdbbind_dir, pdb_code)
computed_feature = compute_pdbbind_coordinate_features(
featurizer, pdb_subdir, pdb_code)
if computed_feature[0] is None:
print("Bad featurization")
continue
else:
X_b = np.reshape(np.array(computed_feature), (1, 9))
y_b = y[ind]
w_b = w[ind]
y_b = np.reshape(y_b, (1, -1))
w_b = np.reshape(w_b, (1, -1))
yield (X_b, y_b, w_b, pdb_code)
dataset = dc.data.DiskDataset.create_dataset(
shard_generator(), data_dir=data_dir, tasks=tasks)
transformers = []
return tasks, dataset, transformers
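if __name__ == "__main__":
    # Minimal usage sketch (not part of the original script). The directory paths
    # below are placeholders, and the atom counts / cutoff are illustrative values
    # only; adjust them to the PDBbind subset being featurized.
    tasks, dataset, transformers = load_pdbbind_fragment_coordinates(
        frag1_num_atoms=140,
        frag2_num_atoms=821,
        complex_num_atoms=908,
        max_num_neighbors=12,
        neighbor_cutoff=12.0,
        pdbbind_dir="/path/to/pdbbind",
        base_dir="/tmp/pdbbind_atomicnet")
    print("Featurized %d complexes" % len(dataset))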
<file_sep>MoleculeNet Cheatsheet
----------------------
When training a model or running a benchmark, the user needs specific datasets.
At first, however, this search can be exhausting and confusing. The
following cheatsheet is aimed at helping DeepChem users more easily identify which
dataset to use for their purposes.
Each row represents a dataset and gives a brief description of it. The columns
indicate the type of data (molecular properties, images, or materials) and how
many data points each dataset has. Each dataset is referenced with a link to its
paper. Finally, some entries still need further information. A short example of
loading one of these datasets is given below the table.
**Cheatsheet**
.. csv-table:: MoleculeNet description
:file: ./moleculenet_datasets_description.csv
:width: 100%
:header-rows: 1
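Once a dataset has been identified in the table above, it can usually be loaded
through the corresponding ``dc.molnet.load_*`` function. The following minimal
sketch uses the Delaney (ESOL) solubility dataset as an example; the featurizer
and splitter choices are illustrative only:

.. code-block:: python

    import deepchem as dc

    tasks, datasets, transformers = dc.molnet.load_delaney(featurizer='ECFP',
                                                           splitter='random')
    train, valid, test = datasets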
<file_sep># Factors Examples
The Factors dataset is an in-house dataset from Merck that was first introduced in the following paper:
<NAME>, et al. "Is multitask deep learning practical for pharma?." Journal of chemical information and modeling 57.8 (2017): 2068-2076.
It contains 1500 Merck in-house compounds that were measured
for IC50 of inhibition on 12 serine proteases. Unlike most of
the other datasets featured in MoleculeNet, the Factors
collection does not have structures for the compounds tested
since they were proprietary Merck compounds. However, the
collection does feature pre-computed descriptors for these
compounds.
Note that the original train/valid/test split from the source
data was preserved here, so this function doesn't allow for
alternate modes of splitting. Similarly, since the source data
came pre-featurized, it is not possible to apply alternative
featurizations.
In this example, we train various models on the Factors dataset.
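Below is a minimal sketch of loading the collection (it assumes the
`dc.molnet.load_factors` loader shipped with recent DeepChem versions; since the
data comes pre-featurized and pre-split, no featurizer or splitter is chosen):

```python
import deepchem as dc

tasks, (train, valid, test), transformers = dc.molnet.load_factors()
print(len(tasks), "tasks;", len(train), "training compounds")
```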
<file_sep>import numpy as np
from typing import Optional, Callable
class ElectronSampler:
"""
This class initializes electrons' positions using a Gaussian distribution around each nucleus and updates them using Markov Chain Monte Carlo (MCMC) moves.
Using the probability obtained from the squared magnitude of the wavefunction of a molecule/atom, MCMC steps can be performed to sample the electrons' positions and further update the wavefunction.
This method is primarily used in approaches like Variational Monte Carlo to sample electrons around the nuclei.
Sampling can be done in 2 ways:
-Simultaneous: All the electrons' positions are updated at once.
-Single-electron: MCMC steps are performed on only a particular electron, given its index value.
Further, these moves can be done with 2 methods:
-Symmetric: The standard deviation for all the steps is uniform.
-Asymmetric: The standard deviation is not uniform and is typically obtained from a function such as the harmonic mean of electron-nucleus distances.
Irrespective of these methods, the initialization is done around the respective nucleus for the number of electrons specified.
Example
-------
>>> from deepchem.utils.electron_sampler import ElectronSampler
>>> test_f = lambda x: 2*np.log(np.random.uniform(low=0,high=1.0,size=np.shape(x)[0]))
>>> distribution=ElectronSampler(central_value=np.array([[1,1,3],[3,2,3]]),f=test_f,seed=0,batch_no=2,steps=1000,)
>>> distribution.gauss_initialize_position(np.array([[1],[2]]))
>> print(distribution.x)
[[[[1.03528105 1.00800314 3.01957476]]
[[3.01900177 1.99697286 2.99793562]]
[[3.00821197 2.00288087 3.02908547]]]
[[[1.04481786 1.03735116 2.98045444]]
[[3.01522075 2.0024335 3.00887726]]
[[3.00667349 2.02988158 2.99589683]]]]
>>> distribution.move()
0.5115
>> print(distribution.x)
[[[[-0.32441754 1.23330263 2.67927645]]
[[ 3.42250997 2.23617126 3.55806632]]
[[ 3.37491385 1.54374006 3.13575241]]]
[[[ 0.49067726 1.03987841 3.70277884]]
[[ 3.5631939 1.68703947 2.5685874 ]]
[[ 2.84560249 1.73998364 3.41274181]]]]
"""
def __init__(self,
central_value: np.ndarray,
f: Callable[[np.ndarray], np.ndarray],
batch_no: int = 10,
x: np.ndarray = np.array([]),
steps: int = 200,
steps_per_update: int = 10,
seed: Optional[int] = None,
symmetric: bool = True,
simultaneous: bool = True):
"""
Parameters
----------
central_value: np.ndarray
Contains each nucleus' coordinates in a 2D array. The shape of the array should be (number_of_nuclei, 3). Ex: [[1,2,3],[3,4,5],..]
f:Callable[[np.ndarray],np.ndarray]
A function that should give twice the log probability of the wavefunction of the molecular system when called. Should take in a 4D array of electrons' positions (x) as an argument and return a numpy array containing the log probabilities of each batch.
batch_no: int, optional (default 10)
Number of batches of the electron's positions to be initialized.
x: np.ndarray, optional (default np.ndarray([]))
Contains the electron's coordinates in a 4D array. The shape of the array should be(batch_no,no_of_electrons,1,3). Can be a 1D empty array, when electron's positions are yet to be initialized.
steps: int, optional (default 200)
The number of MCMC steps to be performed when the moves are called.
steps_per_update: int (default 10)
The number of steps after which the parameters of the MCMC gets updated.
seed: int, optional (default None)
Random seed to use.
symmetric: bool, optional(default True)
If true, symmetric moves will be used, else asymmetric moves will be followed.
simultaneous: bool, optional(default True)
If true, MCMC steps will be performed on all the electrons, else only a single electron gets updated.
Attributes
----------
sampled_electrons: np.ndarray
Keeps track of the sampled electrons at every step, must be empty at start.
"""
self.x = x
self.f = f
self.num_accept = 0
self.symmetric = symmetric
self.simultaneous = simultaneous
self.steps = steps
self.steps_per_update = steps_per_update
self.central_value = central_value
self.batch_no = batch_no
self.sampled_electrons: np.ndarray = np.array([])
if seed is not None:
seed = int(seed)
np.random.seed(seed)
def harmonic_mean(self, y: np.ndarray) -> np.ndarray:
"""Calculates the harmonic mean of the value 'y' from the self.central value. The numpy array returned is typically scaled up to get the standard deviation matrix.
Parameters
----------
y: np.ndarray
Containing the data distribution. Shape of y should be (batch,no_of_electron,1,3)
Returns
-------
np.ndarray
Contains the harmonic mean of the data distribution of each batch. Shape of the array obtained (batch_no, no_of_electrons,1,1)
"""
diff = y - self.central_value
distance = np.linalg.norm(diff, axis=-1, keepdims=True)
return 1.0 / np.mean(1.0 / distance, axis=-2, keepdims=True)
def log_prob_gaussian(self, y: np.ndarray, mu: np.ndarray,
sigma: np.ndarray) -> np.ndarray:
"""Calculates the log probability of a gaussian distribution, given the mean and standard deviation
Parameters
----------
y: np.ndarray
Data for which the Gaussian log probability is to be found
mu: np.ndarray
Means wrt which the log probability is calculated. Same shape as y or should be broadcastable to y
sigma: np.ndarray,
The standard deviations of the Gaussian distribution. Same shape as y or should be broadcastable to y
Returns
-------
np.ndarray
Log probability of gaussian distribution, with the shape - (batch_no,).
"""
numer = np.sum((-0.5 * ((y - mu)**2) / (sigma**2)), axis=(1, 2, 3))
denom = y.shape[-1] * np.sum(np.log(sigma), axis=(1, 2, 3))
return numer - denom
def gauss_initialize_position(self,
no_sample: np.ndarray,
stddev: float = 0.02):
"""Initializes the position around a central value as mean sampled from a gauss distribution and updates self.x.
Parameters
----------
no_sample: np.ndarray,
Contains the number of samples to initialize under each mean. Should be in the form [[3],[2],..], meaning 3 samples are taken around the first entry in self.central_value, 2 samples around the second entry, and so on.
stddev: float, optional (default 0.02)
contains the stddev with which the electrons' coordinates are initialized
"""
mean = self.central_value[0]
specific_sample = no_sample[0][0]
ndim = np.shape(self.central_value)[1]
self.x = np.random.normal(mean, stddev,
(self.batch_no, specific_sample, 1, ndim))
end = np.shape(self.central_value)[0]
for i in range(1, end):
mean = self.central_value[i]
specific_sample = no_sample[i][0]
self.x = np.append(
self.x,
np.random.normal(mean, stddev,
(self.batch_no, specific_sample, 1, ndim)),
axis=1)
def electron_update(self, lp1, lp2, move_prob, ratio, x2) -> np.ndarray:
"""
Performs sampling & parameter updates of electrons and appends the sampled electrons to self.sampled_electrons.
Parameters
----------
lp1: np.ndarray
Log probability of initial parameter state.
lp2: np.ndarray
Log probability of the new sampled state.
move_prob: np.ndarray
Sampled log probability of the electron moving from the initial to the final state, sampled asymmetrically or symmetrically.
ratio: np.ndarray
Log ratio of the lp2 and lp1 states.
x2: np.ndarray
Numpy array of the new sampled electrons.
Returns
-------
lp1: np.ndarray
The updated log probability of the initial parameter state.
"""
cond = move_prob < ratio
tmp_sampled = np.where(cond[:, None, None, None], x2, self.x)
if (self.steps % self.steps_per_update) == 0:
self.x = tmp_sampled
lp1 = np.where(cond, lp2, lp1)
if (np.shape(self.sampled_electrons)[0] == 0):
self.sampled_electrons = tmp_sampled
else:
self.sampled_electrons = np.concatenate(
(self.sampled_electrons, tmp_sampled))
self.num_accept += np.sum(cond)
return lp1
def move(self,
stddev: float = 0.02,
asymmetric_func: Optional[Callable[[np.ndarray],
np.ndarray]] = None,
index: Optional[int] = None) -> float:
"""Performs Metropolis-Hasting move for self.x(electrons). The type of moves to be followed -(simultaneous or single-electron, symmetric or asymmetric) have been specified when calling the class.
The self.x array is replaced with a new array at the end of each step containing the new electron's positions.
Parameters
----------
asymmetric_func: Callable[[np.ndarray],np.ndarray], optional(default None)
Should be specified for an asymmetric move. The function should take in only 1 argument - y: a numpy array wrt which the mean should be calculated.
This function should return the mean for the asymmetric proposal. For FermiNet, this function is the harmonic mean of the distances between the electron and the nuclei.
stddev: float, optional (default 0.02)
Specifies the standard deviation in the case of symmetric moves and the scaling factor of the standard deviation matrix in the case of asymmetric moves.
index: int, optional (default None)
Specifies the index of the electron to be updated in the case of a single electron move.
Returns
-------
float
accepted move ratio of the MCMC steps.
"""
self.sampled_electrons = np.array([])
lp1 = self.f(self.x) # log probability of self.x state
if self.simultaneous:
if self.symmetric:
for i in range(self.steps):
x2 = np.random.normal(self.x, stddev, self.x.shape)
lp2 = self.f(x2) # log probability of x2 state
move_prob = np.log(
np.random.uniform(low=0,
high=1.0,
size=np.shape(self.x)[0]))
ratio = lp2 - lp1
lp1 = self.electron_update(lp1, lp2, move_prob, ratio, x2)
elif asymmetric_func is not None:
for i in range(self.steps):
std = stddev * asymmetric_func(self.x)
x2 = np.random.normal(self.x, std, self.x.shape)
lp2 = self.f(x2) # log probability of x2 state
lq1 = self.log_prob_gaussian(self.x, x2,
std) # forward probability
lq2 = self.log_prob_gaussian(
x2, self.x,
stddev * asymmetric_func(x2)) # backward probability
ratio = lp2 + lq2 - lq1 - lp1
move_prob = np.log(
np.random.uniform(low=0,
high=1.0,
size=np.shape(self.x)[0]))
lp1 = self.electron_update(lp1, lp2, move_prob, ratio, x2)
elif index is not None:
index = int(index)
x2 = np.copy(self.x)
altered_shape = (self.batch_no, 1, np.shape(self.x)[3])
if self.symmetric:
for i in range(self.steps):
x2[:, index, :, :] = np.random.normal(x2[:, index, :, :],
stddev,
size=altered_shape)
lp2 = self.f(x2) # log probability of x2 state
ratio = lp2 - lp1
move_prob = np.log(
np.random.uniform(low=0,
high=1.0,
size=np.shape(self.x)[0]))
lp1 = self.electron_update(lp1, lp2, move_prob, ratio, x2)
elif asymmetric_func is not None:
init_dev = stddev * asymmetric_func(
self.x) # initial standard deviation matrix
for i in range(self.steps):
std = stddev * asymmetric_func(self.x[:, index, :, :])
x2[:, index, :, :] = np.random.normal(x2[:, index, :, :],
std,
size=altered_shape)
lp2 = self.f(x2) # log probability of x2 state
init_dev[:, index, :, :] = std
lq1 = self.log_prob_gaussian(
self.x, x2, init_dev) # forward probability
lq2 = self.log_prob_gaussian(
x2, self.x,
stddev * asymmetric_func(x2)) # backward probability
ratio = lp2 + lq2 - lq1 - lp1
move_prob = np.log(
np.random.uniform(low=0,
high=1.0,
size=np.shape(self.x)[0]))
lp1 = self.electron_update(lp1, lp2, move_prob, ratio, x2)
return self.num_accept / (
(i + 1) * np.shape(self.x)[0]) # accepted move ratio
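if __name__ == "__main__":
    # Minimal sketch of an asymmetric, simultaneous move (not part of the original
    # module). The log-probability function below is a random dummy standing in
    # for a real wavefunction evaluation.
    def dummy_log_prob(x):
        return 2 * np.log(np.random.uniform(low=0, high=1.0, size=np.shape(x)[0]))

    sampler = ElectronSampler(central_value=np.array([[1.0, 1.0, 3.0]]),
                              f=dummy_log_prob,
                              seed=0,
                              batch_no=2,
                              steps=100,
                              symmetric=False)
    sampler.gauss_initialize_position(np.array([[2]]))
    # Scale the proposal width by the harmonic mean of electron-nucleus distances.
    ratio = sampler.move(stddev=0.02, asymmetric_func=sampler.harmonic_mean)
    print("Accepted move ratio:", ratio)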
<file_sep>"""
Docks Molecular Complexes
"""
import logging
import tempfile
from typing import Generator, Optional, Tuple, Union
import numpy as np
from deepchem.utils.typing import RDKitMol
from deepchem.models import Model
from deepchem.feat import ComplexFeaturizer
from deepchem.data import NumpyDataset
from deepchem.dock import PoseGenerator
logger = logging.getLogger(__name__)
POSED_COMPLEX = Tuple[RDKitMol, RDKitMol]
class Docker(object):
"""A generic molecular docking class
This class provides a docking engine which uses provided models for
featurization, pose generation, and scoring. Most pieces of docking
software are command line tools that are invoked from the shell. The
goal of this class is to provide a clean Python API for invoking
molecular docking programmatically.
The implementation of this class is lightweight and generic. It's
expected that the majority of the heavy lifting will be done by pose
generation and scoring classes that are provided to this class.
"""
def __init__(self,
pose_generator: PoseGenerator,
featurizer: Optional[ComplexFeaturizer] = None,
scoring_model: Optional[Model] = None):
"""Builds model.
Parameters
----------
pose_generator: PoseGenerator
The pose generator to use for this model
featurizer: ComplexFeaturizer, optional (default None)
Featurizer associated with `scoring_model`
scoring_model: Model, optional (default None)
Should make predictions on molecular complex.
"""
if ((featurizer is not None and scoring_model is None) or
(featurizer is None and scoring_model is not None)):
raise ValueError(
"featurizer/scoring_model must both be set or must both be None."
)
self.base_dir = tempfile.mkdtemp()
self.pose_generator = pose_generator
self.featurizer = featurizer
self.scoring_model = scoring_model
def dock(
self,
molecular_complex: Tuple[str, str],
centroid: Optional[np.ndarray] = None,
box_dims: Optional[np.ndarray] = None,
exhaustiveness: int = 10,
num_modes: int = 9,
num_pockets: Optional[int] = None,
out_dir: Optional[str] = None,
use_pose_generator_scores: bool = False
) -> Union[Generator[POSED_COMPLEX, None, None], Generator[Tuple[
POSED_COMPLEX, float], None, None]]:
"""Generic docking function.
This docking function uses this object's featurizer, pose
generator, and scoring model to make docking predictions. This
function is written in a generic style so that it can be used with different featurizers, pose generators, and scoring models.
Parameters
----------
molecular_complex: Tuple[str, str]
A representation of a molecular complex. This tuple is
(protein_file, ligand_file).
centroid: np.ndarray, optional (default None)
The centroid to dock against. Is computed if not specified.
box_dims: np.ndarray, optional (default None)
A numpy array of shape `(3,)` holding the size of the box to dock. If not
specified is set to size of molecular complex plus 5 angstroms.
exhaustiveness: int, optional (default 10)
Tells pose generator how exhaustive it should be with pose
generation.
num_modes: int, optional (default 9)
Tells pose generator how many binding modes it should generate at
each invocation.
num_pockets: int, optional (default None)
If specified, `self.pocket_finder` must be set. Will only
generate poses for the first `num_pockets` returned by
`self.pocket_finder`.
out_dir: str, optional (default None)
If specified, write generated poses to this directory.
use_pose_generator_scores: bool, optional (default False)
If `True`, ask pose generator to generate scores. This cannot be
`True` if `self.featurizer` and `self.scoring_model` are set
since those will be used to generate scores in that case.
Returns
-------
Generator[Tuple[`posed_complex`, `score`]] or Generator[`posed_complex`]
A generator. If `use_pose_generator_scores==True` or
`self.scoring_model` is set, then will yield tuples
`(posed_complex, score)`. Else will yield `posed_complex`.
"""
if self.scoring_model is not None and use_pose_generator_scores:
raise ValueError(
"Cannot set use_pose_generator_scores=True "
"when self.scoring_model is set (since both generator scores for complexes)."
)
outputs = self.pose_generator.generate_poses(
molecular_complex,
centroid=centroid,
box_dims=box_dims,
exhaustiveness=exhaustiveness,
num_modes=num_modes,
num_pockets=num_pockets,
out_dir=out_dir,
generate_scores=use_pose_generator_scores)
if use_pose_generator_scores:
complexes, scores = outputs
else:
complexes = outputs
# We know use_pose_generator_scores == False in this case
if self.scoring_model is not None:
for posed_complex in complexes:
# check whether self.featurizer is instance of ComplexFeaturizer or not
assert isinstance(self.featurizer, ComplexFeaturizer)
# TODO: How to handle the failure here?
features = self.featurizer.featurize([molecular_complex])
dataset = NumpyDataset(X=features)
score = self.scoring_model.predict(dataset)
yield (posed_complex, score)
elif use_pose_generator_scores:
for posed_complex, score in zip(complexes, scores):
yield (posed_complex, score)
else:
for posed_complex in complexes:
yield posed_complex
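# Example usage (a minimal sketch, not part of the original module). The file
# names below are placeholders, and it assumes a pose generator such as
# dc.dock.VinaPoseGenerator is installed and on the path:
#
#   import deepchem as dc
#   docker = Docker(pose_generator=dc.dock.VinaPoseGenerator())
#   poses_and_scores = docker.dock(("protein.pdb", "ligand.sdf"),
#                                  exhaustiveness=1,
#                                  num_modes=1,
#                                  use_pose_generator_scores=True)
#   for (posed_complex, score) in poses_and_scores:
#       print(score)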
<file_sep>"""
SPLIF Fingerprints for molecular complexes.
"""
import logging
import itertools
import numpy as np
from deepchem.utils.hash_utils import hash_ecfp_pair
from deepchem.utils.rdkit_utils import load_complex
from deepchem.utils.rdkit_utils import compute_all_ecfp
from deepchem.utils.rdkit_utils import MoleculeLoadException
from deepchem.utils.rdkit_utils import compute_contact_centroid
from deepchem.feat import ComplexFeaturizer
from deepchem.utils.hash_utils import vectorize
from deepchem.utils.voxel_utils import voxelize
from deepchem.utils.voxel_utils import convert_atom_pair_to_voxel
from deepchem.utils.geometry_utils import compute_pairwise_distances
from deepchem.utils.geometry_utils import subtract_centroid
from typing import Optional, Tuple, Dict, List
logger = logging.getLogger(__name__)
SPLIF_CONTACT_BINS = [(0, 2.0), (2.0, 3.0), (3.0, 4.5)]
def compute_splif_features_in_range(frag1: Tuple,
frag2: Tuple,
pairwise_distances: np.ndarray,
contact_bin: List,
ecfp_degree: int = 2) -> Dict:
"""Computes SPLIF features for close atoms in molecular complexes.
Finds all frag1 atoms that are > contact_bin[0] and <
contact_bin[1] away from frag2 atoms. Then, finds the ECFP
fingerprints for the contacting atoms. Returns a dictionary
mapping (frag1_index_i, frag2_index_j) --> (frag1_ecfp_i,
frag2_ecfp_j)
Parameters
----------
frag1: Tuple
A tuple of (coords, mol) returned by `load_molecule`.
frag2: Tuple
A tuple of (coords, mol) returned by `load_molecule`.
contact_bin: List
The (min, max) distance range defining the current contact bin.
pairwise_distances: np.ndarray
Array of pairwise fragment-fragment distances (Angstroms)
ecfp_degree: int
ECFP radius
"""
contacts = np.nonzero((pairwise_distances > contact_bin[0]) &
(pairwise_distances < contact_bin[1]))
frag1_atoms = set([int(c) for c in contacts[0].tolist()])
frag1_ecfp_dict = compute_all_ecfp(frag1[1],
indices=frag1_atoms,
degree=ecfp_degree)
frag2_ecfp_dict = compute_all_ecfp(frag2[1], degree=ecfp_degree)
splif_dict = {
contact: (frag1_ecfp_dict[contact[0]], frag2_ecfp_dict[contact[1]])
for contact in zip(contacts[0], contacts[1])
}
return splif_dict
def featurize_splif(frag1, frag2, contact_bins, pairwise_distances,
ecfp_degree):
"""Computes SPLIF featurization of fragment interactions binding pocket.
For each contact range (i.e. 1 A to 2 A, 2 A to 3 A, etc.)
compute a dictionary mapping (frag1_index_i, frag2_index_j)
tuples --> (frag1_ecfp_i, frag2_ecfp_j) tuples. Return a
list of such splif dictionaries.
Parameters
----------
frag1: Tuple
A tuple of (coords, mol) returned by `load_molecule`.
frag2: Tuple
A tuple of (coords, mol) returned by `load_molecule`.
contact_bins: np.ndarray
Ranges of pair distances which are placed in separate bins.
pairwise_distances: np.ndarray
Array of pairwise fragment-fragment distances (Angstroms)
ecfp_degree: int
ECFP radius, the graph distance at which fragments are computed.
Returns
-------
Dictionaries of SPLIF interactions suitable for `vectorize` or
`voxelize`.
"""
splif_dicts = []
for i, contact_bin in enumerate(contact_bins):
splif_dicts.append(
compute_splif_features_in_range(frag1, frag2, pairwise_distances,
contact_bin, ecfp_degree))
return splif_dicts
class SplifFingerprint(ComplexFeaturizer):
"""Computes SPLIF Fingerprints for a macromolecular complex.
SPLIF fingerprints are based on a technique introduced in the
following paper.
<NAME>., and <NAME>. "Structural protein–ligand interaction
fingerprints (SPLIF) for structure-based virtual screening:
method and benchmark study." Journal of chemical information
and modeling 54.9 (2014): 2555-2561.
SPLIF fingerprints are a subclass of `ComplexFeaturizer`. It
requires 3D coordinates for a molecular complex. For each ligand
atom, it identifies close pairs of atoms from different molecules.
These atom pairs are expanded to 2D circular fragments and a
fingerprint for the union is turned on in the bit vector. Note that
we slightly generalize the original paper by not requiring the
interacting molecules to be proteins or ligands.
This is conceptually pretty similar to
`ContactCircularFingerprint` but computes ECFP fragments only
for direct contacts instead of the entire contact region.
For a macromolecular complex, returns a vector of shape
`(len(contact_bins)*size,)`
"""
def __init__(self, contact_bins=None, radius=2, size=8):
"""
Parameters
----------
contact_bins: list[tuple]
List of contact bins. If not specified is set to default
`[(0, 2.0), (2.0, 3.0), (3.0, 4.5)]`.
radius : int, optional (default 2)
Fingerprint radius used for circular fingerprints.
size: int, optional (default 8)
Length of generated bit vector.
"""
if contact_bins is None:
self.contact_bins = SPLIF_CONTACT_BINS
else:
self.contact_bins = contact_bins
self.size = size
self.radius = radius
def _featurize(self, datapoint, **kwargs):
"""
Compute featurization for a molecular complex
Parameters
----------
datapoint: Tuple[str, str]
Filenames for molecule and protein.
"""
if 'complex' in kwargs:
datapoint = kwargs.get("complex")
raise DeprecationWarning(
'Complex is being phased out as a parameter, please pass "datapoint" instead.'
)
try:
fragments = load_complex(datapoint, add_hydrogens=False)
except MoleculeLoadException:
logger.warning(
"This molecule cannot be loaded by Rdkit. Returning None")
return None
pairwise_features = []
# We compute pairwise contact fingerprints
for (frag1, frag2) in itertools.combinations(fragments, 2):
# Get coordinates
distances = compute_pairwise_distances(frag1[0], frag2[0])
# distances = compute_pairwise_distances(prot_xyz, lig_xyz)
vectors = [
vectorize(hash_ecfp_pair,
feature_dict=splif_dict,
size=self.size)
for splif_dict in featurize_splif(
frag1, frag2, self.contact_bins, distances, self.radius)
]
pairwise_features += vectors
pairwise_features = np.concatenate(pairwise_features)
return pairwise_features
class SplifVoxelizer(ComplexFeaturizer):
"""Computes SPLIF voxel grid for a macromolecular complex.
SPLIF fingerprints are based on a technique introduced in the
following paper [1]_.
The SPLIF voxelizer localizes local SPLIF descriptors in
space, by assigning features to the voxel in which they
originated. This technique may be useful for downstream
learning methods such as convolutional networks.
Featurizes a macromolecular complex into a tensor of shape
`(voxels_per_edge, voxels_per_edge, voxels_per_edge, size)`
where `voxels_per_edge = int(box_width/voxel_width)`.
References
----------
.. [1] <NAME>., and <NAME>. "Structural protein–ligand interaction
fingerprints (SPLIF) for structure-based virtual screening:
method and benchmark study." Journal of chemical information
and modeling 54.9 (2014): 2555-2561.
"""
def __init__(self,
cutoff: float = 4.5,
contact_bins: Optional[List] = None,
radius: int = 2,
size: int = 8,
box_width: float = 16.0,
voxel_width: float = 1.0):
"""
Parameters
----------
cutoff: float (default 4.5)
Distance cutoff in angstroms for molecules in complex.
contact_bins: list[tuple]
List of contact bins. If not specified is set to default
`[(0, 2.0), (2.0, 3.0), (3.0, 4.5)]`.
radius : int, optional (default 2)
Fingerprint radius used for circular fingerprints.
size: int, optional (default 8)
Length of generated bit vector.
box_width: float, optional (default 16.0)
Size of a box in which voxel features are calculated. Box
is centered on a ligand centroid.
voxel_width: float, optional (default 1.0)
Size of a 3D voxel in a grid.
"""
self.cutoff = cutoff
if contact_bins is None:
self.contact_bins = SPLIF_CONTACT_BINS
else:
self.contact_bins = contact_bins
self.size = size
self.radius = radius
self.box_width = box_width
self.voxel_width = voxel_width
self.voxels_per_edge = int(self.box_width / self.voxel_width)
def _featurize(self, datapoint, **kwargs):
"""
Compute featurization for a molecular complex
Parameters
----------
datapoint: Tuple[str, str]
Filenames for molecule and protein.
"""
if 'complex' in kwargs:
datapoint = kwargs.get("complex")
raise DeprecationWarning(
'Complex is being phased out as a parameter, please pass "datapoint" instead.'
)
try:
fragments = load_complex(datapoint, add_hydrogens=False)
except MoleculeLoadException:
logger.warning(
"This molecule cannot be loaded by Rdkit. Returning None")
return None
pairwise_features = []
# We compute pairwise contact fingerprints
centroid = compute_contact_centroid(fragments, cutoff=self.cutoff)
for (frag1, frag2) in itertools.combinations(fragments, 2):
distances = compute_pairwise_distances(frag1[0], frag2[0])
frag1_xyz = subtract_centroid(frag1[0], centroid)
frag2_xyz = subtract_centroid(frag2[0], centroid)
xyzs = [frag1_xyz, frag2_xyz]
pairwise_features.append(
np.concatenate([
voxelize(convert_atom_pair_to_voxel,
hash_function=hash_ecfp_pair,
coordinates=xyzs,
box_width=self.box_width,
voxel_width=self.voxel_width,
feature_dict=splif_dict,
nb_channel=self.size)
for splif_dict in featurize_splif(
frag1, frag2, self.contact_bins, distances, self.radius)
],
axis=-1))
# Features are of shape (voxels_per_edge, voxels_per_edge, voxels_per_edge, 1) so we should concatenate on the last axis.
return np.concatenate(pairwise_features, axis=-1)
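# Example usage (a minimal sketch, not part of the original module). The file
# names below are placeholders for a ligand/protein pair on disk:
#
#   featurizer = SplifFingerprint(size=8)
#   fingerprints = featurizer.featurize([("ligand.sdf", "protein.pdb")])
#
#   voxelizer = SplifVoxelizer(box_width=16.0, voxel_width=1.0)
#   voxel_grids = voxelizer.featurize([("ligand.sdf", "protein.pdb")])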
<file_sep>"""
Contains an abstract base class that supports data transformations.
"""
import os
import logging
import time
import warnings
from typing import Any, List, Optional, Tuple, Union
import numpy as np
import scipy
import scipy.ndimage
import deepchem as dc
from deepchem.data import Dataset, NumpyDataset, DiskDataset
from deepchem.feat import Featurizer
from deepchem.feat.mol_graphs import ConvMol
logger = logging.getLogger(__name__)
def undo_grad_transforms(grad, tasks, transformers):
"""DEPRECATED. DO NOT USE."""
logger.warning(
"undo_grad_transforms is DEPRECATED and will be removed in a future version of DeepChem. "
"Manually implement transforms to perform force calculations.")
for transformer in reversed(transformers):
if transformer.transform_y:
grad = transformer.untransform_grad(grad, tasks)
return grad
def get_grad_statistics(dataset):
"""Computes and returns statistics of a dataset
DEPRECATED DO NOT USE.
This function assumes that the first task of a dataset holds the
energy for an input system, and that the remaining tasks hold the
gradient for the system.
"""
logger.warning(
"get_grad_statistics is DEPRECATED and will be removed in a future version of DeepChem. Manually compute force/energy statistics."
)
if len(dataset) == 0:
return None, None
y = dataset.y
energy = y[:, 0]
grad = y[:, 1:]
for i in range(energy.size):
grad[i] *= energy[i]
ydely_means = np.sum(grad, axis=0) / len(energy)
return grad, ydely_means
class Transformer(object):
"""Abstract base class for different data transformation techniques.
A transformer is an object that applies a transformation to a given
dataset. Think of a transformation as a mathematical operation which
makes the source dataset more amenable to learning. For example, one
transformer could normalize the features for a dataset (ensuring
they have zero mean and unit standard deviation). Another
transformer could for example threshold values in a dataset so that
values outside a given range are truncated. Yet another transformer
could act as a data augmentation routine, generating multiple
different images from each source datapoint (a transformation need
not necessarily be one to one).
Transformers are designed to be chained, since data pipelines often
chain multiple different transformations to a dataset. Transformers
are also designed to be scalable and can be applied to
large `dc.data.Dataset` objects. Note that Transformers are not
usually thread-safe, so you will have to be careful when processing
very large datasets.
This class is an abstract superclass that isn't meant to be directly
instantiated. Instead, you will want to instantiate one of the
subclasses of this class in order to perform concrete
transformations.
"""
# Hack to allow for easy unpickling:
# http://stefaanlippens.net/pickleproblem
__module__ = os.path.splitext(os.path.basename(__file__))[0]
def __init__(self,
transform_X: bool = False,
transform_y: bool = False,
transform_w: bool = False,
transform_ids: bool = False,
dataset: Optional[Dataset] = None):
"""Initializes transformation based on dataset statistics.
Parameters
----------
transform_X: bool, optional (default False)
Whether to transform X
transform_y: bool, optional (default False)
Whether to transform y
transform_w: bool, optional (default False)
Whether to transform w
transform_ids: bool, optional (default False)
Whether to transform ids
dataset: dc.data.Dataset object, optional (default None)
Dataset to be transformed
"""
if self.__class__.__name__ == "Transformer":
raise ValueError(
"Transformer is an abstract superclass and cannot be directly instantiated. You probably want to instantiate a concrete subclass instead."
)
self.transform_X = transform_X
self.transform_y = transform_y
self.transform_w = transform_w
self.transform_ids = transform_ids
# Some transformation must happen
assert transform_X or transform_y or transform_w or transform_ids
def transform_array(
self, X: np.ndarray, y: np.ndarray, w: np.ndarray, ids: np.ndarray
) -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
"""Transform the data in a set of (X, y, w, ids) arrays.
Parameters
----------
X: np.ndarray
Array of features
y: np.ndarray
Array of labels
w: np.ndarray
Array of weights.
ids: np.ndarray
Array of identifiers.
Returns
-------
Xtrans: np.ndarray
Transformed array of features
ytrans: np.ndarray
Transformed array of labels
wtrans: np.ndarray
Transformed array of weights
idstrans: np.ndarray
Transformed array of ids
"""
raise NotImplementedError(
"Each Transformer is responsible for its own transform_array method."
)
def untransform(self, transformed: np.ndarray) -> np.ndarray:
"""Reverses stored transformation on provided data.
Depending on whether `transform_X` or `transform_y` or `transform_w` was
set, this will perform different un-transformations. Note that this method
may not always be defined since some transformations aren't 1-1.
Parameters
----------
transformed: np.ndarray
Array which was previously transformed by this class.
"""
raise NotImplementedError(
"Each Transformer is responsible for its own untransform method.")
def transform(self,
dataset: Dataset,
parallel: bool = False,
out_dir: Optional[str] = None,
**kwargs) -> Dataset:
"""Transforms all internally stored data in dataset.
This method transforms all internal data in the provided dataset by using
the `Dataset.transform` method. Note that this method adds X-transform,
y-transform columns to metadata. Specified keyword arguments are passed on
to `Dataset.transform`.
Parameters
----------
dataset: dc.data.Dataset
Dataset object to be transformed.
parallel: bool, optional (default False)
if True, use multiple processes to transform the dataset in parallel.
For large datasets, this might be faster.
out_dir: str, optional
If `out_dir` is specified and `dataset` is a `DiskDataset`,
the output dataset will be written to the specified directory.
Returns
-------
Dataset
A newly transformed Dataset object
"""
# Add this case in to handle non-DiskDataset that should be written to disk
if out_dir is not None:
if not isinstance(dataset, dc.data.DiskDataset):
dataset = dc.data.DiskDataset.from_numpy(
dataset.X, dataset.y, dataset.w, dataset.ids)
_, y_shape, w_shape, _ = dataset.get_shape()
if y_shape == tuple() and self.transform_y:
raise ValueError("Cannot transform y when y_values are not present")
if w_shape == tuple() and self.transform_w:
raise ValueError("Cannot transform w when w_values are not present")
return dataset.transform(self, out_dir=out_dir, parallel=parallel)
def transform_on_array(
self, X: np.ndarray, y: np.ndarray, w: np.ndarray, ids: np.ndarray
) -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
"""Transforms numpy arrays X, y, and w
DEPRECATED. Use `transform_array` instead.
Parameters
----------
X: np.ndarray
Array of features
y: np.ndarray
Array of labels
w: np.ndarray
Array of weights.
ids: np.ndarray
Array of identifiers.
Returns
-------
Xtrans: np.ndarray
Transformed array of features
ytrans: np.ndarray
Transformed array of labels
wtrans: np.ndarray
Transformed array of weights
idstrans: np.ndarray
Transformed array of ids
"""
warnings.warn(
"transform_on_array() is deprecated and has been renamed to transform_array()."
"transform_on_array() will be removed in DeepChem 3.0",
FutureWarning)
X, y, w, ids = self.transform_array(X, y, w, ids)
return X, y, w, ids
def undo_transforms(y: np.typing.ArrayLike,
transformers: List[Transformer]) -> np.ndarray:
"""Undoes all transformations applied.
Transformations are reversed using `transformer.untransform`.
Transformations will be assumed to have been applied in the order specified,
so transformations will be reversed in the opposite order. That is if
`transformers = [t1, t2]`, then this method will do `t2.untransform`
followed by `t1.untransform`.
Parameters
----------
y: np.ndarray
Array of values for which transformations have to be undone.
transformers: list[dc.trans.Transformer]
List of transformations which have already been applied to `y` in the
order specifed.
Returns
-------
y_out: np.ndarray
The array with all transformations reversed.
"""
# Note that transformers have to be undone in reversed order
y_out = np.asarray(y)
for transformer in reversed(transformers):
if transformer.transform_y:
y_out = transformer.untransform(y_out)
return y_out
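# A short illustration of the reversal order (not part of the original module):
# if `transformers = [t1, t2]` were applied in that order when preparing the
# data, then
#   y_orig = undo_transforms(y_pred, [t1, t2])
# first calls t2.untransform(y_pred) and then t1.untransform(...) on the result,
# for every transformer that has transform_y set.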
class MinMaxTransformer(Transformer):
"""Ensure each value rests between 0 and 1 by using the min and max.
`MinMaxTransformer` rescales X or y (depending on whether transform_X or
transform_y is True): each feature or task column is shifted by its minimum
value over the samples and divided by its range (maximum value - minimum value)
over the samples. This ensures each column lies between 0 and 1. In case of
multi-task learning, it ensures each task is given equal importance.
Given original array A, the transformed array can be written as:
>>> import numpy as np
>>> A = np.random.rand(10, 10)
>>> A_min = np.min(A, axis=0)
>>> A_max = np.max(A, axis=0)
>>> A_t = np.nan_to_num((A - A_min)/(A_max - A_min))
Examples
--------
>>> n_samples = 10
>>> n_features = 3
>>> n_tasks = 1
>>> ids = np.arange(n_samples)
>>> X = np.random.rand(n_samples, n_features)
>>> y = np.random.rand(n_samples, n_tasks)
>>> w = np.ones((n_samples, n_tasks))
>>> dataset = dc.data.NumpyDataset(X, y, w, ids)
>>> transformer = dc.trans.MinMaxTransformer(transform_y=True, dataset=dataset)
>>> dataset = transformer.transform(dataset)
Note
----
This class can only transform `X` or `y` and not `w`. So only one of
`transform_X` or `transform_y` can be set.
Raises
------
ValueError
if `transform_X` and `transform_y` are both set.
"""
def __init__(self,
transform_X: bool = False,
transform_y: bool = False,
dataset: Optional[Dataset] = None):
"""Initialization of MinMax transformer.
Parameters
----------
transform_X: bool, optional (default False)
Whether to transform X
transform_y: bool, optional (default False)
Whether to transform y
dataset: dc.data.Dataset object, optional (default None)
Dataset to be transformed
"""
if transform_X and transform_y:
raise ValueError("Can only transform only one of X and y")
if dataset is not None and transform_X:
self.X_min = np.min(dataset.X, axis=0)
self.X_max = np.max(dataset.X, axis=0)
elif dataset is not None and transform_y:
self.y_min = np.min(dataset.y, axis=0)
self.y_max = np.max(dataset.y, axis=0)
if len(dataset.y.shape) > 1:
assert len(self.y_min) == dataset.y.shape[1]
super(MinMaxTransformer, self).__init__(transform_X=transform_X,
transform_y=transform_y,
dataset=dataset)
def transform_array(
self, X: np.ndarray, y: np.ndarray, w: np.ndarray, ids: np.ndarray
) -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
"""Transform the data in a set of (X, y, w, ids) arrays.
Parameters
----------
X: np.ndarray
Array of features
y: np.ndarray
Array of labels
w: np.ndarray
Array of weights.
ids: np.ndarray
Array of ids.
Returns
-------
Xtrans: np.ndarray
Transformed array of features
ytrans: np.ndarray
Transformed array of labels
wtrans: np.ndarray
Transformed array of weights
idstrans: np.ndarray
Transformed array of ids
"""
if self.transform_X:
# Handle division by zero
denominator = np.where((self.X_max - self.X_min) > 0,
(self.X_max - self.X_min),
np.ones_like(self.X_max - self.X_min))
X = np.nan_to_num((X - self.X_min) / denominator)
elif self.transform_y:
# Handle division by zero
denominator = np.where((self.y_max - self.y_min) > 0,
(self.y_max - self.y_min),
np.ones_like(self.y_max - self.y_min))
y = np.nan_to_num((y - self.y_min) / denominator)
return (X, y, w, ids)
def untransform(self, z: np.ndarray) -> np.ndarray:
"""Undo transformation on provided data.
Parameters
----------
z: np.ndarray
Transformed X or y array
Returns
-------
np.ndarray
Array with min-max scaling undone.
"""
if self.transform_X:
X_max = self.X_max
X_min = self.X_min
return z * (X_max - X_min) + X_min
elif self.transform_y:
y_min = self.y_min
y_max = self.y_max
n_tasks = len(y_min)
z_shape = list(z.shape)
z_shape.reverse()
for dim in z_shape:
if dim != n_tasks and dim == 1:
y_min = np.expand_dims(y_min, -1)
y_max = np.expand_dims(y_max, -1)
y = z * (y_max - y_min) + y_min
return y
else:
return z
class NormalizationTransformer(Transformer):
"""Normalizes dataset to have zero mean and unit standard deviation
This transformer transforms datasets to have zero mean and unit standard
deviation.
Examples
--------
>>> n_samples = 10
>>> n_features = 3
>>> n_tasks = 1
>>> ids = np.arange(n_samples)
>>> X = np.random.rand(n_samples, n_features)
>>> y = np.random.rand(n_samples, n_tasks)
>>> w = np.ones((n_samples, n_tasks))
>>> dataset = dc.data.NumpyDataset(X, y, w, ids)
>>> transformer = dc.trans.NormalizationTransformer(transform_y=True, dataset=dataset)
>>> dataset = transformer.transform(dataset)
Note
----
This class can only transform `X` or `y` and not `w`. So only one of
`transform_X` or `transform_y` can be set.
Raises
------
ValueError
if `transform_X` and `transform_y` are both set.
"""
def __init__(self,
transform_X: bool = False,
transform_y: bool = False,
transform_w: bool = False,
dataset: Optional[Dataset] = None,
transform_gradients: bool = False,
move_mean: bool = True):
"""Initialize normalization transformation.
Parameters
----------
transform_X: bool, optional (default False)
Whether to transform X
transform_y: bool, optional (default False)
Whether to transform y
transform_w: bool, optional (default False)
Whether to transform w
dataset: dc.data.Dataset object, optional (default None)
Dataset to be transformed
"""
if transform_X and transform_y:
raise ValueError("Can only transform only one of X and y")
if transform_w:
raise ValueError(
"MinMaxTransformer doesn't support w transformation.")
if dataset is not None and transform_X:
X_means, X_stds = dataset.get_statistics(X_stats=True,
y_stats=False)
self.X_means = X_means
self.X_stds = X_stds
elif dataset is not None and transform_y:
y_means, y_stds = dataset.get_statistics(X_stats=False,
y_stats=True)
self.y_means = y_means
# Control for pathological case with no variance.
y_stds_np = np.array(y_stds)
y_stds_np[y_stds_np == 0] = 1.
self.y_stds = y_stds_np
self.transform_gradients = transform_gradients
self.move_mean = move_mean
if self.transform_gradients:
true_grad, ydely_means = get_grad_statistics(dataset)
self.grad = np.reshape(true_grad, (true_grad.shape[0], -1, 3))
self.ydely_means = ydely_means
super(NormalizationTransformer, self).__init__(transform_X=transform_X,
transform_y=transform_y,
transform_w=transform_w,
dataset=dataset)
def transform_array(
self, X: np.ndarray, y: np.ndarray, w: np.ndarray, ids: np.ndarray
) -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
"""Transform the data in a set of (X, y, w) arrays.
Parameters
----------
X: np.ndarray
Array of features
y: np.ndarray
Array of labels
w: np.ndarray
Array of weights.
ids: np.ndarray
Array of ids.
Returns
-------
Xtrans: np.ndarray
Transformed array of features
ytrans: np.ndarray
Transformed array of labels
wtrans: np.ndarray
Transformed array of weights
idstrans: np.ndarray
Transformed array of ids
"""
if self.transform_X:
if not hasattr(self, 'move_mean') or self.move_mean:
X = np.nan_to_num((X - self.X_means) / self.X_stds)
else:
X = np.nan_to_num(X / self.X_stds)
if self.transform_y:
if not hasattr(self, 'move_mean') or self.move_mean:
y = np.nan_to_num((y - self.y_means) / self.y_stds)
else:
y = np.nan_to_num(y / self.y_stds)
return (X, y, w, ids)
def untransform(self, z: np.ndarray) -> np.ndarray:
"""Undo transformation on provided data.
Parameters
----------
z: np.ndarray
Array to transform back
Returns
-------
z_out: np.ndarray
Array with normalization undone.
"""
if self.transform_X:
if not hasattr(self, 'move_mean') or self.move_mean:
return z * self.X_stds + self.X_means
else:
return z * self.X_stds
elif self.transform_y:
y_stds = self.y_stds
y_means = self.y_means
# Handle case with 1 task correctly
if len(self.y_stds.shape) == 0:
n_tasks = 1
else:
n_tasks = self.y_stds.shape[0]
z_shape = list(z.shape)
# Get the reversed shape of z: (..., n_tasks, batch_size)
z_shape.reverse()
# Find the task dimension of z
for dim in z_shape:
if dim != n_tasks and dim == 1:
# Prevent broadcasting on wrong dimension
y_stds = np.expand_dims(y_stds, -1)
y_means = np.expand_dims(y_means, -1)
if not hasattr(self, 'move_mean') or self.move_mean:
return z * y_stds + y_means
else:
return z * y_stds
else:
return z
def untransform_grad(self, grad, tasks):
"""DEPRECATED. DO NOT USE."""
logger.warning(
"NormalizationTransformer.untransform_grad is DEPRECATED and will be removed in a future version of DeepChem. "
"Manually implement transforms to perform force calculations.")
if self.transform_y:
grad_means = self.y_means[1:]
energy_var = self.y_stds[0]
grad_var = 1 / energy_var * (self.ydely_means -
self.y_means[0] * self.y_means[1:])
energy = tasks[:, 0]
transformed_grad = []
for i in range(energy.size):
Etf = energy[i]
grad_Etf = grad[i].flatten()
grad_E = Etf * grad_var + energy_var * grad_Etf + grad_means
grad_E = np.reshape(grad_E, (-1, 3))
transformed_grad.append(grad_E)
transformed_grad = np.asarray(transformed_grad)
return transformed_grad
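# Illustrative round trip (assumed behavior, names are illustrative): with
# move_mean=True the y transform above is z = (y - y_means) / y_stds and
# `untransform` applies z * y_stds + y_means, so
#     t = NormalizationTransformer(transform_y=True, dataset=dataset)
#     d2 = t.transform(dataset)
#     np.allclose(t.untransform(d2.y), dataset.y)
# is expected to recover the original labels up to floating point error.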
class ClippingTransformer(Transformer):
"""Clip large values in datasets.
Examples
--------
Let's clip values from a synthetic dataset
>>> n_samples = 10
>>> n_features = 3
>>> n_tasks = 1
>>> ids = np.arange(n_samples)
>>> X = np.random.rand(n_samples, n_features)
>>> y = np.zeros((n_samples, n_tasks))
>>> w = np.ones((n_samples, n_tasks))
>>> dataset = dc.data.NumpyDataset(X, y, w, ids)
>>> transformer = dc.trans.ClippingTransformer(transform_X=True)
>>> dataset = transformer.transform(dataset)
"""
def __init__(self,
transform_X: bool = False,
transform_y: bool = False,
dataset: Optional[Dataset] = None,
x_max: float = 5.,
y_max: float = 500.):
"""Initialize clipping transformation.
Parameters
----------
transform_X: bool, optional (default False)
Whether to transform X
transform_y: bool, optional (default False)
Whether to transform y
dataset: dc.data.Dataset object, optional
Dataset to be transformed
x_max: float, optional
Maximum absolute value for X
y_max: float, optional
Maximum absolute value for y
Note
----
This transformer can transform `X` and `y` jointly, but does not transform
`w`.
Raises
------
ValueError
if `transform_w` is set.
"""
super(ClippingTransformer, self).__init__(transform_X=transform_X,
transform_y=transform_y,
dataset=dataset)
self.x_max = x_max
self.y_max = y_max
def transform_array(
self, X: np.ndarray, y: np.ndarray, w: np.ndarray, ids: np.ndarray
) -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
"""Transform the data in a set of (X, y, w) arrays.
Parameters
----------
X: np.ndarray
Array of Features
y: np.ndarray
Array of labels
w: np.ndarray
Array of weights
ids: np.ndarray
Array of ids.
Returns
-------
X: np.ndarray
Transformed features
y: np.ndarray
Transformed tasks
w: np.ndarray
Transformed weights
idstrans: np.ndarray
Transformed array of ids
"""
if self.transform_X:
X[X > self.x_max] = self.x_max
X[X < (-1.0 * self.x_max)] = -1.0 * self.x_max
if self.transform_y:
y[y > self.y_max] = self.y_max
y[y < (-1.0 * self.y_max)] = -1.0 * self.y_max
return (X, y, w, ids)
def untransform(self, z: np.ndarray) -> np.ndarray:
"""Not implemented."""
raise NotImplementedError(
"Cannot untransform datasets with ClippingTransformer.")
class LogTransformer(Transformer):
"""Computes a logarithmic transformation
This transformer computes the transformation given by
>>> import numpy as np
>>> A = np.random.rand(10, 10)
>>> A = np.log(A + 1)
Assuming that tasks/features are not specified. If specified, then
transformations are only performed on specified tasks/features.
Examples
--------
>>> n_samples = 10
>>> n_features = 3
>>> n_tasks = 1
>>> ids = np.arange(n_samples)
>>> X = np.random.rand(n_samples, n_features)
>>> y = np.zeros((n_samples, n_tasks))
>>> w = np.ones((n_samples, n_tasks))
>>> dataset = dc.data.NumpyDataset(X, y, w, ids)
>>> transformer = dc.trans.LogTransformer(transform_X=True)
>>> dataset = transformer.transform(dataset)
Note
----
This class can only transform `X` or `y` and not `w`. So only one of
`transform_X` or `transform_y` can be set.
Raises
------
ValueError
if `transform_w` is set or `transform_X` and `transform_y` are both set.
"""
def __init__(self,
transform_X: bool = False,
transform_y: bool = False,
features: Optional[List[int]] = None,
tasks: Optional[List[str]] = None,
dataset: Optional[Dataset] = None):
"""Initialize log transformer.
Parameters
----------
transform_X: bool, optional (default False)
Whether to transform X
transform_y: bool, optional (default False)
Whether to transform y
features: list[Int]
List of features indices to transform
tasks: list[str]
List of task names to transform.
dataset: dc.data.Dataset object, optional (default None)
Dataset to be transformed
"""
if transform_X and transform_y:
raise ValueError("Can only transform only one of X and y")
self.features = features
self.tasks = tasks
super(LogTransformer, self).__init__(transform_X=transform_X,
transform_y=transform_y,
dataset=dataset)
def transform_array(
self, X: np.ndarray, y: np.ndarray, w: np.ndarray, ids: np.ndarray
) -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
"""Transform the data in a set of (X, y, w) arrays.
Parameters
----------
X: np.ndarray
Array of features
y: np.ndarray
Array of labels
w: np.ndarray
Array of weights.
ids: np.ndarray
Array of ids.
Returns
-------
Xtrans: np.ndarray
Transformed array of features
ytrans: np.ndarray
Transformed array of labels
wtrans: np.ndarray
Transformed array of weights
idstrans: np.ndarray
Transformed array of ids
"""
if self.transform_X:
num_features = len(X[0])
if self.features is None:
X = np.log(X + 1)
else:
for j in range(num_features):
if j in self.features:
X[:, j] = np.log(X[:, j] + 1)
else:
X[:, j] = X[:, j]
if self.transform_y:
if np.isscalar(y[0]):
num_tasks = 1
else:
num_tasks = len(y[0])
if self.tasks is None:
y = np.log(y + 1)
else:
for j in range(num_tasks):
if j in self.tasks:
y[:, j] = np.log(y[:, j] + 1)
else:
y[:, j] = y[:, j]
return (X, y, w, ids)
def untransform(self, z: np.ndarray) -> np.ndarray:
"""Undo transformation on provided data.
Parameters
----------
z: np.ndarray,
Transformed X or y array
Returns
-------
np.ndarray
Array with a logarithmic transformation undone.
"""
if self.transform_X:
num_features = len(z[0])
if self.features is None:
return np.exp(z) - 1
else:
for j in range(num_features):
if j in self.features:
z[:, j] = np.exp(z[:, j]) - 1
else:
z[:, j] = z[:, j]
return z
elif self.transform_y:
if np.isscalar(z[0]):
num_tasks = 1
else:
num_tasks = len(z[0])
if self.tasks is None:
return np.exp(z) - 1
else:
for j in range(num_tasks):
if j in self.tasks:
z[:, j] = np.exp(z[:, j]) - 1
else:
z[:, j] = z[:, j]
return z
else:
return z
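# Illustrative round trip (assumed behavior, names are illustrative): the
# forward transform applies log(x + 1) and `untransform` applies exp(z) - 1,
# so for non-negative inputs
#     t = LogTransformer(transform_X=True)
#     Xt, yt, wt, idst = t.transform_array(X, y, w, ids)
#     np.allclose(t.untransform(Xt), X)
# is expected to hold up to floating point error.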
class BalancingTransformer(Transformer):
"""Balance positive and negative (or multiclass) example weights.
This class balances the sample weights so that the sum of all example
weights from all classes is the same. This can be useful when you're
working on an imbalanced dataset where there are far fewer examples of some
classes than others.
Examples
--------
Here's an example for a binary dataset.
>>> n_samples = 10
>>> n_features = 3
>>> n_tasks = 1
>>> n_classes = 2
>>> ids = np.arange(n_samples)
>>> X = np.random.rand(n_samples, n_features)
>>> y = np.random.randint(n_classes, size=(n_samples, n_tasks))
>>> w = np.ones((n_samples, n_tasks))
>>> dataset = dc.data.NumpyDataset(X, y, w, ids)
>>> transformer = dc.trans.BalancingTransformer(dataset=dataset)
>>> dataset = transformer.transform(dataset)
And here's a multiclass dataset example.
>>> n_samples = 50
>>> n_features = 3
>>> n_tasks = 1
>>> n_classes = 5
>>> ids = np.arange(n_samples)
>>> X = np.random.rand(n_samples, n_features)
>>> y = np.random.randint(n_classes, size=(n_samples, n_tasks))
>>> w = np.ones((n_samples, n_tasks))
>>> dataset = dc.data.NumpyDataset(X, y, w, ids)
>>> transformer = dc.trans.BalancingTransformer(dataset=dataset)
>>> dataset = transformer.transform(dataset)
See Also
--------
deepchem.trans.DuplicateBalancingTransformer: Balance by duplicating samples.
Note
----
This transformer is only meaningful for classification datasets where `y`
takes on a limited set of values. This class can only transform `w` and does
not transform `X` or `y`.
Raises
------
ValueError
if `transform_X` or `transform_y` are set, or if `y` or `w` aren't of shape `(N,)` or `(N, n_tasks)`.
"""
def __init__(self, dataset: Dataset):
# BalancingTransformer can only transform weights.
super(BalancingTransformer, self).__init__(transform_w=True,
dataset=dataset)
# Compute weighting factors from dataset.
y = dataset.y
w = dataset.w
# Handle 1-D case
if len(y.shape) == 1:
y = np.reshape(y, (len(y), 1))
if len(w.shape) == 1:
w = np.reshape(w, (len(w), 1))
if len(y.shape) != 2:
raise ValueError("y must be of shape (N,) or (N, n_tasks)")
if len(w.shape) != 2:
raise ValueError("w must be of shape (N,) or (N, n_tasks)")
self.classes = sorted(np.unique(y))
weights = []
for ind, task in enumerate(dataset.get_task_names()):
task_w = w[:, ind]
task_y = y[:, ind]
# Remove labels with zero weights
task_y = task_y[task_w != 0]
N_task = len(task_y)
class_counts = []
# Note that we may have 0 elements of a given class since we remove those
# labels with zero weight. This typically happens in multitask datasets
# where some datapoints only have labels for some tasks.
for c in self.classes:
# this works because task_y is 1D
num_c = len(np.where(task_y == c)[0])
class_counts.append(num_c)
# This is the right ratio since N_task/num_c * num_c = N_task
# for all classes
class_weights = [
N_task / float(num_c) if num_c > 0 else 0
for num_c in class_counts
]
weights.append(class_weights)
self.weights = weights
def transform_array(
self, X: np.ndarray, y: np.ndarray, w: np.ndarray, ids: np.ndarray
) -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
"""Transform the data in a set of (X, y, w) arrays.
Parameters
----------
X: np.ndarray
Array of features
y: np.ndarray
Array of labels
w: np.ndarray
Array of weights.
ids: np.ndarray
Array of ids.
Returns
-------
Xtrans: np.ndarray
Transformed array of features
ytrans: np.ndarray
Transformed array of labels
wtrans: np.ndarray
Transformed array of weights
idstrans: np.ndarray
Transformed array of ids
"""
w_balanced = np.zeros_like(w)
if len(y.shape) == 1 and len(w.shape) == 2 and w.shape[1] == 1:
y = np.expand_dims(y, 1)
if len(y.shape) == 1:
n_tasks = 1
elif len(y.shape) == 2:
n_tasks = y.shape[1]
else:
raise ValueError("y must be of shape (N,) or (N, n_tasks)")
for ind in range(n_tasks):
if n_tasks == 1:
task_y = y
task_w = w
else:
task_y = y[:, ind]
task_w = w[:, ind]
for i, c in enumerate(self.classes):
class_indices = np.logical_and(task_y == c, task_w != 0)
# Set to the class weight computed previously
if n_tasks == 1:
w_balanced[class_indices] = self.weights[ind][i]
else:
w_balanced[class_indices, ind] = self.weights[ind][i]
return (X, y, w_balanced, ids)
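# Illustrative property (assumed behavior, names are illustrative): because
# each class weight is N_task / num_c, the balanced weights sum to roughly
# N_task per class, e.g. for a binary task
#     w_pos = w_balanced[task_y == 1].sum()
#     w_neg = w_balanced[task_y == 0].sum()
# should come out (nearly) equal over examples with nonzero original weight.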
class FlatteningTransformer(Transformer):
"""This transformer is required for a `Dataset` consisting of fragments as a preprocessing
step before prediction. This is used only in the context of performing interpretation of models using atomic
contributions (atom-based model interpretation) [1]_
Examples
--------
Here's an example of preparation to atom-based model interpretation.
>>> import tempfile
>>> import deepchem as dc
>>> with tempfile.NamedTemporaryFile(mode='wt', delete=False) as fin:
... tmp = fin.write("smiles,endpoint\\nc1ccccc1,1")
>>> loader = dc.data.CSVLoader([], feature_field="smiles",
... featurizer = dc.feat.ConvMolFeaturizer(per_atom_fragmentation=False))
>>> # prepare dataset of molecules ready for prediction stage
... dataset = loader.create_dataset(fin.name)
>>> loader = dc.data.CSVLoader([], feature_field="smiles",
... featurizer=dc.feat.ConvMolFeaturizer(per_atom_fragmentation=True))
>>> frag_dataset = loader.create_dataset(fin.name)
>>> transformer = dc.trans.FlatteningTransformer(dataset=frag_dataset)
>>> # prepare dataset of fragments ready for prediction stage,
... # thereafter difference with molecules' predictions can be calculated
... frag_dataset = transformer.transform(frag_dataset)
See Also
--------
Detailed examples of `GraphConvModel` interpretation are provided in Tutorial #28
References
---------
.. [1] <NAME>., et al. J. Chem. Inf. Model. 2016, 56, 8, 1455–1469
"""
def __init__(self, dataset: Dataset):
"""Initializes flattening transformation.
Parameters
----------
dataset: dc.data.Dataset
Dataset object to be transformed
"""
if self.__class__.__name__ == "Transformer":
raise ValueError(
"Transformer is an abstract superclass and cannot be directly instantiated. You probably want to instantiate a concrete subclass instead."
)
self.transform_X = True
self.transform_y = (dataset.get_shape()[1] != tuple()
) # iff y passed, then transform it
self.transform_w = (dataset.get_shape()[2] != tuple()
) # iff w passed, then transform it
self.transform_ids = True
def transform_array(
self, X: np.ndarray, y: np.ndarray, w: np.ndarray, ids: np.ndarray
) -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
"""Transform the data in a set of (X, y, w) arrays.
Parameters
----------
X: np.ndarray
Array of features
y: np.ndarray
Array of labels
w: np.ndarray
Array of weights.
ids: np.ndarray
Array of ids.
Returns
-------
Xtrans: np.ndarray
Transformed array of features
ytrans: np.ndarray
Transformed array of labels
wtrans: np.ndarray
Transformed array of weights
idstrans: np.ndarray
Transformed array of ids
"""
ids = np.repeat(ids, [len(i) for i in X],
axis=0)  # each fragment should receive parent mol id
if self.transform_y:
y = np.repeat(
y, [len(i) for i in X], axis=0
) # for consistency of shapes each fragment should receive parent mol y
if self.transform_w:
w = np.repeat(
w, [len(i) for i in X], axis=0
) # for consistency of shapes each fragment should receive parent mol w
X = np.array([j for i in X for j in i]) # flatten
return (X, y, w, ids)
class CDFTransformer(Transformer):
"""Histograms the data and assigns values based on sorted list.
Acts like a Cumulative Distribution Function (CDF). If given a dataset of
samples from a continuous distribution computes the CDF of this dataset and
replaces values with their corresponding CDF values.
Examples
--------
Let's look at an example where we transform only features.
>>> N = 10
>>> n_feat = 5
>>> n_bins = 100
Note that we're using 100 bins for our CDF histogram
>>> import numpy as np
>>> X = np.random.normal(size=(N, n_feat))
>>> y = np.random.randint(2, size=(N,))
>>> dataset = dc.data.NumpyDataset(X, y)
>>> cdftrans = dc.trans.CDFTransformer(transform_X=True, dataset=dataset, bins=n_bins)
>>> dataset = cdftrans.transform(dataset)
Note that you can apply this transformation to `y` as well
>>> X = np.random.normal(size=(N, n_feat))
>>> y = np.random.normal(size=(N,))
>>> dataset = dc.data.NumpyDataset(X, y)
>>> cdftrans = dc.trans.CDFTransformer(transform_y=True, dataset=dataset, bins=n_bins)
>>> dataset = cdftrans.transform(dataset)
"""
def __init__(self,
transform_X: bool = False,
transform_y: bool = False,
dataset: Optional[Dataset] = None,
bins: int = 2):
"""Initialize this transformer.
Parameters
----------
transform_X: bool, optional (default False)
Whether to transform X
transform_y: bool, optional (default False)
Whether to transform y
dataset: dc.data.Dataset object, optional (default None)
Dataset to be transformed
bins: int, optional (default 2)
Number of bins to use when computing histogram.
"""
super(CDFTransformer, self).__init__(transform_X=transform_X,
transform_y=transform_y)
self.bins = bins
if transform_y:
if dataset is None:
raise ValueError(
"dataset must be specified when transforming y")
self.y = dataset.y
def transform_array(
self, X: np.ndarray, y: np.ndarray, w: np.ndarray, ids: np.ndarray
) -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
"""Performs CDF transform on data.
Parameters
----------
X: np.ndarray
Array of features
y: np.ndarray
Array of labels
w: np.ndarray
Array of weights.
ids: np.ndarray
Array of identifiers
Returns
-------
Xtrans: np.ndarray
Transformed array of features
ytrans: np.ndarray
Transformed array of labels
wtrans: np.ndarray
Transformed array of weights
idstrans: np.ndarray
Transformed array of ids
"""
w_t = w
if self.transform_X:
X_t = get_cdf_values(X, self.bins)
y_t = y
elif self.transform_y:
X_t = X
y_t = get_cdf_values(y, self.bins)
return X_t, y_t, w_t, ids
def untransform(self, z: np.ndarray) -> np.ndarray:
"""Undo transformation on provided data.
Note that this transformation is only undone for y.
Parameters
----------
z: np.ndarray,
Transformed y array
Returns
-------
np.ndarray
Array with the transformation undone.
"""
# Need this for transform_y
if self.transform_y:
return self.y
else:
raise NotImplementedError
def get_cdf_values(array: np.ndarray, bins: int) -> np.ndarray:
"""Helper function to compute CDF values.
Parameters
----------
array: np.ndarray
Must be of shape `(n_rows, n_cols)` or `(n_rows,)`
bins: int
Number of bins to split data into.
Returns
-------
array_t: np.ndarray
Array with sorted histogram values
"""
# Handle 1D case
if len(array.shape) == 1:
array = np.reshape(array, (len(array), 1))
n_rows = array.shape[0]
n_cols = array.shape[1]
array_t = np.zeros((n_rows, n_cols))
parts = n_rows / bins
hist_values = np.zeros(n_rows)
sorted_hist_values = np.zeros(n_rows)
for row in range(n_rows):
if np.remainder(bins, 2) == 1:
hist_values[row] = np.floor(np.divide(row, parts)) / (bins - 1)
else:
hist_values[row] = np.floor(np.divide(row, parts)) / bins
for col in range(n_cols):
order = np.argsort(array[:, col], axis=0)
sorted_hist_values = hist_values[order]
array_t[:, col] = sorted_hist_values
return array_t
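# Illustrative sketch (assumed behavior, names are illustrative): each column
# is replaced by rank-derived values, e.g.
#     X = np.random.normal(size=(100, 4))
#     X_cdf = get_cdf_values(X, bins=10)
# so every column of X_cdf takes one of ~10 evenly spaced values in [0, 1).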
class PowerTransformer(Transformer):
"""Takes power n transforms of the data based on an input vector.
Computes the specified powers of the dataset. This can be useful if you're
looking to add higher order features of the form `x_i^2`, `x_i^3` etc. to
your dataset.
Examples
--------
Let's look at an example where we transform only `X`.
>>> N = 10
>>> n_feat = 5
>>> powers = [1, 2, 0.5]
So in this example, we're taking the identity, squares, and square roots.
Now let's construct our matrices
>>> import numpy as np
>>> X = np.random.rand(N, n_feat)
>>> y = np.random.normal(size=(N,))
>>> dataset = dc.data.NumpyDataset(X, y)
>>> trans = dc.trans.PowerTransformer(transform_X=True, dataset=dataset, powers=powers)
>>> dataset = trans.transform(dataset)
Let's now look at an example where we transform `y`. Note that the `y`
transform expands out the feature dimensions of `y` the same way it does for
`X` so this transform is only well defined for singletask datasets.
>>> import numpy as np
>>> X = np.random.rand(N, n_feat)
>>> y = np.random.rand(N)
>>> dataset = dc.data.NumpyDataset(X, y)
>>> trans = dc.trans.PowerTransformer(transform_y=True, dataset=dataset, powers=powers)
>>> dataset = trans.transform(dataset)
"""
def __init__(self,
transform_X: bool = False,
transform_y: bool = False,
dataset: Optional[Dataset] = None,
powers: List[int] = [1]):
"""Initialize this transformer
Parameters
----------
transform_X: bool, optional (default False)
Whether to transform X
transform_y: bool, optional (default False)
Whether to transform y
dataset: dc.data.Dataset object, optional (default None)
Dataset to be transformed. Note that this argument is ignored since
`PowerTransformer` doesn't require it to be specified.
powers: list[int], optional (default `[1]`)
The list of powers of features/labels to compute.
"""
super(PowerTransformer, self).__init__(transform_X=transform_X,
transform_y=transform_y)
self.powers = powers
def transform_array(
self, X: np.ndarray, y: np.ndarray, w: np.ndarray, ids: np.ndarray
) -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
"""Performs power transform on data.
Parameters
----------
X: np.ndarray
Array of features
y: np.ndarray
Array of labels
w: np.ndarray
Array of weights.
ids: np.ndarray
Array of identifiers.
Returns
-------
Xtrans: np.ndarray
Transformed array of features
ytrans: np.ndarray
Transformed array of labels
wtrans: np.ndarray
Transformed array of weights
idstrans: np.ndarray
Transformed array of ids
"""
if not (len(y.shape) == 1 or len(y.shape) == 2 and y.shape[1] == 1):
raise ValueError("This transform is not defined for multitask y")
# This reshape is safe because of the guard above.
y = np.reshape(y, (len(y), 1))
w_t = w
n_powers = len(self.powers)
if self.transform_X:
X_t = np.power(X, self.powers[0])
for i in range(1, n_powers):
X_t = np.hstack((X_t, np.power(X, self.powers[i])))
y_t = y
if self.transform_y:
y_t = np.power(y, self.powers[0])
for i in range(1, n_powers):
y_t = np.hstack((y_t, np.power(y, self.powers[i])))
X_t = X
return (X_t, y_t, w_t, ids)
def untransform(self, z: np.ndarray) -> np.ndarray:
"""Undo transformation on provided data.
Parameters
----------
z: np.ndarray,
Transformed y array
Returns
-------
np.ndarray
Array with the power transformation undone.
"""
n_powers = len(self.powers)
orig_len = (z.shape[1]) // n_powers
z = z[:, :orig_len]
z = np.power(z, 1 / self.powers[0])
return z
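# Illustrative round trip (assumed behavior, names are illustrative): with
# powers=[1, 2, 0.5] the forward transform stacks [y, y**2, y**0.5] column-wise;
# `untransform` keeps only the first block and raises it to 1 / powers[0], so
#     t = PowerTransformer(transform_y=True, powers=[1, 2, 0.5])
#     _, y_t, _, _ = t.transform_array(X, y, w, ids)
#     np.allclose(t.untransform(y_t), y.reshape(-1, 1))
# is expected to hold up to floating point error.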
class CoulombFitTransformer(Transformer):
"""Performs randomization and binarization operations on batches of Coulomb Matrix features during fit.
Examples
--------
>>> n_samples = 10
>>> n_features = 3
>>> n_tasks = 1
>>> ids = np.arange(n_samples)
>>> X = np.random.rand(n_samples, n_features, n_features)
>>> y = np.zeros((n_samples, n_tasks))
>>> w = np.ones((n_samples, n_tasks))
>>> dataset = dc.data.NumpyDataset(X, y, w, ids)
>>> fit_transformers = [dc.trans.CoulombFitTransformer(dataset)]
>>> model = dc.models.MultitaskFitTransformRegressor(n_tasks,
... [n_features, n_features], batch_size=n_samples, fit_transformers=fit_transformers, n_evals=1)
>>> print(model.n_features)
12
"""
def __init__(self, dataset: Dataset):
"""Initializes CoulombFitTransformer.
Parameters
----------
dataset: dc.data.Dataset
Dataset object to be transformed.
"""
X = dataset.X
num_atoms = X.shape[1]
self.step = 1.0
self.noise = 1.0
self.triuind = (np.arange(num_atoms)[:, np.newaxis] <=
np.arange(num_atoms)[np.newaxis, :]).flatten()
self.max = 0
for _ in range(10):
self.max = np.maximum(self.max, self.realize(X).max(axis=0))
X = self.expand(self.realize(X))
self.nbout = X.shape[1]
self.mean = X.mean(axis=0)
self.std = (X - self.mean).std()
super(CoulombFitTransformer, self).__init__(transform_X=True)
def realize(self, X: np.ndarray) -> np.ndarray:
"""Randomize features.
Parameters
----------
X: np.ndarray
Features
Returns
-------
X: np.ndarray
Randomized features
"""
def _realize_(x):
assert (len(x.shape) == 2)
inds = np.argsort(-(x**2).sum(axis=0)**.5 +
np.random.normal(0, self.noise, x[0].shape))
x = x[inds, :][:, inds] * 1
x = x.flatten()[self.triuind]
return x
return np.array([_realize_(z) for z in X])
def normalize(self, X: np.ndarray) -> np.ndarray:
"""Normalize features.
Parameters
----------
X: np.ndarray
Features
Returns
-------
X: np.ndarray
Normalized features
"""
return (X - self.mean) / self.std
def expand(self, X: np.ndarray) -> np.ndarray:
"""Binarize features.
Parameters
----------
X: np.ndarray
Features
Returns
-------
X: np.ndarray
Binarized features
"""
Xexp = []
for i in range(X.shape[1]):
for k in np.arange(
0,
self.max[i] + self.step, # type: ignore
self.step):
Xexp += [np.tanh((X[:, i] - k) / self.step)]
return np.array(Xexp).T
def X_transform(self, X: np.ndarray) -> np.ndarray:
"""Perform Coulomb Fit transform on features.
Parameters
----------
X: np.ndarray
Features
Returns
-------
X: np.ndarray
Transformed features
"""
X = self.normalize(self.expand(self.realize(X)))
return X
def transform_array(
self, X: np.ndarray, y: np.ndarray, w: np.ndarray, ids: np.ndarray
) -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
"""Performs randomization and binarization operations on data.
Parameters
----------
X: np.ndarray
Array of features
y: np.ndarray
Array of labels
w: np.ndarray
Array of weights.
ids: np.ndarray
Array of identifiers.
Returns
-------
Xtrans: np.ndarray
Transformed array of features
ytrans: np.ndarray
Transformed array of labels
wtrans: np.ndarray
Transformed array of weights
idstrans: np.ndarray
Transformed array of ids
"""
X = self.X_transform(X)
return (X, y, w, ids)
def untransform(self, z: np.ndarray) -> np.ndarray:
"Not implemented."
raise NotImplementedError(
"Cannot untransform datasets with FitTransformer.")
class IRVTransformer(Transformer):
"""Performs transform from ECFP to IRV features(K nearest neighbors).
This transformer is required by `MultitaskIRVClassifier` as a preprocessing
step before training.
Examples
--------
Let's start by defining the parameters of the dataset we're about to
transform.
>>> n_feat = 128
>>> N = 20
>>> n_tasks = 2
Let's now make our dataset object
>>> import numpy as np
>>> import deepchem as dc
>>> X = np.random.randint(2, size=(N, n_feat))
>>> y = np.zeros((N, n_tasks))
>>> w = np.ones((N, n_tasks))
>>> dataset = dc.data.NumpyDataset(X, y, w)
And let's apply our transformer with 10 nearest neighbors.
>>> K = 10
>>> trans = dc.trans.IRVTransformer(K, n_tasks, dataset)
>>> dataset = trans.transform(dataset)
Note
----
This class requires TensorFlow to be installed.
"""
def __init__(self, K: int, n_tasks: int, dataset: Dataset):
"""Initializes IRVTransformer.
Parameters
----------
K: int
Number of nearest neighbours to count.
n_tasks: int
number of tasks
dataset: dc.data.Dataset object
Training dataset used as the reference set for similarity calculation.
"""
self.X = dataset.X
self.n_tasks = n_tasks
self.K = K
self.y = dataset.y
self.w = dataset.w
super(IRVTransformer, self).__init__(transform_X=True)
def realize(self, similarity: np.ndarray, y: np.ndarray,
w: np.ndarray) -> List:
"""find samples with top ten similarity values in the reference dataset
Parameters
----------
similarity: np.ndarray
similarity value between target dataset and reference dataset
should have size of (n_samples_in_target, n_samples_in_reference)
y: np.array
labels for a single task
w: np.array
weights for a single task
Returns
-------
features: list
n_samples * np.array of size (2*K,)
each array includes K similarity values and corresponding labels
"""
features = []
similarity_xs = similarity * np.sign(w)
[target_len, reference_len] = similarity_xs.shape
values = []
top_labels = []
# map the indices to labels
for count in range(target_len // 100 + 1):
similarity = similarity_xs[count * 100:min((count + 1) *
100, target_len), :]
# generating batch of data by slicing similarity matrix
# into 100*reference_dataset_length
indice = np.argsort(similarity)[:, -(self.K + 1):][:, ::-1]
value = np.take_along_axis(similarity, indice, axis=1)
top_label = np.take(y, indice)
values.append(value)
top_labels.append(top_label)
values_np = np.concatenate(values, axis=0)
top_labels_np = np.concatenate(top_labels, axis=0)
# concatenate batches of data together
for count in range(values_np.shape[0]):
if values_np[count, 0] == 1:
features.append(
np.concatenate([
values_np[count, 1:(self.K + 1)],
top_labels_np[count, 1:(self.K + 1)]
]))
# highest similarity is 1: target is in the reference
# use the following K points
else:
features.append(
np.concatenate([
values_np[count, 0:self.K], top_labels_np[count,
0:self.K]
]))
# highest less than 1: target not in the reference, use top K points
return features
def X_transform(self, X_target: np.ndarray) -> np.ndarray:
"""Calculate similarity between target dataset(X_target) and
reference dataset(X): #(1 in intersection)/#(1 in union)
similarity = (X_target intersect X)/(X_target union X)
Parameters
----------
X_target: np.ndarray
fingerprints of target dataset
should have the same length as X along the second axis
Returns
-------
X_target: np.ndarray
features of size(batch_size, 2*K*n_tasks)
"""
X_target2 = []
n_features = X_target.shape[1]
logger.info('start similarity calculation')
time1 = time.time()
similarity = IRVTransformer.matrix_mul(X_target, np.transpose(
self.X)) / (n_features - IRVTransformer.matrix_mul(
1 - X_target, np.transpose(1 - self.X)))
time2 = time.time()
logger.info('similarity calculation takes %i s' % (time2 - time1))
for i in range(self.n_tasks):
X_target2.append(
self.realize(similarity, self.y[:, i], self.w[:, i]))
return np.concatenate([z for z in np.array(X_target2)], axis=1)
@staticmethod
def matrix_mul(X1, X2, shard_size=5000):
"""Calculate matrix multiplication for big matrix,
X1 and X2 are sliced into pieces with shard_size rows(columns)
then multiplied together and concatenated to the proper size
"""
X1 = np.asarray(X1, dtype=np.float64)
X2 = np.asarray(X2, dtype=np.float64)
X1_shape = X1.shape
X2_shape = X2.shape
assert X1_shape[1] == X2_shape[0]
X1_iter = X1_shape[0] // shard_size + 1
X2_iter = X2_shape[1] // shard_size + 1
all_result = np.zeros((1,))
for X1_id in range(X1_iter):
result = np.zeros((1,))
for X2_id in range(X2_iter):
partial_result = np.matmul(
X1[X1_id * shard_size:min((X1_id + 1) *
shard_size, X1_shape[0]), :],
X2[:, X2_id * shard_size:min((X2_id + 1) *
shard_size, X2_shape[1])])
# calculate matrix multiplication on slices
if result.size == 1:
result = partial_result
else:
result = np.concatenate((result, partial_result), axis=1)
# concatenate the slices together
del partial_result
if all_result.size == 1:
all_result = result
else:
all_result = np.concatenate((all_result, result), axis=0)
del result
return all_result
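# Illustrative check (assumed behavior, names are illustrative): for matrices
# small enough to fit in memory the sharded product should agree with a
# direct matmul, e.g.
#     A = np.random.randint(2, size=(7, 11))
#     B = np.random.randint(2, size=(11, 13))
#     np.allclose(IRVTransformer.matrix_mul(A, B, shard_size=3), A @ B)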
def transform(self,
dataset: Dataset,
parallel: bool = False,
out_dir: Optional[str] = None,
**kwargs) -> Union[DiskDataset, NumpyDataset]:
"""Transforms a given dataset
Parameters
----------
dataset: Dataset
Dataset to transform
parallel: bool, optional, (default False)
Whether to parallelize this transformation. Currently ignored.
out_dir: str, optional (default None)
Directory to write resulting dataset.
Returns
-------
DiskDataset or NumpyDataset
`Dataset` object that is transformed.
"""
X_length = dataset.X.shape[0]
X_trans = []
for count in range(X_length // 5000 + 1):
X_trans.append(
self.X_transform(dataset.X[count *
5000:min((count + 1) *
5000, X_length), :]))
X = np.concatenate(X_trans, axis=0)
if out_dir is None:
return NumpyDataset(X, dataset.y, dataset.w, ids=None)
return DiskDataset.from_numpy(X, dataset.y, dataset.w, data_dir=out_dir)
def untransform(self, z: np.ndarray) -> np.ndarray:
"Not implemented."
raise NotImplementedError(
"Cannot untransform datasets with IRVTransformer.")
class DAGTransformer(Transformer):
"""Performs transform from ConvMol adjacency lists to DAG calculation orders
This transformer is used by `DAGModel` before training to transform its
inputs to the correct shape. This expansion turns a molecule with `n` atoms
into `n` DAGs, each with root at a different atom in the molecule.
Examples
--------
Let's transform a small dataset of molecules.
>>> N = 10
>>> n_feat = 5
>>> import numpy as np
>>> feat = dc.feat.ConvMolFeaturizer()
>>> X = feat(["C", "CC"])
>>> y = np.random.rand(N)
>>> dataset = dc.data.NumpyDataset(X, y)
>>> trans = dc.trans.DAGTransformer(max_atoms=5)
>>> dataset = trans.transform(dataset)
"""
def __init__(self, max_atoms: int = 50):
"""Initializes DAGTransformer.
Parameters
----------
max_atoms: int, optional (Default 50)
Maximum number of atoms to allow
"""
self.max_atoms = max_atoms
super(DAGTransformer, self).__init__(transform_X=True)
def transform_array(
self, X: np.ndarray, y: np.ndarray, w: np.ndarray, ids: np.ndarray
) -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
"""Transform the data in a set of (X, y, w, ids) arrays.
Parameters
----------
X: np.ndarray
Array of features
y: np.ndarray
Array of labels
w: np.ndarray
Array of weights.
ids: np.ndarray
Array of identifiers.
Returns
-------
Xtrans: np.ndarray
Transformed array of features
ytrans: np.ndarray
Transformed array of labels
wtrans: np.ndarray
Transformed array of weights
idstrans: np.ndarray
Transformed array of ids
"""
for idm, mol in enumerate(X):
X[idm].parents = self.UG_to_DAG(mol)
return (X, y, w, ids)
def untransform(self, z: np.ndarray) -> np.ndarray:
"Not implemented."
raise NotImplementedError(
"Cannot untransform datasets with DAGTransformer.")
def UG_to_DAG(self, sample: ConvMol) -> List:
"""This function generates the DAGs for a molecule
Parameters
----------
sample: `ConvMol`
Molecule to transform
Returns
-------
List
List of parent adjacency matrices
"""
# list of calculation orders for DAGs
# stemming from one specific atom in the molecule
parents = []
# starting from the adjacency list derived by graphconv featurizer
UG = sample.get_adjacency_list()
# number of atoms, also number of DAGs
n_atoms = sample.get_num_atoms()
# DAG on a molecule with k atoms includes k steps of calculation,
# each step calculating graph features for one atom.
# `max_atoms` is the maximum number of steps
max_atoms = self.max_atoms
for count in range(n_atoms):
# each iteration generates the DAG starting from atom with index `count`
DAG = []
# list of lists, elements represent the calculation orders
# for atoms in the current graph
parent: List[Any] = [[] for i in range(n_atoms)]
# starting from the target atom with index `count`
current_atoms = [count]
# flags of whether the atom is already included in the DAG
atoms_indicator = np.zeros((n_atoms,))
# atom `count` is in the DAG
radial = 1
atoms_indicator[count] = radial
# recording number of radial propagation steps
while not np.all(atoms_indicator):
# in the first loop, atoms directly connected to `count` will be added
# into the DAG(radial=0), then atoms two-bond away from `count`
# will be added in the second loop(radial=1).
# atoms i-bond away will be added in i-th loop
if radial > n_atoms:
# when molecules have separate parts, starting from one part,
# it is not possible to include all atoms.
# this break quits the loop in that situation
break
# reinitialize targets for next iteration
next_atoms = []
radial = radial + 1
for current_atom in current_atoms:
for atom_adj in UG[current_atom]:
# atoms connected to current_atom
if atoms_indicator[atom_adj] == 0:
# generate the dependency map of current DAG
# atoms connected to `current_atoms`(and not included in the DAG)
# are added, and will be the `current_atoms` for next iteration.
DAG.append((current_atom, atom_adj))
atoms_indicator[atom_adj] = radial
next_atoms.append(atom_adj)
current_atoms = next_atoms
# DAG starts from the target atom, calculation should go in reverse
for edge in reversed(DAG):
# `edge[1]` is the parent of `edge[0]`
parent[edge[0]].append(edge[1] % max_atoms)
parent[edge[0]].extend(parent[edge[1]])
for i, order in enumerate(parent):
parent[i] = sorted(order, key=lambda x: atoms_indicator[x])
# after this loop, `parents[i]` includes all parents of atom i
for ids, atom in enumerate(parent):
# manually adding the atom index into its parents list
parent[ids].insert(0, ids % max_atoms)
# after this loop, `parents[i][0]` is i, `parents[i][1:]` are all parents of atom i
# atoms with fewer parents (farther from the target atom) come first.
# graph features of atoms without parents will be first calculated,
# then atoms with more parents can be calculated in order
# based on previously calculated graph features.
# target atom of this DAG will be calculated in the last step
parent = sorted(parent, key=len)
for ids, atom in enumerate(parent):
n_par = len(atom)
# padding with `max_atoms`
if n_par < max_atoms:
parent[ids].extend(
[max_atoms for i in range(max_atoms - n_par)])
if n_par > max_atoms:
parent[ids] = parent[ids][:max_atoms]
if len(parent) > max_atoms:
parent = parent[-max_atoms:]
while len(parent) < max_atoms:
# padding
parent.insert(0, [max_atoms] * max_atoms)
# `parents[i]` is the calculation order for the DAG stemming from atom i,
# which is a max_atoms * max_atoms numpy array after padding
parents.append(np.array(parent))
return parents
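# Illustrative sketch (assumed behavior, names are illustrative): for a
# featurized molecule `mol` (a ConvMol), something like
#     trans = DAGTransformer(max_atoms=5)
#     orders = trans.UG_to_DAG(mol)
# returns one (max_atoms, max_atoms) calculation-order array per atom.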
class ImageTransformer(Transformer):
"""Convert an image into width, height, channel
Note
----
This class requires Pillow to be installed.
"""
def __init__(self, size: Tuple[int, int]):
"""Initializes ImageTransformer.
Parameters
----------
size: Tuple[int, int]
The image size, a tuple of (width, height).
"""
self.size = size
super(ImageTransformer, self).__init__(transform_X=True)
def transform_array(self, X, y, w, ids):
"""Transform the data in a set of (X, y, w, ids) arrays.
Parameters
----------
X: np.ndarray
Array of features
y: np.ndarray
Array of labels
w: np.ndarray
Array of weights.
ids: np.ndarray
Array of identifiers.
Returns
-------
Xtrans: np.ndarray
Transformed array of features
ytrans: np.ndarray
Transformed array of labels
wtrans: np.ndarray
Transformed array of weights
idstrans: np.ndarray
Transformed array of ids
"""
try:
from PIL import Image
except ModuleNotFoundError:
raise ImportError("This function requires Pillow to be installed.")
images = [np.array(Image.open(x).convert('RGB')) for x in X]
images = [Image.fromarray(x).resize(self.size) for x in images]
return np.array(images), y, w, ids
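# Illustrative usage (assumed behavior, names are illustrative): given a
# dataset whose X entries are image file paths,
#     trans = ImageTransformer(size=(64, 64))
#     dataset = trans.transform(dataset)
# would load and resize every image to 64x64.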
# class ANITransformer(Transformer):
# """Performs transform from 3D coordinates to ANI symmetry functions
# Note
# ----
# This class requires TensorFlow to be installed.
# """
# def __init__(self,
# max_atoms=23,
# radial_cutoff=4.6,
# angular_cutoff=3.1,
# radial_length=32,
# angular_length=8,
# atom_cases=[1, 6, 7, 8, 16],
# atomic_number_differentiated=True,
# coordinates_in_bohr=True):
# """
# Only X can be transformed
# """
# import tensorflow as tf
# self.max_atoms = max_atoms
# self.radial_cutoff = radial_cutoff
# self.angular_cutoff = angular_cutoff
# self.radial_length = radial_length
# self.angular_length = angular_length
# self.atom_cases = atom_cases
# self.atomic_number_differentiated = atomic_number_differentiated
# self.coordinates_in_bohr = coordinates_in_bohr
# self.compute_graph = self.build()
# self.sess = tf.Session(graph=self.compute_graph)
# self.transform_batch_size = 32
# super(ANITransformer, self).__init__(transform_X=True)
# def transform_array(self, X, y, w):
# if self.transform_X:
# X_out = []
# num_transformed = 0
# start = 0
# batch_size = self.transform_batch_size
# while True:
# end = min((start + 1) * batch_size, X.shape[0])
# X_batch = X[(start * batch_size):end]
# output = self.sess.run(
# [self.outputs], feed_dict={self.inputs: X_batch})[0]
# X_out.append(output)
# num_transformed = num_transformed + X_batch.shape[0]
# logger.info('%i samples transformed' % num_transformed)
# start += 1
# if end >= len(X):
# break
# X_new = np.concatenate(X_out, axis=0)
# assert X_new.shape[0] == X.shape[0]
# return (X_new, y, w)
# def untransform(self, z):
# raise NotImplementedError(
# "Cannot untransform datasets with ANITransformer.")
# def build(self):
# """ tensorflow computation graph for transform """
# import tensorflow as tf
# graph = tf.Graph()
# with graph.as_default():
# self.inputs = tf.keras.Input(
# dtype=tf.float32, shape=(None, self.max_atoms, 4))
# atom_numbers = tf.cast(self.inputs[:, :, 0], tf.int32)
# flags = tf.sign(atom_numbers)
# flags = tf.cast(
# tf.expand_dims(flags, 1) * tf.expand_dims(flags, 2), tf.float32)
# coordinates = self.inputs[:, :, 1:]
# if self.coordinates_in_bohr:
# coordinates = coordinates * 0.52917721092
# d = self.distance_matrix(coordinates, flags)
# d_radial_cutoff = self.distance_cutoff(d, self.radial_cutoff, flags)
# d_angular_cutoff = self.distance_cutoff(d, self.angular_cutoff, flags)
# radial_sym = self.radial_symmetry(d_radial_cutoff, d, atom_numbers)
# angular_sym = self.angular_symmetry(d_angular_cutoff, d, atom_numbers,
# coordinates)
# self.outputs = tf.concat(
# [
# tf.cast(tf.expand_dims(atom_numbers, 2), tf.float32), radial_sym,
# angular_sym
# ],
# axis=2)
# return graph
# def distance_matrix(self, coordinates, flags):
# """ Generate distance matrix """
# import tensorflow as tf
# max_atoms = self.max_atoms
# tensor1 = tf.stack([coordinates] * max_atoms, axis=1)
# tensor2 = tf.stack([coordinates] * max_atoms, axis=2)
# # Calculate pairwise distance
# d = tf.sqrt(tf.reduce_sum(tf.square(tensor1 - tensor2), axis=3))
# # Masking for valid atom index
# d = d * flags
# return d
# def distance_cutoff(self, d, cutoff, flags):
# """ Generate distance matrix with trainable cutoff """
# import tensorflow as tf
# # Cutoff with threshold Rc
# d_flag = flags * tf.sign(cutoff - d)
# d_flag = tf.nn.relu(d_flag)
# d_flag = d_flag * tf.expand_dims(
# tf.expand_dims((1 - tf.eye(self.max_atoms)), 0), -1)
# d = 0.5 * (tf.cos(np.pi * d / cutoff) + 1)
# return d * d_flag
# def radial_symmetry(self, d_cutoff, d, atom_numbers):
# """ Radial Symmetry Function """
# import tensorflow as tf
# embedding = tf.eye(np.max(self.atom_cases) + 1)
# atom_numbers_embedded = tf.nn.embedding_lookup(embedding, atom_numbers)
# Rs = np.linspace(0., self.radial_cutoff, self.radial_length)
# ita = np.ones_like(Rs) * 3 / (Rs[1] - Rs[0])**2
# Rs = tf.cast(np.reshape(Rs, (1, 1, 1, -1)), tf.float32)
# ita = tf.cast(np.reshape(ita, (1, 1, 1, -1)), tf.float32)
# length = ita.get_shape().as_list()[-1]
# d_cutoff = tf.stack([d_cutoff] * length, axis=3)
# d = tf.stack([d] * length, axis=3)
# out = tf.exp(-ita * tf.square(d - Rs)) * d_cutoff
# if self.atomic_number_differentiated:
# out_tensors = []
# for atom_type in self.atom_cases:
# selected_atoms = tf.expand_dims(
# tf.expand_dims(atom_numbers_embedded[:, :, atom_type], axis=1),
# axis=3)
# out_tensors.append(tf.reduce_sum(out * selected_atoms, axis=2))
# return tf.concat(out_tensors, axis=2)
# else:
# return tf.reduce_sum(out, axis=2)
# def angular_symmetry(self, d_cutoff, d, atom_numbers, coordinates):
# """ Angular Symmetry Function """
# import tensorflow as tf
# max_atoms = self.max_atoms
# embedding = tf.eye(np.max(self.atom_cases) + 1)
# atom_numbers_embedded = tf.nn.embedding_lookup(embedding, atom_numbers)
# Rs = np.linspace(0., self.angular_cutoff, self.angular_length)
# ita = 3 / (Rs[1] - Rs[0])**2
# thetas = np.linspace(0., np.pi, self.angular_length)
# zeta = float(self.angular_length**2)
# ita, zeta, Rs, thetas = np.meshgrid(ita, zeta, Rs, thetas)
# zeta = tf.cast(np.reshape(zeta, (1, 1, 1, 1, -1)), tf.float32)
# ita = tf.cast(np.reshape(ita, (1, 1, 1, 1, -1)), tf.float32)
# Rs = tf.cast(np.reshape(Rs, (1, 1, 1, 1, -1)), tf.float32)
# thetas = tf.cast(np.reshape(thetas, (1, 1, 1, 1, -1)), tf.float32)
# length = zeta.get_shape().as_list()[-1]
# vector_distances = tf.stack([coordinates] * max_atoms, 1) - tf.stack(
# [coordinates] * max_atoms, 2)
# R_ij = tf.stack([d] * max_atoms, axis=3)
# R_ik = tf.stack([d] * max_atoms, axis=2)
# f_R_ij = tf.stack([d_cutoff] * max_atoms, axis=3)
# f_R_ik = tf.stack([d_cutoff] * max_atoms, axis=2)
# # Define angle theta = arccos(R_ij(Vector) dot R_ik(Vector)/R_ij(distance)/R_ik(distance))
# vector_mul = tf.reduce_sum(tf.stack([vector_distances] * max_atoms, axis=3) * \
# tf.stack([vector_distances] * max_atoms, axis=2), axis=4)
# vector_mul = vector_mul * tf.sign(f_R_ij) * tf.sign(f_R_ik)
# theta = tf.acos(tf.math.divide(vector_mul, R_ij * R_ik + 1e-5))
# R_ij = tf.stack([R_ij] * length, axis=4)
# R_ik = tf.stack([R_ik] * length, axis=4)
# f_R_ij = tf.stack([f_R_ij] * length, axis=4)
# f_R_ik = tf.stack([f_R_ik] * length, axis=4)
# theta = tf.stack([theta] * length, axis=4)
# out_tensor = tf.pow((1. + tf.cos(theta - thetas)) / 2., zeta) * \
# tf.exp(-ita * tf.square((R_ij + R_ik) / 2. - Rs)) * f_R_ij * f_R_ik * 2
# if self.atomic_number_differentiated:
# out_tensors = []
# for id_j, atom_type_j in enumerate(self.atom_cases):
# for atom_type_k in self.atom_cases[id_j:]:
# selected_atoms = tf.stack([atom_numbers_embedded[:, :, atom_type_j]] * max_atoms, axis=2) * \
# tf.stack([atom_numbers_embedded[:, :, atom_type_k]] * max_atoms, axis=1)
# selected_atoms = tf.expand_dims(
# tf.expand_dims(selected_atoms, axis=1), axis=4)
# out_tensors.append(
# tf.reduce_sum(out_tensor * selected_atoms, axis=(2, 3)))
# return tf.concat(out_tensors, axis=2)
# else:
# return tf.reduce_sum(out_tensor, axis=(2, 3))
# def get_num_feats(self):
# n_feat = self.outputs.get_shape().as_list()[-1]
# return n_feat
class FeaturizationTransformer(Transformer):
"""A transformer which runs a featurizer over the X values of a dataset.
Datasets used by this transformer must be compatible with the internal
featurizer. The idea of this transformer is that it allows for the
application of a featurizer to an existing dataset.
Examples
--------
>>> smiles = ["C", "CC"]
>>> X = np.array(smiles)
>>> y = np.array([1, 0])
>>> dataset = dc.data.NumpyDataset(X, y)
>>> trans = dc.trans.FeaturizationTransformer(dataset, dc.feat.CircularFingerprint())
>>> dataset = trans.transform(dataset)
"""
def __init__(self,
dataset: Optional[Dataset] = None,
featurizer: Optional[Featurizer] = None):
"""Initialization of FeaturizationTransformer
Parameters
----------
dataset: dc.data.Dataset object, optional (default None)
Dataset to be transformed
featurizer: dc.feat.Featurizer object, optional (default None)
Featurizer applied to perform transformations.
"""
if featurizer is None:
raise ValueError("featurizer must be specified.")
self.featurizer = featurizer
super(FeaturizationTransformer, self).__init__(transform_X=True,
dataset=dataset)
def transform_array(
self, X: np.ndarray, y: np.ndarray, w: np.ndarray, ids: np.ndarray
) -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
"""Transforms arrays of rdkit mols using internal featurizer.
Parameters
----------
X: np.ndarray
Array of features
y: np.ndarray
Array of labels
w: np.ndarray
Array of weights.
ids: np.ndarray
Array of identifiers.
Returns
-------
Xtrans: np.ndarray
Transformed array of features
ytrans: np.ndarray
Transformed array of labels
wtrans: np.ndarray
Transformed array of weights
idstrans: np.ndarray
Transformed array of ids
"""
X = self.featurizer.featurize(X)
return X, y, w, ids
class DataTransforms(object):
"""Applies different data transforms to images.
This utility class facilitates various image transformations that may be of
use for handling image datasets.
Note
----
This class requires PIL to be installed.
"""
def __init__(self, Image):
self.Image = Image
def scale(self, h, w):
"""Scales the image
Parameters
----------
h: int
Height of the images
w: int
Width of the images
Returns
-------
np.ndarray
The scaled image.
"""
from PIL import Image
return np.array(Image.fromarray(self.Image).resize((h, w)))
def flip(self, direction="lr"):
"""Flips the image
Parameters
----------
direction: str
"lr" denotes left-right flip and "ud" denotes up-down flip.
Returns
-------
np.ndarray
The flipped image.
"""
if direction == "lr":
return np.fliplr(self.Image)
elif direction == "ud":
return np.flipud(self.Image)
else:
raise ValueError(
"Invalid flip command : Enter either lr (for left to right flip) or ud (for up to down flip)"
)
def rotate(self, angle=0):
"""Rotates the image
Parameters
----------
angle: float (default = 0, i.e. no rotation)
Denotes angle by which the image should be rotated (in Degrees)
Returns
-------
np.ndarray
The rotated image.
"""
return scipy.ndimage.rotate(self.Image, angle)
def gaussian_blur(self, sigma=0.2):
"""Adds gaussian noise to the image
Parameters
----------
sigma: float
Std dev. of the gaussian distribution
Returns
-------
np.ndarray
The gaussian blurred image.
"""
return scipy.ndimage.gaussian_filter(self.Image, sigma)
def center_crop(self, x_crop, y_crop):
"""Crops the image from the center
Parameters
----------
x_crop: int
the total number of pixels to remove in the horizontal direction, evenly split between the left and right sides
y_crop: int
the total number of pixels to remove in the vertical direction, evenly split between the top and bottom sides
Returns
-------
np.ndarray
The center cropped image.
"""
y = self.Image.shape[0]
x = self.Image.shape[1]
x_start = x // 2 - (x_crop // 2)
y_start = y // 2 - (y_crop // 2)
return self.Image[y_start:y_start + y_crop, x_start:x_start + x_crop]
def crop(self, left, top, right, bottom):
"""Crops the image and returns the specified rectangular region from an image
Parameters
----------
left: int
the number of pixels to exclude from the left of the image
top: int
the number of pixels to exclude from the top of the image
right: int
the number of pixels to exclude from the right of the image
bottom: int
the number of pixels to exclude from the bottom of the image
Returns
-------
np.ndarray
The cropped image.
"""
y = self.Image.shape[0]
x = self.Image.shape[1]
return self.Image[top:y - bottom, left:x - right]
def convert2gray(self):
"""Converts the image to grayscale. The coefficients correspond to the Y' component of the Y'UV color system.
Returns
-------
np.ndarray
The grayscale image.
"""
return np.dot(self.Image[..., :3], [0.2989, 0.5870, 0.1140])
def shift(self, width, height, mode='constant', order=3):
"""Shifts the image
Parameters
----------
width: float
Amount of width shift (positive values shift the image right)
height: float
Amount of height shift (positive values shift the image down)
mode: str
Points outside the boundaries of the input are filled according to the
given mode: (‘constant’, ‘nearest’, ‘reflect’ or ‘wrap’). Default is
‘constant’
order: int
The order of the spline interpolation, default is 3. The order has to be in the range 0-5.
Returns
-------
np.ndarray
The shifted image.
"""
if len(self.Image.shape) == 2:
return scipy.ndimage.shift(self.Image, [height, width],
order=order,
mode=mode)
if len(self.Image.shape) == 3:
return scipy.ndimage.shift(self.Image, [height, width, 0],
order=order,
mode=mode)
def gaussian_noise(self, mean=0, std=25.5):
"""Adds gaussian noise to the image
Parameters
----------
mean: float
Mean of gaussian.
std: float
Standard deviation of gaussian.
Returns
-------
np.ndarray
The image with gaussian noise added.
"""
x = self.Image
x = x + np.random.normal(loc=mean, scale=std, size=self.Image.shape)
return x
def salt_pepper_noise(self, prob=0.05, salt=255, pepper=0):
"""Adds salt and pepper noise to the image
Parameters
----------
prob: float
probability of the noise.
salt: float
value of salt noise.
pepper: float
value of pepper noise.
Returns
-------
np.ndarray
The image with salt and pepper noise added.
"""
noise = np.random.random(size=self.Image.shape)
x = self.Image
x[noise < (prob / 2)] = pepper
x[noise > (1 - prob / 2)] = salt
return x
def median_filter(self, size):
""" Calculates a multidimensional median filter
Parameters
----------
size: int
The kernel size in pixels.
Returns
-------
np.ndarray
The median filtered image.
"""
from PIL import Image, ImageFilter
image = Image.fromarray(self.Image)
image = image.filter(ImageFilter.MedianFilter(size=size))
return np.array(image)
class RxnSplitTransformer(Transformer):
"""Splits the reaction SMILES input into the source and target strings
required for machine translation tasks.
The input is expected to be in the form reactant>reagent>product. The source
string would be reactants>reagents and the target string would be the products.
The transformer can also separate the reagents from the reactants for a mixed
training mode. During mixed training, the source string is transformed from
reactants>reagent to reactants.reagent> . This can be toggled (default True)
by setting the value of sep_reagent while calling the transformer.
Examples
--------
>>> # When mixed training is toggled.
>>> import numpy as np
>>> from deepchem.trans.transformers import RxnSplitTransformer
>>> reactions = np.array(["CC(C)C[Mg+].CON(C)C(=O)c1ccc(O)nc1>C1CCOC1.[Cl-]>CC(C)CC(=O)c1ccc(O)nc1","CCn1cc(C(=O)O)c(=O)c2cc(F)c(-c3ccc(N)cc3)cc21.O=CO>>CCn1cc(C(=O)O)c(=O)c2cc(F)c(-c3ccc(NC=O)cc3)cc21"], dtype=object)
>>> trans = RxnSplitTransformer(sep_reagent=True)
>>> split_reactions = trans.transform_array(X=reactions, y=np.array([]), w=np.array([]), ids=np.array([]))
>>> split_reactions
(array([['CC(C)C[Mg+].CON(C)C(=O)c1ccc(O)nc1>C1CCOC1.[Cl-]',
'CC(C)CC(=O)c1ccc(O)nc1'],
['CCn1cc(C(=O)O)c(=O)c2cc(F)c(-c3ccc(N)cc3)cc21.O=CO>',
'CCn1cc(C(=O)O)c(=O)c2cc(F)c(-c3ccc(NC=O)cc3)cc21']], dtype='<U51'), array([], dtype=float64), array([], dtype=float64), array([], dtype=float64))
When mixed training is disabled, you get the following outputs:
>>> trans_disable = RxnSplitTransformer(sep_reagent=False)
>>> split_reactions = trans_disable.transform_array(X=reactions, y=np.array([]), w=np.array([]), ids=np.array([]))
>>> split_reactions
(array([['CC(C)C[Mg+].CON(C)C(=O)c1ccc(O)nc1.C1CCOC1.[Cl-]>',
'CC(C)CC(=O)c1ccc(O)nc1'],
['CCn1cc(C(=O)O)c(=O)c2cc(F)c(-c3ccc(N)cc3)cc21.O=CO>',
'CCn1cc(C(=O)O)c(=O)c2cc(F)c(-c3ccc(NC=O)cc3)cc21']], dtype='<U51'), array([], dtype=float64), array([], dtype=float64), array([], dtype=float64))
Note
----
This class only transforms the feature field of a reaction dataset like USPTO.
"""
def __init__(self,
sep_reagent: bool = True,
dataset: Optional[Dataset] = None):
"""Initializes the Reaction split Transformer.
Parameters
----------
sep_reagent: bool, optional (default True)
To separate the reagent and reactants for training.
dataset: dc.data.Dataset object, optional (default None)
Dataset to be transformed.
"""
self.sep_reagent = sep_reagent
super(RxnSplitTransformer, self).__init__(transform_X=True,
dataset=dataset)
def transform_array(
self, X: np.ndarray, y: np.ndarray, w: np.ndarray, ids: np.ndarray
) -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
"""Transform the data in a set of (X, y, w, ids) arrays.
Parameters
----------
X: np.ndarray
Array of features(the reactions)
y: np.ndarray
Array of labels
w: np.ndarray
Array of weights.
ids: np.ndarray
Array of ids.
Returns
-------
Xtrans: np.ndarray
Transformed array of features
ytrans: np.ndarray
Transformed array of labels
wtrans: np.ndarray
Transformed array of weights
idstrans: np.ndarray
Transformed array of ids
"""
reactant = list(map(lambda x: x.split('>')[0], X))
reagent = list(map(lambda x: x.split('>')[1], X))
product = list(map(lambda x: x.split('>')[2], X))
if self.sep_reagent:
source = [x + '>' + y for x, y in zip(reactant, reagent)]
else:
source = [
x + '.' + y + '>' if y else x + '>' + y
for x, y in zip(reactant, reagent)
]
target = product
X = np.column_stack((source, target))
return (X, y, w, ids)
def untransform(self, z):
"""Not Implemented."""
raise NotImplementedError("Cannot untransform the source/target split.")
<file_sep>"""
Implements Autodock Vina's pose-generation in tensorflow.
"""
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
__author__ = "<NAME>"
__copyright__ = "Copyright 2016, Stanford University"
__license__ = "MIT"
import warnings
import numpy as np
import tensorflow as tf
from deepchem.models import Model
from deepchem.nn import model_ops
import deepchem.utils.rdkit_util as rdkit_util
def compute_neighbor_list(coords, nbr_cutoff, N, M, n_cells, ndim=3, k=5):
"""Computes a neighbor list from atom coordinates.
Parameters
----------
coords: tf.Tensor
Shape (N, ndim)
nbr_cutoff: float
Neighbor cutoff distance used to build the cell grid
N: int
Max number of atoms
M: int
Max number of neighbors
n_cells: int
Number of cells in the grid
ndim: int
Dimensionality of space.
k: int
Number of nearest neighbors to pull down.
Returns
-------
nbr_list: tf.Tensor
Shape (N, M) of atom indices
"""
start = tf.cast(tf.reduce_min(coords), tf.int32)
stop = tf.cast(tf.reduce_max(coords), tf.int32)
cells = get_cells(start, stop, nbr_cutoff, ndim=ndim)
# Associate each atom with cell it belongs to. O(N*n_cells)
# Shape (n_cells, k)
atoms_in_cells, _ = put_atoms_in_cells(coords, cells, N, n_cells, ndim, k)
# Shape (N, 1)
cells_for_atoms = get_cells_for_atoms(coords, cells, N, n_cells, ndim)
# Associate each cell with its neighbor cells. Assumes periodic boundary
# conditions, so does wraparound. O(constant)
# Shape (n_cells, 26)
neighbor_cells = compute_neighbor_cells(cells, ndim, n_cells)
# Shape (N, 26)
neighbor_cells = tf.squeeze(tf.gather(neighbor_cells, cells_for_atoms))
# coords of shape (N, ndim)
# Shape (N, 26, k, ndim)
tiled_coords = tf.tile(tf.reshape(coords, (N, 1, 1, ndim)), (1, 26, k, 1))
# Shape (N, 26, k)
nbr_inds = tf.gather(atoms_in_cells, neighbor_cells)
# Shape (N, 26, k)
atoms_in_nbr_cells = tf.gather(atoms_in_cells, neighbor_cells)
# Shape (N, 26, k, ndim)
nbr_coords = tf.gather(coords, atoms_in_nbr_cells)
# For smaller systems especially, the periodic boundary conditions can
# result in neighboring cells being seen multiple times. Maybe use tf.unique to
# make sure duplicate neighbors are ignored?
# TODO(rbharath): How does distance need to be modified here to
# account for periodic boundary conditions?
# Shape (N, 26, k)
dists = tf.reduce_sum((tiled_coords - nbr_coords)**2, axis=3)
# Shape (N, 26*k)
dists = tf.reshape(dists, [N, -1])
# TODO(rbharath): This will cause an issue with duplicates!
# Shape (N, M)
closest_nbr_locs = tf.nn.top_k(dists, k=M)[1]
# N elts of size (M,) each
split_closest_nbr_locs = [
tf.squeeze(locs) for locs in tf.split(closest_nbr_locs, N)
]
# Shape (N, 26*k)
nbr_inds = tf.reshape(nbr_inds, [N, -1])
# N elts of size (26*k,) each
split_nbr_inds = [tf.squeeze(split) for split in tf.split(nbr_inds, N)]
# N elts of size (M,) each
neighbor_list = [
tf.gather(nbr_inds, closest_nbr_locs)
for (nbr_inds,
closest_nbr_locs) in zip(split_nbr_inds, split_closest_nbr_locs)
]
# Shape (N, M)
neighbor_list = tf.stack(neighbor_list)
return neighbor_list
def get_cells_for_atoms(coords, cells, N, n_cells, ndim=3):
"""Compute the cells each atom belongs to.
Parameters
----------
coords: tf.Tensor
Shape (N, ndim)
cells: tf.Tensor
(box_size**ndim, ndim) shape.
Returns
-------
cells_for_atoms: tf.Tensor
Shape (N, 1)
"""
n_cells = int(n_cells)
# Tile both cells and coords to form arrays of size (n_cells*N, ndim)
tiled_cells = tf.tile(cells, (N, 1))
# N tensors of shape (n_cells, 1)
tiled_cells = tf.split(tiled_cells, N)
# Shape (N*n_cells, 1) after tile
tiled_coords = tf.reshape(tf.tile(coords, (1, n_cells)), (n_cells * N, ndim))
# List of N tensors of shape (n_cells, 1)
tiled_coords = tf.split(tiled_coords, N)
# Lists of length N
coords_rel = [
tf.cast(coords, tf.float32) - tf.cast(cells, tf.float32)
for (coords, cells) in zip(tiled_coords, tiled_cells)
]
coords_norm = [tf.reduce_sum(rel**2, axis=1) for rel in coords_rel]
# Lists of length n_cells
# Get indices of k atoms closest to each cell point
closest_inds = [tf.nn.top_k(-norm, k=1)[1] for norm in coords_norm]
# TODO(rbharath): tf.stack for tf 1.0
return tf.stack(closest_inds)
def compute_closest_neighbors(coords,
cells,
atoms_in_cells,
neighbor_cells,
N,
n_cells,
ndim=3,
k=5):
"""Computes nearest neighbors from neighboring cells.
TODO(rbharath): Make this pass test
Parameters
  ----------
atoms_in_cells: list
Of length n_cells. Each entry tensor of shape (k, ndim)
neighbor_cells: tf.Tensor
Of shape (n_cells, 26).
N: int
Number atoms
"""
n_cells = int(n_cells)
# Tensor of shape (n_cells, k, ndim)
#atoms_in_cells = tf.stack(atoms_in_cells)
cells_for_atoms = get_cells_for_atoms(coords, cells, N, n_cells, ndim)
all_closest = []
for atom in range(N):
atom_vec = coords[atom]
cell = cells_for_atoms[atom]
nbr_inds = tf.gather(neighbor_cells, tf.cast(cell, tf.int32))
# Tensor of shape (26, k, ndim)
nbr_atoms = tf.gather(atoms_in_cells, nbr_inds)
# Reshape to (26*k, ndim)
nbr_atoms = tf.reshape(nbr_atoms, (-1, 3))
# Subtract out atom vector. Still of shape (26*k, ndim) due to broadcast.
nbr_atoms = nbr_atoms - atom_vec
# Dists of shape (26*k, 1)
nbr_dists = tf.reduce_sum(nbr_atoms**2, axis=1)
# Of shape (k, ndim)
closest_inds = tf.nn.top_k(nbr_dists, k=k)[1]
all_closest.append(closest_inds)
return all_closest
def get_cells(start, stop, nbr_cutoff, ndim=3):
"""Returns the locations of all grid points in box.
Suppose start is -10 Angstrom, stop is 10 Angstrom, nbr_cutoff is 1.
Then would return a list of length 20^3 whose entries would be
[(-10, -10, -10), (-10, -10, -9), ..., (9, 9, 9)]
Returns
-------
cells: tf.Tensor
(box_size**ndim, ndim) shape.
"""
ranges = [tf.range(start, stop, nbr_cutoff) for _ in range(ndim)]
return tf.reshape(tf.transpose(tf.stack(tf.meshgrid(*ranges))), (-1, ndim))
def put_atoms_in_cells(coords, cells, N, n_cells, ndim, k=5):
"""Place each atom into cells. O(N) runtime.
Let N be the number of atoms.
Parameters
----------
coords: tf.Tensor
(N, 3) shape.
cells: tf.Tensor
(n_cells, ndim) shape.
N: int
Number atoms
ndim: int
Dimensionality of input space
k: int
Number of nearest neighbors.
Returns
-------
closest_atoms: tf.Tensor
Of shape (n_cells, k, ndim)
"""
n_cells = int(n_cells)
# Tile both cells and coords to form arrays of size (n_cells*N, ndim)
tiled_cells = tf.reshape(tf.tile(cells, (1, N)), (n_cells * N, ndim))
# TODO(rbharath): Change this for tf 1.0
# n_cells tensors of shape (N, 1)
tiled_cells = tf.split(tiled_cells, n_cells)
# Shape (N*n_cells, 1) after tile
tiled_coords = tf.tile(coords, (n_cells, 1))
# List of n_cells tensors of shape (N, 1)
tiled_coords = tf.split(tiled_coords, n_cells)
# Lists of length n_cells
coords_rel = [
tf.cast(coords, tf.float32) - tf.cast(cells, tf.float32)
for (coords, cells) in zip(tiled_coords, tiled_cells)
]
coords_norm = [tf.reduce_sum(rel**2, axis=1) for rel in coords_rel]
# Lists of length n_cells
# Get indices of k atoms closest to each cell point
closest_inds = [tf.nn.top_k(norm, k=k)[1] for norm in coords_norm]
# n_cells tensors of shape (k, ndim)
closest_atoms = tf.stack([tf.gather(coords, inds) for inds in closest_inds])
# Tensor of shape (n_cells, k)
closest_inds = tf.stack(closest_inds)
return closest_inds, closest_atoms
# TODO(rbharath):
# - Need to find neighbors of the cells (+/- 1 in every dimension).
# - Need to group closest atoms amongst cell neighbors
# - Need to do another top_k to find indices of closest neighbors.
# - Return N lists corresponding to neighbors for every atom.
def compute_neighbor_cells(cells, ndim, n_cells):
"""Compute neighbors of cells in grid.
# TODO(rbharath): Do we need to handle periodic boundary conditions
properly here?
# TODO(rbharath): This doesn't handle boundaries well. We hard-code
# looking for 26 neighbors, which isn't right for boundary cells in
# the cube.
Note n_cells is box_size**ndim. 26 is the number of neighbors of a cube in
a grid (including diagonals).
Parameters
----------
cells: tf.Tensor
(n_cells, 26) shape.
"""
n_cells = int(n_cells)
if ndim != 3:
raise ValueError("Not defined for dimensions besides 3")
# Number of neighbors of central cube in 3-space is
# 3^2 (top-face) + 3^2 (bottom-face) + (3^2-1) (middle-band)
# TODO(rbharath)
  k = 9 + 9 + 8  # the 26 cells surrounding the center cube of a 3x3x3 grid (a Rubik's cube, minus the center)
#n_cells = int(cells.get_shape()[0])
# Tile cells to form arrays of size (n_cells*n_cells, ndim)
# Two tilings (a, b, c, a, b, c, ...) vs. (a, a, a, b, b, b, etc.)
# Tile (a, a, a, b, b, b, etc.)
tiled_centers = tf.reshape(
tf.tile(cells, (1, n_cells)), (n_cells * n_cells, ndim))
# Tile (a, b, c, a, b, c, ...)
tiled_cells = tf.tile(cells, (n_cells, 1))
# Lists of n_cells tensors of shape (N, 1)
tiled_centers = tf.split(tiled_centers, n_cells)
tiled_cells = tf.split(tiled_cells, n_cells)
# Lists of length n_cells
coords_rel = [
tf.cast(cells, tf.float32) - tf.cast(centers, tf.float32)
for (cells, centers) in zip(tiled_centers, tiled_cells)
]
coords_norm = [tf.reduce_sum(rel**2, axis=1) for rel in coords_rel]
# Lists of length n_cells
# Get indices of k atoms closest to each cell point
# n_cells tensors of shape (26,)
closest_inds = tf.stack([tf.nn.top_k(norm, k=k)[1] for norm in coords_norm])
return closest_inds
def cutoff(d, x):
"""Truncates interactions that are too far away."""
return tf.where(d < 8, x, tf.zeros_like(x))
def gauss_1(d):
"""Computes first Gaussian interaction term.
Note that d must be in Angstrom
"""
return tf.exp(-(d / 0.5)**2)
def gauss_2(d):
"""Computes second Gaussian interaction term.
Note that d must be in Angstrom.
"""
return tf.exp(-((d - 3) / 2)**2)
def repulsion(d):
"""Computes repulsion interaction term."""
return tf.where(d < 0, d**2, tf.zeros_like(d))
def hydrophobic(d):
"""Compute hydrophobic interaction term."""
where = tf.where(d < 1.5, 1.5 - d, tf.zeros_like(d))
return tf.where(d < 0.5, tf.ones_like(d), where)
def hbond(d):
"""Computes hydrogen bond term."""
where = tf.where(d < 0, (1.0 / 0.7) * (0 - d), tf.zeros_like(d))
return tf.where(d < -0.7, tf.ones_like(d), where)
def g(c, Nrot):
"""Nonlinear function mapping interactions to free energy."""
w = tf.Variable(tf.random_normal([
1,
], stddev=.3))
return c / (1 + w * Nrot)
def h(d):
"""Sum of energy terms used in Autodock Vina.
.. math:: h_{t_i,t_j}(d) = w_1\textrm{gauss}_1(d) + w_2\textrm{gauss}_2(d) + w_3\textrm{repulsion}(d) + w_4\textrm{hydrophobic}(d) + w_5\textrm{hbond}(d)
"""
w_1 = tf.Variable(tf.random_normal([
1,
], stddev=.3))
w_2 = tf.Variable(tf.random_normal([
1,
], stddev=.3))
w_3 = tf.Variable(tf.random_normal([
1,
], stddev=.3))
w_4 = tf.Variable(tf.random_normal([
1,
], stddev=.3))
w_5 = tf.Variable(tf.random_normal([
1,
], stddev=.3))
return w_1 * gauss_1(d) + w_2 * gauss_2(d) + w_3 * repulsion(
d) + w_4 * hydrophobic(d) + w_5 * hbond(d)
class VinaModel(Model):
def __init__(self, logdir=None, batch_size=50):
r"""Vina models.
.. math:: c = \sum_{i < j} f_{t_i,t_j}(r_{ij})
Over all pairs of atoms that can move relative to one-another. :math:`t_i` is the
atomtype of atom :math:`i`.
Can view as
.. math:: c = c_\textrm{inter} + c_\textrm{intra}
depending on whether atoms can move relative to one another. Free energy is
predicted only from :math:`c_\textrm{inter}`. Let :math:`R_t` be the Van der Waal's radius of
atom of type t. Then define surface distance
.. math:: d_{ij} = r_{ij} - R_{t_i} - R_{t_j}
Then the energy term is
.. math:: f_{t_i,t_j}(r_{ij}) = \textrm{cutoff}(d_{ij}, h_{t_i,t_j}(d_{ij}))
where
.. math:: \textrm{cutoff}(d, x) = \begin{cases} x & d < 8 \textrm{ Angstrom} \\ 0 & \textrm{otherwise} \end{cases}
The inner function can be further broken down into a sum of terms
.. math:: h_{t_i,t_j}(d) = w_1\textrm{gauss}_1(d) + w_2\textrm{gauss}_2(d) + w_3\textrm{repulsion}(d) + w_4\textrm{hydrophobic}(d) + w_5\textrm{hbond}(d)
these terms are defined as follows (all constants are in Angstroms):
.. math::
\textrm{gauss}_1(d) = \exp(-(d/(0.5))^2)
\textrm{gauss}_2(d) = \exp(-((d-3)/(2))^2)
\textrm{repulsion}(d) = \begin{cases} d^2 & d < 0 \\ 0 & d \geq 0 \end{cases}
\textrm{hydrophobic}(d) = \begin{cases} 1 & d < 0.5 \\ 1.5 - d & \textrm{otherwise} \\ 0 & d > 1.5 \end{cases}
\textrm{hbond}(d) = \begin{cases} 1 & d < -0.7 \\ (1.0/.7)(0 - d) & \textrm{otherwise} \\ 0 & d > 0 \end{cases}
The free energy of binding is computed as a function of the intermolecular interactions
..math:: s = g(c_\textrm{inter})
This function is defined as
..math:: g(c) = \frac{c}{1 + wN_\textrm{rot}}
Where :math:`w` is a weight parameter and :math:`N_\textrm{rot}` is the number of
rotatable bonds between heavy atoms in the ligand.
Gradients are taken backwards through the binding-free energy function with
respect to the position of the ligand and with respect to the torsions of
rotatable bonds and flexible ligands.
TODO(rbharath): It's not clear to me how the effect of the torsions on the :math:`d_{ij}` is
computed. Is there a way to get distances from torsions?
The idea is that mutations are applied to the ligand, and then gradient descent is
used to optimize starting from the initial structure. The code to compute the mutations
is specified
https://github.com/mwojcikowski/smina/blob/master/src/lib/mutate.cpp
Seems to do random quaternion rotations of the ligand. It's not clear to me yet
how the flexible and rotatable bonds are handled for the system.
Need to know an initial search space for the compound. Typically a cubic
binding box.
References
----------
Autodock Vina Paper:
https://www.ncbi.nlm.nih.gov/pmc/articles/PMC3041641/
Smina Paper:
http://pubs.acs.org/doi/pdf/10.1021/ci300604z
Omega Paper (ligand conformation generation):
http://www.sciencedirect.com/science/article/pii/S1093326302002048
QuickVina:
http://www.cil.ntu.edu.sg/Courses/papers/journal/QuickVina.pdf
"""
pass
def __init__(self, max_local_steps=10, max_mutations=10):
warnings.warn(
"VinaModel is deprecated. "
"Will be removed in DeepChem 1.4.", DeprecationWarning)
self.max_local_steps = max_local_steps
self.max_mutations = max_mutations
self.graph, self.input_placeholders, self.output_placeholder = self.construct_graph(
)
self.sess = tf.Session(graph=self.graph)
def construct_graph(self,
N_protein=1000,
N_ligand=100,
M=50,
ndim=3,
k=5,
nbr_cutoff=6):
"""Builds the computational graph for Vina."""
graph = tf.Graph()
with graph.as_default():
n_cells = 64
# TODO(rbharath): Make this handle minibatches
protein_coords_placeholder = tf.placeholder(
tf.float32, shape=(N_protein, 3))
ligand_coords_placeholder = tf.placeholder(
tf.float32, shape=(N_ligand, 3))
protein_Z_placeholder = tf.placeholder(tf.int32, shape=(N_protein,))
ligand_Z_placeholder = tf.placeholder(tf.int32, shape=(N_ligand,))
label_placeholder = tf.placeholder(tf.float32, shape=(1,))
# Shape (N_protein+N_ligand, 3)
coords = tf.concat(
[protein_coords_placeholder, ligand_coords_placeholder], axis=0)
# Shape (N_protein+N_ligand,)
Z = tf.concat([protein_Z_placeholder, ligand_Z_placeholder], axis=0)
# Shape (N_protein+N_ligand, M)
nbr_list = compute_neighbor_list(
coords, nbr_cutoff, N_protein + N_ligand, M, n_cells, ndim=ndim, k=k)
all_interactions = []
# Shape (N_protein+N_ligand,)
all_atoms = tf.range(N_protein + N_ligand)
# Shape (N_protein+N_ligand, 3)
atom_coords = tf.gather(coords, all_atoms)
# Shape (N_protein+N_ligand,)
atom_Z = tf.gather(Z, all_atoms)
# Shape (N_protein+N_ligand, M)
nbrs = tf.squeeze(tf.gather(nbr_list, all_atoms))
# Shape (N_protein+N_ligand, M, 3)
nbr_coords = tf.gather(coords, nbrs)
# Shape (N_protein+N_ligand, M)
nbr_Z = tf.gather(Z, nbrs)
# Shape (N_protein+N_ligand, M, 3)
tiled_atom_coords = tf.tile(
tf.reshape(atom_coords, (N_protein + N_ligand, 1, 3)), (1, M, 1))
# Shape (N_protein+N_ligand, M)
dists = tf.reduce_sum((tiled_atom_coords - nbr_coords)**2, axis=2)
# TODO(rbharath): Need to subtract out Van-der-Waals radii from dists
# Shape (N_protein+N_ligand, M)
atom_interactions = h(dists)
# Shape (N_protein+N_ligand, M)
cutoff_interactions = cutoff(dists, atom_interactions)
# TODO(rbharath): Use RDKit to compute number of rotatable bonds in ligand.
Nrot = 1
# TODO(rbharath): Autodock Vina only uses protein-ligand interactions in
# computing free-energy. This implementation currently uses all interaction
# terms. Not sure if this makes a difference.
# Shape (N_protein+N_ligand, M)
free_energy = g(cutoff_interactions, Nrot)
# Shape () -- scalar
energy = tf.reduce_sum(atom_interactions)
loss = 0.5 * (energy - label_placeholder)**2
return (graph, (protein_coords_placeholder, protein_Z_placeholder,
ligand_coords_placeholder, ligand_Z_placeholder),
label_placeholder)
def fit(self, X_protein, Z_protein, X_ligand, Z_ligand, y):
"""Fit to actual data."""
return
def mutate_conformer(protein, ligand):
"""Performs a mutation on the ligand position."""
return
def generate_conformation(self, protein, ligand, max_steps=10):
"""Performs the global search for conformations."""
best_conf = None
best_score = np.inf
conf = self.sample_random_conformation()
for i in range(max_steps):
mut_conf = self.mutate_conformer(conf)
loc_conf = self.gradient_minimize(mut_conf)
if best_conf is None:
best_conf = loc_conf
else:
loc_score = self.score(loc_conf)
if loc_score < best_score:
best_conf = loc_conf
return best_conf
<file_sep>import os
import unittest
import pytest
import pandas as pd
from deepchem.data import NumpyDataset
from deepchem.feat.molecule_featurizers import MolGanFeaturizer
from deepchem.models.optimizers import ExponentialDecay
try:
import tensorflow as tf # noqa: F401
from deepchem.models import BasicMolGANModel as MolGAN
from tensorflow import one_hot
from tensorflow.keras.backend import clear_session as keras_clear_session
has_tensorflow = True
except:
has_tensorflow = False
class test_molgan_model(unittest.TestCase):
"""
Unit testing for MolGAN basic layers
"""
@pytest.mark.tensorflow
def setUp(self):
self.training_attempts = 6
self.current_dir = os.path.dirname(os.path.abspath(__file__))
self.vertices = 9
self.nodes = 5
self.edges = 5
self.embedding_dim = 10
self.dropout_rate = 0.0
self.batch_size = 100
self.first_convolution_unit = 128
self.second_convolution_unit = 64
self.aggregation_unit = 128
self.model = MolGAN(edges=self.edges,
vertices=self.vertices,
nodes=self.nodes,
embedding_dim=self.embedding_dim,
dropout_rate=self.dropout_rate)
@pytest.mark.tensorflow
def test_build(self):
"""
Test if initialization data is set-up correctly
"""
model = self.model
assert model.batch_size == self.batch_size
assert model.edges == self.edges
assert model.nodes == self.nodes
assert model.vertices == self.vertices
assert model.dropout_rate == self.dropout_rate
assert len(model.generators) == 1
assert len(model.discriminators) == 1
@pytest.mark.tensorflow
def test_shapes(self):
"""
Check if input and output shapes are correct
"""
model = self.model
# test if adjacency matrix input is correctly set
assert model.discriminators[0].input_shape[0] == (None, self.vertices,
self.vertices,
self.edges)
# test if nodes features matrix input is correctly set
assert model.discriminators[0].input_shape[1] == (None, self.vertices,
                                                          self.nodes)
# check discriminator shape
assert model.discriminators[0].output_shape == (None, 1)
# check training edges logits shape
assert model.generators[0].output_shape[0] == (None, self.vertices,
self.vertices,
self.edges)
# check training nodes logits shapes
assert model.generators[0].output_shape[1] == (None, self.vertices,
self.nodes)
@pytest.mark.tensorflow
def test_training(self):
"""
        Check training of the BasicMolGANModel on a small number of compounds.
        Due to training instability, try a few times and check that training
        succeeded at least once. Typically it fails 1-3 times out of 10.
This is something that needs to be addressed in future releases.
"""
input_file = os.path.join(self.current_dir, "assets/molgan_example.csv")
data = pd.read_csv(input_file)
molecules = list(data['Molecule'])
feat = MolGanFeaturizer()
featurized = feat.featurize(molecules)
dataset = NumpyDataset([x.adjacency_matrix for x in featurized],
[x.node_features for x in featurized])
        # True will be assigned upon a successful training attempt
success = False
for _ in range(self.training_attempts):
# force clear tensor flow backend
keras_clear_session()
# create new model
gan = MolGAN(learning_rate=ExponentialDecay(0.001, 0.9, 5000))
# to avoid flake8 E125/yapf incompatibility
s = gan.batch_size
# generate input
def iterbatches(epochs):
for __ in range(epochs):
for batch in dataset.iterbatches(batch_size=s,
pad_batches=True):
adjacency_tensor = one_hot(batch[0], gan.edges)
node_tesor = one_hot(batch[1], gan.nodes)
yield {
gan.data_inputs[0]: adjacency_tensor,
gan.data_inputs[1]: node_tesor
}
# train model
gan.fit_gan(iterbatches(1000),
generator_steps=0.2,
checkpoint_interval=0)
# generate sample
g = gan.predict_gan_generator(1000)
# check how many valid molecules were created and add to list
generated_molecules = feat.defeaturize(g)
valid_molecules_count = len(
list(filter(lambda x: x is not None, generated_molecules)))
print(valid_molecules_count)
if valid_molecules_count:
success = True
break
# finally test if there was at least one valid training session
# as the model structure improves this should become more and more strict
assert success
if __name__ == '__main__':
unittest.main()
<file_sep>"""
bace dataset loader.
"""
import os
import deepchem as dc
from deepchem.molnet.load_function.molnet_loader import TransformerGenerator, _MolnetLoader
from deepchem.data import Dataset
from typing import List, Optional, Tuple, Union
BACE_URL = "https://deepchemdata.s3-us-west-1.amazonaws.com/datasets/bace.csv"
BACE_REGRESSION_TASKS = ["pIC50"]
BACE_CLASSIFICATION_TASKS = ["Class"]
class _BaceLoader(_MolnetLoader):
def create_dataset(self) -> Dataset:
dataset_file = os.path.join(self.data_dir, "bace.csv")
if not os.path.exists(dataset_file):
dc.utils.data_utils.download_url(url=BACE_URL,
dest_dir=self.data_dir)
loader = dc.data.CSVLoader(tasks=self.tasks,
feature_field="mol",
featurizer=self.featurizer)
return loader.create_dataset(dataset_file, shard_size=8192)
def load_bace_regression(
featurizer: Union[dc.feat.Featurizer, str] = 'ECFP',
splitter: Union[dc.splits.Splitter, str, None] = 'scaffold',
transformers: List[Union[TransformerGenerator, str]] = ['normalization'],
reload: bool = True,
data_dir: Optional[str] = None,
save_dir: Optional[str] = None,
**kwargs
) -> Tuple[List[str], Tuple[Dataset, ...], List[dc.trans.Transformer]]:
""" Load BACE dataset, regression labels
The BACE dataset provides quantitative IC50 and qualitative (binary label)
binding results for a set of inhibitors of human beta-secretase 1 (BACE-1).
All data are experimental values reported in scientific literature over the
past decade, some with detailed crystal structures available. A collection
of 1522 compounds is provided, along with the regression labels of IC50.
Scaffold splitting is recommended for this dataset.
The raw data csv file contains columns below:
- "mol" - SMILES representation of the molecular structure
- "pIC50" - Negative log of the IC50 binding affinity
- "class" - Binary labels for inhibitor
Parameters
----------
featurizer: Featurizer or str
the featurizer to use for processing the data. Alternatively you can pass
one of the names from dc.molnet.featurizers as a shortcut.
splitter: Splitter or str
the splitter to use for splitting the data into training, validation, and
test sets. Alternatively you can pass one of the names from
dc.molnet.splitters as a shortcut. If this is None, all the data
will be included in a single dataset.
transformers: list of TransformerGenerators or strings
the Transformers to apply to the data. Each one is specified by a
TransformerGenerator or, as a shortcut, one of the names from
dc.molnet.transformers.
reload: bool
if True, the first call for a particular featurizer and splitter will cache
the datasets to disk, and subsequent calls will reload the cached datasets.
data_dir: str
a directory to save the raw data in
save_dir: str
a directory to save the dataset in
References
----------
.. [1] Subramanian, Govindan, et al. "Computational modeling of β-secretase 1
(BACE-1) inhibitors using ligand based approaches." Journal of chemical
information and modeling 56.10 (2016): 1936-1949.
"""
loader = _BaceLoader(featurizer, splitter, transformers,
BACE_REGRESSION_TASKS, data_dir, save_dir, **kwargs)
return loader.load_dataset('bace_r', reload)
def load_bace_classification(
featurizer: Union[dc.feat.Featurizer, str] = 'ECFP',
splitter: Union[dc.splits.Splitter, str, None] = 'scaffold',
transformers: List[Union[TransformerGenerator, str]] = ['balancing'],
reload: bool = True,
data_dir: Optional[str] = None,
save_dir: Optional[str] = None,
**kwargs
) -> Tuple[List[str], Tuple[Dataset, ...], List[dc.trans.Transformer]]:
""" Load BACE dataset, classification labels
BACE dataset with classification labels ("class").
Parameters
----------
featurizer: Featurizer or str
the featurizer to use for processing the data. Alternatively you can pass
one of the names from dc.molnet.featurizers as a shortcut.
splitter: Splitter or str
the splitter to use for splitting the data into training, validation, and
test sets. Alternatively you can pass one of the names from
dc.molnet.splitters as a shortcut. If this is None, all the data
will be included in a single dataset.
transformers: list of TransformerGenerators or strings
the Transformers to apply to the data. Each one is specified by a
TransformerGenerator or, as a shortcut, one of the names from
dc.molnet.transformers.
reload: bool
if True, the first call for a particular featurizer and splitter will cache
the datasets to disk, and subsequent calls will reload the cached datasets.
data_dir: str
a directory to save the raw data in
save_dir: str
a directory to save the dataset in
"""
loader = _BaceLoader(featurizer, splitter, transformers,
BACE_CLASSIFICATION_TASKS, data_dir, save_dir,
**kwargs)
return loader.load_dataset('bace_c', reload)
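# Example usage (a minimal sketch; the first call downloads bace.csv to
# data_dir and, with reload=True, caches the featurized splits on disk):
#
#   import deepchem as dc
#   tasks, (train, valid, test), transformers = dc.molnet.load_bace_classification(
#       featurizer='ECFP', splitter='scaffold')
#   print(tasks, train.X.shape)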
<file_sep># Model Saving/Restoration
In this example, we'll work through an example of using the
DeepChem API to save and restore a model from disk. We're going
to be training a ChemCeption model for this purpose on the
Delaney dataset.
Here are the files we'll use
- `chemception_model.py`: The file with the model to train
- `chemception_restore.py`: The file that restores the trained model
To train the model, first run
```
python chemception_model.py
```
This will train a model and store it to a subdirectory `./model`. Let's now
invoke this model to make a prediction with it.
```
python chemception_restore.py
```
The scripts are pretty simple so go ahead and peek inside to see how they work.
<file_sep>import numpy as np
from deepchem.feat.molecule_featurizers import OneHotFeaturizer
from deepchem.feat.base_classes import Featurizer
from typing import List, Optional
CHARSET = [
'A', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'K', 'L', 'M', 'N', 'P', 'Q', 'R',
'S', 'T', 'V', 'W', 'Y', 'X', 'Z', 'B', 'U', 'O'
]
class PFMFeaturizer(Featurizer):
"""
    Encodes a list of position frequency matrices for a given list of multiple sequence alignments.
The default character set is 25 amino acids. If you want to use a different character set, such as nucleotides, simply pass in
a list of character strings in the featurizer constructor.
The max_length parameter is the maximum length of the sequences to be featurized. If you want to featurize longer sequences, modify the
max_length parameter in the featurizer constructor.
The final row in the position frequency matrix is the unknown set, if there are any characters which are not included in the charset.
Examples
--------
>>> from deepchem.feat.sequence_featurizers import PFMFeaturizer
>>> from deepchem.data import NumpyDataset
>>> msa = NumpyDataset(X=[['ABC','BCD'],['AAA','AAB']], ids=[['seq01','seq02'],['seq11','seq12']])
>>> seqs = msa.X
>>> featurizer = PFMFeaturizer()
>>> pfm = featurizer.featurize(seqs)
>>> pfm.shape
(2, 26, 100)
"""
def __init__(self,
charset: List[str] = CHARSET,
max_length: Optional[int] = 100):
"""Initialize featurizer.
Parameters
----------
charset: List[str] (default CHARSET)
A list of strings, where each string is length 1 and unique.
        max_length: int, optional (default 100)
Maximum length of sequences to be featurized.
"""
if len(charset) != len(set(charset)):
raise ValueError("All values in charset must be unique.")
self.charset = charset
self.max_length = max_length
self.ohe = OneHotFeaturizer(charset=CHARSET, max_length=max_length)
def _featurize(self, datapoint):
"""Featurize a multisequence alignment into a position frequency matrix
Use dc.utils.sequence_utils.hhblits or dc.utils.sequence_utils.hhsearch to create a multiple sequence alignment from a fasta file.
Parameters
----------
datapoint: np.ndarray
MSA to featurize. A list of sequences which have been aligned and padded to the same length.
Returns
-------
pfm: np.ndarray
Position frequency matrix for the set of sequences with the rows corresponding to the unique characters and the columns corresponding to the position in the alignment.
"""
seq_one_hot = self.ohe.featurize(datapoint)
seq_one_hot_array = np.transpose(
np.array(seq_one_hot), (0, 2, 1)
) # swap rows and columns to make rows the characters, columns the positions
pfm = np.sum(seq_one_hot_array, axis=0)
return pfm
def PFM_to_PPM(pfm):
"""
Calculate position probability matrix from a position frequency matrix
"""
ppm = pfm.copy()
for col in range(ppm.shape[1]):
total_count = np.sum(ppm[:, col])
if total_count > 0:
# Calculate frequency
ppm[:, col] = ppm[:, col] / total_count
return ppm
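# Example (a minimal sketch): turning a position frequency matrix produced by
# PFMFeaturizer into a position probability matrix whose populated columns sum to 1.
#
#   featurizer = PFMFeaturizer()
#   pfm = featurizer.featurize([['ABC', 'BCD']])[0]  # shape (26, 100)
#   ppm = PFM_to_PPM(pfm)
#   assert abs(ppm[:, 0].sum() - 1.0) < 1e-6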
<file_sep>"""
Script that trains random forest models on the sider and tox21 datasets,
predicts on sweetlead, and builds a co-occurrence matrix of the predicted labels.
@Author <NAME>
"""
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
import os
import sys
import numpy as np
import pandas as pd
import deepchem as dc
from sklearn.ensemble import RandomForestClassifier
from deepchem.models.multitask import SingletaskToMultitask
from deepchem import metrics
from deepchem.metrics import Metric
from deepchem.models.sklearn_models import SklearnModel
tox_tasks, (tox_train, tox_valid,
tox_test), tox_transformers = dc.molnet.load_tox21()
classification_metric = Metric(
metrics.roc_auc_score, np.mean, mode="classification")
def model_builder(model_dir):
sklearn_model = RandomForestClassifier(
class_weight="balanced", n_estimators=500, n_jobs=-1)
return dc.models.SklearnModel(sklearn_model, model_dir)
print(tox_train.get_task_names())
print(tox_tasks)
tox_model = SingletaskToMultitask(tox_tasks, model_builder)
tox_model.fit(tox_train)
# Load sider models now
sider_tasks, (
sider_train, sider_valid,
sider_test), sider_transformers = dc.molnet.load_sider(split="random")
sider_model = SingletaskToMultitask(sider_tasks, model_builder)
sider_model.fit(sider_train)
# Load sweetlead dataset now. Pass in dataset object and appropriate
# transformers to predict functions
sweet_tasks, (sweet_dataset, _, _), sweet_transformers = dc.molnet.load_sweet()
sider_predictions = sider_model.predict(sweet_dataset, sweet_transformers)
tox_predictions = tox_model.predict(sweet_dataset, sweet_transformers)
sider_dimensions = sider_predictions.shape[1]
tox_dimensions = tox_predictions.shape[1]
confusion_matrix = np.zeros(shape=(tox_dimensions, sider_dimensions))
for i in range(tox_predictions.shape[0]):
nonzero_tox = np.nonzero(tox_predictions[i, :])
nonzero_sider = np.nonzero(sider_predictions[i, :])
for j in nonzero_tox[0]:
for k in nonzero_sider[0]:
confusion_matrix[j, k] += 1
df = pd.DataFrame(confusion_matrix)
df.to_csv("./tox_sider_matrix.csv")
<file_sep>"""
Tests to make sure deepchem models can fit models on easy datasets.
"""
import sklearn
import sklearn.datasets
import numpy as np
import deepchem as dc
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import LogisticRegression
def test_sklearn_regression():
"""Test that sklearn models can learn on simple regression datasets."""
np.random.seed(123)
dataset = sklearn.datasets.load_diabetes()
X, y = dataset.data, dataset.target
y = np.expand_dims(y, 1)
frac_train = .7
n_samples = len(X)
n_train = int(frac_train * n_samples)
X_train, y_train = X[:n_train], y[:n_train]
X_test, y_test = X[n_train:], y[n_train:]
train_dataset = dc.data.NumpyDataset(X_train, y_train)
test_dataset = dc.data.NumpyDataset(X_test, y_test)
regression_metric = dc.metrics.Metric(dc.metrics.r2_score)
sklearn_model = LinearRegression()
model = dc.models.SklearnModel(sklearn_model)
# Fit trained model
model.fit(train_dataset)
model.save()
# Eval model on test
scores = model.evaluate(test_dataset, [regression_metric])
assert scores[regression_metric.name] > .5
def test_sklearn_transformed_regression():
"""Test that sklearn models can learn on simple transformed regression datasets."""
np.random.seed(123)
dataset = sklearn.datasets.load_diabetes()
X, y = dataset.data, dataset.target
y = np.expand_dims(y, 1)
frac_train = .7
n_samples = len(X)
n_train = int(frac_train * n_samples)
X_train, y_train = X[:n_train], y[:n_train]
X_test, y_test = X[n_train:], y[n_train:]
train_dataset = dc.data.NumpyDataset(X_train, y_train)
test_dataset = dc.data.NumpyDataset(X_test, y_test)
# Eval model on train
transformers = [
dc.trans.NormalizationTransformer(transform_X=True,
dataset=train_dataset),
dc.trans.ClippingTransformer(transform_X=True, dataset=train_dataset),
dc.trans.NormalizationTransformer(transform_y=True,
dataset=train_dataset)
]
    # Apply each transformer and keep the transformed datasets (reassigning a
    # loop variable would silently discard the transformed copies).
    for transformer in transformers:
        train_dataset = transformer.transform(train_dataset)
        test_dataset = transformer.transform(test_dataset)
regression_metric = dc.metrics.Metric(dc.metrics.r2_score)
sklearn_model = LinearRegression()
model = dc.models.SklearnModel(sklearn_model)
# Fit trained model
model.fit(train_dataset)
model.save()
train_scores = model.evaluate(train_dataset, [regression_metric],
transformers)
assert train_scores[regression_metric.name] > .5
# Eval model on test
test_scores = model.evaluate(test_dataset, [regression_metric],
transformers)
assert test_scores[regression_metric.name] > .5
def test_sklearn_multitask_regression():
"""Test that sklearn models can learn on simple multitask regression."""
np.random.seed(123)
n_tasks = 4
tasks = range(n_tasks)
dataset = sklearn.datasets.load_diabetes()
X, y = dataset.data, dataset.target
y = np.reshape(y, (len(y), 1))
y = np.hstack([y] * n_tasks)
frac_train = .7
n_samples = len(X)
n_train = int(frac_train * n_samples)
X_train, y_train = X[:n_train], y[:n_train]
X_test, y_test = X[n_train:], y[n_train:]
train_dataset = dc.data.DiskDataset.from_numpy(X_train, y_train)
test_dataset = dc.data.DiskDataset.from_numpy(X_test, y_test)
regression_metric = dc.metrics.Metric(dc.metrics.r2_score)
def model_builder(model_dir):
sklearn_model = LinearRegression()
return dc.models.SklearnModel(sklearn_model, model_dir)
model = dc.models.SingletaskToMultitask(tasks, model_builder)
# Fit trained model
model.fit(train_dataset)
model.save()
# Eval model on test
scores = model.evaluate(test_dataset, [regression_metric])
score = scores[regression_metric.name]
assert score > .5
def test_sklearn_classification():
"""Test that sklearn models can learn on simple classification datasets."""
np.random.seed(123)
dataset = sklearn.datasets.load_digits(n_class=2)
X, y = dataset.data, dataset.target
frac_train = .7
n_samples = len(X)
n_train = int(frac_train * n_samples)
X_train, y_train = X[:n_train], y[:n_train]
X_test, y_test = X[n_train:], y[n_train:]
train_dataset = dc.data.NumpyDataset(X_train, y_train)
test_dataset = dc.data.NumpyDataset(X_test, y_test)
classification_metric = dc.metrics.Metric(dc.metrics.roc_auc_score)
sklearn_model = LogisticRegression()
model = dc.models.SklearnModel(sklearn_model)
# Fit trained model
model.fit(train_dataset)
model.save()
# Eval model on test
scores = model.evaluate(test_dataset, [classification_metric])
assert scores[classification_metric.name] > .5
def test_sklearn_multitask_classification():
"""Test that sklearn models can learn on simple multitask classification."""
np.random.seed(123)
n_tasks = 4
tasks = range(n_tasks)
dataset = sklearn.datasets.load_digits(n_class=2)
X, y = dataset.data, dataset.target
y = np.reshape(y, (len(y), 1))
y = np.hstack([y] * n_tasks)
frac_train = .7
n_samples = len(X)
n_train = int(frac_train * n_samples)
X_train, y_train = X[:n_train], y[:n_train]
X_test, y_test = X[n_train:], y[n_train:]
train_dataset = dc.data.DiskDataset.from_numpy(X_train, y_train)
test_dataset = dc.data.DiskDataset.from_numpy(X_test, y_test)
classification_metric = dc.metrics.Metric(dc.metrics.roc_auc_score)
def model_builder(model_dir):
sklearn_model = LogisticRegression()
return dc.models.SklearnModel(sklearn_model, model_dir)
model = dc.models.SingletaskToMultitask(tasks, model_builder)
# Fit trained model
model.fit(train_dataset)
model.save()
# Eval model on test
scores = model.evaluate(test_dataset, [classification_metric])
assert scores[classification_metric.name] > .5
<file_sep>"""
Script that trains multitask models on hiv dataset.
"""
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
import numpy as np
import deepchem as dc
from deepchem.molnet import load_hiv
# Only for debug!
np.random.seed(123)
# Load hiv dataset
n_features = 512
hiv_tasks, hiv_datasets, transformers = load_hiv()
train_dataset, valid_dataset, test_dataset = hiv_datasets
# Fit models
metric = dc.metrics.Metric(dc.metrics.roc_auc_score, np.mean)
transformer = dc.trans.IRVTransformer(10, len(hiv_tasks), train_dataset)
train_dataset = transformer.transform(train_dataset)
valid_dataset = transformer.transform(valid_dataset)
model = dc.models.TensorflowMultitaskIRVClassifier(
len(hiv_tasks), K=10, batch_size=50, learning_rate=0.001)
# Fit trained model
model.fit(train_dataset)
print("Evaluating model")
train_scores = model.evaluate(train_dataset, [metric], transformers)
valid_scores = model.evaluate(valid_dataset, [metric], transformers)
print("Train scores")
print(train_scores)
print("Validation scores")
print(valid_scores)
<file_sep>"""
Test for MXMNet Featurizer class.
"""
import deepchem as dc
import numpy as np
import unittest
edge_index = {
"C1=CC=NC=C1":
np.asarray([[0, 1, 1, 5, 5, 2, 2, 4, 4, 3, 3, 0],
[1, 0, 5, 1, 2, 5, 4, 2, 3, 4, 0, 3]]),
"CC(=O)C":
np.asarray([[0, 3, 3, 2, 3, 1], [3, 0, 2, 3, 1, 3]]),
"C":
np.empty((2, 0), dtype=int)
}
node_features = {
"C1=CC=NC=C1": np.asarray([[1.], [1.], [2.], [1.], [1.], [1.]]),
"CC(=O)C": np.asarray([[1.], [1.], [3.], [1.]]),
"C": np.asarray([[1.]])
}
node_pos = np.asarray([[-1.2700e-02, 1.0858e+00, 8.0000e-03],
[2.2000e-03, -6.0000e-03, 2.0000e-03],
[1.0117e+00, 1.4638e+00, 3.0000e-04],
[-5.4080e-01, 1.4475e+00, -8.7660e-01],
[-5.2380e-01, 1.4379e+00, 9.0640e-01]])
class TestMXMNetFeaturizer(unittest.TestCase):
"""
Test MXMNetFeaturizer.
"""
def setUp(self):
"""
Set up tests.
"""
self.smiles = ["C1=CC=NC=C1", "CC(=O)C", "C", "CP"]
self.edge_index = list(edge_index.values())
self.node_features = list(node_features.values())
def test_featurizer_ring(self):
"""
Test for featurization of "C1=CC=NC=C1" using `MXMNetFeaturizer` class.
"""
featurizer = dc.feat.molecule_featurizers.mxmnet_featurizer.MXMNetFeaturizer(
)
graph_feat = featurizer.featurize(self.smiles)
assert len(graph_feat) == 4
assert graph_feat[0].num_nodes == 6
assert graph_feat[0].num_node_features == 1
assert graph_feat[0].node_features.shape == (6, 1)
assert graph_feat[0].num_edges == 12
assert (graph_feat[0].node_features == self.node_features[0]).all()
assert (graph_feat[0].edge_index == self.edge_index[0]).all()
def test_featurizer_general_case(self):
"""
Test for featurization of "CC(=O)C" using `MXMNetFeaturizer` class.
"""
featurizer = dc.feat.molecule_featurizers.mxmnet_featurizer.MXMNetFeaturizer(
)
graph_feat = featurizer.featurize(self.smiles)
assert len(graph_feat) == 4
assert graph_feat[1].num_nodes == 4
assert graph_feat[1].num_node_features == 1
assert graph_feat[1].node_features.shape == (4, 1)
assert graph_feat[1].num_edges == 6
assert (graph_feat[1].node_features == self.node_features[1]).all()
assert (graph_feat[1].edge_index == self.edge_index[1]).all()
def test_featurizer_single_atom(self):
"""
Test for featurization of "C" using `MXMNetFeaturizer` class.
"""
featurizer = dc.feat.molecule_featurizers.mxmnet_featurizer.MXMNetFeaturizer(
)
graph_feat = featurizer.featurize(self.smiles)
assert len(graph_feat) == 4
assert graph_feat[2].num_nodes == 1
assert graph_feat[2].num_node_features == 1
assert graph_feat[2].node_features.shape == (1, 1)
assert graph_feat[2].num_edges == 0
assert (graph_feat[2].node_features == self.node_features[2]).all()
assert (graph_feat[2].edge_index == self.edge_index[2]).all()
def test_featurizer_other_atom(self):
"""
Test for featurization of "CP" using `MXMNetFeaturizer` class.
        Since the SMILES string contains P, which is not supported by the featurizer, featurization of that molecule fails and the featurizer returns an empty numpy array.
"""
featurizer = dc.feat.molecule_featurizers.mxmnet_featurizer.MXMNetFeaturizer(
)
graph_feat = featurizer.featurize(self.smiles)
assert len(graph_feat) == 4
assert graph_feat[3].shape == (0,)
def test_node_pos_features(self):
"""
Test for featurization of "C" using `MXMNetFeaturizer` class.
It checks whether node_pos_features are handled properly.
"""
smile = ['C']
pos_x1 = [np.array([-0.0127, 0.0022, 1.0117, -0.5408, -0.5238])]
pos_y1 = [np.array([1.0858, -0.0060, 1.4638, 1.4475, 1.4379])]
pos_z1 = [np.array([0.0080, 0.0020, 0.0003, -0.8766, 0.9064])]
featurizer = dc.feat.molecule_featurizers.mxmnet_featurizer.MXMNetFeaturizer(
is_adding_hs=True)
graph_feat = featurizer.featurize(smile,
pos_x=pos_x1,
pos_y=pos_y1,
pos_z=pos_z1)
assert isinstance(graph_feat[0].node_pos_features, np.ndarray)
assert np.allclose(graph_feat[0].node_pos_features, node_pos, atol=1e-3)
<file_sep>import tempfile
from rdkit import Chem
import deepchem as dc
def testGroverAtomVocabularyBuilder():
from deepchem.feat.vocabulary_builders.grover_vocab import GroverAtomVocabularyBuilder
file = tempfile.NamedTemporaryFile()
dataset = dc.data.NumpyDataset(X=[['CC(=O)C'], ['CCC']])
vocab = GroverAtomVocabularyBuilder()
vocab.build(dataset)
assert vocab.stoi == {
'<pad>': 0,
'<other>': 1,
'C_C-SINGLE1': 2,
'C_C-SINGLE2': 3,
'C_C-SINGLE2_O-DOUBLE1': 4,
'O_C-DOUBLE1': 5
}
assert vocab.itos == [
'<pad>', '<other>', 'C_C-SINGLE1', 'C_C-SINGLE2',
'C_C-SINGLE2_O-DOUBLE1', 'O_C-DOUBLE1'
]
vocab.save(file.name)
loaded_vocab = GroverAtomVocabularyBuilder.load(file.name)
mol = Chem.MolFromSmiles('CC(=O)C')
atom = mol.GetAtomWithIdx(0)
assert loaded_vocab.atom_to_vocab(mol, atom) == 'C_C-SINGLE1'
assert loaded_vocab.encode(mol, mol.GetAtomWithIdx(0)) == 2
# test with max size
vocab = GroverAtomVocabularyBuilder(max_size=3)
vocab.build(dataset)
assert vocab.size == 3
assert vocab.size == len(vocab.itos)
def testGroverBondVocabularyBuilder():
from deepchem.feat.vocabulary_builders.grover_vocab import GroverBondVocabularyBuilder
file = tempfile.NamedTemporaryFile()
dataset = dc.data.NumpyDataset(X=[['CC(=O)C'], ['CCC']])
vocab = GroverBondVocabularyBuilder()
vocab.build(dataset)
assert vocab.stoi == {
'<pad>':
0,
'<other>':
1,
'(SINGLE-STEREONONE-NONE)_C-(DOUBLE-STEREONONE-NONE)1_C-(SINGLE-STEREONONE-NONE)1':
2,
'(SINGLE-STEREONONE-NONE)_C-(SINGLE-STEREONONE-NONE)1':
3,
'(DOUBLE-STEREONONE-NONE)_C-(SINGLE-STEREONONE-NONE)2':
4,
}
assert vocab.itos == [
'<pad>', '<other>',
'(SINGLE-STEREONONE-NONE)_C-(DOUBLE-STEREONONE-NONE)1_C-(SINGLE-STEREONONE-NONE)1',
'(SINGLE-STEREONONE-NONE)_C-(SINGLE-STEREONONE-NONE)1',
'(DOUBLE-STEREONONE-NONE)_C-(SINGLE-STEREONONE-NONE)2'
]
vocab.save(file.name)
loaded_vocab = GroverBondVocabularyBuilder.load(file.name)
mol = Chem.MolFromSmiles('CC(=O)C')
bond = mol.GetBondWithIdx(0)
assert loaded_vocab.bond_to_vocab(
mol, bond
) == '(SINGLE-STEREONONE-NONE)_C-(DOUBLE-STEREONONE-NONE)1_C-(SINGLE-STEREONONE-NONE)1'
assert loaded_vocab.encode(mol, bond) == 2
# test with max size
vocab = GroverBondVocabularyBuilder(max_size=3)
vocab.build(dataset)
assert vocab.size == 3
assert vocab.size == len(vocab.itos)
def testGroverAtomVocabTokenizer():
from deepchem.feat.vocabulary_builders.grover_vocab import GroverAtomVocabularyBuilder, GroverAtomVocabTokenizer
file = tempfile.NamedTemporaryFile()
dataset = dc.data.NumpyDataset(X=[['CC(=O)C'], ['CCC']])
vocab = GroverAtomVocabularyBuilder()
vocab.build(dataset)
vocab.save(file.name) # build and save the vocabulary
# load the vocabulary by passing filename
atom_tokenizer = GroverAtomVocabTokenizer(file.name)
mol = Chem.MolFromSmiles('CC(=O)C')
# test tokenization of a single point
    assert atom_tokenizer.featurize([(mol, mol.GetAtomWithIdx(0))])[0] == 2
def testGroverBondVocabTokenizer():
from deepchem.feat.vocabulary_builders.grover_vocab import GroverBondVocabularyBuilder, GroverBondVocabTokenizer
file = tempfile.NamedTemporaryFile()
dataset = dc.data.NumpyDataset(X=[['CC(=O)C'], ['CCC']])
vocab = GroverBondVocabularyBuilder()
vocab.build(dataset)
vocab.save(file.name) # build and save the vocabulary
# load the vocabulary by passing the filename
bond_tokenizer = GroverBondVocabTokenizer(file.name)
mol = Chem.MolFromSmiles('CC(=O)C')
# test tokenization of a single point
    assert bond_tokenizer.featurize([(mol, mol.GetBondWithIdx(0))])[0] == 2
<file_sep>"""
Test Binding Pocket Features.
"""
import os
import numpy as np
import unittest
import deepchem as dc
class TestBindingPocketFeatures(unittest.TestCase):
"""
Test AtomicCoordinates.
"""
def test_pocket_features(self):
"""
Simple test that pocket_features return right shapes.
"""
current_dir = os.path.dirname(os.path.realpath(__file__))
protein_file = os.path.join(current_dir,
"../../dock/tests/1jld_protein.pdb")
finder = dc.dock.ConvexHullPocketFinder()
pocket_featurizer = dc.feat.BindingPocketFeaturizer()
pockets = finder.find_pockets(protein_file)
n_pockets = len(pockets)
pocket_features = pocket_featurizer.featurize(protein_file, pockets)
assert isinstance(pocket_features, np.ndarray)
assert pocket_features.shape[0] == n_pockets
<file_sep># QM7 Examples
QM7 is a subset of GDB-13 (a database of nearly 1 billion
stable and synthetically accessible organic molecules)
containing up to 7 heavy atoms C, N, O, and S. The 3D
Cartesian coordinates of the most stable conformations and
their atomization energies were determined using ab-initio
density functional theory (PBE0/tier2 basis set). This dataset
also provided Coulomb matrices as calculated in [Rupp et al.
PRL, 2012]:
- C_ii = 0.5 * Z^2.4
- C_ij = Z_i * Z_j/abs(R_i − R_j)
- Z_i - nuclear charge of atom i
- R_i - cartesian coordinates of atom i
The data file (in .mat format; we recommend that Python users load it with `scipy.io.loadmat`) contains five arrays:
- "X" - (7165 x 23 x 23), Coulomb matrices
- "T" - (7165), atomization energies (unit: kcal/mol)
- "P" - (5 x 1433), cross-validation splits as used in [Montavon et al. NIPS, 2012]
- "Z" - (7165 x 23), atomic charges
- "R" - (7165 x 23 x 3), cartesian coordinate (unit: Bohr) of each atom in the molecules
Reference:
Rupp, Matthias, et al. "Fast and accurate modeling of molecular atomization energies with machine learning." Physical review letters 108.5 (2012): 058301.
Montavon, Grégoire, et al. "Learning invariant representations of molecules for atomization energy prediction." Advances in Neural Information Processing Systems. 2012.
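For Python users, a minimal loading sketch with `scipy.io.loadmat` (this assumes
the `qm7.mat` file has already been downloaded; as stored in the .mat file, some
arrays may carry an extra leading singleton dimension):
```
import scipy.io

data = scipy.io.loadmat("qm7.mat")  # path is illustrative
X = data["X"]  # Coulomb matrices, (7165, 23, 23)
T = data["T"]  # atomization energies (kcal/mol)
P = data["P"]  # cross-validation splits
Z = data["Z"]  # atomic charges
R = data["R"]  # Cartesian coordinates (Bohr)
print(X.shape, T.shape)
```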
# QM7B Examples
QM7b is an extension for the QM7 dataset with additional
properties predicted at different levels (ZINDO, SCS, PBE0, GW).
In total 14 tasks are included for 7211 molecules with up to 7
heavy atoms.
The dataset, in .mat format (for Python users, we recommend `scipy.io.loadmat`), includes two arrays:
- "X" - (7211 x 23 x 23), Coulomb matrices
- "T" - (7211 x 14), properties
Atomization energies E (PBE0, unit: kcal/mol)
Excitation of maximal optimal absorption E_max (ZINDO, unit: eV)
Absorption Intensity at maximal absorption I_max (ZINDO)
Highest occupied molecular orbital HOMO (ZINDO, unit: eV)
Lowest unoccupied molecular orbital LUMO (ZINDO, unit: eV)
First excitation energy E_1st (ZINDO, unit: eV)
Ionization potential IP (ZINDO, unit: eV)
Electron affinity EA (ZINDO, unit: eV)
Highest occupied molecular orbital HOMO (PBE0, unit: eV)
Lowest unoccupied molecular orbital LUMO (PBE0, unit: eV)
Highest occupied molecular orbital HOMO (GW, unit: eV)
Lowest unoccupied molecular orbital LUMO (GW, unit: eV)
Polarizabilities α (PBE0, unit: Å^3)
Polarizabilities α (SCS, unit: Å^3)
Reference:
- Blum, <NAME>., and <NAME>. "970 million druglike small molecules for virtual screening in the chemical universe database GDB-13." Journal of the American Chemical Society 131.25 (2009): 8732-8733.
- Montavon, Grégoire, et al. "Machine learning of molecular electronic properties in chemical compound space." New Journal of Physics 15.9 (2013): 095003.
<file_sep>import numpy as np
import tempfile
import deepchem as dc
def test_binary_1d():
"""Test balancing transformer on single-task dataset without explicit task dimension."""
n_samples = 6
n_features = 3
np.random.seed(123)
X = np.random.rand(n_samples, n_features)
y = np.array([1, 1, 0, 0, 0, 0])
w = np.ones((n_samples,))
dataset = dc.data.NumpyDataset(X, y, w)
duplicator = dc.trans.DuplicateBalancingTransformer(dataset=dataset)
dataset = duplicator.transform(dataset)
# Check that we have length 8 now with duplication
assert len(dataset) == 8
X_t, y_t, w_t, ids_t = (dataset.X, dataset.y, dataset.w, dataset.ids)
# Check shapes
assert X_t.shape == (8, n_features)
assert y_t.shape == (8,)
assert w_t.shape == (8,)
assert ids_t.shape == (8,)
# Check that we have 4 positives and 4 negatives
assert np.sum(y_t == 0) == 4
assert np.sum(y_t == 1) == 4
# Check that sum of 0s equals sum of 1s in transformed for each task
assert np.isclose(np.sum(w_t[y_t == 0]), np.sum(w_t[y_t == 1]))
def test_binary_weighted_1d():
"""Test balancing transformer on a weighted single-task dataset without explicit task dimension."""
n_samples = 6
n_features = 3
np.random.seed(123)
X = np.random.rand(n_samples, n_features)
# Note that nothing should change in this dataset since weights balance!
y = np.array([1, 1, 0, 0, 0, 0])
w = np.array([2, 2, 1, 1, 1, 1])
dataset = dc.data.NumpyDataset(X, y, w)
duplicator = dc.trans.DuplicateBalancingTransformer(dataset=dataset)
dataset = duplicator.transform(dataset)
# Check that still we have length 6
assert len(dataset) == 6
X_t, y_t, w_t, ids_t = (dataset.X, dataset.y, dataset.w, dataset.ids)
# Check shapes
assert X_t.shape == (6, n_features)
assert y_t.shape == (6,)
assert w_t.shape == (6,)
assert ids_t.shape == (6,)
# Check that we have 2 positives and 4 negatives
assert np.sum(y_t == 0) == 4
assert np.sum(y_t == 1) == 2
# Check that sum of 0s equals sum of 1s in transformed for each task
assert np.isclose(np.sum(w_t[y_t == 0]), np.sum(w_t[y_t == 1]))
def test_binary_singletask():
"""Test duplicate balancing transformer on single-task dataset."""
n_samples = 6
n_features = 3
n_tasks = 1
np.random.seed(123)
X = np.random.rand(n_samples, n_features)
y = np.reshape(np.array([1, 1, 0, 0, 0, 0]), (n_samples, n_tasks))
w = np.ones((n_samples, n_tasks))
dataset = dc.data.NumpyDataset(X, y, w)
duplicator = dc.trans.DuplicateBalancingTransformer(dataset=dataset)
dataset = duplicator.transform(dataset)
X_t, y_t, w_t, ids_t = (dataset.X, dataset.y, dataset.w, dataset.ids)
# Check that we have length 8 now with duplication
assert len(dataset) == 8
X_t, y_t, w_t, ids_t = (dataset.X, dataset.y, dataset.w, dataset.ids)
# Check shapes
assert X_t.shape == (8, n_features)
assert y_t.shape == (8,)
assert w_t.shape == (8,)
assert ids_t.shape == (8,)
# Check that we have 4 positives and 4 negatives
assert np.sum(y_t == 0) == 4
assert np.sum(y_t == 1) == 4
# Check that sum of 0s equals sum of 1s in transformed for each task
assert np.isclose(np.sum(w_t[y_t == 0]), np.sum(w_t[y_t == 1]))
def test_multiclass_singletask():
"""Test balancing transformer on single-task dataset."""
n_samples = 10
n_features = 3
X = np.random.rand(n_samples, n_features)
# 6-1 imbalance in favor of class 0
y = np.array([0, 0, 0, 0, 0, 0, 1, 2, 3, 4])
w = np.ones((n_samples,))
dataset = dc.data.NumpyDataset(X, y, w)
duplicator = dc.trans.DuplicateBalancingTransformer(dataset=dataset)
dataset = duplicator.transform(dataset)
X_t, y_t, w_t, ids_t = (dataset.X, dataset.y, dataset.w, dataset.ids)
# Check that we have length 30 now with duplication
assert len(dataset) == 30
X_t, y_t, w_t, ids_t = (dataset.X, dataset.y, dataset.w, dataset.ids)
# Check shapes
assert X_t.shape == (30, n_features)
assert y_t.shape == (30,)
assert w_t.shape == (30,)
assert ids_t.shape == (30,)
# Check that we have 6 of each class
assert np.sum(y_t == 0) == 6
assert np.sum(y_t == 1) == 6
assert np.sum(y_t == 2) == 6
assert np.sum(y_t == 3) == 6
assert np.sum(y_t == 4) == 6
# Check that sum of all class weights is equal by comparing to 0 weight
assert np.isclose(np.sum(w_t[y_t == 0]), np.sum(w_t[y_t == 1]))
assert np.isclose(np.sum(w_t[y_t == 0]), np.sum(w_t[y_t == 2]))
assert np.isclose(np.sum(w_t[y_t == 0]), np.sum(w_t[y_t == 3]))
assert np.isclose(np.sum(w_t[y_t == 0]), np.sum(w_t[y_t == 4]))
def test_transform_to_directory():
"""Test that output can be written to a directory."""
n_samples = 10
n_features = 3
np.random.seed(123)
X = np.random.rand(n_samples, n_features)
# Note class imbalance. This will round to 2x duplication for 1
y = np.array([1, 1, 1, 0, 0, 0, 0, 0, 0, 0])
w = np.ones((n_samples,))
dataset = dc.data.NumpyDataset(X, y, w)
duplicator = dc.trans.DuplicateBalancingTransformer(dataset=dataset)
with tempfile.TemporaryDirectory() as tmpdirname:
dataset = duplicator.transform(dataset, out_dir=tmpdirname)
balanced_dataset = dc.data.DiskDataset(tmpdirname)
X_t, y_t, w_t, ids_t = (balanced_dataset.X, balanced_dataset.y,
balanced_dataset.w, balanced_dataset.ids)
# Check that we have length 13 now with duplication
assert len(balanced_dataset) == 13
# Check shapes
assert X_t.shape == (13, n_features)
assert y_t.shape == (13,)
assert w_t.shape == (13,)
assert ids_t.shape == (13,)
# Check that we have 6 positives and 7 negatives
assert np.sum(y_t == 0) == 7
assert np.sum(y_t == 1) == 6
<file_sep>import unittest
from deepchem.feat import MACCSKeysFingerprint
class TestMACCSKeysFingerprint(unittest.TestCase):
"""
    Test MACCSKeysFingerprint.
"""
def setUp(self):
"""
Set up tests.
"""
from rdkit import Chem
smiles = 'CC(=O)OC1=CC=CC=C1C(=O)O'
self.mol = Chem.MolFromSmiles(smiles)
def test_maccs_key_fingerprint(self):
"""
Test simple fingerprint.
"""
featurizer = MACCSKeysFingerprint()
feature_sum = featurizer([self.mol])
assert feature_sum.shape == (1, 167)
<file_sep># flake8: noqa
from deepchem.models.gbdt_models.gbdt_model import GBDTModel<file_sep>"""
Density Functional Theory Utilities
Derived from: https://github.com/mfkasim1/xcnn/blob/f2cb9777da2961ac553f256ecdcca3e314a538ca/xcdnn2/kscalc.py """
try:
import torch
from dqc.utils.datastruct import SpinParam
from dqc.qccalc.base_qccalc import BaseQCCalc
except ModuleNotFoundError:
pass
import hashlib
import xitorch as xt
from typing import List
from abc import abstractmethod, abstractproperty
class KSCalc(object):
"""
Interface to DQC's KS calculation.
Parameters
----------
qc: BaseQCCalc
object often acts as a wrapper around an engine class (from dqc.qccalc) that contains information about the self-consistent iterations.
References
----------
Kasim, <NAME>., and <NAME>. "Learning the exchange-correlation functional from nature with fully differentiable density functional theory." Physical Review Letters 127.12 (2021): 126403.
https://github.com/diffqc/dqc/blob/master/dqc/qccalc/ks.py
"""
def __init__(self, qc: "BaseQCCalc"):
self.qc = qc
def energy(self) -> torch.Tensor:
"""
Returns
-------
The total energy of the Kohn-Sham calculation for a particular system.
"""
return self.qc.energy()
def aodmtot(self) -> torch.Tensor:
"""
Both interacting and non-interacting system's total energy can be expressed in terms of the density matrix. The ground state properties of a system can be calculated by minimizing the energy w.r.t the density matrix.
Returns
-------
The total density matrix in atomic orbital bases.
"""
dm = self.qc.aodm()
if isinstance(dm, SpinParam):
dmtot = dm.u + dm.d
else:
dmtot = dm
return dmtot
def dens(self, rgrid: torch.Tensor) -> torch.Tensor:
"""
The ground state density n(r) of a system.
Parameters
----------
rgrid: torch.Tensor
Calculate integration grid using dqc.grid.
Returns
-------
The total density profile in the given grid
Reference
---------
https://github.com/diffqc/dqc/blob/master/dqc/grid/base_grid.py
"""
dmtot = self.aodmtot()
return self.qc.get_system().get_hamiltonian().aodm2dens(dmtot, rgrid)
def force(self) -> torch.Tensor:
"""
The force on an atom is calculated as the gradient of energy with respect to the atomic position.
Returns
-------
The force for each atom.
"""
ene = self.energy()
atompos = self.qc.get_system().atompos
is_grad_enabled = torch.is_grad_enabled()
f, = torch.autograd.grad(ene,
atompos,
create_graph=is_grad_enabled,
retain_graph=True)
return f
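    # Illustrative usage (a rough sketch; the exact DQC calls below are
    # assumptions about the DQC API and may need adjusting to your version):
    #
    #   import dqc
    #   mol = dqc.Mol(moldesc="H 0 0 0; H 0 0 1.4", basis="3-21G")
    #   qc = dqc.KS(mol, xc="lda_x").run()
    #   calc = KSCalc(qc)
    #   print(calc.energy())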
def hashstr(s: str) -> str:
"""
Encodes the string into hashed format - hexadecimal digits.
Parameters
----------
s : str
"""
return str(hashlib.blake2s(str.encode(s)).hexdigest())
class BaseGrid(xt.EditableModule):
"""
Interface to DQC's BaseGrid class. BaseGrid is a class that regulates the integration points over the spatial
dimensions.
Parameters
----------
qc: BaseQCCalc
object often acts as a wrapper around an engine class (from dqc.qccalc) that contains information about the self-consistent iterations.
References
----------
Kasim, <NAME>., and <NAME>. "Learning the exchange-correlation functional from nature with fully differentiable density functional theory." Physical Review Letters 127.12 (2021): 126403.
https://github.com/diffqc/dqc/blob/0fe821fc92cb3457fb14f6dff0c223641c514ddb/dqc/grid/base_grid.py
"""
@abstractproperty
def dtype(self) -> torch.dtype:
pass
@abstractproperty
def device(self) -> torch.device:
pass
@abstractproperty
def coord_type(self) -> str:
"""
Returns the type of the coordinate returned in get_rgrid
"""
pass
@abstractmethod
def get_dvolume(self) -> torch.Tensor:
"""
Obtain the torch.tensor containing the dV elements for the integration.
Returns
-------
torch.tensor (*BG, ngrid)
The dV elements for the integration. *BG is the length of the BaseGrid.
"""
pass
@abstractmethod
def get_rgrid(self) -> torch.Tensor:
"""
Returns the grid points position in the specified coordinate in
self.coord_type.
Returns
-------
torch.tensor (*BG, ngrid, ndim)
The grid points position. *BG is the length of the BaseGrid.
"""
pass
@abstractmethod
def getparamnames(self, methodname: str, prefix: str = "") -> List[str]:
"""
Return a list with the parameter names corresponding to the given method
(methodname)
Returns
-------
List[str]
List of parameter names of methodname
"""
pass
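# Sketch (not executed on import): a typical way these pieces combine is to
# evaluate the density from KSCalc at the grid points of a concrete BaseGrid
# implementation and integrate it with the dV elements; the integral should
# roughly recover the number of electrons. How the grid object is obtained
# (e.g. from the dqc system) is assumed here and may differ between versions.
def _example_density_integration(calc: "KSCalc", grid: "BaseGrid"):  # pragma: no cover
    rgrid = grid.get_rgrid()  # (*BG, ngrid, ndim) grid point positions
    dens = calc.dens(rgrid)  # density n(r) evaluated at the grid points
    nelec = (dens * grid.get_dvolume()).sum(dim=-1)  # integral of n(r) dV
    return nelec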
<file_sep>"""
Tests for dataset creation
"""
import random
import math
import unittest
import os
import numpy as np
import pytest
import deepchem as dc
import pandas as pd
import tempfile
try:
import torch # noqa
PYTORCH_IMPORT_FAILED = False
except ImportError:
PYTORCH_IMPORT_FAILED = True
def load_solubility_data():
"""Loads solubility dataset"""
current_dir = os.path.dirname(os.path.abspath(__file__))
featurizer = dc.feat.CircularFingerprint(size=1024)
tasks = ["log-solubility"]
input_file = os.path.join(current_dir,
"../../models/tests/assets/example.csv")
loader = dc.data.CSVLoader(tasks=tasks,
feature_field="smiles",
featurizer=featurizer)
return loader.create_dataset(input_file)
def load_multitask_data():
"""Load example multitask data."""
current_dir = os.path.dirname(os.path.abspath(__file__))
featurizer = dc.feat.CircularFingerprint(size=1024)
tasks = [
"task0", "task1", "task2", "task3", "task4", "task5", "task6", "task7",
"task8", "task9", "task10", "task11", "task12", "task13", "task14",
"task15", "task16"
]
input_file = os.path.join(
current_dir, "../../models/tests/assets/multitask_example.csv")
loader = dc.data.CSVLoader(tasks=tasks,
feature_field="smiles",
featurizer=featurizer)
return loader.create_dataset(input_file)
class TestTransformer(dc.trans.Transformer):
def transform_array(self, X, y, w, ids):
return (2 * X, 1.5 * y, w, ids)
def test_transform_disk():
"""Test that the transform() method works for DiskDatasets."""
dataset = load_solubility_data()
X = dataset.X
y = dataset.y
w = dataset.w
ids = dataset.ids
# Transform it
transformer = TestTransformer(transform_X=True, transform_y=True)
for parallel in (True, False):
transformed = dataset.transform(transformer, parallel=parallel)
np.testing.assert_array_equal(X, dataset.X)
np.testing.assert_array_equal(y, dataset.y)
np.testing.assert_array_equal(w, dataset.w)
np.testing.assert_array_equal(ids, dataset.ids)
np.testing.assert_array_equal(2 * X, transformed.X)
np.testing.assert_array_equal(1.5 * y, transformed.y)
np.testing.assert_array_equal(w, transformed.w)
np.testing.assert_array_equal(ids, transformed.ids)
def test_sparsify_and_densify():
"""Test that sparsify and densify work as inverses."""
# Test on identity matrix
num_samples = 10
num_features = num_samples
X = np.eye(num_samples)
X_sparse = dc.data.sparsify_features(X)
X_reconstructed = dc.data.densify_features(X_sparse, num_features)
np.testing.assert_array_equal(X, X_reconstructed)
# Generate random sparse features dataset
np.random.seed(123)
p = .05
X = np.random.binomial(1, p, size=(num_samples, num_features))
X_sparse = dc.data.sparsify_features(X)
X_reconstructed = dc.data.densify_features(X_sparse, num_features)
np.testing.assert_array_equal(X, X_reconstructed)
# Test edge case with array of all zeros
X = np.zeros((num_samples, num_features))
X_sparse = dc.data.sparsify_features(X)
X_reconstructed = dc.data.densify_features(X_sparse, num_features)
np.testing.assert_array_equal(X, X_reconstructed)
def test_pad_features():
"""Test that pad_features pads features correctly."""
batch_size = 100
num_features = 10
# Test cases where n_samples < 2*n_samples < batch_size
n_samples = 29
X_b = np.zeros((n_samples, num_features))
X_out = dc.data.pad_features(batch_size, X_b)
assert len(X_out) == batch_size
# Test cases where n_samples < batch_size
n_samples = 79
X_b = np.zeros((n_samples, num_features))
X_out = dc.data.pad_features(batch_size, X_b)
assert len(X_out) == batch_size
# Test case where n_samples == batch_size
n_samples = 100
X_b = np.zeros((n_samples, num_features))
X_out = dc.data.pad_features(batch_size, X_b)
assert len(X_out) == batch_size
# Test case for object featurization.
n_samples = 2
X_b = np.array([{"a": 1}, {"b": 2}])
X_out = dc.data.pad_features(batch_size, X_b)
assert len(X_out) == batch_size
# Test case for more complicated object featurization
n_samples = 2
X_b = np.array([(1, {"a": 1}), (2, {"b": 2})])
X_out = dc.data.pad_features(batch_size, X_b)
assert len(X_out) == batch_size
# Test case with multidimensional data
n_samples = 50
num_atoms = 15
d = 3
X_b = np.zeros((n_samples, num_atoms, d))
X_out = dc.data.pad_features(batch_size, X_b)
assert len(X_out) == batch_size
def test_pad_batches():
"""Test that pad_batch pads batches correctly."""
batch_size = 100
num_features = 10
num_tasks = 5
# Test cases where n_samples < 2*n_samples < batch_size
n_samples = 29
X_b = np.zeros((n_samples, num_features))
y_b = np.zeros((n_samples, num_tasks))
w_b = np.zeros((n_samples, num_tasks))
ids_b = np.zeros((n_samples,))
X_out, y_out, w_out, ids_out = dc.data.pad_batch(batch_size, X_b, y_b, w_b,
ids_b)
assert len(X_out) == len(y_out) == len(w_out) == len(ids_out) == batch_size
# Test cases where n_samples < batch_size
n_samples = 79
X_b = np.zeros((n_samples, num_features))
y_b = np.zeros((n_samples, num_tasks))
w_b = np.zeros((n_samples, num_tasks))
ids_b = np.zeros((n_samples,))
X_out, y_out, w_out, ids_out = dc.data.pad_batch(batch_size, X_b, y_b, w_b,
ids_b)
assert len(X_out) == len(y_out) == len(w_out) == len(ids_out) == batch_size
# Test case where n_samples == batch_size
n_samples = 100
X_b = np.zeros((n_samples, num_features))
y_b = np.zeros((n_samples, num_tasks))
w_b = np.zeros((n_samples, num_tasks))
ids_b = np.zeros((n_samples,))
X_out, y_out, w_out, ids_out = dc.data.pad_batch(batch_size, X_b, y_b, w_b,
ids_b)
assert len(X_out) == len(y_out) == len(w_out) == len(ids_out) == batch_size
# Test case for object featurization.
n_samples = 2
X_b = np.array([{"a": 1}, {"b": 2}])
y_b = np.zeros((n_samples, num_tasks))
w_b = np.zeros((n_samples, num_tasks))
ids_b = np.zeros((n_samples,))
X_out, y_out, w_out, ids_out = dc.data.pad_batch(batch_size, X_b, y_b, w_b,
ids_b)
assert len(X_out) == len(y_out) == len(w_out) == len(ids_out) == batch_size
# Test case for more complicated object featurization
n_samples = 2
X_b = np.array([(1, {"a": 1}), (2, {"b": 2})])
y_b = np.zeros((n_samples, num_tasks))
w_b = np.zeros((n_samples, num_tasks))
ids_b = np.zeros((n_samples,))
X_out, y_out, w_out, ids_out = dc.data.pad_batch(batch_size, X_b, y_b, w_b,
ids_b)
assert len(X_out) == len(y_out) == len(w_out) == len(ids_out) == batch_size
# Test case with multidimensional data
n_samples = 50
num_atoms = 15
d = 3
X_b = np.zeros((n_samples, num_atoms, d))
y_b = np.zeros((n_samples, num_tasks))
w_b = np.zeros((n_samples, num_tasks))
ids_b = np.zeros((n_samples,))
X_out, y_out, w_out, ids_out = dc.data.pad_batch(batch_size, X_b, y_b, w_b,
ids_b)
assert len(X_out) == len(y_out) == len(w_out) == len(ids_out) == batch_size
def test_get_task_names():
"""Test that get_task_names returns correct task_names"""
solubility_dataset = load_solubility_data()
assert solubility_dataset.get_task_names() == ["log-solubility"]
multitask_dataset = load_multitask_data()
assert sorted(multitask_dataset.get_task_names()) == sorted([
"task0", "task1", "task2", "task3", "task4", "task5", "task6", "task7",
"task8", "task9", "task10", "task11", "task12", "task13", "task14",
"task15", "task16"
])
def test_get_data_shape():
"""Test that get_data_shape returns currect data shape"""
solubility_dataset = load_solubility_data()
assert solubility_dataset.get_data_shape() == (1024,)
multitask_dataset = load_multitask_data()
assert multitask_dataset.get_data_shape() == (1024,)
def test_len():
"""Test that len(dataset) works."""
solubility_dataset = load_solubility_data()
assert len(solubility_dataset) == 10
def test_reshard():
"""Test that resharding the dataset works."""
solubility_dataset = load_solubility_data()
X, y, w, ids = (solubility_dataset.X, solubility_dataset.y,
solubility_dataset.w, solubility_dataset.ids)
assert solubility_dataset.get_number_shards() == 1
solubility_dataset.reshard(shard_size=1)
assert solubility_dataset.get_shard_size() == 1
X_r, y_r, w_r, ids_r = (solubility_dataset.X, solubility_dataset.y,
solubility_dataset.w, solubility_dataset.ids)
assert solubility_dataset.get_number_shards() == 10
solubility_dataset.reshard(shard_size=10)
assert solubility_dataset.get_shard_size() == 10
X_rr, y_rr, w_rr, ids_rr = (solubility_dataset.X, solubility_dataset.y,
solubility_dataset.w, solubility_dataset.ids)
# Test first resharding worked
np.testing.assert_array_equal(X, X_r)
np.testing.assert_array_equal(y, y_r)
np.testing.assert_array_equal(w, w_r)
np.testing.assert_array_equal(ids, ids_r)
# Test second resharding worked
np.testing.assert_array_equal(X, X_rr)
np.testing.assert_array_equal(y, y_rr)
np.testing.assert_array_equal(w, w_rr)
np.testing.assert_array_equal(ids, ids_rr)
def test_complete_shuffle():
shard_sizes = [1, 2, 3, 4, 5]
all_Xs, all_ys, all_ws, all_ids = [], [], [], []
def shard_generator():
for sz in shard_sizes:
X_b = np.random.rand(sz, 1)
y_b = np.random.rand(sz, 1)
w_b = np.random.rand(sz, 1)
ids_b = np.random.rand(sz)
all_Xs.append(X_b)
all_ys.append(y_b)
all_ws.append(w_b)
all_ids.append(ids_b)
yield X_b, y_b, w_b, ids_b
dataset = dc.data.DiskDataset.create_dataset(shard_generator())
res = dataset.complete_shuffle()
# approx 1/15! chance of equality
np.testing.assert_equal(np.any(np.not_equal(dataset.X, res.X)), True)
np.testing.assert_equal(np.any(np.not_equal(dataset.y, res.y)), True)
np.testing.assert_equal(np.any(np.not_equal(dataset.w, res.w)), True)
np.testing.assert_equal(np.any(np.not_equal(dataset.ids, res.ids)), True)
np.testing.assert_array_equal(np.sort(dataset.X, axis=0),
np.sort(res.X, axis=0))
np.testing.assert_array_equal(np.sort(dataset.y, axis=0),
np.sort(res.y, axis=0))
np.testing.assert_array_equal(np.sort(dataset.w, axis=0),
np.sort(res.w, axis=0))
np.testing.assert_array_equal(np.sort(dataset.ids), np.sort(res.ids))
def test_iterbatches():
"""Test that iterating over batches of data works."""
solubility_dataset = load_solubility_data()
batch_size = 2
data_shape = solubility_dataset.get_data_shape()
tasks = solubility_dataset.get_task_names()
for (X_b, y_b, w_b, ids_b) in solubility_dataset.iterbatches(batch_size):
assert X_b.shape == (batch_size,) + data_shape
assert y_b.shape == (batch_size,) + (len(tasks),)
assert w_b.shape == (batch_size,) + (len(tasks),)
assert ids_b.shape == (batch_size,)
def test_itersamples_numpy():
"""Test that iterating over samples in a NumpyDataset works."""
num_datapoints = 100
num_features = 10
num_tasks = 10
# Generate data
X = np.random.rand(num_datapoints, num_features)
y = np.random.randint(2, size=(num_datapoints, num_tasks))
w = np.random.randint(2, size=(num_datapoints, num_tasks))
ids = np.array(["id"] * num_datapoints)
dataset = dc.data.NumpyDataset(X, y, w, ids)
for i, (sx, sy, sw, sid) in enumerate(dataset.itersamples()):
np.testing.assert_array_equal(sx, X[i])
np.testing.assert_array_equal(sy, y[i])
np.testing.assert_array_equal(sw, w[i])
np.testing.assert_array_equal(sid, ids[i])
def test_itersamples_disk():
"""Test that iterating over samples in a DiskDataset works."""
solubility_dataset = load_solubility_data()
X = solubility_dataset.X
y = solubility_dataset.y
w = solubility_dataset.w
ids = solubility_dataset.ids
for i, (sx, sy, sw, sid) in enumerate(solubility_dataset.itersamples()):
np.testing.assert_array_equal(sx, X[i])
np.testing.assert_array_equal(sy, y[i])
np.testing.assert_array_equal(sw, w[i])
np.testing.assert_array_equal(sid, ids[i])
def test_transform_numpy():
"""Test that the transform() method works for NumpyDatasets."""
num_datapoints = 100
num_features = 10
num_tasks = 10
# Generate data
X = np.random.rand(num_datapoints, num_features)
y = np.random.randint(2, size=(num_datapoints, num_tasks))
w = np.random.randint(2, size=(num_datapoints, num_tasks))
ids = np.array(["id"] * num_datapoints)
dataset = dc.data.NumpyDataset(X, y, w, ids)
# Transform it
transformer = TestTransformer(transform_X=True, transform_y=True)
transformed = dataset.transform(transformer)
np.testing.assert_array_equal(X, dataset.X)
np.testing.assert_array_equal(y, dataset.y)
np.testing.assert_array_equal(w, dataset.w)
np.testing.assert_array_equal(ids, dataset.ids)
np.testing.assert_array_equal(2 * X, transformed.X)
np.testing.assert_array_equal(1.5 * y, transformed.y)
np.testing.assert_array_equal(w, transformed.w)
np.testing.assert_array_equal(ids, transformed.ids)
def test_to_numpy():
"""Test that transformation to numpy arrays is sensible."""
solubility_dataset = load_solubility_data()
data_shape = solubility_dataset.get_data_shape()
tasks = solubility_dataset.get_task_names()
X, y, w, ids = (solubility_dataset.X, solubility_dataset.y,
solubility_dataset.w, solubility_dataset.ids)
N_samples = len(solubility_dataset)
N_tasks = len(tasks)
assert X.shape == (N_samples,) + data_shape
assert y.shape == (N_samples, N_tasks)
assert w.shape == (N_samples, N_tasks)
assert ids.shape == (N_samples,)
def test_consistent_ordering():
"""Test that ordering of labels is consistent over time."""
solubility_dataset = load_solubility_data()
ids1 = solubility_dataset.ids
ids2 = solubility_dataset.ids
assert np.array_equal(ids1, ids2)
def test_get_statistics():
"""Test statistics computation of this dataset."""
solubility_dataset = load_solubility_data()
X, y, _, _ = (solubility_dataset.X, solubility_dataset.y,
solubility_dataset.w, solubility_dataset.ids)
X_means, y_means = np.mean(X, axis=0), np.mean(y, axis=0)
X_stds, y_stds = np.std(X, axis=0), np.std(y, axis=0)
comp_X_means, comp_X_stds, comp_y_means, comp_y_stds = \
solubility_dataset.get_statistics()
np.testing.assert_allclose(comp_X_means, X_means)
np.testing.assert_allclose(comp_y_means, y_means)
np.testing.assert_allclose(comp_X_stds, X_stds)
np.testing.assert_allclose(comp_y_stds, y_stds)
def test_disk_iterate_batch_size():
solubility_dataset = load_solubility_data()
X, y, _, _ = (solubility_dataset.X, solubility_dataset.y,
solubility_dataset.w, solubility_dataset.ids)
batch_sizes = []
for X, y, _, _ in solubility_dataset.iterbatches(3,
epochs=2,
pad_batches=False,
deterministic=True):
batch_sizes.append(len(X))
assert [3, 3, 3, 1, 3, 3, 3, 1] == batch_sizes
def test_disk_pad_batches():
shard_sizes = [21, 11, 41, 21, 51]
batch_size = 10
all_Xs, all_ys, all_ws, all_ids = [], [], [], []
def shard_generator():
for sz in shard_sizes:
X_b = np.random.rand(sz, 1)
y_b = np.random.rand(sz, 1)
w_b = np.random.rand(sz, 1)
ids_b = np.random.rand(sz)
all_Xs.append(X_b)
all_ys.append(y_b)
all_ws.append(w_b)
all_ids.append(ids_b)
yield X_b, y_b, w_b, ids_b
dataset = dc.data.DiskDataset.create_dataset(shard_generator())
all_Xs = np.concatenate(all_Xs, axis=0)
all_ys = np.concatenate(all_ys, axis=0)
all_ws = np.concatenate(all_ws, axis=0)
all_ids = np.concatenate(all_ids, axis=0)
test_Xs, test_ys, test_ws, test_ids = [], [], [], []
for bidx, (a, b, c, d) in enumerate(
dataset.iterbatches(batch_size=batch_size,
pad_batches=True,
deterministic=True)):
test_Xs.append(a)
test_ys.append(b)
test_ws.append(c)
test_ids.append(d)
test_Xs = np.concatenate(test_Xs, axis=0)
test_ys = np.concatenate(test_ys, axis=0)
test_ws = np.concatenate(test_ws, axis=0)
test_ids = np.concatenate(test_ids, axis=0)
total_size = sum(shard_sizes)
assert bidx == math.ceil(total_size / batch_size) - 1
expected_batches = math.ceil(total_size / batch_size) * batch_size
assert len(test_Xs) == expected_batches
assert len(test_ys) == expected_batches
assert len(test_ws) == expected_batches
assert len(test_ids) == expected_batches
np.testing.assert_array_equal(all_Xs, test_Xs[:total_size, :])
np.testing.assert_array_equal(all_ys, test_ys[:total_size, :])
np.testing.assert_array_equal(all_ws, test_ws[:total_size, :])
np.testing.assert_array_equal(all_ids, test_ids[:total_size])
def test_disk_iterate_y_w_None():
shard_sizes = [21, 11, 41, 21, 51]
batch_size = 10
all_Xs, all_ids = [], []
def shard_generator():
for sz in shard_sizes:
X_b = np.random.rand(sz, 1)
ids_b = np.random.rand(sz)
all_Xs.append(X_b)
all_ids.append(ids_b)
yield X_b, None, None, ids_b
dataset = dc.data.DiskDataset.create_dataset(shard_generator())
all_Xs = np.concatenate(all_Xs, axis=0)
all_ids = np.concatenate(all_ids, axis=0)
test_Xs, test_ids = [], []
for bidx, (a, _, _, d) in enumerate(
dataset.iterbatches(batch_size=batch_size,
pad_batches=True,
deterministic=True)):
test_Xs.append(a)
test_ids.append(d)
test_Xs = np.concatenate(test_Xs, axis=0)
test_ids = np.concatenate(test_ids, axis=0)
total_size = sum(shard_sizes)
assert bidx == math.ceil(total_size / batch_size) - 1
expected_batches = math.ceil(total_size / batch_size) * batch_size
assert len(test_Xs) == expected_batches
assert len(test_ids) == expected_batches
np.testing.assert_array_equal(all_Xs, test_Xs[:total_size, :])
np.testing.assert_array_equal(all_ids, test_ids[:total_size])
def test_disk_iterate_batch():
all_batch_sizes = [None, 32, 17, 11]
all_shard_sizes = [[7, 3, 12, 4, 5], [1, 1, 1, 1, 1], [31, 31, 31, 31, 31],
[21, 11, 41, 21, 51]]
for idx in range(25):
shard_length = random.randint(1, 32)
shard_sizes = []
for _ in range(shard_length):
shard_sizes.append(random.randint(1, 128))
all_shard_sizes.append(shard_sizes)
if idx == 0:
# special case to test
all_batch_sizes.append(None)
else:
all_batch_sizes.append(random.randint(1, 256))
for shard_sizes, batch_size in zip(all_shard_sizes, all_batch_sizes):
all_Xs, all_ys, all_ws, all_ids = [], [], [], []
def shard_generator():
for sz in shard_sizes:
X_b = np.random.rand(sz, 1)
y_b = np.random.rand(sz, 1)
w_b = np.random.rand(sz, 1)
ids_b = np.random.rand(sz)
all_Xs.append(X_b)
all_ys.append(y_b)
all_ws.append(w_b)
all_ids.append(ids_b)
yield X_b, y_b, w_b, ids_b
dataset = dc.data.DiskDataset.create_dataset(shard_generator())
all_Xs = np.concatenate(all_Xs, axis=0)
all_ys = np.concatenate(all_ys, axis=0)
all_ws = np.concatenate(all_ws, axis=0)
all_ids = np.concatenate(all_ids, axis=0)
total_size = sum(shard_sizes)
assert dataset.X.shape[0] == total_size
# deterministic
test_Xs, test_ys, test_ws, test_ids = [], [], [], []
for bidx, (a, b, c, d) in enumerate(
dataset.iterbatches(batch_size=batch_size,
pad_batches=False,
deterministic=True)):
test_Xs.append(a)
test_ys.append(b)
test_ws.append(c)
test_ids.append(d)
if batch_size is None:
for idx, (tx, ty, tw, tids) in enumerate(
zip(test_Xs, test_ys, test_ws, test_ids)):
assert len(tx) == shard_sizes[idx]
assert len(ty) == shard_sizes[idx]
assert len(tw) == shard_sizes[idx]
assert len(tids) == shard_sizes[idx]
test_Xs = np.concatenate(test_Xs, axis=0)
test_ys = np.concatenate(test_ys, axis=0)
test_ws = np.concatenate(test_ws, axis=0)
test_ids = np.concatenate(test_ids, axis=0)
if batch_size is None:
assert bidx == len(shard_sizes) - 1
else:
assert bidx == math.ceil(total_size / batch_size) - 1
np.testing.assert_array_equal(all_Xs, test_Xs)
np.testing.assert_array_equal(all_ys, test_ys)
np.testing.assert_array_equal(all_ws, test_ws)
np.testing.assert_array_equal(all_ids, test_ids)
# non-deterministic
test_Xs, test_ys, test_ws, test_ids = [], [], [], []
for bidx, (a, b, c, d) in enumerate(
dataset.iterbatches(batch_size=batch_size,
pad_batches=False,
deterministic=False)):
test_Xs.append(a)
test_ys.append(b)
test_ws.append(c)
test_ids.append(d)
# we don't know the order in which the shards are iterated in.
test_Xs = np.concatenate(test_Xs, axis=0)
test_ys = np.concatenate(test_ys, axis=0)
test_ws = np.concatenate(test_ws, axis=0)
test_ids = np.concatenate(test_ids, axis=0)
if batch_size is None:
assert bidx == len(shard_sizes) - 1
else:
assert bidx == math.ceil(total_size / batch_size) - 1
np.testing.assert_array_equal(np.sort(all_Xs, axis=0),
np.sort(test_Xs, axis=0))
np.testing.assert_array_equal(np.sort(all_ys, axis=0),
np.sort(test_ys, axis=0))
np.testing.assert_array_equal(np.sort(all_ws, axis=0),
np.sort(test_ws, axis=0))
np.testing.assert_array_equal(np.sort(all_ids, axis=0),
np.sort(test_ids, axis=0))
def test_merge():
"""Test that dataset merge works."""
num_datapoints = 10
num_features = 10
num_tasks = 1
num_datasets = 4
datasets = []
for i in range(num_datasets):
Xi = np.random.rand(num_datapoints, num_features)
yi = np.random.randint(2, size=(num_datapoints, num_tasks))
wi = np.ones((num_datapoints, num_tasks))
idsi = np.array(["id"] * num_datapoints)
dataseti = dc.data.DiskDataset.from_numpy(Xi, yi, wi, idsi)
datasets.append(dataseti)
new_data = dc.data.datasets.DiskDataset.merge(datasets)
x = [1, 2, 3]
X = np.array(x)
y = np.array(x)
a = dc.data.NumpyDataset(X, y)
b = dc.data.NumpyDataset(X, y)
c = dc.data.NumpyDataset.merge([a, b])
assert c.y.shape == (
6, 1) # test to check if merge works when y is one dimensional
# Check that we have all the data in
assert new_data.X.shape == (num_datapoints * num_datasets, num_features)
assert new_data.y.shape == (num_datapoints * num_datasets, num_tasks)
assert len(new_data.tasks) == len(datasets[0].tasks)
@pytest.mark.tensorflow
def test_make_tf_dataset():
"""Test creating a Tensorflow Iterator from a Dataset."""
X = np.random.random((100, 5))
y = np.random.random((100, 1))
dataset = dc.data.NumpyDataset(X, y)
iterator = dataset.make_tf_dataset(batch_size=10,
epochs=2,
deterministic=True)
for i, (batch_X, batch_y, batch_w) in enumerate(iterator):
offset = (i % 10) * 10
np.testing.assert_array_equal(X[offset:offset + 10, :], batch_X)
np.testing.assert_array_equal(y[offset:offset + 10, :], batch_y)
np.testing.assert_array_equal(np.ones((10, 1)), batch_w)
assert i == 19
@pytest.mark.torch
def _validate_pytorch_dataset(dataset):
X = dataset.X
y = dataset.y
w = dataset.w
ids = dataset.ids
n_samples = X.shape[0]
# Test iterating in order.
ds = dataset.make_pytorch_dataset(epochs=2, deterministic=True)
for i, (iter_X, iter_y, iter_w, iter_id) in enumerate(ds):
j = i % n_samples
np.testing.assert_array_equal(X[j, :], iter_X)
np.testing.assert_array_equal(y[j, :], iter_y)
np.testing.assert_array_equal(w[j, :], iter_w)
assert ids[j] == iter_id
assert i == 2 * n_samples - 1
# Test iterating out of order.
ds = dataset.make_pytorch_dataset(epochs=2, deterministic=False)
id_to_index = dict((id, i) for i, id in enumerate(ids))
id_count = dict((id, 0) for id in ids)
for iter_X, iter_y, iter_w, iter_id in ds:
j = id_to_index[iter_id]
np.testing.assert_array_equal(X[j, :], iter_X)
np.testing.assert_array_equal(y[j, :], iter_y)
np.testing.assert_array_equal(w[j, :], iter_w)
id_count[iter_id] += 1
assert all(id_count[id] == 2 for id in ids)
# Test iterating in batches.
ds = dataset.make_pytorch_dataset(epochs=2,
deterministic=False,
batch_size=7)
id_to_index = dict((id, i) for i, id in enumerate(ids))
id_count = dict((id, 0) for id in ids)
for iter_X, iter_y, iter_w, iter_id in ds:
size = len(iter_id)
assert size <= 7
for i in range(size):
j = id_to_index[iter_id[i]]
np.testing.assert_array_equal(X[j, :], iter_X[i])
np.testing.assert_array_equal(y[j, :], iter_y[i])
np.testing.assert_array_equal(w[j, :], iter_w[i])
id_count[iter_id[i]] += 1
assert all(id_count[id] == 2 for id in ids)
# Test iterating with multiple workers.
import torch # noqa
ds = dataset.make_pytorch_dataset(epochs=2, deterministic=False)
loader = torch.utils.data.DataLoader(ds, num_workers=3)
id_count = dict((id, 0) for id in ids)
for iter_X, iter_y, iter_w, iter_id in loader:
j = id_to_index[iter_id[0]]
np.testing.assert_array_equal(X[j, :], iter_X[0])
np.testing.assert_array_equal(y[j, :], iter_y[0])
np.testing.assert_array_equal(w[j, :], iter_w[0])
id_count[iter_id[0]] += 1
assert all(id_count[id] == 2 for id in ids)
def test_dataframe():
"""Test converting between Datasets and DataFrames."""
dataset = load_solubility_data()
# A round trip from Dataset to DataFrame to Dataset should produce identical arrays.
df = dataset.to_dataframe()
dataset2 = dc.data.Dataset.from_dataframe(df)
np.testing.assert_array_equal(dataset.X, dataset2.X)
np.testing.assert_array_equal(dataset.y, dataset2.y)
np.testing.assert_array_equal(dataset.w, dataset2.w)
np.testing.assert_array_equal(dataset.ids, dataset2.ids)
# Try specifying particular columns.
dataset3 = dc.data.Dataset.from_dataframe(df,
X=['X2', 'X4'],
y='w',
w=['y', 'X1'])
np.testing.assert_array_equal(dataset.X[:, (1, 3)], dataset3.X)
np.testing.assert_array_equal(dataset.w, dataset3.y)
np.testing.assert_array_equal(
np.stack([dataset.y[:, 0], dataset.X[:, 0]], axis=1), dataset3.w)
def test_to_csv():
"""Test converting between Dataset and CSV"""
tmpdir = tempfile.TemporaryDirectory()
csv_path = os.path.join(tmpdir.name, 'tmp.csv')
dataset = load_solubility_data()
dataset.reshard(2)
dataset.to_csv(csv_path)
df = pd.read_csv(csv_path)
dataset2 = dc.data.Dataset.from_dataframe(df)
np.testing.assert_array_equal(dataset.X, dataset2.X)
np.testing.assert_array_equal(dataset.y, dataset2.y)
np.testing.assert_array_equal(dataset.w, dataset2.w)
np.testing.assert_array_equal(dataset.ids, dataset2.ids)
# Try specifying particular columns
dataset3 = dc.data.Dataset.from_dataframe(df,
X=['X2', 'X4'],
y='w',
w=['y', 'X1'])
np.testing.assert_array_equal(dataset.X[:, (1, 3)], dataset3.X)
np.testing.assert_array_equal(dataset.w, dataset3.y)
np.testing.assert_array_equal(
np.stack([dataset.y[:, 0], dataset.X[:, 0]], axis=1), dataset3.w)
x = np.array([np.random.randn(2, 3), np.random.randn(2, 3)])
dataset = dc.data.NumpyDataset(X=x, y=np.array([[1], [2]]))
with pytest.raises(AssertionError):
dataset.to_csv(csv_path)
def test_to_str():
"""Tests to string representation of Dataset."""
dataset = dc.data.NumpyDataset(X=np.random.rand(5, 3),
y=np.random.rand(5,),
ids=np.arange(5))
ref_str = '<NumpyDataset X.shape: (5, 3), y.shape: (5,), w.shape: (5,), ids: [0 1 2 3 4], task_names: [0]>'
assert str(dataset) == ref_str
# Test id shrinkage
dc.utils.set_print_threshold(10)
dataset = dc.data.NumpyDataset(X=np.random.rand(50, 3),
y=np.random.rand(50,),
ids=np.arange(50))
ref_str = '<NumpyDataset X.shape: (50, 3), y.shape: (50,), w.shape: (50,), ids: [0 1 2 ... 47 48 49], task_names: [0]>'
assert str(dataset) == ref_str
# Test task shrinkage
dataset = dc.data.NumpyDataset(X=np.random.rand(50, 3),
y=np.random.rand(50, 20),
ids=np.arange(50))
ref_str = '<NumpyDataset X.shape: (50, 3), y.shape: (50, 20), w.shape: (50, 1), ids: [0 1 2 ... 47 48 49], task_names: [ 0 1 2 ... 17 18 19]>'
assert str(dataset) == ref_str
# Test max print size
dc.utils.set_max_print_size(25)
dataset = dc.data.NumpyDataset(X=np.random.rand(50, 3),
y=np.random.rand(50,),
ids=np.arange(50))
ref_str = '<NumpyDataset X.shape: (50, 3), y.shape: (50,), w.shape: (50,), task_names: [0]>'
assert str(dataset) == ref_str
class TestDatasets(unittest.TestCase):
"""
Test basic top-level API for dataset objects.
"""
def test_numpy_iterate_batch_size(self):
solubility_dataset = load_solubility_data()
X, y, _, _ = (solubility_dataset.X, solubility_dataset.y,
solubility_dataset.w, solubility_dataset.ids)
solubility_dataset = dc.data.NumpyDataset.from_DiskDataset(
solubility_dataset)
batch_sizes = []
for X, y, _, _ in solubility_dataset.iterbatches(3,
epochs=2,
pad_batches=False,
deterministic=True):
batch_sizes.append(len(X))
self.assertEqual([3, 3, 3, 1, 3, 3, 3, 1], batch_sizes)
@pytest.mark.torch
def test_make_pytorch_dataset_from_numpy(self):
"""Test creating a PyTorch Dataset from a NumpyDataset."""
X = np.random.random((100, 5))
y = np.random.random((100, 1))
ids = [str(i) for i in range(100)]
dataset = dc.data.NumpyDataset(X, y, ids=ids)
_validate_pytorch_dataset(dataset)
@pytest.mark.torch
def test_make_pytorch_dataset_from_images(self):
"""Test creating a PyTorch Dataset from an ImageDataset."""
path = os.path.join(os.path.dirname(__file__), 'images')
files = [os.path.join(path, f) for f in os.listdir(path)]
y = np.random.random((10, 1))
ids = [str(i) for i in range(len(files))]
dataset = dc.data.ImageDataset(files, y, ids=ids)
_validate_pytorch_dataset(dataset)
@pytest.mark.torch
def test_make_pytorch_dataset_from_disk(self):
"""Test creating a PyTorch Dataset from a DiskDataset."""
dataset = load_solubility_data()
_validate_pytorch_dataset(dataset)
<file_sep>"""
Test atomic conv featurizer.
"""
import os
import logging
import numpy as np
from deepchem.feat import AtomicConvFeaturizer
logger = logging.getLogger(__name__)
def test_atomic_conv_featurization():
"""Unit test for AtomicConvFeaturizer."""
dir_path = os.path.dirname(os.path.realpath(__file__))
ligand_file = os.path.join(dir_path, "data/3zso_ligand_hyd.pdb")
protein_file = os.path.join(dir_path, "data/3zso_protein_noH.pdb")
# Pulled from PDB files. For larger datasets with more PDBs, would use
# max num atoms instead of exact.
frag1_num_atoms = 44 # for ligand atoms
frag2_num_atoms = 2334 # for protein atoms
complex_num_atoms = 2378 # in total
max_num_neighbors = 4
# Cutoff in angstroms
neighbor_cutoff = 4
complex_featurizer = AtomicConvFeaturizer(frag1_num_atoms, frag2_num_atoms,
complex_num_atoms,
max_num_neighbors,
neighbor_cutoff)
(frag1_coords, frag1_neighbor_list, frag1_z, frag2_coords,
frag2_neighbor_list, frag2_z, complex_coords, complex_neighbor_list,
complex_z) = complex_featurizer._featurize((ligand_file, protein_file))
# Coords are padded, neighbor list and Z are not
assert frag1_coords.shape == (frag1_num_atoms, 3)
assert (sorted(list(frag1_neighbor_list.keys())) == list(
range(frag1_num_atoms)))
assert frag1_neighbor_list[0] == [1, 2, 14, 3]
assert frag1_z.shape == (frag1_num_atoms,)
assert np.array_equal(
frag1_z,
np.array([
6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 7, 7, 8, 8, 8, 8, 8, 8, 8
]))
assert frag2_coords.shape == (frag2_num_atoms, 3)
assert (sorted(list(frag2_neighbor_list.keys())) == list(
range(frag2_num_atoms)))
assert frag2_neighbor_list[0] == [1, 2, 4, 3]
assert frag2_z.shape == (frag2_num_atoms,)
assert complex_coords.shape == (complex_num_atoms, 3)
assert (sorted(list(complex_neighbor_list.keys())) == list(
range(complex_num_atoms)))
assert complex_neighbor_list[0] == [1, 2, 14, 3]
assert (complex_z.shape == (complex_num_atoms,))
<file_sep>"""
Implementation of Smiles2Vec and ChemCeption models as part of the ChemNet
transfer learning protocol.
"""
__author__ = "<NAME>"
__license__ = "MIT"
import numpy as np
import tensorflow as tf
from typing import Dict
from deepchem.data.datasets import pad_batch
from deepchem.models import KerasModel
from deepchem.models.losses import L2Loss, SoftmaxCrossEntropy, SigmoidCrossEntropy
from deepchem.metrics import to_one_hot
from deepchem.models import chemnet_layers
from tensorflow.keras.layers import Input, Dense, Reshape, Softmax, Activation
from tensorflow.keras.layers import Conv1D, GRU, LSTM, Bidirectional
from tensorflow.keras.layers import GlobalAveragePooling2D
DEFAULT_INCEPTION_BLOCKS = {"A": 3, "B": 3, "C": 3}
INCEPTION_DICT = {
"A": chemnet_layers.InceptionResnetA,
"B": chemnet_layers.InceptionResnetB,
"C": chemnet_layers.InceptionResnetC
}
RNN_DICT = {"GRU": GRU, "LSTM": LSTM}
class Smiles2Vec(KerasModel):
"""
Implements the Smiles2Vec model, that learns neural representations of SMILES
strings which can be used for downstream tasks.
The model is based on the description in Goh et al., "SMILES2vec: An
Interpretable General-Purpose Deep Neural Network for Predicting Chemical
Properties" (https://arxiv.org/pdf/1712.02034.pdf). The goal here is to take
SMILES strings as inputs, turn them into vector representations which can then
be used in predicting molecular properties.
The model consists of an Embedding layer that retrieves embeddings for each
character in the SMILES string. These embeddings are learnt jointly with the
rest of the model. The output from the embedding layer is a tensor of shape
(batch_size, seq_len, embedding_dim). This tensor can optionally be fed
through a 1D convolutional layer, before being passed to a series of RNN cells
(optionally bidirectional). The final output from the RNN cells aims
to have learnt the temporal dependencies in the SMILES string, and in turn
information about the structure of the molecule, which is then used for
molecular property prediction.
In the paper, the authors also train an explanation mask to endow the model
with interpretability and gain insights into its decision making. That component
is currently not part of this implementation, which was developed
for the purpose of investigating a transfer learning protocol,
ChemNet (which can be found at https://arxiv.org/abs/1712.02734).
"""
def __init__(self,
char_to_idx,
n_tasks=10,
max_seq_len=270,
embedding_dim=50,
n_classes=2,
use_bidir=True,
use_conv=True,
filters=192,
kernel_size=3,
strides=1,
rnn_sizes=[224, 384],
rnn_types=["GRU", "GRU"],
mode="regression",
**kwargs):
"""
Parameters
----------
char_to_idx: dict,
char_to_idx contains character to index mapping for SMILES characters
embedding_dim: int, default 50
Size of character embeddings used.
use_bidir: bool, default True
Whether to use BiDirectional RNN Cells
use_conv: bool, default True
Whether to use a conv-layer
kernel_size: int, default 3
Kernel size for convolutions
filters: int, default 192
Number of filters
strides: int, default 1
Strides used in convolution
rnn_sizes: list[int], default [224, 384]
Number of hidden units in each RNN cell
rnn_types: list[str], default ["GRU", "GRU"]
Type of each RNN cell ("GRU" or "LSTM"); must have the same length as rnn_sizes
n_tasks: int, default 10
Number of prediction tasks
max_seq_len: int, default 270
Maximum length of the input SMILES sequences
n_classes: int, default 2
Number of classes (used only for classification)
mode: str, default regression
Whether to use model for regression or classification
"""
self.char_to_idx = char_to_idx
self.n_classes = n_classes
self.max_seq_len = max_seq_len
self.embedding_dim = embedding_dim
self.use_bidir = use_bidir
self.use_conv = use_conv
if use_conv:
self.kernel_size = kernel_size
self.filters = filters
self.strides = strides
self.rnn_types = rnn_types
self.rnn_sizes = rnn_sizes
assert len(rnn_sizes) == len(
rnn_types), "Should have same number of hidden units as RNNs"
self.n_tasks = n_tasks
self.mode = mode
model, loss, output_types = self._build_graph()
super(Smiles2Vec, self).__init__(model=model,
loss=loss,
output_types=output_types,
**kwargs)
def _build_graph(self):
"""Build the model."""
smiles_seqs = Input(dtype=tf.int32,
shape=(self.max_seq_len,),
name='Input')
rnn_input = tf.keras.layers.Embedding(
input_dim=len(self.char_to_idx),
output_dim=self.embedding_dim)(smiles_seqs)
if self.use_conv:
rnn_input = Conv1D(filters=self.filters,
kernel_size=self.kernel_size,
strides=self.strides,
activation=tf.nn.relu,
name='Conv1D')(rnn_input)
rnn_embeddings = rnn_input
for idx, rnn_type in enumerate(self.rnn_types[:-1]):
rnn_layer = RNN_DICT[rnn_type]
layer = rnn_layer(units=self.rnn_sizes[idx], return_sequences=True)
if self.use_bidir:
layer = Bidirectional(layer)
rnn_embeddings = layer(rnn_embeddings)
# Last layer sequences not returned.
layer = RNN_DICT[self.rnn_types[-1]](units=self.rnn_sizes[-1])
if self.use_bidir:
layer = Bidirectional(layer)
rnn_embeddings = layer(rnn_embeddings)
if self.mode == "classification":
logits = Dense(self.n_tasks * self.n_classes)(rnn_embeddings)
logits = Reshape((self.n_tasks, self.n_classes))(logits)
if self.n_classes == 2:
output = Activation(activation='sigmoid')(logits)
loss = SigmoidCrossEntropy()
else:
output = Softmax()(logits)
loss = SoftmaxCrossEntropy()
outputs = [output, logits]
output_types = ['prediction', 'loss']
else:
output = Dense(self.n_tasks * 1, name='Dense')(rnn_embeddings)
output = Reshape((self.n_tasks, 1), name='Reshape')(output)
outputs = [output]
output_types = ['prediction']
loss = L2Loss()
model = tf.keras.Model(inputs=[smiles_seqs], outputs=outputs)
return model, loss, output_types
def default_generator(self,
dataset,
epochs=1,
mode='fit',
deterministic=True,
pad_batches=True):
for epoch in range(epochs):
for (X_b, y_b, w_b,
ids_b) in dataset.iterbatches(batch_size=self.batch_size,
deterministic=deterministic,
pad_batches=pad_batches):
if self.mode == 'classification':
y_b = to_one_hot(y_b.flatten(), self.n_classes).reshape(
-1, self.n_tasks, self.n_classes)
yield ([X_b], [y_b], [w_b])
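# Construction sketch (not executed on import). The character vocabulary below
# is a toy assumption; in practice char_to_idx usually comes from the same
# mapping used by the featurizer that produced the integer SMILES sequences.
def _example_smiles2vec():  # pragma: no cover
    chars = [" ", "#", "(", ")", "+", "-", "1", "2", "3", "=", "C", "N", "O"]
    char_to_idx = {c: i for i, c in enumerate(chars)}
    model = Smiles2Vec(char_to_idx=char_to_idx,
                       n_tasks=1,
                       max_seq_len=270,
                       use_conv=True,
                       mode="regression",
                       batch_size=32)
    return model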
class ChemCeption(KerasModel):
"""
Implements the ChemCeption model that leverages the representational capacities
of convolutional neural networks (CNNs) to predict molecular properties.
The model is based on the description in Goh et al., "Chemception: A Deep
Neural Network with Minimal Chemistry Knowledge Matches the Performance of
Expert-developed QSAR/QSPR Models" (https://arxiv.org/pdf/1706.06689.pdf).
The authors use an image based representation of the molecule, where pixels
encode different atomic and bond properties. More details on the image
representations can be found at https://arxiv.org/abs/1710.02238
The model consists of a Stem Layer that reduces the image resolution for the
layers to follow. The output of the Stem Layer is followed by a series of
Inception-Resnet blocks & a Reduction layer. Layers in the Inception-Resnet
blocks process image tensors at multiple resolutions and use a ResNet style
skip-connection, combining features from different resolutions. The Reduction
layers reduce the spatial extent of the image by max-pooling and 2-strided
convolutions. More details on these layers can be found in the ChemCeption
paper referenced above. The output of the final Reduction layer is subject to
a Global Average Pooling, and a fully-connected layer maps the features to
downstream outputs.
In the ChemCeption paper, the authors perform real-time image augmentation by
rotating images between 0 to 180 degrees. This can be done during model
training by setting the augment argument to True.
"""
def __init__(self,
img_spec: str = "std",
img_size: int = 80,
base_filters: int = 16,
inception_blocks: Dict = DEFAULT_INCEPTION_BLOCKS,
n_tasks: int = 10,
n_classes: int = 2,
augment: bool = False,
mode: str = "regression",
**kwargs):
"""
Parameters
----------
img_spec: str, default std
Image specification used
img_size: int, default 80
Image size used
base_filters: int, default 16
Base filters used for the different inception and reduction layers
inception_blocks: dict,
Dictionary containing number of blocks for every inception layer
n_tasks: int, default 10
Number of classification or regression tasks
n_classes: int, default 2
Number of classes (used only for classification)
augment: bool, default False
Whether to augment images
mode: str, default regression
Whether the model is used for regression or classification
"""
if img_spec == "engd":
self.input_shape = (img_size, img_size, 4)
elif img_spec == "std":
self.input_shape = (img_size, img_size, 1)
self.base_filters = base_filters
self.inception_blocks = inception_blocks
self.n_tasks = n_tasks
self.n_classes = n_classes
self.mode = mode
self.augment = augment
model, loss, output_types = self._build_graph()
super(ChemCeption, self).__init__(model=model,
loss=loss,
output_types=output_types,
**kwargs)
def _build_graph(self):
smile_images = Input(shape=self.input_shape)
stem = chemnet_layers.Stem(self.base_filters)(smile_images)
inceptionA_out = self.build_inception_module(inputs=stem, type="A")
reductionA_out = chemnet_layers.ReductionA(
self.base_filters)(inceptionA_out)
inceptionB_out = self.build_inception_module(inputs=reductionA_out,
type="B")
reductionB_out = chemnet_layers.ReductionB(
self.base_filters)(inceptionB_out)
inceptionC_out = self.build_inception_module(inputs=reductionB_out,
type="C")
avg_pooling_out = GlobalAveragePooling2D()(inceptionC_out)
if self.mode == "classification":
logits = Dense(self.n_tasks * self.n_classes)(avg_pooling_out)
logits = Reshape((self.n_tasks, self.n_classes))(logits)
if self.n_classes == 2:
output = Activation(activation='sigmoid')(logits)
loss = SigmoidCrossEntropy()
else:
output = Softmax()(logits)
loss = SoftmaxCrossEntropy()
outputs = [output, logits]
output_types = ['prediction', 'loss']
else:
output = Dense(self.n_tasks * 1)(avg_pooling_out)
output = Reshape((self.n_tasks, 1))(output)
outputs = [output]
output_types = ['prediction']
loss = L2Loss()
model = tf.keras.Model(inputs=[smile_images], outputs=outputs)
return model, loss, output_types
def build_inception_module(self, inputs, type="A"):
"""Inception module is a series of inception layers of similar type. This
function builds that."""
num_blocks = self.inception_blocks[type]
inception_layer = INCEPTION_DICT[type]
output = inputs
for block in range(num_blocks):
output = inception_layer(self.base_filters,
int(inputs.shape[-1]))(output)
return output
def default_generator(self,
dataset,
epochs=1,
mode='fit',
deterministic=True,
pad_batches=True):
for epoch in range(epochs):
if mode == "predict" or (not self.augment):
for (X_b, y_b, w_b,
ids_b) in dataset.iterbatches(batch_size=self.batch_size,
deterministic=deterministic,
pad_batches=pad_batches):
if self.mode == 'classification':
y_b = to_one_hot(y_b.flatten(), self.n_classes).reshape(
-1, self.n_tasks, self.n_classes)
yield ([X_b], [y_b], [w_b])
else:
if not pad_batches:
n_samples = dataset.X.shape[0]
else:
n_samples = dataset.X.shape[0] + (
self.batch_size -
(dataset.X.shape[0] % self.batch_size))
n_batches = 0
image_data_generator = tf.keras.preprocessing.image.ImageDataGenerator(
rotation_range=180)
for (X_b, y_b, w_b) in image_data_generator.flow(
dataset.X,
dataset.y,
sample_weight=dataset.w,
shuffle=not deterministic,
batch_size=self.batch_size):
if pad_batches:
ids_b = np.arange(X_b.shape[0])
X_b, y_b, w_b, _ = pad_batch(self.batch_size, X_b, y_b,
w_b, ids_b)
n_batches += 1
if n_batches > n_samples / self.batch_size:
# This is needed because ImageDataGenerator does infinite looping
break
if self.mode == "classification":
y_b = to_one_hot(y_b.flatten(), self.n_classes).reshape(
-1, self.n_tasks, self.n_classes)
yield ([X_b], [y_b], [w_b])
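# Construction sketch (not executed on import). Inputs are expected to be
# molecule images of shape (img_size, img_size, 1) for the "std" spec; an
# image featurizer with matching img_size/img_spec is assumed to have been
# used upstream (this class does not enforce it).
def _example_chemception():  # pragma: no cover
    model = ChemCeption(img_spec="std",
                        img_size=80,
                        base_filters=16,
                        n_tasks=1,
                        mode="regression",
                        batch_size=32)
    return model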
<file_sep>import deepchem as dc
import tempfile
import numpy as np
import os
def test_copy():
"""Test that copy works correctly."""
num_datapoints = 100
num_features = 10
num_tasks = 10
# Generate data
X = np.random.rand(num_datapoints, num_features)
y = np.random.randint(2, size=(num_datapoints, num_tasks))
w = np.random.randint(2, size=(num_datapoints, num_tasks))
ids = np.array(["id"] * num_datapoints)
# Build a DiskDataset from the generated numpy arrays.
dataset = dc.data.DiskDataset.from_numpy(X, y, w, ids)
# Set cache to 0 size to avoid cache hiding errors
dataset.memory_cache_size = 0
with tempfile.TemporaryDirectory() as tmpdirname:
copy = dataset.copy(tmpdirname)
assert np.all(copy.X == dataset.X)
assert np.all(copy.y == dataset.y)
assert np.all(copy.w == dataset.w)
assert np.all(copy.ids == dataset.ids)
def test_move():
"""Test that move works correctly."""
num_datapoints = 100
num_features = 10
num_tasks = 10
# Generate data
X = np.random.rand(num_datapoints, num_features)
y = np.random.randint(2, size=(num_datapoints, num_tasks))
w = np.random.randint(2, size=(num_datapoints, num_tasks))
ids = np.array(["id"] * num_datapoints)
# Build a DiskDataset from the generated numpy arrays.
dataset = dc.data.DiskDataset.from_numpy(X, y, w, ids)
# Set cache to 0 size to avoid cache hiding errors
dataset.memory_cache_size = 0
data_dir = dataset.data_dir
with tempfile.TemporaryDirectory() as tmpdirname:
dataset.move(tmpdirname, delete_if_exists=False)
assert np.all(X == dataset.X)
assert np.all(y == dataset.y)
assert np.all(w == dataset.w)
assert np.all(ids == dataset.ids)
assert dataset.data_dir == os.path.join(tmpdirname,
os.path.basename(data_dir))
<file_sep>import os
import deepchem as dc
def test_charge_voxelizer():
current_dir = os.path.dirname(os.path.realpath(__file__))
protein_file = os.path.join(current_dir, 'data',
'3ws9_protein_fixer_rdkit.pdb')
ligand_file = os.path.join(current_dir, 'data', '3ws9_ligand.sdf')
cutoff = 4.5
box_width = 20
voxel_width = 1.0
voxelizer = dc.feat.ChargeVoxelizer(cutoff=cutoff,
box_width=box_width,
voxel_width=voxel_width)
features = voxelizer.featurize([(ligand_file, protein_file)])
assert features.shape == (1, box_width, box_width, box_width, 1)
def test_salt_bridge_voxelizer():
current_dir = os.path.dirname(os.path.realpath(__file__))
protein_file = os.path.join(current_dir, 'data',
'3ws9_protein_fixer_rdkit.pdb')
ligand_file = os.path.join(current_dir, 'data', '3ws9_ligand.sdf')
cutoff = 4.5
box_width = 20
voxel_width = 1.0
voxelizer = dc.feat.SaltBridgeVoxelizer(cutoff=cutoff,
box_width=box_width,
voxel_width=voxel_width)
features = voxelizer.featurize([(ligand_file, protein_file)])
assert features.shape == (1, box_width, box_width, box_width, 1)
def test_cation_pi_voxelizer():
current_dir = os.path.dirname(os.path.realpath(__file__))
protein_file = os.path.join(current_dir, 'data',
'3ws9_protein_fixer_rdkit.pdb')
ligand_file = os.path.join(current_dir, 'data', '3ws9_ligand.sdf')
cutoff = 4.5
box_width = 20
voxel_width = 1.0
voxelizer = dc.feat.CationPiVoxelizer(cutoff=cutoff,
box_width=box_width,
voxel_width=voxel_width)
features = voxelizer.featurize([(ligand_file, protein_file)])
assert features.shape == (1, box_width, box_width, box_width, 1)
def test_pi_stack_voxelizer():
current_dir = os.path.dirname(os.path.realpath(__file__))
protein_file = os.path.join(current_dir, 'data',
'3ws9_protein_fixer_rdkit.pdb')
ligand_file = os.path.join(current_dir, 'data', '3ws9_ligand.sdf')
cutoff = 4.5
box_width = 20
voxel_width = 1.0
voxelizer = dc.feat.PiStackVoxelizer(cutoff=cutoff,
box_width=box_width,
voxel_width=voxel_width)
features = voxelizer.featurize([(ligand_file, protein_file)])
assert features.shape == (1, box_width, box_width, box_width, 2)
# # TODO: This is failing, something about the hydrogen bond counting?
# def test_hydrogen_bond_counter():
# current_dir = os.path.dirname(os.path.realpath(__file__))
# protein_file = os.path.join(current_dir, 'data',
# '3ws9_protein_fixer_rdkit.pdb')
# ligand_file = os.path.join(current_dir, 'data', '3ws9_ligand.sdf')
#
# cutoff = 4.5
# featurizer = dc.feat.HydrogenBondCounter(cutoff=cutoff)
# features, failures = featurizer.featurize([ligand_file], [protein_file])
# # TODO: Add shape test
#
#
# # TODO: This is failing, something about the hydrogen bond counting?
# def test_hydrogen_bond_voxelizer():
# current_dir = os.path.dirname(os.path.realpath(__file__))
# protein_file = os.path.join(current_dir, 'data',
# '3ws9_protein_fixer_rdkit.pdb')
# ligand_file = os.path.join(current_dir, 'data', '3ws9_ligand.sdf')
#
# cutoff = 4.5
# box_width = 16
# voxel_width = 1.0
# voxelizer = dc.feat.HydrogenBondVoxelizer(
# cutoff=cutoff, box_width=box_width, voxel_width=voxel_width)
# features, failures = voxelizer.featurize([ligand_file], [protein_file])
# # TODO: Add shape test
<file_sep>"""
Script that trains RF model on KINASE datasets.
"""
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
import os
import numpy as np
import tempfile
import shutil
import deepchem as dc
from sklearn.ensemble import RandomForestRegressor
from KINASE_datasets import load_kinase
###Load data###
np.random.seed(123)
shard_size = 2000
#num_trials = 5
num_trials = 1
print("About to load KINASE data.")
KINASE_tasks, datasets, transformers = load_kinase(shard_size=shard_size)
train_dataset, valid_dataset, test_dataset = datasets
print("Number of compounds in train set")
print(len(train_dataset))
print("Number of compounds in validation set")
print(len(valid_dataset))
print("Number of compounds in test set")
print(len(test_dataset))
num_features = train_dataset.get_data_shape()[0]
print("Num features: %d" % num_features)
metric = dc.metrics.Metric(dc.metrics.pearson_r2_score, task_averager=np.mean)
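# SingletaskToMultitask (used below) fits one independent sklearn model per
# task using this builder; max_features of roughly num_features/3 is the
# classic random-forest heuristic for regression problems.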
def task_model_builder(model_dir):
sklearn_model = RandomForestRegressor(
n_estimators=100, max_features=int(num_features/3),
min_samples_split=5, n_jobs=-1)
return dc.models.SklearnModel(sklearn_model, model_dir)
all_results = []
for trial in range(num_trials):
print("Starting trial %d" % trial)
model = dc.models.SingletaskToMultitask(KINASE_tasks, task_model_builder)
print("Training model")
model.fit(train_dataset)
print("Evaluating models")
train_score, train_task_scores = model.evaluate(
train_dataset, [metric], transformers, per_task_metrics=True)
valid_score, valid_task_scores = model.evaluate(
valid_dataset, [metric], transformers, per_task_metrics=True)
test_score, test_task_scores = model.evaluate(
test_dataset, [metric], transformers, per_task_metrics=True)
all_results.append((train_score, train_task_scores,
valid_score, valid_task_scores,
test_score, test_task_scores))
print("----------------------------------------------------------------")
print("Scores for trial %d" % trial)
print("----------------------------------------------------------------")
print("train_task_scores")
print(train_task_scores)
print("Mean Train score")
print(train_score)
print("valid_task_scores")
print(valid_task_scores)
print("Mean Validation score")
print(valid_score)
print("test_task_scores")
print(test_task_scores)
print("Mean Test score")
print(test_score)
print("####################################################################")
for trial in range(num_trials):
(train_score, train_task_scores, valid_score, valid_task_scores,
test_score, test_task_scores) = all_results[trial]
print("----------------------------------------------------------------")
print("Scores for trial %d" % trial)
print("----------------------------------------------------------------")
print("train_task_scores")
print(train_task_scores)
print("Mean Train score")
print(train_score)
print("valid_task_scores")
print(valid_task_scores)
print("Mean Validation score")
print(valid_score)
print("test_task_scores")
print(test_task_scores)
print("Mean Test score")
print(test_score)
<file_sep>from deepchem.models.torch_models.hf_models import HuggingFaceModel
from transformers.models.roberta.modeling_roberta import (
RobertaConfig, RobertaForMaskedLM, RobertaForSequenceClassification)
from transformers.models.roberta.tokenization_roberta_fast import \
RobertaTokenizerFast
from transformers.modeling_utils import PreTrainedModel
class Chemberta(HuggingFaceModel):
"""Chemberta Model
Chemberta is a transformer style model for learning on SMILES strings.
The model architecture is based on the RoBERTa architecture. The model
has can be used for both pretraining an embedding and finetuning for
downstream applications.
The model supports two types of pretraining tasks - pretraining via masked language
modeling and pretraining via multi-task regression. To pretrain via masked language
modeling task, use task = `mlm` and for pretraining via multitask regression task,
use task = `mtr`. The model supports the regression, classification and multitask
regression finetuning tasks and they can be specified using `regression`, `classification`
and `mtr` as arguments to the `task` keyword during model initialisation.
The model uses a tokenizer to create input tokens for the model from SMILES strings.
The default tokenizer model is a byte-pair encoding tokenizer trained on PubChem10M dataset
and loaded from huggingFace model hub (https://huggingface.co/seyonec/PubChem10M_SMILES_BPE_60k).
Parameters
----------
task: str
The task defines the type of learning task in the model. The supported tasks are
- `mlm` - masked language modeling commonly used in pretraining
- `mtr` - multitask regression - a task used for both pretraining base models and finetuning
- `regression` - use it for regression tasks, like property prediction
- `classification` - use it for classification tasks
tokenizer_path: str
Path to the pretrained tokenizer used to tokenize SMILES strings for model inputs. The tokenizer path can either be a Hugging Face tokenizer model or a local path containing the tokenizer.
n_tasks: int, default 1
Number of prediction targets for a multitask learning model
Example
-------
>>> import os
>>> import tempfile
>>> tempdir = tempfile.mkdtemp()
>>> # preparing dataset
>>> import pandas as pd
>>> import deepchem as dc
>>> smiles = ["CCN(CCSC)C(=O)N[C@@](C)(CC)C(F)(F)F","CC1(C)CN(C(=O)Nc2cc3ccccc3nn2)C[C@@]2(CCOC2)O1"]
>>> labels = [3.112,2.432]
>>> df = pd.DataFrame(list(zip(smiles, labels)), columns=["smiles", "task1"])
>>> with dc.utils.UniversalNamedTemporaryFile(mode='w') as tmpfile:
... df.to_csv(tmpfile.name)
... loader = dc.data.CSVLoader(["task1"], feature_field="smiles", featurizer=dc.feat.DummyFeaturizer())
... dataset = loader.create_dataset(tmpfile.name)
>>> # pretraining
>>> from deepchem.models.torch_models.chemberta import Chemberta
>>> pretrain_model_dir = os.path.join(tempdir, 'pretrain-model')
>>> tokenizer_path = "seyonec/PubChem10M_SMILES_BPE_60k"
>>> pretrain_model = Chemberta(task='mlm', model_dir=pretrain_model_dir, tokenizer_path=tokenizer_path) # mlm pretraining
>>> pretraining_loss = pretrain_model.fit(dataset, nb_epoch=1)
>>> # finetuning in regression mode
>>> finetune_model_dir = os.path.join(tempdir, 'finetune-model')
>>> finetune_model = Chemberta(task='regression', model_dir=finetune_model_dir, tokenizer_path=tokenizer_path)
>>> finetune_model.load_from_pretrained(pretrain_model_dir)
>>> finetuning_loss = finetune_model.fit(dataset, nb_epoch=1)
>>> # prediction and evaluation
>>> result = finetune_model.predict(dataset)
>>> eval_results = finetune_model.evaluate(dataset, metrics=dc.metrics.Metric(dc.metrics.mae_score))
Reference
---------
.. <NAME>., <NAME>., & <NAME>. (2020). Chemberta: Large-scale self-supervised pretraining for molecular property prediction. arXiv preprint arXiv:2010.09885.
.. <NAME>, et al. "Chemberta-2: Towards chemical foundation models." arXiv preprint arXiv:2209.01712 (2022).
"""
def __init__(self,
task: str,
tokenizer_path: str = 'seyonec/PubChem10M_SMILES_BPE_60k',
n_tasks: int = 1,
**kwargs):
self.n_tasks = n_tasks
tokenizer = RobertaTokenizerFast.from_pretrained(tokenizer_path)
model: PreTrainedModel
chemberta_config = RobertaConfig(vocab_size=tokenizer.vocab_size)
if task == 'mlm':
model = RobertaForMaskedLM(chemberta_config)
elif task == 'mtr':
chemberta_config.problem_type = 'regression'
chemberta_config.num_labels = n_tasks
model = RobertaForSequenceClassification(chemberta_config)
elif task == 'regression':
chemberta_config.problem_type = 'regression'
chemberta_config.num_labels = n_tasks
model = RobertaForSequenceClassification(chemberta_config)
elif task == 'classification':
if n_tasks == 1:
chemberta_config.problem_type = 'single_label_classification'
else:
chemberta_config.problem_type = 'multi_label_classification'
model = RobertaForSequenceClassification(chemberta_config)
else:
raise ValueError('invalid task specification')
super(Chemberta, self).__init__(model=model,
task=task,
tokenizer=tokenizer,
**kwargs)
<file_sep>from rdkit import Chem
import numpy as np
import logging
from typing import List, Optional
from deepchem.utils.typing import RDKitMol
from deepchem.feat.base_classes import MolecularFeaturizer
from deepchem.feat.graph_data import GraphData
logger = logging.getLogger(__name__)
ATOM_TYPES: dict = {'H': 0, 'C': 1, 'N': 2, 'O': 3, 'F': 4}
def atom_features(datapoint: RDKitMol) -> np.ndarray:
for atom in datapoint.GetAtoms():
if atom.GetSymbol() not in ATOM_TYPES.keys():
raise Exception(
"We only support 'H', 'C', 'N', 'O' and 'F' at this point for MXMNet Model"
)
return np.asarray(
[[ATOM_TYPES[atom.GetSymbol()]] for atom in datapoint.GetAtoms()],
dtype=float)
class MXMNetFeaturizer(MolecularFeaturizer):
"""This class is a featurizer for Multiplex Molecular Graph Neural Network (MXMNet) implementation.
The atomic numbers(indices) of atoms will be used later to generate randomly initialized trainable embeddings to be the input node embeddings.
This featurizer is based on
`Molecular Mechanics-Driven Graph Neural Network with Multiplex Graph for Molecular Structures <https://arxiv.org/pdf/2011.07457.pdf>`_.
Examples
--------
>>> smiles = ["C1=CC=CN=C1", "C1CCC1"]
>>> featurizer = MXMNetFeaturizer()
>>> out = featurizer.featurize(smiles)
>>> type(out[0])
<class 'deepchem.feat.graph_data.GraphData'>
>>> out[0].num_nodes
6
>>> out[0].num_node_features
1
>>> out[0].node_features.shape
(6, 1)
>>> out[0].num_edges
12
Note
----
We are not explicitly handling hydrogen atoms for now. Only 'H', 'C', 'N', 'O' and 'F' atoms are supported in the SMILES at this point for the MXMNet model.
"""
def __init__(self, is_adding_hs: bool = False):
"""
Parameters
----------
is_adding_hs: bool, default False
Whether to add Hs or not.
"""
self.is_adding_hs = is_adding_hs
super().__init__()
def _construct_bond_index(self, datapoint: RDKitMol) -> np.ndarray:
"""
Construct edge (bond) index
Parameters
----------
datapoint: RDKitMol
RDKit mol object.
Returns
-------
edge_index: np.ndarray
Edge (Bond) index
"""
# row, col = edge_index
src: List[int] = []
dest: List[int] = []
for bond in datapoint.GetBonds():
start, end = bond.GetBeginAtomIdx(), bond.GetEndAtomIdx()
src += [start, end]
dest += [end, start]
return np.asarray([src, dest], dtype=int)
def _featurize(self, datapoint: RDKitMol, **kwargs) -> GraphData:
"""Calculate molecule graph features from RDKit mol object.
Parameters
----------
datapoint: RDKitMol
RDKit mol object.
Returns
-------
graph: GraphData
A molecule graph object with features:
- node_features: np.ndarray
Node feature matrix with shape [num_nodes, num_node_features]
- edge_index: np.ndarray, dtype int
Graph connectivity in COO format with shape [2, num_edges]
- node_pos_features: np.ndarray, optional (default None)
Node position matrix with shape [num_nodes, num_dimensions].
"""
if isinstance(datapoint, Chem.rdchem.Mol):
if self.is_adding_hs:
datapoint = Chem.AddHs(datapoint)
else:
raise ValueError(
"Feature field should contain smiles for MXMNet featurizer!")
pos: List = []
pos_x: np.ndarray
pos_y: np.ndarray
pos_z: np.ndarray
# load_sdf_files returns pos as strings but user can also specify
# numpy arrays for atom coordinates
if 'pos_x' in kwargs and 'pos_y' in kwargs and 'pos_z' in kwargs:
if isinstance(kwargs['pos_x'], str):
pos_x = eval(kwargs['pos_x'])
elif isinstance(kwargs['pos_x'], np.ndarray):
pos_x = kwargs['pos_x']
if isinstance(kwargs['pos_y'], str):
pos_y = eval(kwargs['pos_y'])
elif isinstance(kwargs['pos_y'], np.ndarray):
pos_y = kwargs['pos_y']
if isinstance(kwargs['pos_z'], str):
pos_z = eval(kwargs['pos_z'])
elif isinstance(kwargs['pos_z'], np.ndarray):
pos_z = kwargs['pos_z']
for x, y, z in zip(pos_x, pos_y, pos_z):
pos.append([x, y, z])
node_pos_features: Optional[np.ndarray] = np.asarray(pos)
else:
node_pos_features = None
# get atom features
f_atoms: np.ndarray = atom_features(datapoint)
# get edge index
edge_index: np.ndarray = self._construct_bond_index(datapoint)
return GraphData(node_features=f_atoms,
edge_index=edge_index,
node_pos_features=node_pos_features)
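# ---------------------------------------------------------------------------
# Low-level usage sketch (illustrative only): featurizing a single molecule
# with explicit 3D coordinates by calling _featurize directly. In the normal
# pipeline the pos_x/pos_y/pos_z keyword arguments are supplied by the SDF
# loading utilities; the coordinate values below are arbitrary placeholders.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    mol = Chem.MolFromSmiles("CO")  # methanol heavy atoms: one C, one O
    featurizer = MXMNetFeaturizer()
    graph = featurizer._featurize(mol,
                                  pos_x=np.array([0.0, 1.4]),
                                  pos_y=np.array([0.0, 0.0]),
                                  pos_z=np.array([0.0, 0.0]))
    print(graph.num_nodes, graph.node_pos_features.shape)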
<file_sep>import pytest
@pytest.mark.torch
def test_featurize():
"""Test that RxnFeaturizer.featurize() correctly featurizes the reactions,
correctly outputs the input_ids and attention_mask.
"""
from transformers import RobertaTokenizerFast
from deepchem.feat.reaction_featurizer import RxnFeaturizer
tokenizer = RobertaTokenizerFast.from_pretrained(
"seyonec/PubChem10M_SMILES_BPE_450k")
featurizer = RxnFeaturizer(tokenizer, sep_reagent=True)
reaction = ['CCS(=O)(=O)Cl.OCCBr>CCN(CC)CC.CCOCC>CCS(=O)(=O)OCCBr']
feats = featurizer.featurize(reaction)
assert (feats.shape == (1, 2, 2, 1))
@pytest.mark.torch
def test_separation():
"""Tests the reagent separation feature after tokenizing the reactions.
The tokenized reaction is decoded before testing for equality, to make the
test more readable.
"""
from transformers import RobertaTokenizerFast
from deepchem.feat.reaction_featurizer import RxnFeaturizer
tokenizer = RobertaTokenizerFast.from_pretrained(
"seyonec/PubChem10M_SMILES_BPE_450k")
featurizer_mix = RxnFeaturizer(tokenizer, sep_reagent=False)
featurizer_sep = RxnFeaturizer(tokenizer, sep_reagent=True)
reaction = ['CCS(=O)(=O)Cl.OCCBr>CCN(CC)CC.CCOCC>CCS(=O)(=O)OCCBr']
feats_mix = featurizer_mix.featurize(reaction)
feats_sep = featurizer_sep.featurize(reaction)
# decode the source in the mixed and separated cases
mix_decoded = tokenizer.decode(feats_mix[0][0][0][0])
sep_decoded = tokenizer.decode(feats_sep[0][0][0][0])
assert mix_decoded == '<s>CCS(=O)(=O)Cl.OCCBr.CCN(CC)CC.CCOCC></s>'
assert sep_decoded == '<s>CCS(=O)(=O)Cl.OCCBr>CCN(CC)CC.CCOCC</s>'
<file_sep>"""DeepMHC model, found in https://www.biorxiv.org/content/early/2017/12/24/239236"""
from __future__ import division
from __future__ import unicode_literals
__author__ = "<NAME>"
__license__ = "MIT"
import numpy as np
import tensorflow as tf
from deepchem.data import NumpyDataset
from deepchem.models.tensorgraph.tensor_graph import TensorGraph
from deepchem.models.tensorgraph.layers import Conv1D, MaxPool1D, Dense, Dropout
from deepchem.models.tensorgraph.layers import Flatten
from deepchem.models.tensorgraph.layers import Feature, Weights, Label
from deepchem.models.tensorgraph.layers import L2Loss, WeightedError
class DeepMHC(TensorGraph):
name = ['DeepMHC']
def __init__(self,
batch_size=64,
pad_length=13,
dropout_p=0.5,
num_amino_acids=20,
mode="regression",
**kwargs):
assert mode in ["regression", "classification"]
self.mode = mode
self.batch_size = batch_size
self.dropout_p = dropout_p
self.pad_length = pad_length
self.num_amino_acids = num_amino_acids
super(DeepMHC, self).__init__(**kwargs)
self._build_graph()
def _build_graph(self):
self.one_hot_seq = Feature(
shape=(None, self.pad_length, self.num_amino_acids), dtype=tf.float32)
conv1 = Conv1D(kernel_size=2, filters=512, in_layers=[self.one_hot_seq])
maxpool1 = MaxPool1D(strides=2, padding="VALID", in_layers=[conv1])
conv2 = Conv1D(kernel_size=3, filters=512, in_layers=[maxpool1])
flattened = Flatten(in_layers=[conv2])
dense1 = Dense(
out_channels=400, in_layers=[flattened], activation_fn=tf.nn.tanh)
dropout = Dropout(dropout_prob=self.dropout_p, in_layers=[dense1])
output = Dense(out_channels=1, in_layers=[dropout], activation_fn=None)
self.add_output(output)
if self.mode == "regression":
label = Label(shape=(None, 1))
loss = L2Loss(in_layers=[label, output])
else:
raise NotImplementedError(
"Classification support not added yet. Missing details in paper.")
weights = Weights(shape=(None,))
weighted_loss = WeightedError(in_layers=[loss, weights])
self.set_loss(weighted_loss)
def default_generator(self,
dataset,
epochs=1,
predict=False,
deterministic=True,
pad_batches=True):
for epoch in range(epochs):
for (X_b, y_b, w_b,
ids_b) in dataset.iterbatches(batch_size=self.batch_size):
feed_dict = {}
feed_dict[self.one_hot_seq] = X_b
if y_b is not None:
feed_dict[self.labels[0]] = -np.log10(y_b)
if w_b is not None and not predict:
feed_dict[self.task_weights[0]] = w_b
yield feed_dict
def predict_on_batch(self, X, transformers=[], outputs=None):
dataset = NumpyDataset(X, y=None)
generator = self.default_generator(dataset, predict=True, pad_batches=False)
preds = self.predict_on_generator(generator, transformers, outputs)
preds = 10**-preds # Since we train on -log10(IC50)
return preds
def create_estimator_inputs(self, feature_columns, weight_column, features,
labels, mode):
tensors = dict()
for layer, column in zip(self.features, feature_columns):
feature_column = tf.feature_column.input_layer(features, [column])
if feature_column.dtype != column.dtype:
feature_column = tf.cast(feature_column, column.dtype)
tensors[layer] = feature_column
if weight_column is not None:
tensors[self.task_weights[0]] = tf.feature_column.input_layer(
features, [weight_column])
if labels is not None:
tensors[self.labels[0]] = labels
return tensors
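# ---------------------------------------------------------------------------
# Input-format sketch (illustrative only): the model's Feature layer expects
# each peptide as a (pad_length, num_amino_acids) one-hot matrix. The
# 20-letter alphabet ordering below is an arbitrary illustrative choice, not
# something fixed by the model itself.
# ---------------------------------------------------------------------------
def one_hot_peptide(sequence, pad_length=13, alphabet="ACDEFGHIKLMNPQRSTVWY"):
    """Encode an amino-acid string into a padded one-hot matrix."""
    encoding = np.zeros((pad_length, len(alphabet)), dtype=np.float32)
    for position, residue in enumerate(sequence[:pad_length]):
        encoding[position, alphabet.index(residue)] = 1.0
    return encoding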
<file_sep>import deepchem as dc
import numpy as np
import tensorflow as tf
from deepchem.models.losses import L2Loss
from tensorflow.keras.layers import Input, Dense
class MLP(dc.models.KerasModel):
def __init__(self, n_tasks=1, feature_dim=100, hidden_layer_size=64,
**kwargs):
self.feature_dim = feature_dim
self.hidden_layer_size = hidden_layer_size
self.n_tasks = n_tasks
model, loss, output_types = self._build_graph()
super(MLP, self).__init__(
model=model, loss=loss, output_types=output_types, **kwargs)
def _build_graph(self):
inputs = Input(dtype=tf.float32, shape=(self.feature_dim,), name="Input")
out1 = Dense(units=self.hidden_layer_size, activation='relu')(inputs)
final = Dense(units=self.n_tasks, activation='sigmoid')(out1)
outputs = [final]
output_types = ['prediction']
loss = dc.models.losses.BinaryCrossEntropy()
model = tf.keras.Model(inputs=[inputs], outputs=outputs)
return model, loss, output_types
X_1 = np.random.randn(100, 32)
y_1 = np.random.randn(100, 100)
dataset_1 = dc.data.NumpyDataset(X_1, y_1)
X_2 = np.random.randn(100, 32)
y_2 = np.random.randn(100, 10)
dataset_2 = dc.data.NumpyDataset(X_2, y_2)
source_model = MLP(feature_dim=32, hidden_layer_size=100, n_tasks=100)
source_model.fit(dataset_1, nb_epoch=100)
dest_model = MLP(feature_dim=32, hidden_layer_size=100, n_tasks=10)
dest_model.load_from_pretrained(
source_model=source_model,
assignment_map=None,
value_map=None,
model_dir=None,
include_top=False)
dest_model.fit(dataset_2, nb_epoch=100)
<file_sep>#!/usr/bin/python
#
# Copyright 2015 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utils for graph convolution models."""
import warnings
import numpy as np
import tensorflow as tf
from tensorflow.python.ops import math_ops
from google.protobuf import text_format
from tensorflow.python.platform import gfile
from tensorflow.python.training import checkpoint_state_pb2
def ParseCheckpoint(checkpoint):
"""Parse a checkpoint file.
Args:
checkpoint: Path to checkpoint. The checkpoint is either a serialized
CheckpointState proto or an actual checkpoint file.
Returns:
The path to an actual checkpoint file.
"""
warnings.warn(
"ParseCheckpoint is deprecated. "
"Will be removed in DeepChem 1.4.", DeprecationWarning)
with open(checkpoint) as f:
try:
cp = checkpoint_state_pb2.CheckpointState()
text_format.Merge(f.read(), cp)
return cp.model_checkpoint_path
except text_format.ParseError:
return checkpoint
def Mask(t, mask):
"""Apply a mask to a tensor.
If not None, mask should be a t.shape[:-1] tensor of 0,1 values.
Args:
t: Input tensor.
mask: Boolean mask with shape == t.shape[:-1]. If None, nothing happens.
Returns:
A tensor with the same shape as the input tensor.
Raises:
ValueError: If shapes do not match.
"""
warnings.warn("Mask is deprecated. "
"Will be removed in DeepChem 1.4.", DeprecationWarning)
if mask is None:
return t
if not t.get_shape()[:-1].is_compatible_with(mask.get_shape()):
raise ValueError(
'Shapes do not match: %s vs. %s' % (t.get_shape(), mask.get_shape()))
return tf.multiply(t, tf.expand_dims(mask, -1))
def Mean(tensor, reduction_indices=None, mask=None):
"""Compute mean using Sum and Mul for better GPU performance.
See tf.nn.moments for additional notes on this approach.
Args:
tensor: Input tensor.
reduction_indices: Axes to reduce across. If None, reduce to a scalar.
mask: Mask to apply to tensor.
Returns:
A tensor with the same type as the input tensor.
"""
warnings.warn("Mean is deprecated. "
"Will be removed in DeepChem 1.4.", DeprecationWarning)
return Moment(
1,
tensor,
standardize=False,
reduction_indices=reduction_indices,
mask=mask)[0]
def Variance(tensor, reduction_indices=None, mask=None):
"""Compute variance.
Args:
tensor: Input tensor.
reduction_indices: Axes to reduce across. If None, reduce to a scalar.
mask: Mask to apply to tensor.
Returns:
A tensor with the same type as the input tensor.
"""
warnings.warn("Variance is deprecated. "
"Will be removed in DeepChem 1.4.", DeprecationWarning)
return Moment(
2,
tensor,
standardize=False,
reduction_indices=reduction_indices,
mask=mask)[1]
def Skewness(tensor, reduction_indices=None):
"""Compute skewness, the third standardized moment.
Args:
tensor: Input tensor.
reduction_indices: Axes to reduce across. If None, reduce to a scalar.
Returns:
A tensor with the same type as the input tensor.
"""
warnings.warn("Skewness is deprecated. "
"Will be removed in DeepChem 1.4.", DeprecationWarning)
return Moment(
3, tensor, standardize=True, reduction_indices=reduction_indices)[1]
def Kurtosis(tensor, reduction_indices=None):
"""Compute kurtosis, the fourth standardized moment minus three.
Args:
tensor: Input tensor.
reduction_indices: Axes to reduce across. If None, reduce to a scalar.
Returns:
A tensor with the same type as the input tensor.
"""
warnings.warn("Kurtosis is deprecated. "
"Will be removed in DeepChem 1.4.", DeprecationWarning)
return Moment(
4, tensor, standardize=True, reduction_indices=reduction_indices)[1] - 3
def Moment(k, tensor, standardize=False, reduction_indices=None, mask=None):
"""Compute the k-th central moment of a tensor, possibly standardized.
Args:
k: Which moment to compute. 1 = mean, 2 = variance, etc.
tensor: Input tensor.
standardize: If True, returns the standardized moment, i.e. the central
moment divided by the n-th power of the standard deviation.
reduction_indices: Axes to reduce across. If None, reduce to a scalar.
mask: Mask to apply to tensor.
Returns:
The mean and the requested moment.
"""
warnings.warn("Moment is deprecated. "
"Will be removed in DeepChem 1.4.", DeprecationWarning)
if reduction_indices is not None:
reduction_indices = np.atleast_1d(reduction_indices).tolist()
# get the divisor
if mask is not None:
tensor = Mask(tensor, mask)
ones = tf.constant(1, dtype=tf.float32, shape=tensor.get_shape())
divisor = tf.reduce_sum(
Mask(ones, mask), axis=reduction_indices, keep_dims=True)
elif reduction_indices is None:
divisor = tf.constant(np.prod(tensor.get_shape().as_list()), tensor.dtype)
else:
divisor = 1.0
for i in range(len(tensor.get_shape())):
if i in reduction_indices:
divisor *= tensor.get_shape()[i].value
divisor = tf.constant(divisor, tensor.dtype)
# compute the requested central moment
# note that mean is a raw moment, not a central moment
mean = tf.math.divide(
tf.reduce_sum(tensor, axis=reduction_indices, keep_dims=True), divisor)
delta = tensor - mean
if mask is not None:
delta = Mask(delta, mask)
moment = tf.math.divide(
tf.reduce_sum(
math_ops.pow(delta, k), axis=reduction_indices, keep_dims=True),
divisor)
moment = tf.squeeze(moment, reduction_indices)
if standardize:
moment = tf.multiply(
moment,
math_ops.pow(
tf.rsqrt(Moment(2, tensor, reduction_indices=reduction_indices)[1]),
k))
return tf.squeeze(mean, reduction_indices), moment
def StringToOp(string):
"""Get a TensorFlow op from a string.
Args:
string: String description of an op, such as 'sum' or 'mean'.
Returns:
A TensorFlow op.
Raises:
NotImplementedError: If string does not match a supported operation.
"""
warnings.warn("StringToOp is deprecated. "
"Will be removed in DeepChem 1.4.", DeprecationWarning)
# TODO(user): median is not implemented yet in TensorFlow
op_map = {
'max': tf.reduce_max,
'mean': Mean,
'min': tf.reduce_min,
'sum': tf.reduce_sum,
'variance': Variance,
'skewness': Skewness,
'kurtosis': Kurtosis,
}
try:
return op_map[string]
except KeyError:
raise NotImplementedError('Unrecognized op: %s' % string)
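# ---------------------------------------------------------------------------
# Reference sketch (illustrative only): the quantity Moment() computes,
# written with plain numpy for comparison. It follows the same definition of
# the k-th central moment and its standardized form, without the deprecated
# TensorFlow ops above.
# ---------------------------------------------------------------------------
def numpy_moment(k, values, standardize=False):
    """Return (mean, k-th central moment) of a 1-D array."""
    values = np.asarray(values, dtype=np.float64)
    mean = values.mean()
    central = np.mean((values - mean)**k)
    if standardize:
        # Divide by sigma**k, where sigma**2 is the second central moment.
        central /= np.mean((values - mean)**2)**(k / 2.0)
    return mean, central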
<file_sep>"""
Testing singletask-to-multitask.
"""
__author__ = "<NAME>"
__copyright__ = "Copyright 2016, Stanford University"
__license__ = "MIT"
import tempfile
import shutil
import unittest
import numpy as np
import deepchem as dc
class TestSingletasktoMultitask(unittest.TestCase):
"""
Test top-level API for singletask_to_multitask ML models.
"""
# def test_singletask_to_multitask_classification(self):
# n_features = 10
# n_tasks = 17
# tasks = range(n_tasks)
# # Define train dataset
# n_train = 100
# X_train = np.random.rand(n_train, n_features)
# y_train = np.random.randint(2, size=(n_train, n_tasks))
# w_train = np.ones_like(y_train)
# ids_train = ["C"] * n_train
# train_dataset = dc.data.DiskDataset.from_numpy(
# X_train, y_train, w_train, ids_train)
# # Define test dataset
# n_test = 10
# X_test = np.random.rand(n_test, n_features)
# y_test = np.random.randint(2, size=(n_test, n_tasks))
# w_test = np.ones_like(y_test)
# ids_test = ["C"] * n_test
# test_dataset = dc.data.DiskDataset.from_numpy(
# X_test, y_test, w_test, ids_test)
# classification_metrics = [dc.metrics.Metric(dc.metrics.roc_auc_score)]
# def model_builder(model_dir):
# sklearn_model = LogisticRegression()
# return dc.models.SklearnModel(sklearn_model, model_dir)
# multitask_model = dc.models.SingletaskToMultitask(
# tasks, model_builder)
# # Fit trained model
# multitask_model.fit(train_dataset)
# multitask_model.save()
# # Eval multitask_model on train/test
# _ = multitask_model.evaluate(train_dataset, classification_metrics)
# _ = multitask_model.evaluate(test_dataset, classification_metrics)
def test_to_singletask(self):
"""Test that to_singletask works."""
num_datapoints = 100
num_features = 10
num_tasks = 10
# Generate data
X = np.random.rand(num_datapoints, num_features)
y = np.random.randint(2, size=(num_datapoints, num_tasks))
w = np.random.randint(2, size=(num_datapoints, num_tasks))
ids = np.array(["id"] * num_datapoints)
dataset = dc.data.DiskDataset.from_numpy(X, y, w, ids)
task_dirs = []
try:
for task in range(num_tasks):
task_dirs.append(tempfile.mkdtemp())
singletask_datasets = dc.models.SingletaskToMultitask._to_singletask(
dataset, task_dirs)
for task in range(num_tasks):
singletask_dataset = singletask_datasets[task]
X_task, y_task, w_task, ids_task = (singletask_dataset.X,
singletask_dataset.y,
singletask_dataset.w,
singletask_dataset.ids)
w_nonzero = w[:, task] != 0
np.testing.assert_array_equal(X_task, X[w_nonzero != 0])
np.testing.assert_array_equal(y_task.flatten(),
y[:, task][w_nonzero != 0])
np.testing.assert_array_equal(w_task.flatten(),
w[:, task][w_nonzero != 0])
np.testing.assert_array_equal(ids_task, ids[w_nonzero != 0])
finally:
# Cleanup
for task_dir in task_dirs:
shutil.rmtree(task_dir)
<file_sep># This example shows how to load data from a SDF file into DeepChem. The data in this SDF file is stored in field "LogP(RRCK)"
import deepchem as dc
featurizer = dc.feat.CircularFingerprint(size=16)
loader = dc.data.SDFLoader(["LogP(RRCK)"], featurizer=featurizer, sanitize=True)
dataset = loader.featurize("membrane_permeability.sdf")
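# A possible next step (illustrative only): split the featurized dataset into
# train and test subsets with one of DeepChem's splitters before fitting a
# model. The 80/20 split below is just an example choice.
splitter = dc.splits.RandomSplitter()
train_dataset, test_dataset = splitter.train_test_split(dataset, frac_train=0.8)
print("train size:", len(train_dataset), "test size:", len(test_dataset))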
<file_sep>def test_fourier_encode_dist():
import numpy as np
import torch
from deepchem.utils.graph_utils import fourier_encode_dist
x = torch.tensor([1.0, 2.0, 3.0])
num_encodings = 4
include_self = True
encoded_x = fourier_encode_dist(x,
num_encodings=num_encodings,
include_self=include_self)
assert encoded_x.shape == (x.shape[0],
num_encodings * 2 + int(include_self))
scales = 2**np.arange(num_encodings)
x_scaled = x.unsqueeze(-1) / scales
x_sin = torch.sin(x_scaled)
x_cos = torch.cos(x_scaled)
x_expected = torch.cat([x_sin, x_cos], dim=-1)
if include_self:
x_expected = torch.cat((x_expected, x.unsqueeze(-1)), dim=-1)
assert torch.allclose(encoded_x.float(), x_expected.float(), atol=1e-5)
<file_sep>"""Manages Placeholders for Graph convolution networks.
"""
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
__author__ = "<NAME> and <NAME>"
__copyright__ = "Copyright 2016, Stanford University"
__license__ = "MIT"
import warnings
import numpy as np
import tensorflow as tf
from deepchem.nn.copy import Input
from deepchem.feat.mol_graphs import ConvMol
def merge_two_dicts(x, y):
z = x.copy()
z.update(y)
return z
def merge_dicts(l):
"""Convenience function to merge list of dictionaries."""
merged = {}
for dict in l:
merged = merge_two_dicts(merged, dict)
return merged
class GraphTopology(object):
"""Manages placeholders associated with batch of graphs and their topology"""
def __init__(self, n_feat, name='topology', max_deg=10, min_deg=0):
"""
Note that batch size is not specified in a GraphTopology object. A batch
of molecules must be combined into a disconnected graph and fed to topology
directly to handle batches.
Parameters
----------
n_feat: int
Number of features per atom.
name: str, optional
Name of this manager.
max_deg: int, optional
Maximum #bonds for atoms in molecules.
min_deg: int, optional
Minimum #bonds for atoms in molecules.
"""
warnings.warn("GraphTopology is deprecated. "
"Will be removed in DeepChem 1.4.", DeprecationWarning)
#self.n_atoms = n_atoms
self.n_feat = n_feat
self.name = name
self.max_deg = max_deg
self.min_deg = min_deg
self.atom_features_placeholder = tensor = tf.placeholder(
dtype='float32',
shape=(None, self.n_feat),
name=self.name + '_atom_features')
self.deg_adj_lists_placeholders = [
tf.placeholder(
dtype='int32',
shape=(None, deg),
name=self.name + '_deg_adj' + str(deg))
for deg in range(1, self.max_deg + 1)
]
self.deg_slice_placeholder = tf.placeholder(
dtype='int32',
shape=(self.max_deg - self.min_deg + 1, 2),
name=self.name + '_deg_slice')
self.membership_placeholder = tf.placeholder(
dtype='int32', shape=(None,), name=self.name + '_membership')
# Define the list of tensors to be used as topology
self.topology = [self.deg_slice_placeholder, self.membership_placeholder]
self.topology += self.deg_adj_lists_placeholders
self.inputs = [self.atom_features_placeholder]
self.inputs += self.topology
def get_input_placeholders(self):
"""All placeholders.
Contains atom_features placeholder and topology placeholders.
"""
return self.inputs
def get_topology_placeholders(self):
"""Returns topology placeholders
Consists of deg_slice_placeholder, membership_placeholder, and the
deg_adj_list_placeholders.
"""
return self.topology
def get_atom_features_placeholder(self):
return self.atom_features_placeholder
def get_deg_adjacency_lists_placeholders(self):
return self.deg_adj_lists_placeholders
def get_deg_slice_placeholder(self):
return self.deg_slice_placeholder
def get_membership_placeholder(self):
return self.membership_placeholder
def batch_to_feed_dict(self, batch):
"""Converts the current batch of mol_graphs into tensorflow feed_dict.
Assigns the graph information in array of ConvMol objects to the
placeholders tensors
params
------
batch : np.ndarray
Array of ConvMol objects
returns
-------
feed_dict : dict
Can be merged with other feed_dicts for input into tensorflow
"""
# Merge mol conv objects
batch = ConvMol.agglomerate_mols(batch)
atoms = batch.get_atom_features()
deg_adj_lists = [
batch.deg_adj_lists[deg] for deg in range(1, self.max_deg + 1)
]
# Generate dicts
deg_adj_dict = dict(
list(zip(self.deg_adj_lists_placeholders, deg_adj_lists)))
atoms_dict = {
self.atom_features_placeholder: atoms,
self.deg_slice_placeholder: batch.deg_slice,
self.membership_placeholder: batch.membership
}
return merge_dicts([atoms_dict, deg_adj_dict])
class DTNNGraphTopology(GraphTopology):
"""Manages placeholders associated with batch of graphs and their topology"""
def __init__(self,
n_distance=100,
distance_min=-1.,
distance_max=18.,
name='DTNN_topology'):
"""
Parameters
----------
n_distance: int, optional
granularity of distance matrix
step size will be (distance_max-distance_min)/n_distance
distance_min: float, optional
minimum distance of atom pairs, default = -1 Angstrom
distance_max: float, optional
maximum distance of atom pairs, default = 18 Angstrom
"""
warnings.warn("DTNNGraphTopology is deprecated. "
"Will be removed in DeepChem 1.4.", DeprecationWarning)
self.name = name
self.n_distance = n_distance
self.distance_min = distance_min
self.distance_max = distance_max
self.step_size = (distance_max - distance_min) / n_distance
self.steps = np.array(
[distance_min + i * self.step_size for i in range(n_distance)])
self.steps = np.expand_dims(self.steps, 0)
self.atom_number_placeholder = tf.placeholder(
dtype='int32', shape=(None,), name=self.name + '_atom_number')
self.distance_placeholder = tf.placeholder(
dtype='float32',
shape=(None, self.n_distance),
name=self.name + '_distance')
self.atom_membership_placeholder = tf.placeholder(
dtype='int32', shape=(None,), name=self.name + '_atom_membership')
self.distance_membership_i_placeholder = tf.placeholder(
dtype='int32', shape=(None,), name=self.name + '_distance_membership_i')
self.distance_membership_j_placeholder = tf.placeholder(
dtype='int32', shape=(None,), name=self.name + '_distance_membership_j')
# Define the list of tensors to be used as topology
self.topology = [
self.distance_placeholder,
self.atom_membership_placeholder,
self.distance_membership_i_placeholder,
self.distance_membership_j_placeholder,
]
self.inputs = [self.atom_number_placeholder]
self.inputs += self.topology
def get_atom_number_placeholder(self):
return self.atom_number_placeholder
def get_distance_placeholder(self):
return self.distance_placeholder
def batch_to_feed_dict(self, batch):
"""Converts the current batch of Coulomb Matrix into tensorflow feed_dict.
Assigns the atom number and distance info to the
placeholders tensors
params
------
batch : np.ndarray
Array of Coulomb Matrix
returns
-------
feed_dict : dict
Can be merged with other feed_dicts for input into tensorflow
"""
# Extract atom numbers
num_atoms = list(map(sum, batch.astype(bool)[:, :, 0]))
atom_number = [
np.round(
np.power(2 * np.diag(batch[i, :num_atoms[i], :num_atoms[i]]), 1 /
2.4)).astype(int) for i in range(len(num_atoms))
]
distance = []
atom_membership = []
distance_membership_i = []
distance_membership_j = []
start = 0
for im, molecule in enumerate(atom_number):
distance_matrix = np.outer(
molecule, molecule) / batch[im, :num_atoms[im], :num_atoms[im]]
np.fill_diagonal(distance_matrix, -100)
distance.append(np.expand_dims(distance_matrix.flatten(), 1))
atom_membership.append([im] * num_atoms[im])
membership = np.array([np.arange(num_atoms[im])] * num_atoms[im])
membership_i = membership.flatten(order='F')
membership_j = membership.flatten()
distance_membership_i.append(membership_i + start)
distance_membership_j.append(membership_j + start)
start = start + num_atoms[im]
atom_number = np.concatenate(atom_number)
distance = np.concatenate(distance, 0)
distance = np.exp(-np.square(distance - self.steps) /
(2 * self.step_size**2))
distance_membership_i = np.concatenate(distance_membership_i)
distance_membership_j = np.concatenate(distance_membership_j)
atom_membership = np.concatenate(atom_membership)
# Generate dicts
dict_DTNN = {
self.atom_number_placeholder: atom_number,
self.distance_placeholder: distance,
self.atom_membership_placeholder: atom_membership,
self.distance_membership_i_placeholder: distance_membership_i,
self.distance_membership_j_placeholder: distance_membership_j
}
return dict_DTNN
class DAGGraphTopology(GraphTopology):
"""GraphTopology for DAG models
"""
def __init__(self, n_atom_feat=75, max_atoms=50, name='topology'):
"""
Parameters
----------
n_atom_feat: int, optional
Number of features per atom.
max_atoms: int, optional
Maximum number of atoms in a molecule, should be defined based on dataset
"""
warnings.warn("DAGGraphTopology is deprecated. "
"Will be removed in DeepChem 1.4.", DeprecationWarning)
self.n_atom_feat = n_atom_feat
self.max_atoms = max_atoms
self.name = name
self.atom_features_placeholder = tf.placeholder(
dtype='float32',
shape=(None, self.n_atom_feat),
name=self.name + '_atom_features')
self.parents_placeholder = tf.placeholder(
dtype='int32',
shape=(None, self.max_atoms, self.max_atoms),
# molecule * atom(graph) => step => features
name=self.name + '_parents')
self.calculation_orders_placeholder = tf.placeholder(
dtype='int32',
shape=(None, self.max_atoms),
# molecule * atom(graph) => step
name=self.name + '_orders')
self.calculation_masks_placeholder = tf.placeholder(
dtype='bool',
shape=(None, self.max_atoms),
# molecule * atom(graph) => step
name=self.name + '_masks')
self.membership_placeholder = tf.placeholder(
dtype='int32', shape=(None,), name=self.name + '_membership')
self.n_atoms_placeholder = tf.placeholder(
dtype='int32', shape=(), name=self.name + '_n_atoms')
# Define the list of tensors to be used as topology
self.topology = [
self.parents_placeholder, self.calculation_orders_placeholder,
self.calculation_masks_placeholder, self.membership_placeholder,
self.n_atoms_placeholder
]
self.inputs = [self.atom_features_placeholder]
self.inputs += self.topology
def get_parents_placeholder(self):
return self.parents_placeholder
def get_calculation_orders_placeholder(self):
return self.calculation_orders_placeholder
def batch_to_feed_dict(self, batch):
"""Converts the current batch of mol_graphs into tensorflow feed_dict.
Assigns the graph information in array of ConvMol objects to the
placeholders tensors for DAG models
params
------
batch : np.ndarray
Array of ConvMol objects
returns
-------
feed_dict : dict
Can be merged with other feed_dicts for input into tensorflow
"""
atoms_per_mol = [mol.get_num_atoms() for mol in batch]
n_atoms = sum(atoms_per_mol)
start_index = [0] + list(np.cumsum(atoms_per_mol)[:-1])
atoms_all = []
# calculation orders for a batch of molecules
parents_all = []
calculation_orders = []
calculation_masks = []
membership = []
for idm, mol in enumerate(batch):
# padding atom features vector of each molecule with 0
atoms_all.append(mol.get_atom_features())
parents = mol.parents
parents_all.extend(parents)
calculation_index = np.array(parents)[:, :, 0]
mask = np.array(calculation_index - self.max_atoms, dtype=bool)
calculation_orders.append(calculation_index + start_index[idm])
calculation_masks.append(mask)
membership.extend([idm] * atoms_per_mol[idm])
atoms_all = np.concatenate(atoms_all, axis=0)
parents_all = np.stack(parents_all, axis=0)
calculation_orders = np.concatenate(calculation_orders, axis=0)
calculation_masks = np.concatenate(calculation_masks, axis=0)
membership = np.array(membership)
atoms_dict = {
self.atom_features_placeholder: atoms_all,
self.parents_placeholder: parents_all,
self.calculation_orders_placeholder: calculation_orders,
self.calculation_masks_placeholder: calculation_masks,
self.membership_placeholder: membership,
self.n_atoms_placeholder: n_atoms
}
return atoms_dict
class WeaveGraphTopology(GraphTopology):
"""Manages placeholders associated with batch of graphs and their topology"""
def __init__(self,
max_atoms=50,
n_atom_feat=75,
n_pair_feat=14,
name='Weave_topology'):
"""
Parameters
----------
max_atoms: int, optional
maximum number of atoms in a molecule
n_atom_feat: int, optional
number of basic features of each atom
n_pair_feat: int, optional
number of basic features of each pair
"""
warnings.warn("WeaveGraphTopology is deprecated. "
"Will be removed in DeepChem 1.4.", DeprecationWarning)
#self.n_atoms = n_atoms
self.name = name
self.max_atoms = max_atoms
self.n_atom_feat = n_atom_feat
self.n_pair_feat = n_pair_feat
self.atom_features_placeholder = tf.placeholder(
dtype='float32',
shape=(None, self.max_atoms, self.n_atom_feat),
name=self.name + '_atom_features')
self.atom_mask_placeholder = tf.placeholder(
dtype='float32',
shape=(None, self.max_atoms),
name=self.name + '_atom_mask')
self.pair_features_placeholder = tf.placeholder(
dtype='float32',
shape=(None, self.max_atoms, self.max_atoms, self.n_pair_feat),
name=self.name + '_pair_features')
self.pair_mask_placeholder = tf.placeholder(
dtype='float32',
shape=(None, self.max_atoms, self.max_atoms),
name=self.name + '_pair_mask')
self.membership_placeholder = tf.placeholder(
dtype='int32', shape=(None,), name=self.name + '_membership')
# Define the list of tensors to be used as topology
self.topology = [self.atom_mask_placeholder, self.pair_mask_placeholder]
self.inputs = [self.atom_features_placeholder]
self.inputs += self.topology
def get_pair_features_placeholder(self):
return self.pair_features_placeholder
def batch_to_feed_dict(self, batch):
"""Converts the current batch of WeaveMol into tensorflow feed_dict.
Assigns the atom features and pair features to the
placeholders tensors
params
------
batch : np.ndarray
Array of WeaveMol
returns
-------
feed_dict : dict
Can be merged with other feed_dicts for input into tensorflow
"""
# Extract atom numbers
atom_feat = []
pair_feat = []
atom_mask = []
pair_mask = []
membership = []
max_atoms = self.max_atoms
for im, mol in enumerate(batch):
n_atoms = mol.get_num_atoms()
atom_feat.append(
np.pad(mol.get_atom_features(), ((0, max_atoms - n_atoms), (0, 0)),
'constant'))
atom_mask.append(
np.array([1] * n_atoms + [0] * (max_atoms - n_atoms), dtype=float))
pair_feat.append(
np.pad(mol.get_pair_features(), ((0, max_atoms - n_atoms), (
0, max_atoms - n_atoms), (0, 0)), 'constant'))
pair_mask.append(np.array([[1]*n_atoms + [0]*(max_atoms-n_atoms)]*n_atoms + \
[[0]*max_atoms]*(max_atoms-n_atoms), dtype=float))
membership.extend([im] * n_atoms)
atom_feat = np.stack(atom_feat)
pair_feat = np.stack(pair_feat)
atom_mask = np.stack(atom_mask)
pair_mask = np.stack(pair_mask)
membership = np.array(membership)
# Generate dicts
dict_DTNN = {
self.atom_features_placeholder: atom_feat,
self.pair_features_placeholder: pair_feat,
self.atom_mask_placeholder: atom_mask,
self.pair_mask_placeholder: pair_mask,
self.membership_placeholder: membership
}
return dict_DTNN
class AlternateWeaveGraphTopology(GraphTopology):
"""Manages placeholders associated with batch of graphs and their topology"""
def __init__(self,
batch_size,
max_atoms=50,
n_atom_feat=75,
n_pair_feat=14,
name='Weave_topology'):
"""
Parameters
----------
batch_size: int
number of molecules in a batch
max_atoms: int, optional
maximum number of atoms in a molecule
n_atom_feat: int, optional
number of basic features of each atom
n_pair_feat: int, optional
number of basic features of each pair
"""
warnings.warn("AlternateWeaveGraphTopology is deprecated. "
"Will be removed in DeepChem 1.4.", DeprecationWarning)
#self.n_atoms = n_atoms
self.name = name
self.batch_size = batch_size
self.max_atoms = max_atoms * batch_size
self.n_atom_feat = n_atom_feat
self.n_pair_feat = n_pair_feat
self.atom_features_placeholder = tf.placeholder(
dtype='float32',
shape=(None, self.n_atom_feat),
name=self.name + '_atom_features')
self.pair_features_placeholder = tf.placeholder(
dtype='float32',
shape=(None, self.n_pair_feat),
name=self.name + '_pair_features')
self.pair_split_placeholder = tf.placeholder(
dtype='int32', shape=(None,), name=self.name + '_pair_split')
self.atom_split_placeholder = tf.placeholder(
dtype='int32', shape=(None,), name=self.name + '_atom_split')
self.atom_to_pair_placeholder = tf.placeholder(
dtype='int32', shape=(None, 2), name=self.name + '_atom_to_pair')
# Define the list of tensors to be used as topology
self.topology = [
self.pair_split_placeholder, self.atom_split_placeholder,
self.atom_to_pair_placeholder
]
self.inputs = [self.atom_features_placeholder]
self.inputs += self.topology
def get_pair_features_placeholder(self):
return self.pair_features_placeholder
def batch_to_feed_dict(self, batch):
"""Converts the current batch of WeaveMol into tensorflow feed_dict.
Assigns the atom features and pair features to the
placeholders tensors
params
------
batch : np.ndarray
Array of WeaveMol
returns
-------
feed_dict : dict
Can be merged with other feed_dicts for input into tensorflow
"""
# Extract atom numbers
atom_feat = []
pair_feat = []
atom_split = []
atom_to_pair = []
pair_split = []
max_atoms = self.max_atoms
start = 0
for im, mol in enumerate(batch):
n_atoms = mol.get_num_atoms()
# number of atoms in each molecule
atom_split.extend([im] * n_atoms)
# index of pair features
C0, C1 = np.meshgrid(np.arange(n_atoms), np.arange(n_atoms))
atom_to_pair.append(
np.transpose(np.array([C1.flatten() + start,
C0.flatten() + start])))
# number of pairs for each atom
pair_split.extend(C1.flatten() + start)
start = start + n_atoms
# atom features
atom_feat.append(mol.get_atom_features())
# pair features
pair_feat.append(
np.reshape(mol.get_pair_features(), (n_atoms * n_atoms,
self.n_pair_feat)))
atom_feat = np.concatenate(atom_feat, axis=0)
pair_feat = np.concatenate(pair_feat, axis=0)
atom_to_pair = np.concatenate(atom_to_pair, axis=0)
atom_split = np.array(atom_split)
# Generate dicts
dict_DTNN = {
self.atom_features_placeholder: atom_feat,
self.pair_features_placeholder: pair_feat,
self.pair_split_placeholder: pair_split,
self.atom_split_placeholder: atom_split,
self.atom_to_pair_placeholder: atom_to_pair
}
return dict_DTNN
<file_sep>try:
from deepchem.data.data_loader import DFTYamlLoader
from deepchem.models.dft.scf import XCNNSCF
from deepchem.models.dft.nnxc import HybridXC
import torch
from deepchem.models.losses import DensityProfileLoss
has_dqc = True
except ModuleNotFoundError:
has_dqc = False
import numpy as np
import pytest
@pytest.mark.dqc
def test_densHF():
inputs = 'deepchem/models/tests/assets/test_HFdp.yaml'
data = DFTYamlLoader()
dataset = data.create_dataset(inputs)
labels = torch.as_tensor(dataset.y)
nnmodel = (torch.nn.Sequential(torch.nn.Linear(2, 10), torch.nn.Softplus(),
torch.nn.Linear(10, 1, bias=False))).to(
torch.double)
hybridxc = HybridXC("lda_x", nnmodel, aweight0=0.0)
entry = dataset.X[0]
grid = (dataset.X[0]).get_integration_grid()
volume = grid.get_dvolume()
evl = XCNNSCF(hybridxc, entry)
qcs = []
for system in entry.get_systems():
qcs.append(evl.run(system))
val = entry.get_val(qcs)
output = torch.as_tensor(val)
loss = ((DensityProfileLoss()._create_pytorch_loss(volume))(
output, labels)).detach().numpy()
expected = np.array(0.0068712)
assert np.allclose(loss, expected)
<file_sep>#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Sat Jul 29 23:49:02 2017
@author: zqwu
"""
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
import numpy as np
np.random.seed(123)
import tensorflow as tf
tf.random.set_seed(123)
import deepchem as dc
# Load Delaney dataset
delaney_tasks, delaney_datasets, transformers = dc.molnet.load_delaney(
featurizer='Weave', split='index')
train_dataset, valid_dataset, test_dataset = delaney_datasets
# Fit models
metric = dc.metrics.Metric(dc.metrics.pearson_r2_score, np.mean)
n_atom_feat = 75
n_pair_feat = 14
# Batch size of models
batch_size = 64
model = dc.models.MPNNModel(
len(delaney_tasks),
n_atom_feat=n_atom_feat,
n_pair_feat=n_pair_feat,
T=3,
M=5,
batch_size=batch_size,
learning_rate=0.0001,
use_queue=False,
mode="regression")
# Fit trained model
model.fit(train_dataset, nb_epoch=50, checkpoint_interval=100)
print("Evaluating model")
train_scores = model.evaluate(train_dataset, [metric], transformers)
valid_scores = model.evaluate(valid_dataset, [metric], transformers)
print("Train scores")
print(train_scores)
print("Validation scores")
print(valid_scores)
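# Optional follow-up (illustrative): evaluate on the held-out test split as
# well, mirroring the train/validation evaluation above.
test_scores = model.evaluate(test_dataset, [metric], transformers)
print("Test scores")
print(test_scores)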
<file_sep>import logging
from deepchem.models import KerasModel
from deepchem.models.layers import AtomicConvolution
from deepchem.models.losses import L2Loss
from tensorflow.keras.layers import Input, Dense, Reshape, Dropout, Activation, Lambda, Flatten, Concatenate
import numpy as np
import tensorflow as tf
import itertools
from collections.abc import Sequence as SequenceCollection
from typing import Sequence
from deepchem.utils.typing import ActivationFn, OneOrMany
from deepchem.utils.data_utils import load_from_disk, save_to_disk
logger = logging.getLogger(__name__)
class AtomicConvModel(KerasModel):
"""Implements an Atomic Convolution Model.
Implements the atomic convolutional networks as introduced in
<NAME>, et al. "Atomic convolutional networks for predicting protein-ligand binding affinity." arXiv preprint arXiv:1703.10603 (2017).
The atomic convolutional networks function as a variant of
graph convolutions. The difference is that the "graph" here is
the nearest neighbors graph in 3D space. The AtomicConvModel
leverages these connections in 3D space to train models that
learn to predict the energetic state of a complex starting from its
spatial geometry.
"""
def __init__(
self,
n_tasks: int,
frag1_num_atoms: int = 70,
frag2_num_atoms: int = 634,
complex_num_atoms: int = 701,
max_num_neighbors: int = 12,
batch_size: int = 24,
atom_types: Sequence[float] = [
6, 7., 8., 9., 11., 12., 15., 16., 17., 20., 25., 30., 35., 53.,
-1.
],
radial: Sequence[Sequence[float]] = [[
1.5, 2.0, 2.5, 3.0, 3.5, 4.0, 4.5, 5.0, 5.5, 6.0, 6.5, 7.0, 7.5,
8.0, 8.5, 9.0, 9.5, 10.0, 10.5, 11.0, 11.5, 12.0
], [0.0, 4.0, 8.0], [0.4]],
# layer_sizes=[32, 32, 16],
layer_sizes=[100],
weight_init_stddevs: OneOrMany[float] = 0.02,
bias_init_consts: OneOrMany[float] = 1.0,
weight_decay_penalty: float = 0.0,
weight_decay_penalty_type: str = "l2",
dropouts: OneOrMany[float] = 0.5,
activation_fns: OneOrMany[ActivationFn] = tf.nn.relu,
residual: bool = False,
learning_rate=0.001,
**kwargs) -> None:
"""
Parameters
----------
n_tasks: int
number of tasks
frag1_num_atoms: int
Number of atoms in first fragment
frag2_num_atoms: int
Number of atoms in second fragment
max_num_neighbors: int
Maximum number of neighbors possible for an atom. Recall neighbors
are spatial neighbors.
atom_types: list
List of atoms recognized by model. Atoms are indicated by their
atomic numbers.
radial: list
Radial parameters used in the atomic convolution transformation.
layer_sizes: list
the size of each dense layer in the network. The length of
this list determines the number of layers.
weight_init_stddevs: list or float
the standard deviation of the distribution to use for weight
initialization of each layer. The length of this list should
equal len(layer_sizes). Alternatively this may be a single
value instead of a list, in which case the same value is used
for every layer.
bias_init_consts: list or float
the value to initialize the biases in each layer to. The
length of this list should equal len(layer_sizes).
Alternatively this may be a single value instead of a list, in
which case the same value is used for every layer.
weight_decay_penalty: float
the magnitude of the weight decay penalty to use
weight_decay_penalty_type: str
the type of penalty to use for weight decay, either 'l1' or 'l2'
dropouts: list or float
the dropout probability to use for each layer. The length of this list should equal len(layer_sizes).
Alternatively this may be a single value instead of a list, in which case the same value is used for every layer.
activation_fns: list or object
the Tensorflow activation function to apply to each layer. The length of this list should equal
len(layer_sizes). Alternatively this may be a single value instead of a list, in which case the
same value is used for every layer.
residual: bool
if True, the model will be composed of pre-activation residual blocks instead
of a simple stack of dense layers.
learning_rate: float
Learning rate for the model.
"""
self.complex_num_atoms = complex_num_atoms
self.frag1_num_atoms = frag1_num_atoms
self.frag2_num_atoms = frag2_num_atoms
self.max_num_neighbors = max_num_neighbors
self.batch_size = batch_size
self.atom_types = atom_types
rp = [x for x in itertools.product(*radial)]
frag1_X = Input(shape=(frag1_num_atoms, 3))
frag1_nbrs = Input(shape=(frag1_num_atoms, max_num_neighbors))
frag1_nbrs_z = Input(shape=(frag1_num_atoms, max_num_neighbors))
frag1_z = Input(shape=(frag1_num_atoms,))
frag2_X = Input(shape=(frag2_num_atoms, 3))
frag2_nbrs = Input(shape=(frag2_num_atoms, max_num_neighbors))
frag2_nbrs_z = Input(shape=(frag2_num_atoms, max_num_neighbors))
frag2_z = Input(shape=(frag2_num_atoms,))
complex_X = Input(shape=(complex_num_atoms, 3))
complex_nbrs = Input(shape=(complex_num_atoms, max_num_neighbors))
complex_nbrs_z = Input(shape=(complex_num_atoms, max_num_neighbors))
complex_z = Input(shape=(complex_num_atoms,))
self._frag1_conv = AtomicConvolution(
atom_types=self.atom_types, radial_params=rp,
boxsize=None)([frag1_X, frag1_nbrs, frag1_nbrs_z])
flattened1 = Flatten()(self._frag1_conv)
self._frag2_conv = AtomicConvolution(
atom_types=self.atom_types, radial_params=rp,
boxsize=None)([frag2_X, frag2_nbrs, frag2_nbrs_z])
flattened2 = Flatten()(self._frag2_conv)
self._complex_conv = AtomicConvolution(
atom_types=self.atom_types, radial_params=rp,
boxsize=None)([complex_X, complex_nbrs, complex_nbrs_z])
flattened3 = Flatten()(self._complex_conv)
concat = Concatenate()([flattened1, flattened2, flattened3])
n_layers = len(layer_sizes)
if not isinstance(weight_init_stddevs, SequenceCollection):
weight_init_stddevs = [weight_init_stddevs] * n_layers
if not isinstance(bias_init_consts, SequenceCollection):
bias_init_consts = [bias_init_consts] * n_layers
if not isinstance(dropouts, SequenceCollection):
dropouts = [dropouts] * n_layers
if not isinstance(activation_fns, SequenceCollection):
activation_fns = [activation_fns] * n_layers
if weight_decay_penalty != 0.0:
if weight_decay_penalty_type == 'l1':
regularizer = tf.keras.regularizers.l1(weight_decay_penalty)
else:
regularizer = tf.keras.regularizers.l2(weight_decay_penalty)
else:
regularizer = None
prev_layer = concat
prev_size = concat.shape[0]
next_activation = None
# Add the dense layers
for size, weight_stddev, bias_const, dropout, activation_fn in zip(
layer_sizes, weight_init_stddevs, bias_init_consts, dropouts,
activation_fns):
layer = prev_layer
if next_activation is not None:
layer = Activation(next_activation)(layer)
layer = Dense(
size,
kernel_initializer=tf.keras.initializers.TruncatedNormal(
stddev=weight_stddev),
bias_initializer=tf.constant_initializer(value=bias_const),
kernel_regularizer=regularizer)(layer)
if dropout > 0.0:
layer = Dropout(rate=dropout)(layer)
if residual and prev_size == size:
prev_layer = Lambda(lambda x: x[0] + x[1])([prev_layer, layer])
else:
prev_layer = layer
prev_size = size
next_activation = activation_fn
if next_activation is not None:
prev_layer = Activation(activation_fn)(prev_layer)
self.neural_fingerprint = prev_layer
output = Reshape(
(n_tasks,
1))(Dense(n_tasks,
kernel_initializer=tf.keras.initializers.TruncatedNormal(
stddev=weight_init_stddevs[-1]),
bias_initializer=tf.constant_initializer(
value=bias_init_consts[-1]))(prev_layer))
model = tf.keras.Model(inputs=[
frag1_X, frag1_nbrs, frag1_nbrs_z, frag1_z, frag2_X, frag2_nbrs,
frag2_nbrs_z, frag2_z, complex_X, complex_nbrs, complex_nbrs_z,
complex_z
],
outputs=output)
super(AtomicConvModel, self).__init__(model,
L2Loss(),
batch_size=batch_size,
**kwargs)
def default_generator(self,
dataset,
epochs=1,
mode='fit',
deterministic=True,
pad_batches=True):
batch_size = self.batch_size
def replace_atom_types(z):
np.putmask(z, np.isin(z, list(self.atom_types), invert=True), -1)
return z
for epoch in range(epochs):
for ind, (F_b, y_b, w_b, ids_b) in enumerate(
dataset.iterbatches(batch_size,
deterministic=True,
pad_batches=pad_batches)):
N = self.complex_num_atoms
N_1 = self.frag1_num_atoms
N_2 = self.frag2_num_atoms
M = self.max_num_neighbors
batch_size = F_b.shape[0]
num_features = F_b[0][0].shape[1]
frag1_X_b = np.zeros((batch_size, N_1, num_features))
for i in range(batch_size):
frag1_X_b[i] = F_b[i][0]
frag2_X_b = np.zeros((batch_size, N_2, num_features))
for i in range(batch_size):
frag2_X_b[i] = F_b[i][3]
complex_X_b = np.zeros((batch_size, N, num_features))
for i in range(batch_size):
complex_X_b[i] = F_b[i][6]
frag1_Nbrs = np.zeros((batch_size, N_1, M))
frag1_Z_b = np.zeros((batch_size, N_1))
for i in range(batch_size):
z = replace_atom_types(F_b[i][2])
frag1_Z_b[i] = z
frag1_Nbrs_Z = np.zeros((batch_size, N_1, M))
for atom in range(N_1):
for i in range(batch_size):
atom_nbrs = F_b[i][1].get(atom, "")
frag1_Nbrs[i,
atom, :len(atom_nbrs)] = np.array(atom_nbrs)
for j, atom_j in enumerate(atom_nbrs):
frag1_Nbrs_Z[i, atom, j] = frag1_Z_b[i, atom_j]
frag2_Nbrs = np.zeros((batch_size, N_2, M))
frag2_Z_b = np.zeros((batch_size, N_2))
for i in range(batch_size):
z = replace_atom_types(F_b[i][5])
frag2_Z_b[i] = z
frag2_Nbrs_Z = np.zeros((batch_size, N_2, M))
for atom in range(N_2):
for i in range(batch_size):
atom_nbrs = F_b[i][4].get(atom, "")
frag2_Nbrs[i,
atom, :len(atom_nbrs)] = np.array(atom_nbrs)
for j, atom_j in enumerate(atom_nbrs):
frag2_Nbrs_Z[i, atom, j] = frag2_Z_b[i, atom_j]
complex_Nbrs = np.zeros((batch_size, N, M))
complex_Z_b = np.zeros((batch_size, N))
for i in range(batch_size):
z = replace_atom_types(F_b[i][8])
complex_Z_b[i] = z
complex_Nbrs_Z = np.zeros((batch_size, N, M))
for atom in range(N):
for i in range(batch_size):
atom_nbrs = F_b[i][7].get(atom, "")
complex_Nbrs[i, atom, :len(atom_nbrs)] = np.array(
atom_nbrs)
for j, atom_j in enumerate(atom_nbrs):
complex_Nbrs_Z[i, atom, j] = complex_Z_b[i, atom_j]
inputs = [
frag1_X_b, frag1_Nbrs, frag1_Nbrs_Z, frag1_Z_b, frag2_X_b,
frag2_Nbrs, frag2_Nbrs_Z, frag2_Z_b, complex_X_b,
complex_Nbrs, complex_Nbrs_Z, complex_Z_b
]
y_b = np.reshape(y_b, newshape=(batch_size, 1))
yield (inputs, [y_b], [w_b])
def save(self):
"""Saves model to disk using joblib."""
save_to_disk(self.model, self.get_model_filename(self.model_dir))
def reload(self):
"""Loads model from joblib file on disk."""
self.model = load_from_disk(self.get_model_filename(self.model_dir))
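# ---------------------------------------------------------------------------
# Data-layout sketch (illustrative only): judging from the indexing in
# default_generator above, each entry of dataset.X is expected to be a
# 9-element sequence
#   (frag1_coords, frag1_neighbor_dict, frag1_Z,
#    frag2_coords, frag2_neighbor_dict, frag2_Z,
#    complex_coords, complex_neighbor_dict, complex_Z).
# The helper below builds one synthetic entry in that layout for reference;
# the atom counts and neighbor lists are arbitrary.
# ---------------------------------------------------------------------------
def synthetic_acnn_entry(n_frag1=4, n_frag2=4, n_complex=8):
    """Build one fake featurized complex matching the expected layout."""

    def fragment(n):
        coords = np.random.rand(n, 3)  # Cartesian coordinates
        neighbors = {i: [(i + 1) % n] for i in range(n)}  # neighbor-list dict
        z = np.full(n, 6.0)  # atomic numbers (all carbon here)
        return coords, neighbors, z

    return fragment(n_frag1) + fragment(n_frag2) + fragment(n_complex)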
<file_sep>import gensim
from gensim import models
from rdkit import Chem
from rdkit.Chem import rdMolDescriptors
import numpy as np
def main():
model = models.KeyedVectors.load_word2vec_format("vec.txt")
embeddings = list()
# Using canonical smiles for glycine, as in original research paper
mol = Chem.MolFromSmiles("C(C(=O)O)N")
try:
info = {}
rdMolDescriptors.GetMorganFingerprint(mol, 0, bitInfo=info)
keys = info.keys()
keys_list = list(keys)
totalvec = np.zeros(200)
for k in keys_list:
wordvec = model.wv[str(k)]
totalvec = np.add(totalvec, wordvec)
embeddings.append(totalvec)
except Exception as e:
print(e)
pass
print(embeddings[0])
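# Run the embedding example when this file is executed directly as a script.
if __name__ == "__main__":
    main()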
<file_sep>import deepchem as dc
import numpy as np
import pytest
import unittest
from flaky import flaky
try:
import torch # noqa: F401
has_torch = True
except:
has_torch = False
class TestCNN(unittest.TestCase):
@pytest.mark.torch
def test_1d_cnn_regression(self):
"""Test that a 1D CNN can overfit simple regression datasets."""
n_samples = 10
n_features = 3
n_tasks = 1
np.random.seed(123)
X = np.random.rand(n_samples, 10, n_features)
y = np.random.randint(2, size=(n_samples, n_tasks)).astype(np.float32)
dataset = dc.data.NumpyDataset(X, y)
regression_metric = dc.metrics.Metric(dc.metrics.mean_squared_error)
model = dc.models.CNN(n_tasks,
n_features,
dims=1,
dropouts=0,
kernel_size=3,
mode='regression',
learning_rate=0.003)
# Fit trained model
model.fit(dataset, nb_epoch=200)
# Eval model on train
scores = model.evaluate(dataset, [regression_metric])
assert scores[regression_metric.name] < 0.1
@pytest.mark.torch
def test_2d_cnn_classification(self):
"""Test that a 2D CNN can overfit simple classification datasets."""
n_samples = 10
n_features = 3
n_tasks = 1
np.random.seed(123)
X = np.random.rand(n_samples, 10, 10, n_features)
y = np.random.randint(2, size=(n_samples, n_tasks)).astype(np.float32)
dataset = dc.data.NumpyDataset(X, y)
classification_metric = dc.metrics.Metric(dc.metrics.roc_auc_score)
model = dc.models.CNN(n_tasks,
n_features,
dims=2,
dropouts=0,
kernel_size=3,
mode='classification',
learning_rate=0.003)
# Fit trained model
model.fit(dataset, nb_epoch=100)
# Eval model on train
scores = model.evaluate(dataset, [classification_metric])
assert scores[classification_metric.name] > 0.9
@flaky
@pytest.mark.torch
def test_residual_cnn_classification(self):
"""Test that a residual CNN can overfit simple classification datasets."""
n_samples = 10
n_features = 3
n_tasks = 1
np.random.seed(123)
X = np.random.rand(n_samples, 10, n_features)
y = np.random.randint(2, size=(n_samples, n_tasks)).astype(np.float32)
dataset = dc.data.NumpyDataset(X, y)
classification_metric = dc.metrics.Metric(dc.metrics.roc_auc_score)
model = dc.models.CNN(n_tasks,
n_features,
dims=1,
dropouts=0,
layer_filters=[30] * 10,
kernel_size=3,
mode='classification',
padding='same',
residual=True,
learning_rate=0.003)
# Fit trained model
model.fit(dataset, nb_epoch=100)
# Eval model on train
scores = model.evaluate(dataset, [classification_metric])
assert scores[classification_metric.name] > 0.9
@flaky
@pytest.mark.torch
def test_cnn_regression_uncertainty(self):
"""Test computing uncertainty for a CNN regression model."""
n_samples = 10
n_features = 2
n_tasks = 1
noise = 0.1
np.random.seed(123)
X = np.random.randn(n_samples, 10, n_features)
y = np.sum(X, axis=(1, 2)) + np.random.normal(scale=noise,
size=(n_samples,))
y = np.reshape(y, (n_samples, n_tasks))
dataset = dc.data.NumpyDataset(X, y)
model = dc.models.CNN(n_tasks,
n_features,
dims=1,
dropouts=0.1,
kernel_size=3,
pool_type='average',
mode='regression',
learning_rate=0.005,
uncertainty=True)
# Fit trained model
model.fit(dataset, nb_epoch=300)
# Predict the output and uncertainty.
pred, std = model.predict_uncertainty(dataset)
assert np.mean(np.abs(y - pred)) < 0.3
assert noise < np.mean(std) < 1.0
<file_sep>import unittest
import numpy as np
from deepchem.feat.molecule_featurizers import GraphMatrix
class TestGraphMatrix(unittest.TestCase):
def test_graph_matrix(self):
max_atom_count = 5
atom_array = [7, 7, 7, 8, 8, 8, 9, 6]
A = np.zeros(shape=(max_atom_count, max_atom_count), dtype=np.float32)
X = np.array(atom_array, dtype=np.int32)
graph_matrix = GraphMatrix(adjacency_matrix=A, node_features=X)
assert isinstance(graph_matrix.adjacency_matrix, np.ndarray)
assert isinstance(graph_matrix.node_features, np.ndarray)
assert graph_matrix.adjacency_matrix.dtype == np.float32
assert graph_matrix.node_features.dtype == np.int32
assert graph_matrix.adjacency_matrix.shape == A.shape
assert graph_matrix.node_features.shape == X.shape
if __name__ == '__main__':
unittest.main()
<file_sep>import os
import deepchem as dc
def test_batch_coulomb_matrix_features():
# Get Data
current_dir = os.path.dirname(os.path.abspath(__file__))
dataset_file = os.path.join(current_dir, 'assets/qm9_mini.sdf')
TASKS = ["alpha", "homo"]
loader = dc.data.SDFLoader(tasks=TASKS,
featurizer=dc.feat.CoulombMatrix(29),
sanitize=True)
data = loader.create_dataset(dataset_file, shard_size=100)
inputs = dc.utils.batch_utils.batch_coulomb_matrix_features(data.X)
# Checks that all atoms exist in the array
assert inputs[0].shape == (149,)
# Checks shape of gaussian distance
assert inputs[1].shape == (1215, 100)
# Checks all molecule membership exist
for i in range(0, 21):
if i not in inputs[2]:
raise AssertionError("Not all molecules are present in the membership array")
# Check Distance Membership shape
assert inputs[3].shape == (1215,)
assert inputs[4].shape == (1215,)
<file_sep>"""Model-Agnostic Meta-Learning (MAML) algorithm for low data learning."""
import os
import shutil
import tempfile
import time
import tensorflow as tf
from deepchem.models.optimizers import Adam, GradientDescent
class MetaLearner(object):
"""Model and data to which the MAML algorithm can be applied.
To use MAML, create a subclass of this defining the learning problem to solve.
It consists of a model that can be trained to perform many different tasks, and
data for training it on a large (possibly infinite) set of different tasks.
"""
def compute_model(self, inputs, variables, training):
"""Compute the model for a set of inputs and variables.
Parameters
----------
inputs: list of tensors
the inputs to the model
variables: list of tensors
the values to use for the model's variables. This might be the actual
variables (as returned by the MetaLearner's variables property), or
alternatively it might be the values of those variables after one or more
steps of gradient descent for the current task.
training: bool
indicates whether the model is being invoked for training or prediction
Returns
-------
(loss, outputs) where loss is the value of the model's loss function, and
outputs is a list of the model's outputs
"""
raise NotImplementedError("Subclasses must implement this")
@property
def variables(self):
"""Get the list of Tensorflow variables to train."""
raise NotImplementedError("Subclasses must implement this")
def select_task(self):
"""Select a new task to train on.
If there is a fixed set of training tasks, this will typically cycle through them.
If there are infinitely many training tasks, this can simply select a new one each
time it is called.
"""
raise NotImplementedError("Subclasses must implement this")
def get_batch(self):
"""Get a batch of data for training.
This should return the data as a list of arrays, one for each of the model's
inputs. This will usually be called twice for each task, and should
return a different batch on each call.
"""
raise NotImplementedError("Subclasses must implement this")
class MAML(object):
"""Implements the Model-Agnostic Meta-Learning algorithm for low data learning.
The algorithm is described in <NAME> et al., "Model-Agnostic Meta-Learning for Fast
Adaptation of Deep Networks" (https://arxiv.org/abs/1703.03400). It is used for
training models that can perform a variety of tasks, depending on what data they
are trained on. It assumes you have training data for many tasks, but only a small
amount for each one. It performs "meta-learning" by looping over tasks and trying
to minimize the loss on each one *after* one or a few steps of gradient descent.
That is, it does not try to create a model that can directly solve the tasks, but
rather tries to create a model that is very easy to train.
To use this class, create a subclass of MetaLearner that encapsulates the model
and data for your learning problem. Pass it to a MAML object and call fit().
You can then use train_on_current_task() to fine tune the model for a particular
task.
"""
def __init__(self,
learner,
learning_rate=0.001,
optimization_steps=1,
meta_batch_size=10,
optimizer=Adam(),
model_dir=None):
"""Create an object for performing meta-optimization.
Parameters
----------
learner: MetaLearner
defines the meta-learning problem
learning_rate: float or Tensor
the learning rate to use for optimizing each task (not to be confused with the one used
for meta-learning). This can optionally be made a variable (represented as a
Tensor), in which case the learning rate will itself be learnable.
optimization_steps: int
the number of steps of gradient descent to perform for each task
meta_batch_size: int
the number of tasks to use for each step of meta-learning
optimizer: Optimizer
the optimizer to use for meta-learning (not to be confused with the gradient descent
optimization performed for each task)
model_dir: str
the directory in which the model will be saved. If None, a temporary directory will be created.
"""
# Record inputs.
self.learner = learner
self.learning_rate = learning_rate
self.optimization_steps = optimization_steps
self.meta_batch_size = meta_batch_size
self.optimizer = optimizer
# Create the output directory if necessary.
self._model_dir_is_temp = False
if model_dir is not None:
if not os.path.exists(model_dir):
os.makedirs(model_dir)
else:
model_dir = tempfile.mkdtemp()
self._model_dir_is_temp = True
self.model_dir = model_dir
self.save_file = "%s/%s" % (self.model_dir, "model")
# Create the optimizers for meta-optimization and task optimization.
self._global_step = tf.Variable(0, trainable=False)
self._tf_optimizer = optimizer._create_tf_optimizer(self._global_step)
task_optimizer = GradientDescent(learning_rate=self.learning_rate)
self._tf_task_optimizer = task_optimizer._create_tf_optimizer(
self._global_step)
# Create a Checkpoint for saving.
self._checkpoint = tf.train.Checkpoint()
self._checkpoint.listed = learner.variables
def __del__(self):
if '_model_dir_is_temp' in dir(self) and self._model_dir_is_temp:
shutil.rmtree(self.model_dir)
def fit(self,
steps,
max_checkpoints_to_keep=5,
checkpoint_interval=600,
restore=False):
"""Perform meta-learning to train the model.
Parameters
----------
steps: int
the number of steps of meta-learning to perform
max_checkpoints_to_keep: int
the maximum number of checkpoint files to keep. When this number is reached, older
files are deleted.
checkpoint_interval: float
the time interval at which to save checkpoints, measured in seconds
restore: bool
if True, restore the model from the most recent checkpoint before training
it further
"""
if restore:
self.restore()
manager = tf.train.CheckpointManager(self._checkpoint, self.model_dir,
max_checkpoints_to_keep)
checkpoint_time = time.time()
# Main optimization loop.
learner = self.learner
variables = learner.variables
for i in range(steps):
for j in range(self.meta_batch_size):
learner.select_task()
meta_loss, meta_gradients = self._compute_meta_loss(
learner.get_batch(), learner.get_batch(), variables)
if j == 0:
summed_gradients = meta_gradients
else:
summed_gradients = [
s + g for s, g in zip(summed_gradients, meta_gradients)
]
self._tf_optimizer.apply_gradients(zip(summed_gradients, variables))
# Do checkpointing.
if i == steps - 1 or time.time(
) >= checkpoint_time + checkpoint_interval:
manager.save()
checkpoint_time = time.time()
@tf.function
def _compute_meta_loss(self, inputs, inputs2, variables):
"""This is called during fitting to compute the meta-loss (the loss after a
few steps of optimization), and its gradient.
"""
updated_variables = variables
with tf.GradientTape() as meta_tape:
for k in range(self.optimization_steps):
with tf.GradientTape() as tape:
loss, _ = self.learner.compute_model(
inputs, updated_variables, True)
gradients = tape.gradient(loss, updated_variables)
updated_variables = [
v if g is None else v - self.learning_rate * g
for v, g in zip(updated_variables, gradients)
]
meta_loss, _ = self.learner.compute_model(inputs2,
updated_variables, True)
meta_gradients = meta_tape.gradient(meta_loss, variables)
return meta_loss, meta_gradients
def restore(self):
"""Reload the model parameters from the most recent checkpoint file."""
last_checkpoint = tf.train.latest_checkpoint(self.model_dir)
if last_checkpoint is None:
raise ValueError('No checkpoint found')
self._checkpoint.restore(last_checkpoint)
def train_on_current_task(self, optimization_steps=1, restore=True):
"""Perform a few steps of gradient descent to fine tune the model on the current task.
Parameters
----------
optimization_steps: int
the number of steps of gradient descent to perform
restore: bool
if True, restore the model from the most recent checkpoint before optimizing
"""
if restore:
self.restore()
variables = self.learner.variables
for i in range(optimization_steps):
inputs = self.learner.get_batch()
with tf.GradientTape() as tape:
loss, _ = self.learner.compute_model(inputs, variables, True)
gradients = tape.gradient(loss, variables)
self._tf_task_optimizer.apply_gradients(zip(gradients, variables))
def predict_on_batch(self, inputs):
"""Compute the model's outputs for a batch of inputs.
Parameters
----------
inputs: list of arrays
the inputs to the model
Returns
-------
(loss, outputs) where loss is the value of the model's loss function, and
outputs is a list of the model's outputs
"""
return self.learner.compute_model(inputs, self.learner.variables, False)
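# ---------------------------------------------------------------------------
# Illustrative sketch, not part of the original module: one way a MetaLearner
# subclass and the MAML driver are meant to fit together, assuming a toy family
# of sine-regression tasks. The names _ExampleSineLearner and
# _example_maml_usage are hypothetical.
# ---------------------------------------------------------------------------


class _ExampleSineLearner(MetaLearner):
    """Hypothetical learner: regress y = amplitude * sin(x) with a small dense net."""

    def __init__(self):
        self.amplitude = 1.0
        # Parameters of a one-hidden-layer network.
        self.w1 = tf.Variable(tf.random.normal([1, 40]))
        self.b1 = tf.Variable(tf.zeros([40]))
        self.w2 = tf.Variable(tf.random.normal([40, 1]))
        self.b2 = tf.Variable(tf.zeros([1]))

    def compute_model(self, inputs, variables, training):
        x, y = inputs
        w1, b1, w2, b2 = variables
        hidden = tf.nn.relu(tf.matmul(x, w1) + b1)
        output = tf.matmul(hidden, w2) + b2
        loss = tf.reduce_mean(tf.square(output - y))
        return loss, [output]

    @property
    def variables(self):
        return [self.w1, self.b1, self.w2, self.b2]

    def select_task(self):
        # Each task uses a different, randomly drawn amplitude.
        self.amplitude = float(tf.random.uniform([], 1.0, 5.0))

    def get_batch(self):
        x = tf.random.uniform([10, 1], -3.0, 3.0)
        return [x, self.amplitude * tf.sin(x)]


def _example_maml_usage():
    """Hypothetical driver: meta-train, then fine tune on a single new task."""
    learner = _ExampleSineLearner()
    maml = MAML(learner, learning_rate=0.01, meta_batch_size=5)
    maml.fit(100)  # meta-learn across many sine tasks
    learner.select_task()  # pick one new task...
    maml.train_on_current_task()  # ...and fine tune on it
    return maml.predict_on_batch(learner.get_batch())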
<file_sep>"""
Geometric utility functions for 3D geometry.
"""
import numpy as np
from scipy.spatial.distance import cdist
from copy import deepcopy
def unit_vector(vector: np.ndarray) -> np.ndarray:
""" Returns the unit vector of the vector.
Parameters
----------
vector: np.ndarray
A numpy array of shape `(3,)`, where `3` is (x,y,z).
Returns
----------
np.ndarray
A numpy array of shape `(3,)`. The unit vector of the input vector.
"""
return vector / np.linalg.norm(vector)
def angle_between(vector_i: np.ndarray, vector_j: np.ndarray) -> float:
"""Returns the angle in radians between vectors "vector_i" and "vector_j"
Note that this function always returns the smaller of the two angles between
the vectors (value between 0 and pi).
Parameters
----------
vector_i: np.ndarray
A numpy array of shape `(3,)`, where `3` is (x,y,z).
vector_j: np.ndarray
A numpy array of shape `(3,)`, where `3` is (x,y,z).
Returns
----------
float
The angle in radians between the two vectors.
Examples
--------
>>> print("%0.06f" % angle_between((1, 0, 0), (0, 1, 0)))
1.570796
>>> print("%0.06f" % angle_between((1, 0, 0), (1, 0, 0)))
0.000000
>>> print("%0.06f" % angle_between((1, 0, 0), (-1, 0, 0)))
3.141593
"""
vector_i_u = unit_vector(vector_i)
vector_j_u = unit_vector(vector_j)
angle = np.arccos(np.dot(vector_i_u, vector_j_u))
if np.isnan(angle):
if np.allclose(vector_i_u, vector_j_u):
return 0.0
else:
return np.pi
return angle
def generate_random_unit_vector() -> np.ndarray:
r"""Generate a random unit vector on the sphere S^2.
Citation: http://mathworld.wolfram.com/SpherePointPicking.html
Pseudocode:
a. Choose random theta \element [0, 2*pi]
b. Choose random z \element [-1, 1]
c. Compute output vector u: (x,y,z) = (sqrt(1-z^2)*cos(theta), sqrt(1-z^2)*sin(theta),z)
Returns
-------
u: np.ndarray
A numpy array of shape `(3,)`. u is an unit vector
"""
theta = np.random.uniform(low=0.0, high=2 * np.pi)
z = np.random.uniform(low=-1.0, high=1.0)
u = np.array([
np.sqrt(1 - z**2) * np.cos(theta),
np.sqrt(1 - z**2) * np.sin(theta), z
])
return u
def generate_random_rotation_matrix() -> np.ndarray:
r"""Generates a random rotation matrix.
1. Generate a random unit vector u, randomly sampled from the
unit sphere (see function generate_random_unit_vector()
for details)
2. Generate a second random unit vector v
a. If absolute value of u \dot v > 0.99, repeat.
(This is important for numerical stability. Intuition: we
want them to be as linearly independent as possible or
else the orthogonalized version of v will be much shorter
in magnitude compared to u. I assume in Stack they took
this from Gram-Schmidt orthogonalization?)
b. v' = v - (u \dot v)*u, i.e. subtract out the component of
v that's in u's direction
c. normalize v' (this isn't in Stack but I assume it must be
done)
3. find w = u \cross v'
4. u, v', and w will form the columns of a rotation matrix, R.
The intuition is that u, v' and w are, respectively, what
the standard basis vectors e1, e2, and e3 will be mapped
to under the transformation.
Returns
-------
R: np.ndarray
A numpy array of shape `(3, 3)`. R is a rotation matrix.
"""
u = generate_random_unit_vector()
v = generate_random_unit_vector()
while np.abs(np.dot(u, v)) >= 0.99:
v = generate_random_unit_vector()
vp = v - (np.dot(u, v) * u)
vp /= np.linalg.norm(vp)
w = np.cross(u, vp)
R = np.column_stack((u, vp, w))
return R
def rotate_molecules(mol_coordinates_list):
"""Rotates provided molecular coordinates.
Pseudocode:
1. Generate random rotation matrix. This matrix applies a random
transformation to any 3-vector such that, were the random transformation
repeatedly applied, it would randomly sample along the surface of a sphere
with radius equal to the norm of the given 3-vector cf.
_generate_random_rotation_matrix() for details
2. Apply R to all atomic coordinates.
3. Return rotated molecule
"""
R = generate_random_rotation_matrix()
rotated_coordinates_list = []
for mol_coordinates in mol_coordinates_list:
coordinates = deepcopy(mol_coordinates)
rotated_coordinates = np.transpose(np.dot(R, np.transpose(coordinates)))
rotated_coordinates_list.append(rotated_coordinates)
return (rotated_coordinates_list)
def is_angle_within_cutoff(vector_i: np.ndarray, vector_j: np.ndarray,
angle_cutoff: float) -> bool:
"""A utility function to compute whether two vectors are within a cutoff from 180 degrees apart.
Parameters
----------
vector_i: np.ndarray
A numpy array of shape `(3,)`, where `3` is (x,y,z).
vector_j: np.ndarray
A numpy array of shape `(3,)`, where `3` is (x,y,z).
angle_cutoff: float
The deviation from 180 (in degrees)
Returns
-------
bool
Whether two vectors are within a cutoff from 180 degrees apart
"""
angle = angle_between(vector_i, vector_j) * 180. / np.pi
return (angle > (180 - angle_cutoff) and angle < (180. + angle_cutoff))
def compute_centroid(coordinates: np.ndarray) -> np.ndarray:
"""Compute the (x,y,z) centroid of provided coordinates
Parameters
----------
coordinates: np.ndarray
A numpy array of shape `(N, 3)`, where `N` is the number of atoms.
Returns
-------
centroid: np.ndarray
A numpy array of shape `(3,)`, where `3` is (x,y,z).
"""
centroid = np.mean(coordinates, axis=0)
return centroid
def compute_protein_range(coordinates: np.ndarray) -> np.ndarray:
"""Compute the protein range of provided coordinates
Parameters
----------
coordinates: np.ndarray
A numpy array of shape `(N, 3)`, where `N` is the number of atoms.
Returns
-------
protein_range: np.ndarray
A numpy array of shape `(3,)`, where `3` is (x,y,z).
"""
protein_max = np.max(coordinates, axis=0)
protein_min = np.min(coordinates, axis=0)
protein_range = protein_max - protein_min
return protein_range
def subtract_centroid(coordinates: np.ndarray,
centroid: np.ndarray) -> np.ndarray:
"""Subtracts centroid from each coordinate.
Subtracts the centroid, a numpy array of dim 3, from all coordinates
of all atoms in the molecule
Note that this update is made in place to the array it's applied to.
Parameters
----------
coordinates: np.ndarray
A numpy array of shape `(N, 3)`, where `N` is the number of atoms.
centroid: np.ndarray
A numpy array of shape `(3,)`
Returns
-------
coordinates: np.ndarray
A numpy array of shape `(N, 3)`, where `N` is the number of atoms.
"""
coordinates -= np.transpose(centroid)
return coordinates
def compute_pairwise_distances(first_coordinate: np.ndarray,
second_coordinate: np.ndarray) -> np.ndarray:
"""Computes pairwise distances between two molecules.
Takes an input (m, 3) and (n, 3) numpy arrays of 3D coords of
two molecules respectively, and outputs an m x n numpy
array of pairwise distances in Angstroms between the first and
second molecule. Entry (i,j) is the distance between the i-th
atom of the first molecule and the j-th atom of the second molecule.
Parameters
----------
first_coordinate: np.ndarray
A numpy array of shape `(m, 3)`, where `m` is the number of atoms.
second_coordinate: np.ndarray
A numpy array of shape `(n, 3)`, where `n` is the number of atoms.
Returns
-------
pairwise_distances: np.ndarray
A numpy array of shape `(m, n)`
"""
pairwise_distances = cdist(first_coordinate,
second_coordinate,
metric='euclidean')
return pairwise_distances
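# ---------------------------------------------------------------------------
# Illustrative sketch, not part of the original module: centering a small
# "molecule", applying a random rotation, and computing pairwise distances to a
# second coordinate set. The coordinates and the helper name are made up.
# ---------------------------------------------------------------------------


def _example_geometry_usage():
    """Hypothetical helper demonstrating how these utilities compose."""
    ligand = np.array([[0.0, 0.0, 0.0], [1.5, 0.0, 0.0], [1.5, 1.5, 0.0]])
    protein = np.array([[10.0, 0.0, 0.0], [11.0, 1.0, 0.0]])
    # Center the ligand on its centroid (subtract_centroid works in place).
    centered = subtract_centroid(ligand.copy(), compute_centroid(ligand))
    # Apply a random rotation; pairwise distances are preserved under rotation.
    rotated = np.dot(generate_random_rotation_matrix(), centered.T).T
    # Distances between each rotated ligand atom and each protein atom.
    return compute_pairwise_distances(rotated, protein)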
<file_sep>import unittest
from deepchem.feat import Mol2VecFingerprint
class TestMol2VecFingerprint(unittest.TestCase):
"""
Test Mol2VecFingerprint.
"""
def setUp(self):
"""
Set up tests.
"""
from rdkit import Chem
smiles = 'CC(=O)OC1=CC=CC=C1C(=O)O'
self.mol = Chem.MolFromSmiles(smiles)
def test_mol2vec_fingerprint(self):
"""
Test simple fingerprint.
"""
featurizer = Mol2VecFingerprint()
feature = featurizer([self.mol])
assert feature.shape == (1, 300)
<file_sep>This provides a utility for generating bioassay datasets in PubChem, similar to the pcba dataset used in the original "Massively Multitask Learning" paper by Ramsunder et al 2015. The usage is as follows:
Before starting, it is recommended to first set the DEEPCHEM_DATA_DIR environment variable to a directory where you have at least 66GB+30GB of storage available (for all PubChem SDFs + all Bioassay CSVs).
Then download the core data we will later featurize and learn on:
```bash
python download_pubchem_ftp.py
python create_smiles_mapping.py
```
Note: On an 8-core desktop computer as of Nov 2017, it took approximately 17 hours to execute create_smiles_mapping.py (that is, to extract the SMILES from all the downloaded, gzipped SDF files from PubChem).
Then, parameterize the create_assay_overview.py script by setting the following options:
```bash
usage: create_assay_overview.py [-h] [-d DATASET_NAME] [-g GENE_ARG]
Deepchem dataset builder for PCBA datasets
optional arguments:
-h, --help show this help message and exit
-d DATASET_NAME Choice of dataset: pcba_128, pcba_146
-g GENE_ARG Name of gene to create a dataset for
```
You must select either -d pcba_146, -d pcba_2475 or -g GENE_SYMBOL.
At the end you will have a file (e.g. pcba_146.csv.gz) in your DEEPCHEM_DATA_DIR ready for benchmarking.
Also, please note that the pcba_146 corresponds to the following query on PubChem Bioassay Search:
10000[TotalSidCount] : 1000000000[TotalSidCount] AND 30[ActiveSidCount] : 1000000000[ActiveSidCount] AND 0[TargetCount] : 1[TargetCount] AND "NCGC"[Source Name] AND "small molecule"[filt] AND "doseresponse"[filt]
This yields (as of Dec 2017) an additional 18 bioassays beyond the core 128 bioassays in PCBA-128
pcba_2475 corresponds to:
1[TotalSidCount] : 1000000000[TotalSidCount] AND 5[ActiveSidCount] : 10000000000[ActiveSidCount] AND 0[TargetCount] : 1[TargetCount] AND "small molecule"[filt] AND "doseresponse"[filt]<file_sep>from typing import Union
from deepchem.utils.typing import RDKitMol
from deepchem.feat.base_classes import MolecularFeaturizer
class RawFeaturizer(MolecularFeaturizer):
"""Encodes a molecule as a SMILES string or RDKit mol.
This featurizer can be useful when you're trying to transform a large
collection of RDKit Mol objects into SMILES strings, or alternatively when you
need a "no-op" featurizer in your molecular pipeline.
Note
----
This class requires RDKit to be installed.
"""
def __init__(self, smiles: bool = False):
"""Initialize this featurizer.
Parameters
----------
smiles: bool, optional (default False)
If True, encode this molecule as a SMILES string. Else as a RDKit mol.
"""
self.smiles = smiles
def _featurize(self, datapoint: RDKitMol, **kwargs) -> Union[str, RDKitMol]:
"""Calculate either smiles string or pass through raw molecule.
Parameters
----------
datapoint: rdkit.Chem.rdchem.Mol
RDKit Mol object
Returns
-------
str or rdkit.Chem.rdchem.Mol
SMILES string or RDKit Mol object.
"""
try:
from rdkit import Chem
except ModuleNotFoundError:
raise ImportError("This class requires RDKit to be installed.")
if 'mol' in kwargs:
datapoint = kwargs.get("mol")
raise DeprecationWarning(
'Mol is being phased out as a parameter, please pass "datapoint" instead.'
)
if self.smiles:
return Chem.MolToSmiles(datapoint)
else:
return datapoint
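# ---------------------------------------------------------------------------
# Illustrative sketch, not part of the original module, assuming RDKit is
# installed; "CCO" and "c1ccccc1" are just example SMILES strings and the
# helper name is hypothetical.
# ---------------------------------------------------------------------------


def _example_raw_featurizer_usage():
    """Hypothetical helper showing both modes of RawFeaturizer."""
    smiles = ["CCO", "c1ccccc1"]
    # smiles=True: canonical SMILES strings come back out.
    as_smiles = RawFeaturizer(smiles=True).featurize(smiles)
    # smiles=False (the default): RDKit Mol objects are passed through.
    as_mols = RawFeaturizer().featurize(smiles)
    return as_smiles, as_mols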
<file_sep>"""
Delaney dataset loader.
"""
import os
import deepchem as dc
from deepchem.molnet.load_function.molnet_loader import TransformerGenerator, _MolnetLoader
from deepchem.data import Dataset
from typing import List, Optional, Tuple, Union
DELANEY_URL = "https://deepchemdata.s3-us-west-1.amazonaws.com/datasets/delaney-processed.csv"
DELANEY_TASKS = ['measured log solubility in mols per litre']
class _DelaneyLoader(_MolnetLoader):
def create_dataset(self) -> Dataset:
dataset_file = os.path.join(self.data_dir, "delaney-processed.csv")
if not os.path.exists(dataset_file):
dc.utils.data_utils.download_url(url=DELANEY_URL,
dest_dir=self.data_dir)
loader = dc.data.CSVLoader(tasks=self.tasks,
feature_field="smiles",
featurizer=self.featurizer)
return loader.create_dataset(dataset_file, shard_size=8192)
def load_delaney(
featurizer: Union[dc.feat.Featurizer, str] = 'ECFP',
splitter: Union[dc.splits.Splitter, str, None] = 'scaffold',
transformers: List[Union[TransformerGenerator, str]] = ['normalization'],
reload: bool = True,
data_dir: Optional[str] = None,
save_dir: Optional[str] = None,
**kwargs
) -> Tuple[List[str], Tuple[Dataset, ...], List[dc.trans.Transformer]]:
"""Load Delaney dataset
The Delaney (ESOL) dataset is a regression dataset containing structures and
water solubility data for 1128 compounds. The dataset is widely used to
validate machine learning models on estimating solubility directly from
molecular structures (as encoded in SMILES strings).
Scaffold splitting is recommended for this dataset.
The raw data csv file contains columns below:
- "Compound ID" - Name of the compound
- "smiles" - SMILES representation of the molecular structure
- "measured log solubility in mols per litre" - Log-scale water solubility
of the compound, used as label
Parameters
----------
featurizer: Featurizer or str
the featurizer to use for processing the data. Alternatively you can pass
one of the names from dc.molnet.featurizers as a shortcut.
splitter: Splitter or str
the splitter to use for splitting the data into training, validation, and
test sets. Alternatively you can pass one of the names from
dc.molnet.splitters as a shortcut. If this is None, all the data
will be included in a single dataset.
transformers: list of TransformerGenerators or strings
the Transformers to apply to the data. Each one is specified by a
TransformerGenerator or, as a shortcut, one of the names from
dc.molnet.transformers.
reload: bool
if True, the first call for a particular featurizer and splitter will cache
the datasets to disk, and subsequent calls will reload the cached datasets.
data_dir: str
a directory to save the raw data in
save_dir: str
a directory to save the dataset in
References
----------
.. [1] Delaney, John S. "ESOL: estimating aqueous solubility directly from
molecular structure." Journal of chemical information and computer
sciences 44.3 (2004): 1000-1005.
"""
loader = _DelaneyLoader(featurizer, splitter, transformers, DELANEY_TASKS,
data_dir, save_dir, **kwargs)
return loader.load_dataset('delaney', reload)
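# ---------------------------------------------------------------------------
# Illustrative sketch, not part of the original module. Calling load_delaney
# downloads the CSV on first use, so the example is wrapped in a hypothetical
# helper function rather than executed at import time.
# ---------------------------------------------------------------------------


def _example_load_delaney():
    """Hypothetical helper showing the typical unpacking of the loader's output."""
    tasks, datasets, transformers = load_delaney(featurizer='ECFP',
                                                 splitter='scaffold')
    train, valid, test = datasets
    return tasks, train.X.shape, len(transformers)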
<file_sep>"""
Short docstring description of dataset.
"""
import os
import logging
import deepchem
from deepchem.feat import Featurizer
from deepchem.trans import Transformer
from deepchem.splits.splitters import Splitter
from deepchem.molnet.defaults import get_defaults
from typing import List, Tuple, Dict, Optional
logger = logging.getLogger(__name__)
DEFAULT_DIR = deepchem.utils.data_utils.get_data_dir()
MYDATASET_URL = "https://deepchemdata.s3-us-west-1.amazonaws.com/datasets/mydataset.tar.gz"
MYDATASET_CSV_URL = "https://deepchemdata.s3-us-west-1.amazonaws.com/datasets/mydataset.csv"
# dict of accepted featurizers for this dataset
# modify the returned dicts for your dataset
DEFAULT_FEATURIZERS = get_defaults("feat")
# Names of supported featurizers
mydataset_featurizers = ['CircularFingerprint', 'ConvMolFeaturizer']
DEFAULT_FEATURIZERS = {k: DEFAULT_FEATURIZERS[k] for k in mydataset_featurizers}
# dict of accepted transformers
DEFAULT_TRANSFORMERS = get_defaults("trans")
# dict of accepted splitters
DEFAULT_SPLITTERS = get_defaults("splits")
# names of supported splitters
mydataset_splitters = ['RandomSplitter', 'RandomStratifiedSplitter']
DEFAULT_SPLITTERS = {k: DEFAULT_SPLITTERS[k] for k in mydataset_splitters}
def load_mydataset(
featurizer: Featurizer = DEFAULT_FEATURIZERS['CircularFingerprint'],
transformers: List[Transformer] = [
DEFAULT_TRANSFORMERS['NormalizationTransformer']
],
splitter: Splitter = DEFAULT_SPLITTERS['RandomSplitter'],
reload: bool = True,
data_dir: Optional[str] = None,
save_dir: Optional[str] = None,
featurizer_kwargs: Dict[str, object] = {},
splitter_kwargs: Dict[str, object] = {},
transformer_kwargs: Dict[str, Dict[str, object]] = {},
**kwargs) -> Tuple[List, Tuple, List]:
"""Load mydataset.
This is a template for adding a function to load a dataset from
MoleculeNet. Adjust the global variable URL strings, default parameters,
default featurizers, transformers, and splitters, and variable names as
needed. All available featurizers, transformers, and
splitters are in the `DEFAULTS_X` global variables.
If `reload = True` and `data_dir` (`save_dir`) is specified, the loader
will attempt to load the raw dataset (featurized dataset) from disk.
Otherwise, the dataset will be downloaded from the DeepChem AWS bucket.
The dataset will be featurized with `featurizer` and separated into
train/val/test sets according to `splitter`. Some transformers (e.g.
`NormalizationTransformer`) must be initialized with a dataset.
Set up kwargs to enable these transformations. Additional kwargs may
be given for specific featurizers, transformers, and splitters.
The load function must be modified with the appropriate DataLoaders
for all supported featurizers for your dataset.
Please refer to the MoleculeNet documentation for further information
https://deepchem.readthedocs.io/en/latest/moleculenet.html.
Parameters
----------
featurizer : allowed featurizers for this dataset
A featurizer that inherits from deepchem.feat.Featurizer.
transformers : List of allowed transformers for this dataset
A transformer that inherits from deepchem.trans.Transformer.
splitter : allowed splitters for this dataset
A splitter that inherits from deepchem.splits.splitters.Splitter.
reload : bool (default True)
Try to reload dataset from disk if already downloaded. Save to disk
after featurizing.
data_dir : str, optional (default None)
Path to datasets.
save_dir : str, optional (default None)
Path to featurized datasets.
featurizer_kwargs : dict
Specify parameters to featurizer, e.g. {"size": 1024}
splitter_kwargs : dict
Specify parameters to splitter, e.g. {"seed": 42}
transformer_kwargs : dict
Maps transformer names to constructor arguments, e.g.
{"BalancingTransformer": {"transform_x":True, "transform_y":False}}
**kwargs : additional optional arguments.
Returns
-------
tasks, datasets, transformers : tuple
tasks : list
Column names corresponding to machine learning target variables.
datasets : tuple
train, validation, test splits of data as
``deepchem.data.datasets.Dataset`` instances.
transformers : list
``deepchem.trans.transformers.Transformer`` instances applied
to dataset.
References
----------
MLA style references for this dataset. The example is like this.
Last, First et al. "Article title." Journal name, vol. #, no. #, year, pp. page range, DOI.
.. [1] Wu, Zhenqin et al. "MoleculeNet: a benchmark for molecular machine learning."
Chemical Science, vol. 9, 2018, pp. 513-530, 10.1039/c7sc02664a.
Examples
--------
>> import deepchem as dc
>> tasks, datasets, transformers = dc.molnet.load_tox21(reload=False)
>> train_dataset, val_dataset, test_dataset = datasets
>> n_tasks = len(tasks)
>> n_features = train_dataset.get_data_shape()[0]
>> model = dc.models.MultitaskClassifier(n_tasks, n_features)
"""
# Warning message about this template
raise ValueError("""
This is a template function and it doesn't do anything!
Use this function as a reference when implementing new
loaders for MoleculeNet datasets.
""")
# Featurize mydataset
logger.info("About to featurize mydataset.")
my_tasks = ["task1", "task2", "task3"] # machine learning targets
# Get DeepChem data directory if needed
if data_dir is None:
data_dir = DEFAULT_DIR
if save_dir is None:
save_dir = DEFAULT_DIR
# Check for str args to featurizer and splitter
if isinstance(featurizer, str):
featurizer = DEFAULT_FEATURIZERS[featurizer](**featurizer_kwargs)
elif issubclass(featurizer, Featurizer):
featurizer = featurizer(**featurizer_kwargs)
if isinstance(splitter, str):
splitter = DEFAULT_SPLITTERS[splitter]()
elif issubclass(splitter, Splitter):
splitter = splitter()
# Reload from disk
if reload:
featurizer_name = str(featurizer.__class__.__name__)
splitter_name = str(splitter.__class__.__name__)
save_folder = os.path.join(save_dir, "mydataset-featurized",
featurizer_name, splitter_name)
loaded, all_dataset, transformers = deepchem.utils.data_utils.load_dataset_from_disk(
save_folder)
if loaded:
return my_tasks, all_dataset, transformers
# First type of supported featurizers
supported_featurizers = [] # type: List[Featurizer]
# If featurizer requires a non-CSV file format, load .tar.gz file
if featurizer in supported_featurizers:
dataset_file = os.path.join(data_dir, 'mydataset.filetype')
if not os.path.exists(dataset_file):
deepchem.utils.data_utils.download_url(url=MYDATASET_URL,
dest_dir=data_dir)
deepchem.utils.data_utils.untargz_file(
os.path.join(data_dir, 'mydataset.tar.gz'), data_dir)
# Change loader to match featurizer and data file type
loader = deepchem.data.DataLoader(
tasks=my_tasks,
id_field="id", # column name holding sample identifier
featurizer=featurizer)
else: # only load CSV file
dataset_file = os.path.join(data_dir, "mydataset.csv")
if not os.path.exists(dataset_file):
deepchem.utils.data_utils.download_url(url=MYDATASET_CSV_URL,
dest_dir=data_dir)
loader = deepchem.data.CSVLoader(tasks=my_tasks,
smiles_field="smiles",
featurizer=featurizer)
# Featurize dataset
dataset = loader.create_dataset(dataset_file)
train_dataset, valid_dataset, test_dataset = splitter.train_valid_test_split(
dataset, **splitter_kwargs)
# Initialize transformers
transformers = [
DEFAULT_TRANSFORMERS[t](dataset=dataset, **transformer_kwargs[t])
if isinstance(t, str) else t(
dataset=dataset, **transformer_kwargs[str(t.__class__.__name__)])
for t in transformers
]
for transformer in transformers:
train_dataset = transformer.transform(train_dataset)
valid_dataset = transformer.transform(valid_dataset)
test_dataset = transformer.transform(test_dataset)
if reload: # save to disk
deepchem.utils.data_utils.save_dataset_to_disk(save_folder,
train_dataset,
valid_dataset,
test_dataset,
transformers)
return my_tasks, (train_dataset, valid_dataset, test_dataset), transformers
<file_sep>"""
Contains basic hyperparameter optimizations.
"""
import numpy as np
import os
import itertools
import tempfile
import collections
import logging
from functools import reduce
from operator import mul
from typing import Dict, List, Optional, Tuple
from deepchem.data import Dataset
from deepchem.trans import Transformer
from deepchem.models import Model
from deepchem.metrics import Metric
from deepchem.hyper.base_classes import HyperparamOpt
from deepchem.hyper.base_classes import _convert_hyperparam_dict_to_filename
logger = logging.getLogger(__name__)
class GridHyperparamOpt(HyperparamOpt):
"""
Provides simple grid hyperparameter search capabilities.
This class performs a grid hyperparameter search over the specified
hyperparameter space. This implementation simply iterates over all possible
hyperparameter combinations and doesn't use parallelization to speed up the
search.
Examples
--------
This example shows the type of constructor function expected.
>>> import sklearn
>>> import deepchem as dc
>>> optimizer = dc.hyper.GridHyperparamOpt(lambda **p: dc.models.GraphConvModel(**p))
Here's a more sophisticated example that shows how to optimize only
some parameters of a model. In this case, we have some parameters we
want to optimize, and others which we don't. To handle this type of
search, we create a `model_builder` which hard codes some arguments
(in this case, `max_iter` is a hyperparameter which we don't want
to search over)
>>> import deepchem as dc
>>> import numpy as np
>>> from sklearn.linear_model import LogisticRegression as LR
>>> # generating data
>>> X = np.arange(1, 11, 1).reshape(-1, 1)
>>> y = np.hstack((np.zeros(5), np.ones(5)))
>>> dataset = dc.data.NumpyDataset(X, y)
>>> # splitting dataset into train and test
>>> splitter = dc.splits.RandomSplitter()
>>> train_dataset, test_dataset = splitter.train_test_split(dataset)
>>> # metric to evaluate result of a set of parameters
>>> metric = dc.metrics.Metric(dc.metrics.accuracy_score)
>>> # defining `model_builder`
>>> def model_builder(**model_params):
... penalty = model_params['penalty']
... solver = model_params['solver']
... lr = LR(penalty=penalty, solver=solver, max_iter=100)
... return dc.models.SklearnModel(lr)
>>> # the parameters which are to be optimized
>>> params = {
... 'penalty': ['l1', 'l2'],
... 'solver': ['liblinear', 'saga']
... }
>>> # Creating optimizer and searching over hyperparameters
>>> optimizer = dc.hyper.GridHyperparamOpt(model_builder)
>>> best_model, best_hyperparams, all_results = \
optimizer.hyperparam_search(params, train_dataset, test_dataset, metric)
>>> best_hyperparams # the best hyperparameters
{'penalty': 'l2', 'solver': 'saga'}
"""
def hyperparam_search(
self,
params_dict: Dict,
train_dataset: Dataset,
valid_dataset: Dataset,
metric: Metric,
output_transformers: List[Transformer] = [],
nb_epoch: int = 10,
use_max: bool = True,
logfile: str = 'results.txt',
logdir: Optional[str] = None,
**kwargs,
) -> Tuple[Model, Dict, Dict]:
"""Perform hyperparams search according to params_dict.
Each key to hyperparams_dict is a model_param. The values should
be a list of potential values for that hyperparam.
Parameters
----------
params_dict: Dict
Maps hyperparameter names (strings) to lists of possible
parameter values.
train_dataset: Dataset
dataset used for training
valid_dataset: Dataset
dataset used for validation (optimization on valid scores)
metric: Metric
metric used for evaluation
output_transformers: list[Transformer]
Transformers for evaluation. This argument is needed since
`train_dataset` and `valid_dataset` may have been transformed
for learning and need the transform to be inverted before
the metric can be evaluated on a model.
nb_epoch: int, (default 10)
Specifies the number of training epochs during each iteration of optimization.
Not used by all model types.
use_max: bool, optional
If True, return the model with the highest score. Else return
model with the minimum score.
logdir: str, optional
The directory in which to store created models. If not set, will
use a temporary directory.
logfile: str, optional (default `results.txt`)
Name of logfile to write results to. If specified, this must
be a valid file name. If not specified, results of hyperparameter
search will be written to `logdir/results.txt`.
Returns
-------
Tuple[`best_model`, `best_hyperparams`, `all_scores`]
`(best_model, best_hyperparams, all_scores)` where `best_model` is
an instance of `dc.model.Model`, `best_hyperparams` is a
dictionary of parameters, and `all_scores` is a dictionary mapping
string representations of hyperparameter sets to validation
scores.
Notes
-----
From DeepChem 2.6, the return type of `best_hyperparams` is a dictionary of
parameters rather than a tuple of parameters as it was previously. The new
changes have been made to standardize the behaviour across different
hyperparameter optimization techniques available in DeepChem.
"""
hyperparams = params_dict.keys()
hyperparam_vals = params_dict.values()
for hyperparam_list in params_dict.values():
assert isinstance(hyperparam_list, collections.abc.Iterable)
number_combinations = reduce(mul,
[len(vals) for vals in hyperparam_vals])
if use_max:
best_validation_score = -np.inf
else:
best_validation_score = np.inf
best_model = None
all_scores = {}
if logdir is not None:
if not os.path.exists(logdir):
os.makedirs(logdir, exist_ok=True)
log_file = os.path.join(logdir, logfile)
for ind, hyperparameter_tuple in enumerate(
itertools.product(*hyperparam_vals)):
model_params = {}
logger.info("Fitting model %d/%d" % (ind + 1, number_combinations))
# Construct a dictionary mapping hyperparameter names to values
hyper_params = dict(zip(hyperparams, hyperparameter_tuple))
for hyperparam, hyperparam_val in zip(hyperparams,
hyperparameter_tuple):
model_params[hyperparam] = hyperparam_val
logger.info("hyperparameters: %s" % str(model_params))
hp_str = _convert_hyperparam_dict_to_filename(hyper_params)
if logdir is not None:
model_dir = os.path.join(logdir, hp_str)
logger.info("model_dir is %s" % model_dir)
try:
os.makedirs(model_dir)
except OSError:
if not os.path.isdir(model_dir):
logger.info(
"Error creating model_dir, using tempfile directory"
)
model_dir = tempfile.mkdtemp()
else:
model_dir = tempfile.mkdtemp()
model_params['model_dir'] = model_dir
model = self.model_builder(**model_params)
# mypy test throws error, so ignoring it in try
try:
model.fit(train_dataset, nb_epoch=nb_epoch) # type: ignore
# Not all models have nb_epoch
except TypeError:
model.fit(train_dataset)
try:
model.save()
# Some models autosave
except NotImplementedError:
pass
multitask_scores = model.evaluate(valid_dataset, [metric],
output_transformers)
valid_score = multitask_scores[metric.name]
all_scores[hp_str] = valid_score
if (use_max and valid_score >= best_validation_score) or (
not use_max and valid_score <= best_validation_score):
best_validation_score = valid_score
best_hyperparams = hyper_params
best_model = model
logger.info(
"Model %d/%d, Metric %s, Validation set %s: %f" %
(ind + 1, number_combinations, metric.name, ind, valid_score))
logger.info("\tbest_validation_score so far: %f" %
best_validation_score)
if best_model is None:
logger.info("No models trained correctly.")
# arbitrarily return last model
if logdir is not None:
with open(log_file, 'w+') as f:
f.write(
"No model trained correctly. Arbitrary model returned")
best_model, best_hyperparams = model, hyperparameter_tuple
return best_model, best_hyperparams, all_scores
multitask_scores = best_model.evaluate(train_dataset, [metric],
output_transformers)
train_score = multitask_scores[metric.name]
logger.info("Best hyperparameters: %s" % str(best_hyperparams))
logger.info("best train score: %f" % train_score)
logger.info("best validation score: %f" % best_validation_score)
if logdir is not None:
with open(log_file, 'w+') as f:
f.write("Best Hyperparameters dictionary %s\n" %
str(best_hyperparams))
f.write("Best validation score %f\n" % best_validation_score)
f.write("Best train_score: %f\n" % train_score)
return best_model, best_hyperparams, all_scores
<file_sep>"""
PDBBind binding pocket dataset loader.
"""
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
import os
import numpy as np
import pandas as pd
import shutil
import time
import re
from rdkit import Chem
import deepchem as dc
def compute_binding_pocket_features(pocket_featurizer, ligand_featurizer,
pdb_subdir, pdb_code, threshold=.3):
"""Compute features for a given complex"""
protein_file = os.path.join(pdb_subdir, "%s_protein.pdb" % pdb_code)
ligand_file = os.path.join(pdb_subdir, "%s_ligand.sdf" % pdb_code)
ligand_mol2 = os.path.join(pdb_subdir, "%s_ligand.mol2" % pdb_code)
# Extract active site
active_site_box, active_site_atoms, active_site_coords = (
dc.dock.binding_pocket.extract_active_site(
protein_file, ligand_file))
# Featurize ligand
mol = Chem.MolFromMol2File(str(ligand_mol2), removeHs=False)
if mol is None:
return None, None
# Default for CircularFingerprint
n_ligand_features = 1024
ligand_features = ligand_featurizer.featurize([mol])
# Featurize pocket
finder = dc.dock.ConvexHullPocketFinder()
pockets, pocket_atoms, pocket_coords = finder.find_pockets(protein_file, ligand_file)
n_pockets = len(pockets)
n_pocket_features = dc.feat.BindingPocketFeaturizer.n_features
features = np.zeros((n_pockets, n_pocket_features+n_ligand_features))
pocket_features = pocket_featurizer.featurize(
protein_file, pockets, pocket_atoms, pocket_coords)
# Note broadcast operation
features[:, :n_pocket_features] = pocket_features
features[:, n_pocket_features:] = ligand_features
# Compute labels for pockets
labels = np.zeros(n_pockets)
pocket_atoms[active_site_box] = active_site_atoms
for ind, pocket in enumerate(pockets):
overlap = dc.dock.binding_pocket.compute_overlap(
pocket_atoms, active_site_box, pocket)
if overlap > threshold:
labels[ind] = 1
else:
labels[ind] = 0
return features, labels
def load_pdbbind_labels(labels_file):
"""Loads pdbbind labels as dataframe"""
# Some complexes have labels but no PDB files. Filter these manually
missing_pdbs = ["1d2v", "1jou", "1s8j", "1cam", "4mlt", "4o7d"]
contents = []
with open(labels_file) as f:
for line in f:
if line.startswith("#"):
continue
else:
# Some of the ligand-names are of form (FMN ox). Use regex
# to merge into form (FMN-ox)
p = re.compile(r'\(([^\)\s]*) ([^\)\s]*)\)')
line = p.sub('(\\1-\\2)', line)
elts = line.split()
# Filter if missing PDB files
if elts[0] in missing_pdbs:
continue
contents.append(elts)
contents_df = pd.DataFrame(
contents,
columns=("PDB code", "resolution", "release year", "-logKd/Ki", "Kd/Ki",
"ignore-this-field", "reference", "ligand name"))
return contents_df
def featurize_pdbbind_pockets(data_dir=None, subset="core"):
"""Featurizes pdbbind according to provided featurization"""
tasks = ["active-site"]
current_dir = os.path.dirname(os.path.realpath(__file__))
data_dir = os.path.join(current_dir, "%s_pockets" % (subset))
if os.path.exists(data_dir):
return dc.data.DiskDataset(data_dir), tasks
pdbbind_dir = os.path.join(current_dir, "../pdbbind/v2015")
# Load PDBBind dataset
if subset == "core":
labels_file = os.path.join(pdbbind_dir, "INDEX_core_data.2013")
elif subset == "refined":
labels_file = os.path.join(pdbbind_dir, "INDEX_refined_data.2015")
elif subset == "full":
labels_file = os.path.join(pdbbind_dir, "INDEX_general_PL_data.2015")
else:
raise ValueError("Only core, refined, and full subsets supported.")
print("About to load contents.")
if not os.path.exists(labels_file):
raise ValueError("Run ../pdbbind/get_pdbbind.sh to download dataset.")
contents_df = load_pdbbind_labels(labels_file)
ids = contents_df["PDB code"].values
y = np.array([float(val) for val in contents_df["-logKd/Ki"].values])
# Define featurizers
pocket_featurizer = dc.feat.BindingPocketFeaturizer()
ligand_featurizer = dc.feat.CircularFingerprint(size=1024)
# Featurize Dataset
all_features = []
all_labels = []
missing_pdbs = []
all_ids = []
time1 = time.time()
for ind, pdb_code in enumerate(ids):
print("Processing complex %d, %s" % (ind, str(pdb_code)))
pdb_subdir = os.path.join(pdbbind_dir, pdb_code)
if not os.path.exists(pdb_subdir):
print("%s is missing!" % pdb_subdir)
missing_pdbs.append(pdb_subdir)
continue
features, labels = compute_binding_pocket_features(
pocket_featurizer, ligand_featurizer, pdb_subdir, pdb_code)
if features is None:
print("Featurization failed!")
continue
all_features.append(features)
all_labels.append(labels)
ids = np.array(["%s%d" % (pdb_code, i) for i in range(len(labels))])
all_ids.append(ids)
time2 = time.time()
print("TIMING: PDBBind Pocket Featurization took %0.3f s" % (time2-time1))
X = np.vstack(all_features)
y = np.concatenate(all_labels)
w = np.ones_like(y)
ids = np.concatenate(all_ids)
dataset = dc.data.DiskDataset.from_numpy(X, y, w, ids, data_dir=data_dir)
return dataset, tasks
def load_pdbbind_pockets(split="index", subset="core"):
"""Load PDBBind datasets. Does not do train/test split"""
dataset, tasks = featurize_pdbbind_pockets(subset=subset)
splitters = {'index': dc.splits.IndexSplitter(),
'random': dc.splits.RandomSplitter()}
splitter = splitters[split]
########################################################### DEBUG
print("dataset.X.shape")
print(dataset.X.shape)
print("dataset.y.shape")
print(dataset.y.shape)
print("dataset.w.shape")
print(dataset.w.shape)
print("dataset.ids.shape")
print(dataset.ids.shape)
########################################################### DEBUG
train, valid, test = splitter.train_valid_test_split(dataset)
transformers = []
for transformer in transformers:
train = transformer.transform(train)
for transformer in transformers:
valid = transformer.transform(valid)
for transformer in transformers:
test = transformer.transform(test)
return tasks, (train, valid, test), transformers
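# ---------------------------------------------------------------------------
# Illustrative sketch, not part of the original script, assuming the PDBBind
# v2015 files have already been fetched with ../pdbbind/get_pdbbind.sh.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    pocket_tasks, (pocket_train, pocket_valid, pocket_test), _ = \
        load_pdbbind_pockets(split="index", subset="core")
    print("Train/valid/test sizes: %d/%d/%d" %
          (len(pocket_train), len(pocket_valid), len(pocket_test)))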
<file_sep>import copy
import random
import shutil
import numpy as np
import tensorflow as tf
import deepchem as dc
import deepchem.rl.envs.tictactoe
from deepchem.models.tensorgraph.layers import Flatten, Dense, SoftMax, \
BatchNorm, Squeeze
from deepchem.models.optimizers import Adam
class TicTacToePolicy(dc.rl.Policy):
def create_layers(self, state, **kwargs):
d1 = Flatten(in_layers=state)
d2 = Dense(
in_layers=[d1],
activation_fn=tf.nn.relu,
normalizer_fn=tf.nn.l2_normalize,
normalizer_params={"dim": 1},
out_channels=64)
d3 = Dense(
in_layers=[d2],
activation_fn=tf.nn.relu,
normalizer_fn=tf.nn.l2_normalize,
normalizer_params={"dim": 1},
out_channels=32)
d4 = Dense(
in_layers=[d3],
activation_fn=tf.nn.relu,
normalizer_fn=tf.nn.l2_normalize,
normalizer_params={"dim": 1},
out_channels=16)
d4 = BatchNorm(in_layers=[d4])
d5 = Dense(in_layers=[d4], activation_fn=None, out_channels=9)
value = Dense(in_layers=[d4], activation_fn=None, out_channels=1)
value = Squeeze(squeeze_dims=1, in_layers=[value])
probs = SoftMax(in_layers=[d5])
return {'action_prob': probs, 'value': value}
def eval_tic_tac_toe(value_weight,
num_epoch_rounds=1,
games=10**4,
rollouts=10**5):
"""
Returns the average reward over `games` games after each round of `rollouts` training rollouts.
:param value_weight: weight of the value term in the A3C loss
:return: list of dicts mapping cumulative rollouts to average reward
"""
env = deepchem.rl.envs.tictactoe.TicTacToeEnvironment()
policy = TicTacToePolicy()
model_dir = "/tmp/tictactoe"
try:
shutil.rmtree(model_dir)
except:
pass
avg_rewards = []
for j in range(num_epoch_rounds):
a3c = dc.rl.A3C(
env,
policy,
entropy_weight=0.01,
value_weight=value_weight,
model_dir=model_dir,
optimizer=Adam(learning_rate=0.001))
try:
a3c.restore()
except:
print("unable to restore")
pass
a3c.fit(rollouts)
rewards = []
for i in range(games):
env.reset()
reward = -float('inf')
while not env._terminated:
action = a3c.select_action(env._state)
reward = env.step(action)
rewards.append(reward)
avg_rewards.append({(j + 1) * rollouts: np.mean(rewards)})
return avg_rewards
def main():
value_weight = 6.0
score = eval_tic_tac_toe(value_weight, num_epoch_rounds=3)
print(score)
if __name__ == "__main__":
main()
<file_sep>from __future__ import division
import random
random.seed(1)
import inspect
from collections import namedtuple, defaultdict, OrderedDict
import numpy as np
np.random.seed(1)
from sklearn.model_selection import train_test_split
#from simdna import simulations
import simulations
from simdna.synthetic import StringEmbeddable
from utils import get_motif_scores, one_hot_encode
from models import SequenceDNN
from dragonn.plot import add_letters_to_axis, plot_motif
import matplotlib.pyplot as plt
from matplotlib.lines import Line2D
def SequenceDNN_learning_curve(dnn):
if dnn.valid_metrics is not None:
train_losses, valid_losses = [
np.array([epoch_metrics['Loss']
for epoch_metrics in metrics])
for metrics in (dnn.train_metrics, dnn.valid_metrics)
]
min_loss_indx = min(enumerate(valid_losses), key=lambda x: x[1])[0]
f = plt.figure(figsize=(10, 4))
ax = f.add_subplot(1, 1, 1)
ax.plot(range(len(train_losses)), train_losses, 'b', label='Training', lw=4)
ax.plot(
range(len(train_losses)), valid_losses, 'r', label='Validation', lw=4)
ax.plot([min_loss_indx, min_loss_indx], [0, 1.0], 'k--', label='Early Stop')
ax.legend(loc="upper right")
ax.set_ylabel("Loss")
ax.set_ylim((0.0, 1.0))
ax.set_xlabel("Epoch")
plt.show()
else:
print("learning curve can only be obtained after training!")
def test_SequenceDNN(dnn, simulation_data):
print("Test performance:")
print(dnn.test(simulation_data.X_test, simulation_data.y_test))
def plot_motifs(simulation_data):
for motif_name in simulation_data.motif_names:
plot_motif(motif_name, figsize=(10, 4), ylab=motif_name)
def plot_sequence_filters(dnn):
fig = plt.figure(figsize=(15, 8))
fig.subplots_adjust(hspace=0.1, wspace=0.1)
conv_filters = dnn.get_sequence_filters()
num_plots_per_axis = int(len(conv_filters)**0.5) + 1
for i, conv_filter in enumerate(conv_filters):
ax = fig.add_subplot(num_plots_per_axis, num_plots_per_axis, i + 1)
add_letters_to_axis(ax, conv_filter.T)
ax.axis("off")
ax.set_title("Filter %s" % (str(i + 1)))
def plot_SequenceDNN_layer_outputs(dnn, simulation_data):
# define layer out functions
import theano
get_conv_output = theano.function(
[dnn.model.layers[0].input],
dnn.model.layers[0].get_output(train=False),
allow_input_downcast=True)
get_conv_relu_output = theano.function(
[dnn.model.layers[0].input],
dnn.model.layers[1].get_output(train=False),
allow_input_downcast=True)
get_maxpool_output = theano.function(
[dnn.model.layers[0].input],
dnn.model.layers[-4].get_output(train=False),
allow_input_downcast=True)
# get layer outputs for a positive simulation example
pos_indx = np.where(simulation_data.y_valid == 1)[0][0]
pos_X = simulation_data.X_valid[pos_indx:(pos_indx + 1)]
conv_outputs = get_conv_output(pos_X).squeeze()
conv_relu_outputs = get_conv_relu_output(pos_X).squeeze()
maxpool_outputs = get_maxpool_output(pos_X).squeeze()
# plot layer outputs
fig = plt.figure(figsize=(15, 12))
ax1 = fig.add_subplot(3, 1, 3)
heatmap = ax1.imshow(
conv_outputs, aspect='auto', interpolation='None', cmap='seismic')
fig.colorbar(heatmap)
ax1.set_ylabel("Convolutional Filters")
ax1.set_xlabel("Position")
ax1.get_yaxis().set_ticks([])
ax1.get_xaxis().set_ticks([])
ax1.set_title("SequenceDNN outputs from convolutional layer.\t\
Locations of motif sites are highlighted in grey.")
ax2 = fig.add_subplot(3, 1, 2)
heatmap = ax2.imshow(
conv_relu_outputs, aspect='auto', interpolation='None', cmap='seismic')
fig.colorbar(heatmap)
ax2.set_ylabel("Convolutional Filters")
ax2.get_yaxis().set_ticks([])
ax2.get_xaxis().set_ticks([])
ax2.set_title("Convolutional outputs after ReLU transformation.\t\
Locations of motif sites are highlighted in grey.")
ax3 = fig.add_subplot(3, 1, 1)
heatmap = ax3.imshow(
maxpool_outputs, aspect='auto', interpolation='None', cmap='seismic')
fig.colorbar(heatmap)
ax3.set_title("DNN outputs after max pooling")
ax3.set_ylabel("Convolutional Filters")
ax3.get_yaxis().set_ticks([])
ax3.get_xaxis().set_ticks([])
# highlight motif sites
motif_scores = get_motif_scores(pos_X, simulation_data.motif_names)
motif_sites = [np.argmax(motif_scores[0, i, :]) for i in [0, 1]]
for motif_site in motif_sites:
conv_output_start = motif_site - max(dnn.conv_width - 10, 0)
conv_output_stop = motif_site + max(dnn.conv_width - 10, 0)
ax1.axvspan(conv_output_start, conv_output_stop, color='grey', alpha=0.5)
ax2.axvspan(conv_output_start, conv_output_stop, color='grey', alpha=0.5)
def interpret_SequenceDNN_filters(dnn, simulation_data):
print("Plotting simulation motifs...")
plot_motifs(simulation_data)
plt.show()
print("Visualizing convolutional sequence filters in SequenceDNN...")
plot_sequence_filters(dnn)
plt.show()
def interpret_data_with_SequenceDNN(dnn, simulation_data):
# get a positive and a negative example from the simulation data
pos_indx = np.flatnonzero(simulation_data.y_valid == 1)[2]
neg_indx = np.flatnonzero(simulation_data.y_valid == 0)[2]
pos_X = simulation_data.X_valid[pos_indx:pos_indx + 1]
neg_X = simulation_data.X_valid[neg_indx:neg_indx + 1]
# get motif scores, ISM scores, and DeepLIFT scores
scores_dict = defaultdict(OrderedDict)
scores_dict['Positive']['Motif Scores'] = get_motif_scores(
pos_X, simulation_data.motif_names)
scores_dict['Positive']['ISM Scores'] = dnn.in_silico_mutagenesis(pos_X).max(
axis=-2)
scores_dict['Positive']['DeepLIFT Scores'] = dnn.deeplift(pos_X).max(axis=-2)
scores_dict['Negative']['Motif Scores'] = get_motif_scores(
neg_X, simulation_data.motif_names)
scores_dict['Negative']['ISM Scores'] = dnn.in_silico_mutagenesis(neg_X).max(
axis=-2)
scores_dict['Negative']['DeepLIFT Scores'] = dnn.deeplift(neg_X).max(axis=-2)
# get motif site locations
motif_sites = {
key: [
embedded_motif.startPos + len(embedded_motif.what.string) // 2
for embedded_motif in (next(
embedded_motif
for embedded_motif in simulation_data.valid_embeddings[index]
if isinstance(embedded_motif.what, StringEmbeddable) and
motif_name in embedded_motif.what.stringDescription)
for motif_name in simulation_data.motif_names)
]
for key, index in (('Positive', pos_indx), ('Negative', neg_indx))
}
# organize legends
motif_label_dict = {}
motif_label_dict['Motif Scores'] = simulation_data.motif_names
if len(simulation_data.motif_names) == dnn.num_tasks:
motif_label_dict['ISM Scores'] = simulation_data.motif_names
else:
motif_label_dict['ISM Scores'] = ['_'.join(simulation_data.motif_names)]
motif_label_dict['DeepLIFT Scores'] = motif_label_dict['ISM Scores']
# plot scores and highlight motif site locations
seq_length = pos_X.shape[-1]
plots_per_row = 2
plots_per_column = 3
ylim_dict = {
'Motif Scores': (-80, 30),
'ISM Scores': (-1.5, 3.0),
'DeepLIFT Scores': (-1.5, 3.0)
}
motif_colors = ['b', 'r', 'c', 'm', 'g', 'k', 'y']
font_size = 12
num_x_ticks = 5
highlight_width = 5
motif_labels_cache = []
f = plt.figure(figsize=(10, 12))
f.subplots_adjust(hspace=0.15, wspace=0.15)
f.set_tight_layout(True)
for j, key in enumerate(['Positive', 'Negative']):
for i, (score_type, scores) in enumerate(scores_dict[key].items()):
ax = f.add_subplot(plots_per_column, plots_per_row,
plots_per_row * i + j + 1)
ax.set_ylim(ylim_dict[score_type])
ax.set_xlim((0, seq_length))
ax.set_frame_on(False)
if j == 0: # put y axis and ticks only on left side
xmin, xmax = ax.get_xaxis().get_view_interval()
ymin, ymax = ax.get_yaxis().get_view_interval()
ax.add_artist(
Line2D((xmin, xmin), (ymin, ymax), color='black', linewidth=2))
ax.get_yaxis().tick_left()
for tick in ax.yaxis.get_major_ticks():
tick.label.set_fontsize(font_size / 1.5)
ax.set_ylabel(score_type)
if j > 0: # remove y axes
ax.get_yaxis().set_visible(False)
if i < (plots_per_column - 1): # remove x axes
ax.get_xaxis().set_visible(False)
if i == (plots_per_column - 1): # set x axis and ticks on bottom
ax.set_xticks(seq_length / num_x_ticks * (np.arange(num_x_ticks + 1)))
xmin, xmax = ax.get_xaxis().get_view_interval()
ymin, ymax = ax.get_yaxis().get_view_interval()
ax.add_artist(
Line2D((xmin, xmax), (ymin, ymin), color='black', linewidth=2))
ax.get_xaxis().tick_bottom()
for tick in ax.xaxis.get_major_ticks():
tick.label.set_fontsize(font_size / 1.5)
ax.set_xlabel("Position")
if j > 0 and i < (plots_per_column - 1): # remove all axes
ax.axis('off')
add_legend = False
for _i, motif_label in enumerate(motif_label_dict[score_type]):
if score_type == 'Motif Scores':
scores_to_plot = scores[0, _i, :]
else:
scores_to_plot = scores[0, 0, 0, :]
if motif_label not in motif_labels_cache:
motif_labels_cache.append(motif_label)
add_legend = True
motif_color = motif_colors[motif_labels_cache.index(motif_label)]
ax.plot(scores_to_plot, label=motif_label, c=motif_color)
if add_legend:
leg = ax.legend(
loc=[0, 0.85],
frameon=False,
fontsize=font_size,
ncol=3,
handlelength=-0.5)
for legobj in leg.legendHandles:
legobj.set_color('w')
for _j, text in enumerate(leg.get_texts()):
text_color = motif_colors[motif_labels_cache.index(
motif_label_dict[score_type][_j])]
text.set_color(text_color)
for motif_site in motif_sites[key]:
ax.axvspan(
motif_site - highlight_width,
motif_site + highlight_width,
color='grey',
alpha=0.1)
<file_sep>import deepchem as dc
import numpy as np
import os
def test_reshard_with_X():
"""Test resharding on a simple example"""
X = np.random.rand(100, 10)
dataset = dc.data.DiskDataset.from_numpy(X)
assert dataset.get_number_shards() == 1
dataset.reshard(shard_size=10)
assert (dataset.X == X).all()
assert dataset.get_number_shards() == 10
def test_reshard_with_X_y():
"""Test resharding on a simple example"""
X = np.random.rand(100, 10)
y = np.random.rand(100,)
dataset = dc.data.DiskDataset.from_numpy(X, y)
assert dataset.get_number_shards() == 1
dataset.reshard(shard_size=10)
assert (dataset.X == X).all()
# This is necessary since from_numpy adds in shape information
assert (dataset.y.flatten() == y).all()
assert dataset.get_number_shards() == 10
def test_reshard_with_X_y_generative():
"""Test resharding for a hypothetical generative dataset."""
X = np.random.rand(100, 10, 10)
y = np.random.rand(100, 10, 10)
dataset = dc.data.DiskDataset.from_numpy(X, y)
assert (dataset.X == X).all()
assert (dataset.y == y).all()
assert dataset.get_number_shards() == 1
dataset.reshard(shard_size=10)
assert (dataset.X == X).all()
assert (dataset.y == y).all()
assert dataset.get_number_shards() == 10
def test_reshard_with_X_y_w():
"""Test resharding on a simple example"""
X = np.random.rand(100, 10)
y = np.random.rand(100,)
w = np.ones_like(y)
dataset = dc.data.DiskDataset.from_numpy(X, y, w)
assert dataset.get_number_shards() == 1
dataset.reshard(shard_size=10)
assert (dataset.X == X).all()
# This is necessary since from_numpy adds in shape information
assert (dataset.y.flatten() == y).all()
assert (dataset.w.flatten() == w).all()
assert dataset.get_number_shards() == 10
def test_reshard_with_X_y_w_ids():
"""Test resharding on a simple example"""
X = np.random.rand(100, 10)
y = np.random.rand(100,)
w = np.ones_like(y)
ids = np.arange(100)
dataset = dc.data.DiskDataset.from_numpy(X, y, w, ids)
assert dataset.get_number_shards() == 1
dataset.reshard(shard_size=10)
assert (dataset.X == X).all()
# This is necessary since from_numpy adds in shape information
assert (dataset.y.flatten() == y).all()
assert (dataset.w.flatten() == w).all()
assert (dataset.ids == ids).all()
assert dataset.get_number_shards() == 10
def test_reshard_nolabels_smiles():
current_dir = os.path.dirname(os.path.abspath(__file__))
data_dir = os.path.join(current_dir, "reaction_smiles.csv")
featurizer = dc.feat.DummyFeaturizer()
loader = dc.data.CSVLoader(tasks=[],
feature_field="reactions",
featurizer=featurizer)
dataset = loader.create_dataset(data_dir)
dataset.reshard(shard_size=10)
assert dataset.get_number_shards() == 1
def test_reshard_50sequences():
current_dir = os.path.dirname(os.path.abspath(__file__))
data_dir = os.path.join(current_dir, "50_sequences.csv")
featurizer = dc.feat.DummyFeaturizer()
loader = dc.data.CSVLoader(tasks=[],
feature_field="SEQUENCE",
featurizer=featurizer)
dataset = loader.create_dataset(data_dir)
dataset.reshard(shard_size=10)
assert dataset.get_number_shards() == 5
<file_sep>import deepchem as dc
import numpy as np
import pytest
try:
import torch
import deepchem.models.torch_models.layers as torch_layers
has_torch = True
except:
has_torch = False
@pytest.mark.torch
def test_weave_layer():
"""Test invoking the torch equivalent of WeaveLayer."""
n_atoms = 4 # In CCC and C, there are 4 atoms
raw_smiles = ['CCC', 'C']
from rdkit import Chem
mols = [Chem.MolFromSmiles(s) for s in raw_smiles]
featurizer = dc.feat.WeaveFeaturizer()
mols = featurizer.featurize(mols)
weave = torch_layers.WeaveLayer()
atom_feat = []
pair_feat = []
atom_to_pair = []
pair_split = []
start = 0
n_pair_feat = 14
for im, mol in enumerate(mols):
n_atoms = mol.get_num_atoms()
# index of pair features
C0, C1 = np.meshgrid(np.arange(n_atoms), np.arange(n_atoms))
atom_to_pair.append(
np.transpose(np.array([C1.flatten() + start,
C0.flatten() + start])))
# number of pairs for each atom
pair_split.extend(C1.flatten() + start)
start = start + n_atoms
# atom features
atom_feat.append(mol.get_atom_features())
# pair features
pair_feat.append(
np.reshape(mol.get_pair_features(),
(n_atoms * n_atoms, n_pair_feat)))
inputs = [
np.array(np.concatenate(atom_feat, axis=0), dtype=np.float32),
np.concatenate(pair_feat, axis=0),
np.array(pair_split),
np.concatenate(atom_to_pair, axis=0)
]
torch.set_printoptions(precision=8)
# Assigning tensorflow equivalent weights to torch layer
weave.W_AA = torch.from_numpy(
np.load("deepchem/models/tests/assets/W_AA.npy"))
weave.W_PA = torch.from_numpy(
np.load("deepchem/models/tests/assets/W_PA.npy"))
weave.W_A = torch.from_numpy(
np.load("deepchem/models/tests/assets/W_A.npy"))
if weave.update_pair:
weave.W_AP = torch.from_numpy(
np.load("deepchem/models/tests/assets/W_AP.npy"))
weave.W_PP = torch.from_numpy(
np.load("deepchem/models/tests/assets/W_PP.npy"))
weave.W_P = torch.from_numpy(
np.load("deepchem/models/tests/assets/W_P.npy"))
# Outputs should be [A, P]
outputs = weave(inputs)
assert len(outputs) == 2
assert np.allclose(outputs[0].detach().numpy(),
np.load("deepchem/models/tests/assets/A.npy"),
atol=1e-4)
assert np.allclose(outputs[1].detach().numpy(),
np.load("deepchem/models/tests/assets/P.npy"),
atol=1e-4)
<file_sep>"""Utility functions for working with PyTorch."""
import torch
from typing import Callable, Union, List
def get_activation(fn: Union[Callable, str]):
"""Get a PyTorch activation function, specified either directly or as a string.
    This function simplifies letting users specify activation functions by name.
If a function is provided, it is simply returned unchanged. If a string is provided,
the corresponding function in torch.nn.functional is returned.
"""
if isinstance(fn, str):
return getattr(torch.nn.functional, fn)
return fn
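# Illustrative usage (a sketch, not executed here): both calls below resolve to
# torch.nn.functional.relu, so layer constructors can accept either form.
#   get_activation('relu')
#   get_activation(torch.nn.functional.relu)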
def unsorted_segment_sum(data: torch.Tensor, segment_ids: torch.Tensor,
num_segments: int) -> torch.Tensor:
"""Computes the sum along segments of a tensor. Analogous to tf.unsorted_segment_sum.
Parameters
----------
data: torch.Tensor
A tensor whose segments are to be summed.
segment_ids: torch.Tensor
The segment indices tensor.
num_segments: int
The number of segments.
Returns
-------
tensor: torch.Tensor
Examples
--------
>>> segment_ids = torch.Tensor([0, 1, 0]).to(torch.int64)
>>> data = torch.Tensor([[1, 2, 3, 4], [5, 6, 7, 8], [4, 3, 2, 1]])
>>> num_segments = 2
>>> result = unsorted_segment_sum(data=data,
segment_ids=segment_ids,
num_segments=num_segments)
>>> data.shape[0]
3
>>> segment_ids.shape[0]
3
>>> len(segment_ids.shape)
1
>>> result
tensor([[5., 5., 5., 5.],
[5., 6., 7., 8.]])
"""
if len(segment_ids.shape) != 1:
raise AssertionError("segment_ids have be a 1-D tensor")
if data.shape[0] != segment_ids.shape[0]:
raise AssertionError(
"segment_ids should be the same size as dimension 0 of input.")
s = torch.prod(torch.tensor(data.shape[1:])).long()
segment_ids = segment_ids.repeat_interleave(s).view(segment_ids.shape[0],
*data.shape[1:])
# data.shape and segment_ids.shape should be equal
assert data.shape == segment_ids.shape
shape: List[int] = [num_segments] + list(data.shape[1:])
tensor: torch.Tensor = torch.zeros(*shape).scatter_add(
0, segment_ids, data.float())
tensor = tensor.type(data.dtype)
return tensor
def segment_sum(data: torch.Tensor, segment_ids: torch.Tensor) -> torch.Tensor:
""" This function computes the sum of values along segments within a tensor. It is useful when you have a tensor with segment IDs and you want to compute the sum of values for each segment.
This function is analogous to tf.segment_sum. (https://www.tensorflow.org/api_docs/python/tf/math/segment_sum).
Parameters
----------
data: torch.Tensor
A pytorch tensor containing the values to be summed. It can have any shape, but its rank (number of dimensions) should be at least 1.
segment_ids: torch.Tensor
A 1-D tensor containing the indices for the segmentation. The segments can be any non-negative integer values, but they must be sorted in non-decreasing order.
Returns
-------
out_tensor: torch.Tensor
Tensor with the same shape as data, where each value corresponds to the sum of values within the corresponding segment.
Examples
--------
>>> data = torch.Tensor([[1, 2, 3, 4], [4, 3, 2, 1], [5, 6, 7, 8]])
>>> segment_ids = torch.Tensor([0, 0, 1]).to(torch.int64)
>>> result = segment_sum(data=data, segment_ids=segment_ids)
>>> data.shape[0]
3
>>> segment_ids.shape[0]
3
>>> len(segment_ids.shape)
1
>>> result
tensor([[5., 5., 5., 5.],
[5., 6., 7., 8.]])
"""
if not all(segment_ids[i] <= segment_ids[i + 1]
for i in range(len(segment_ids) - 1)):
raise AssertionError("elements of segment_ids must be sorted")
if len(segment_ids.shape) != 1:
raise AssertionError("segment_ids have be a 1-D tensor")
if data.shape[0] != segment_ids.shape[0]:
raise AssertionError(
"segment_ids should be the same size as dimension 0 of input.")
num_segments = len(torch.unique(segment_ids))
out_tensor = unsorted_segment_sum(data, segment_ids, num_segments)
return out_tensor
<file_sep>from __future__ import print_function
import deepchem as dc
import numpy as np
import tensorflow as tf
from sklearn.metrics import accuracy_score
# Load the data.
tasks, datasets, transformers = dc.molnet.load_toxcast()
(train_dataset, valid_dataset, test_dataset) = datasets
x = train_dataset.X
y = train_dataset.y
w = train_dataset.w
n_features = x.shape[1]
n_molecules = y.shape[0]
n_tasks = y.shape[1]
# Toxcast has data on 6874 molecules and 617 tasks. However, the data is very
# sparse: most tasks do not include data for most molecules. It also is very
# unbalanced: there are many more negatives than positives. For each task,
# create a list of alternating positives and negatives so each batch will have
# equal numbers of both.
task_molecules = []
for i in range(n_tasks):
positives = [j for j in range(n_molecules) if w[j, i] > 0 and y[j, i] == 1]
negatives = [j for j in range(n_molecules) if w[j, i] > 0 and y[j, i] == 0]
np.random.shuffle(positives)
np.random.shuffle(negatives)
mols = sum((list(m) for m in zip(positives, negatives)), [])
task_molecules.append(mols)
# Define a MetaLearner describing the learning problem.
class ToxcastLearner(dc.metalearning.MetaLearner):
def __init__(self):
self.n_training_tasks = int(n_tasks * 0.8)
self.batch_size = 10
self.batch_start = [0] * n_tasks
self.set_task_index(0)
self.w1 = tf.Variable(
np.random.normal(size=[n_features, 1000], scale=0.02), dtype=tf.float32)
self.w2 = tf.Variable(
np.random.normal(size=[1000, 1], scale=0.02), dtype=tf.float32)
self.b1 = tf.Variable(np.ones(1000), dtype=tf.float32)
self.b2 = tf.Variable(np.zeros(1), dtype=tf.float32)
def compute_model(self, inputs, variables, training):
x, y = [tf.cast(i, tf.float32) for i in inputs]
w1, w2, b1, b2 = variables
dense1 = tf.nn.relu(tf.matmul(x, w1) + b1)
logits = tf.matmul(dense1, w2) + b2
output = tf.sigmoid(logits)
loss = tf.reduce_mean(
tf.nn.sigmoid_cross_entropy_with_logits(logits=logits, labels=y))
return loss, [output]
@property
def variables(self):
return [self.w1, self.w2, self.b1, self.b2]
def set_task_index(self, index):
self.task = index
def select_task(self):
self.set_task_index((self.task + 1) % self.n_training_tasks)
def get_batch(self):
task = self.task
start = self.batch_start[task]
mols = task_molecules[task][start:start + self.batch_size]
labels = np.zeros((self.batch_size, 1))
labels[np.arange(self.batch_size), 0] = y[mols, task]
if start + 2 * self.batch_size > len(task_molecules[task]):
self.batch_start[task] = 0
else:
self.batch_start[task] += self.batch_size
return [x[mols, :], labels]
# Run meta-learning on 80% of the tasks.
n_epochs = 20
learner = ToxcastLearner()
maml = dc.metalearning.MAML(learner)
steps = n_epochs * learner.n_training_tasks // maml.meta_batch_size
maml.fit(steps)
# Validate on the remaining tasks.
def compute_scores(optimize):
maml.restore()
y_true = []
y_pred = []
losses = []
for task in range(learner.n_training_tasks, n_tasks):
learner.set_task_index(task)
if optimize:
maml.train_on_current_task(restore=True)
inputs = learner.get_batch()
loss, prediction = maml.predict_on_batch(inputs)
y_true.append(inputs[1])
y_pred.append(prediction[0][:, 0])
losses.append(loss)
y_true = np.concatenate(y_true)
y_pred = np.concatenate(y_pred)
print()
print('Cross entropy loss:', np.mean(losses))
print('Prediction accuracy:', accuracy_score(y_true, y_pred > 0.5))
print('ROC AUC:', dc.metrics.roc_auc_score(y_true, y_pred))
print()
print('Before fine tuning:')
compute_scores(False)
print('After fine tuning:')
compute_scores(True)
<file_sep># Harvard Organic Photovoltaic Dataset
The HOPV dataset is the "Harvard Organic
Photovoltaic Dataset". It includes 350 small
molecules and polymers that were utilized as p-type materials
in OPVs. Experimental properties include: HOMO [a.u.], LUMO
[a.u.], Electrochemical gap [a.u.], Optical gap [a.u.], Power
conversion efficiency [%], Open circuit potential [V], Short
circuit current density [mA/cm^2], and fill factor [%].
Theoretical calculations in the original dataset have been
removed (for now).
Lopez, <NAME>., et al. "The Harvard organic photovoltaic dataset." Scientific data 3.1 (2016): 1-7.
In this example, we train models on the HOPV dataset to predict these properties.
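A minimal sketch of loading this data through the MoleculeNet loader (assuming the `dc.molnet.load_hopv` entry point with default ECFP featurization; adjust `featurizer` and `splitter` as needed):

```python
import deepchem as dc

# Load HOPV; returns task names, (train, valid, test) datasets, and transformers.
tasks, datasets, transformers = dc.molnet.load_hopv(featurizer='ECFP')
train_dataset, valid_dataset, test_dataset = datasets
print(tasks)
print(train_dataset.X.shape)
```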
<file_sep>"""
Genomic data handling Iterable.
"""
from typing import Dict, Iterable, Union
import numpy as np
def seq_one_hot_encode(sequences, letters: str = 'ATCGN') -> np.ndarray:
"""One hot encodes list of genomic sequences.
Sequences encoded have shape (N_sequences, N_letters, sequence_length, 1).
These sequences will be processed as images with one color channel.
Parameters
----------
sequences: np.ndarray or Iterator[Bio.SeqRecord]
Iterable object of genetic sequences
letters: str, optional (default "ATCGN")
String with the set of possible letters in the sequences.
Raises
------
ValueError:
If sequences are of different lengths.
Returns
-------
np.ndarray
A numpy array of shape `(N_sequences, N_letters, sequence_length, 1)`.
"""
    # The letter encoder maps each character in `letters` (default 'ATCGN') to an integer index
letter_encoder = {l: i for i, l in enumerate(letters)}
alphabet_length = len(letter_encoder)
    # Peek at the first sequence to get the sequence length.
if isinstance(sequences, np.ndarray):
first_seq = sequences[0]
tail_seq = sequences[1:]
else:
first_seq = next(sequences)
tail_seq = sequences
sequence_length = len(first_seq)
seqs = []
seqs.append(
_seq_to_encoded(first_seq, letter_encoder, alphabet_length,
sequence_length))
for other_seq in tail_seq:
if len(other_seq) != sequence_length:
raise ValueError("The genetic sequences must have a same length")
seqs.append(
_seq_to_encoded(other_seq, letter_encoder, alphabet_length,
sequence_length))
return np.expand_dims(np.array(seqs), -1)
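# Illustrative example (comment only, using two hypothetical length-4 sequences):
# encoding ['ACGT', 'TTTT'] with the default 'ATCGN' alphabet yields an array of
# shape (2, 5, 4, 1):
#   seq_one_hot_encode(np.array(['ACGT', 'TTTT']))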
def _seq_to_encoded(seq: Union[str, Iterable[str]], letter_encoder: Dict[str,
int],
alphabet_length: int, sequence_length: int) -> np.ndarray:
"""One hot encodes a genomic sequence.
Sequences encoded have shape (N_sequences, N_letters, sequence_length, 1).
These sequences will be processed as images with one color channel.
Parameters
----------
seq: str or Bio.SeqRecord
a genetic sequence
letter_encoder: Dict[str, int]
The keys are letters and the values are unique int values (like 0, 1, 2...).
alphabet_length: int
        Number of possible letters in the sequences.
sequence_length: int
        Length of a genetic sequence.
Returns
-------
encoded_seq: np.ndarray
A numpy array of shape `(N_letters, sequence_length)`.
"""
encoded_seq = np.zeros((alphabet_length, sequence_length))
seq_ints = [letter_encoder[s] for s in seq]
encoded_seq[seq_ints, np.arange(sequence_length)] = 1
return encoded_seq
def encode_bio_sequence(fname: str,
file_type: str = "fasta",
letters: str = "ATCGN") -> np.ndarray:
"""
Loads a sequence file and returns an array of one-hot sequences.
Parameters
----------
fname: str
Filename of fasta file.
file_type: str, optional (default "fasta")
The type of file encoding to process, e.g. fasta or fastq, this
is passed to Biopython.SeqIO.parse.
letters: str, optional (default "ATCGN")
The set of letters that the sequences consist of, e.g. ATCG.
Returns
-------
np.ndarray
A numpy array of shape `(N_sequences, N_letters, sequence_length, 1)`.
Notes
-----
This function requires BioPython to be installed.
"""
try:
from Bio import SeqIO
except ModuleNotFoundError:
raise ImportError("This function requires BioPython to be installed.")
sequences = SeqIO.parse(fname, file_type)
return seq_one_hot_encode(sequences, letters)
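# Illustrative usage (assumes a hypothetical FASTA file "sequences.fasta" whose
# records all share the same length):
#   one_hot = encode_bio_sequence("sequences.fasta")
#   # one_hot.shape == (N_sequences, 5, sequence_length, 1) with the default 'ATCGN' alphabet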
<file_sep>import numpy as np
import deepchem as dc
def test_coulomb_fit_transformer():
"""Test coulomb fit transformer on singletask dataset."""
n_samples = 10
n_features = 3
n_tasks = 1
ids = np.arange(n_samples)
X = np.random.rand(n_samples, n_features, n_features)
y = np.zeros((n_samples, n_tasks))
w = np.ones((n_samples, n_tasks))
dataset = dc.data.NumpyDataset(X, y, w, ids)
fit_transformer = dc.trans.CoulombFitTransformer(dataset)
X_t = fit_transformer.X_transform(dataset.X)
assert len(X_t.shape) == 2
<file_sep>"""
Tests to make sure deepchem models can be fit on easy datasets.
"""
import tempfile
import unittest
import numpy as np
from sklearn.datasets import load_diabetes, load_digits
from sklearn.model_selection import train_test_split
try:
import xgboost
import lightgbm
has_xgboost_and_lightgbm = True
except:
has_xgboost_and_lightgbm = False
import deepchem as dc
@unittest.skipIf(not has_xgboost_and_lightgbm,
'xgboost or lightgbm are not installed')
def test_singletask_regression_with_xgboost():
np.random.seed(123)
# prepare dataset
dataset = load_diabetes()
X, y = dataset.data, dataset.target
frac_train = .7
X_train, X_test, y_train, y_test = \
train_test_split(X, y, train_size=frac_train)
train_dataset = dc.data.NumpyDataset(X_train, y_train)
test_dataset = dc.data.NumpyDataset(X_test, y_test)
# global setting
regression_metric = dc.metrics.Metric(dc.metrics.mae_score)
params = {'early_stopping_rounds': 25}
# xgboost test
xgb_model = xgboost.XGBRegressor(n_estimators=50,
random_state=123,
verbose=False)
model = dc.models.GBDTModel(xgb_model, **params)
# fit trained model
model.fit(train_dataset)
model.save()
# eval model on test
scores = model.evaluate(test_dataset, [regression_metric])
assert scores[regression_metric.name] < 55
@unittest.skipIf(not has_xgboost_and_lightgbm,
'xgboost or lightgbm are not installed')
def test_singletask_regression_with_lightgbm():
np.random.seed(123)
# prepare dataset
dataset = load_diabetes()
X, y = dataset.data, dataset.target
frac_train = .7
X_train, X_test, y_train, y_test = \
train_test_split(X, y, train_size=frac_train)
train_dataset = dc.data.NumpyDataset(X_train, y_train)
test_dataset = dc.data.NumpyDataset(X_test, y_test)
# global setting
regression_metric = dc.metrics.Metric(dc.metrics.mae_score)
params = {'early_stopping_rounds': 25}
# lightgbm test
lgbm_model = lightgbm.LGBMRegressor(n_estimators=50,
random_state=123,
silent=True)
model = dc.models.GBDTModel(lgbm_model, **params)
# fit trained model
model.fit(train_dataset)
model.save()
# eval model on test
scores = model.evaluate(test_dataset, [regression_metric])
assert scores[regression_metric.name] < 55
@unittest.skipIf(not has_xgboost_and_lightgbm,
'xgboost or lightgbm are not installed')
def test_multitask_regression_with_xgboost():
np.random.seed(123)
# prepare dataset
n_tasks = 4
tasks = range(n_tasks)
dataset = load_diabetes()
X, y = dataset.data, dataset.target
y = np.reshape(y, (len(y), 1))
y = np.hstack([y] * n_tasks)
frac_train = .7
X_train, X_test, y_train, y_test = \
train_test_split(X, y, train_size=frac_train)
train_dataset = dc.data.DiskDataset.from_numpy(X_train, y_train)
test_dataset = dc.data.DiskDataset.from_numpy(X_test, y_test)
# global setting
regression_metric = dc.metrics.Metric(dc.metrics.mae_score)
params = {'early_stopping_rounds': 25}
# xgboost test
def xgboost_builder(model_dir):
xgb_model = xgboost.XGBRegressor(n_estimators=50,
seed=123,
verbose=False)
return dc.models.GBDTModel(xgb_model, model_dir, **params)
model = dc.models.SingletaskToMultitask(tasks, xgboost_builder)
# fit trained model
model.fit(train_dataset)
model.save()
# eval model on test
scores = model.evaluate(test_dataset, [regression_metric])
score = scores[regression_metric.name]
assert score < 55
@unittest.skipIf(not has_xgboost_and_lightgbm,
'xgboost or lightgbm are not installed')
def test_multitask_regression_with_lightgbm():
np.random.seed(123)
# prepare dataset
n_tasks = 4
tasks = range(n_tasks)
dataset = load_diabetes()
X, y = dataset.data, dataset.target
y = np.reshape(y, (len(y), 1))
y = np.hstack([y] * n_tasks)
frac_train = .7
X_train, X_test, y_train, y_test = \
train_test_split(X, y, train_size=frac_train)
train_dataset = dc.data.DiskDataset.from_numpy(X_train, y_train)
test_dataset = dc.data.DiskDataset.from_numpy(X_test, y_test)
# global setting
regression_metric = dc.metrics.Metric(dc.metrics.mae_score)
params = {'early_stopping_rounds': 25}
# lightgbm test
def lightgbm_builder(model_dir):
lgbm_model = lightgbm.LGBMRegressor(n_estimators=50,
seed=123,
silent=False)
return dc.models.GBDTModel(lgbm_model, model_dir, **params)
model = dc.models.SingletaskToMultitask(tasks, lightgbm_builder)
# fit trained model
model.fit(train_dataset)
model.save()
# eval model on test
scores = model.evaluate(test_dataset, [regression_metric])
score = scores[regression_metric.name]
assert score < 55
@unittest.skipIf(not has_xgboost_and_lightgbm,
'xgboost or lightgbm are not installed')
def test_classification_with_xgboost():
"""Test that sklearn models can learn on simple classification datasets."""
np.random.seed(123)
# prepare dataset
dataset = load_digits(n_class=2)
X, y = dataset.data, dataset.target
frac_train = .7
X_train, X_test, y_train, y_test = \
train_test_split(X, y, train_size=frac_train)
train_dataset = dc.data.NumpyDataset(X_train, y_train)
test_dataset = dc.data.NumpyDataset(X_test, y_test)
# global setting
classification_metric = dc.metrics.Metric(dc.metrics.roc_auc_score)
params = {'early_stopping_rounds': 25}
# xgboost test
xgb_model = xgboost.XGBClassifier(n_estimators=50, seed=123, verbose=False)
model = dc.models.GBDTModel(xgb_model, **params)
# fit trained model
model.fit(train_dataset)
model.save()
# eval model on test
scores = model.evaluate(test_dataset, [classification_metric])
assert scores[classification_metric.name] > .9
@unittest.skipIf(not has_xgboost_and_lightgbm,
'xgboost or lightgbm are not installed')
def test_classification_with_lightgbm():
"""Test that sklearn models can learn on simple classification datasets."""
np.random.seed(123)
# prepare dataset
dataset = load_digits(n_class=2)
X, y = dataset.data, dataset.target
frac_train = .7
X_train, X_test, y_train, y_test = \
train_test_split(X, y, train_size=frac_train)
train_dataset = dc.data.NumpyDataset(X_train, y_train)
test_dataset = dc.data.NumpyDataset(X_test, y_test)
# global setting
classification_metric = dc.metrics.Metric(dc.metrics.roc_auc_score)
params = {'early_stopping_rounds': 25}
# lightgbm test
lgbm_model = lightgbm.LGBMClassifier(n_estimators=50, seed=123, silent=True)
model = dc.models.GBDTModel(lgbm_model, **params)
# fit trained model
model.fit(train_dataset)
model.save()
# eval model on test
scores = model.evaluate(test_dataset, [classification_metric])
assert scores[classification_metric.name] > .9
@unittest.skipIf(not has_xgboost_and_lightgbm,
'xgboost or lightgbm are not installed')
def test_reload_with_xgboost():
np.random.seed(123)
# prepare dataset
dataset = load_diabetes()
X, y = dataset.data, dataset.target
frac_train = .7
X_train, X_test, y_train, y_test = \
train_test_split(X, y, train_size=frac_train)
train_dataset = dc.data.NumpyDataset(X_train, y_train)
test_dataset = dc.data.NumpyDataset(X_test, y_test)
# global setting
regression_metric = dc.metrics.Metric(dc.metrics.mae_score)
model_dir = tempfile.mkdtemp()
params = {'early_stopping_rounds': 25, 'model_dir': model_dir}
# xgboost test
xgb_model = xgboost.XGBRegressor(n_estimators=50,
random_state=123,
verbose=False)
model = dc.models.GBDTModel(xgb_model, **params)
# fit trained model
model.fit(train_dataset)
model.save()
# reload
reloaded_model = dc.models.GBDTModel(None, model_dir)
reloaded_model.reload()
# check predictions match on test dataset
original_pred = model.predict(test_dataset)
reload_pred = reloaded_model.predict(test_dataset)
assert np.all(original_pred == reload_pred)
# eval model on test
scores = reloaded_model.evaluate(test_dataset, [regression_metric])
assert scores[regression_metric.name] < 55
@unittest.skipIf(not has_xgboost_and_lightgbm,
'xgboost or lightgbm are not installed')
def test_reload_with_lightgbm():
np.random.seed(123)
# prepare dataset
dataset = load_diabetes()
X, y = dataset.data, dataset.target
frac_train = .7
X_train, X_test, y_train, y_test = \
train_test_split(X, y, train_size=frac_train)
train_dataset = dc.data.NumpyDataset(X_train, y_train)
test_dataset = dc.data.NumpyDataset(X_test, y_test)
# global setting
regression_metric = dc.metrics.Metric(dc.metrics.mae_score)
model_dir = tempfile.mkdtemp()
params = {'early_stopping_rounds': 25, 'model_dir': model_dir}
# lightgbm test
lgbm_model = lightgbm.LGBMRegressor(n_estimators=50,
random_state=123,
silent=True)
model = dc.models.GBDTModel(lgbm_model, **params)
# fit trained model
model.fit(train_dataset)
model.save()
# reload
reloaded_model = dc.models.GBDTModel(None, model_dir)
reloaded_model.reload()
# check predictions match on test dataset
original_pred = model.predict(test_dataset)
reload_pred = reloaded_model.predict(test_dataset)
assert np.all(original_pred == reload_pred)
# eval model on test
scores = reloaded_model.evaluate(test_dataset, [regression_metric])
assert scores[regression_metric.name] < 55
<file_sep>echo "Pulling qm9 dataset from deepchem"
wget http://deepchem.io.s3-website-us-west-1.amazonaws.com/datasets/gdb9.tar.gz
echo "Extracting qm9 structures"
tar -zxvf gdb9.tar.gz
<file_sep>"""
Tests for transformer objects.
"""
import os
import unittest
import numpy as np
import pytest
import scipy.ndimage
try:
import tensorflow as tf
has_tensorflow = True
except:
has_tensorflow = False
import deepchem as dc
from deepchem.trans.transformers import DataTransforms
class TestDataTransforms(unittest.TestCase):
"""
Test DataTransforms for images
"""
@pytest.mark.tensorflow
def setUp(self):
"""
init to load the MNIST data for DataTransforms Tests
"""
super(TestDataTransforms, self).setUp()
self.current_dir = os.path.dirname(os.path.abspath(__file__))
(x_train, y_train), (x_test,
y_test) = tf.keras.datasets.mnist.load_data()
train = dc.data.NumpyDataset(x_train, y_train)
        # extract only the images (no need for the labels)
data = (train.X)[0]
# reshaping the vector to image
data = np.reshape(data, (28, 28))
self.d = data
@pytest.mark.tensorflow
def test_blurring(self):
# Check Blurring
dt = DataTransforms(self.d)
blurred = dt.gaussian_blur(sigma=1.5)
check_blur = scipy.ndimage.gaussian_filter(self.d, 1.5)
assert np.allclose(check_blur, blurred)
@pytest.mark.tensorflow
def test_center_crop(self):
# Check center crop
dt = DataTransforms(self.d)
x_crop = 50
y_crop = 50
crop = dt.center_crop(x_crop, y_crop)
y = self.d.shape[0]
x = self.d.shape[1]
x_start = x // 2 - (x_crop // 2)
y_start = y // 2 - (y_crop // 2)
check_crop = self.d[y_start:y_start + y_crop, x_start:x_start + x_crop]
assert np.allclose(check_crop, crop)
@pytest.mark.tensorflow
def test_crop(self):
# Check crop
dt = DataTransforms(self.d)
crop = dt.crop(0, 10, 0, 10)
y = self.d.shape[0]
x = self.d.shape[1]
check_crop = self.d[10:y - 10, 0:x - 0]
assert np.allclose(crop, check_crop)
@pytest.mark.tensorflow
def test_convert2gray(self):
# Check convert2gray
dt = DataTransforms(self.d)
gray = dt.convert2gray()
check_gray = np.dot(self.d[..., :3], [0.2989, 0.5870, 0.1140])
assert np.allclose(check_gray, gray)
@pytest.mark.tensorflow
def test_rotation(self):
# Check rotation
dt = DataTransforms(self.d)
angles = [0, 5, 10, 90]
for ang in angles:
rotate = dt.rotate(ang)
check_rotate = scipy.ndimage.rotate(self.d, ang)
assert np.allclose(rotate, check_rotate)
# Some more test cases for flip
rotate = dt.rotate(-90)
check_rotate = scipy.ndimage.rotate(self.d, 270)
assert np.allclose(rotate, check_rotate)
@pytest.mark.tensorflow
def test_flipping(self):
# Check flip
dt = DataTransforms(self.d)
flip_lr = dt.flip(direction="lr")
flip_ud = dt.flip(direction="ud")
check_lr = np.fliplr(self.d)
check_ud = np.flipud(self.d)
assert np.allclose(flip_ud, check_ud)
assert np.allclose(flip_lr, check_lr)
@pytest.mark.tensorflow
def test_scaling(self):
from PIL import Image
# Check Scales
dt = DataTransforms(self.d)
h = 150
w = 150
scale = Image.fromarray(self.d).resize((h, w))
check_scale = dt.scale(h, w)
        assert np.allclose(scale, check_scale)
@pytest.mark.tensorflow
def test_shift(self):
# Check shift
dt = DataTransforms(self.d)
height = 5
width = 5
if len(self.d.shape) == 2:
shift = scipy.ndimage.shift(self.d, [height, width])
if len(self.d.shape) == 3:
shift = scipy.ndimage.shift(self.d, [height, width, 0])
check_shift = dt.shift(width, height)
assert np.allclose(shift, check_shift)
@pytest.mark.tensorflow
def test_gaussian_noise(self):
# check gaussian noise
dt = DataTransforms(self.d)
np.random.seed(0)
random_noise = self.d
random_noise = random_noise + np.random.normal(
loc=0, scale=25.5, size=self.d.shape)
np.random.seed(0)
check_random_noise = dt.gaussian_noise(mean=0, std=25.5)
assert np.allclose(random_noise, check_random_noise)
@pytest.mark.tensorflow
def test_salt_pepper_noise(self):
# check salt and pepper noise
dt = DataTransforms(self.d)
np.random.seed(0)
prob = 0.05
random_noise = self.d
noise = np.random.random(size=self.d.shape)
random_noise[noise < (prob / 2)] = 0
random_noise[noise > (1 - prob / 2)] = 255
np.random.seed(0)
check_random_noise = dt.salt_pepper_noise(prob, salt=255, pepper=0)
assert np.allclose(random_noise, check_random_noise)
@pytest.mark.tensorflow
def test_median_filter(self):
# Check median filter
from PIL import Image, ImageFilter
dt = DataTransforms(self.d)
filtered = dt.median_filter(size=3)
image = Image.fromarray(self.d)
image = image.filter(ImageFilter.MedianFilter(size=3))
check_filtered = np.array(image)
assert np.allclose(check_filtered, filtered)
<file_sep>import numpy as np
from deepchem.feat import ConvMolFeaturizer
from deepchem.feat.mol_graphs import ConvMol
from deepchem.molnet import load_bace_classification
def get_molecules():
tasks, all_dataset, transformers = load_bace_classification(
featurizer="Raw")
return all_dataset[0].X
def test_mol_ordering():
mols = get_molecules()
featurizer = ConvMolFeaturizer()
featurized_mols = featurizer.featurize(mols)
for i in range(len(featurized_mols)):
atom_features = featurized_mols[i].atom_features
degree_list = np.expand_dims(featurized_mols[i].degree_list, axis=1)
atom_features = np.concatenate([degree_list, atom_features], axis=1)
featurized_mols[i].atom_features = atom_features
conv_mol = ConvMol.agglomerate_mols(featurized_mols)
for start, end in conv_mol.deg_slice.tolist():
members = conv_mol.membership[start:end]
sorted_members = np.array(sorted(members))
members = np.array(members)
assert np.all(sorted_members == members)
conv_mol_atom_features = conv_mol.get_atom_features()
adj_number = 0
for start, end in conv_mol.deg_slice.tolist():
deg_features = conv_mol_atom_features[start:end]
adj_number_array = deg_features[:, 0]
assert np.all(adj_number_array == adj_number)
adj_number += 1
<file_sep>"""
Script that trains Tensorflow multitask models on QM8 dataset.
"""
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
import os
import deepchem as dc
import numpy as np
from deepchem.molnet import load_qm8
np.random.seed(123)
qm8_tasks, datasets, transformers = load_qm8()
train_dataset, valid_dataset, test_dataset = datasets
fit_transformers = [dc.trans.CoulombFitTransformer(train_dataset)]
regression_metric = [
dc.metrics.Metric(dc.metrics.mean_absolute_error, mode="regression"),
dc.metrics.Metric(dc.metrics.pearson_r2_score, mode="regression")
]
model = dc.models.MultitaskFitTransformRegressor(
n_tasks=len(qm8_tasks),
n_features=[26, 26],
learning_rate=0.001,
momentum=.8,
batch_size=32,
weight_init_stddevs=[1 / np.sqrt(400), 1 / np.sqrt(100), 1 / np.sqrt(100)],
bias_init_consts=[0., 0., 0.],
layer_sizes=[400, 100, 100],
dropouts=[0.01, 0.01, 0.01],
fit_transformers=fit_transformers,
seed=123)
# Fit trained model
model.fit(train_dataset, nb_epoch=50)
train_scores = model.evaluate(train_dataset, regression_metric, transformers)
print("Train scores [kcal/mol]")
print(train_scores)
valid_scores = model.evaluate(valid_dataset, regression_metric, transformers)
print("Valid scores [kcal/mol]")
print(valid_scores)
test_scores = model.evaluate(test_dataset, regression_metric, transformers)
print("Test scores [kcal/mol]")
print(test_scores)
<file_sep>#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Mon Mar 6 23:41:26 2017
@author: zqwu
"""
import numpy as np
import deepchem
from deepchem.molnet.preset_hyper_parameters import hps
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import RandomForestRegressor
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
from sklearn.kernel_ridge import KernelRidge
def benchmark_classification(train_dataset,
valid_dataset,
test_dataset,
tasks,
transformers,
n_features,
metric,
model,
test=False,
hyper_parameters=None,
seed=123):
"""Calculate performance of different models on the specific dataset & tasks
Parameters
----------
train_dataset: dataset struct
dataset used for model training and evaluation
valid_dataset: dataset struct
dataset only used for model evaluation (and hyperparameter tuning)
test_dataset: dataset struct
dataset only used for model evaluation
tasks: list of string
list of targets(tasks, datasets)
transformers: dc.trans.Transformer struct
transformer used for model evaluation
n_features: integer
number of features, or length of binary fingerprints
metric: list of dc.metrics.Metric objects
metrics used for evaluation
model: string, optional
choice of model
'rf', 'tf', 'tf_robust', 'logreg', 'irv', 'graphconv', 'dag', 'xgb',
'weave', 'kernelsvm', 'textcnn', 'mpnn'
test: boolean, optional
whether to calculate test_set performance
hyper_parameters: dict, optional (default=None)
hyper parameters for designated model, None = use preset values
Returns
-------
train_scores : dict
predicting results(AUC) on training set
valid_scores : dict
predicting results(AUC) on valid set
test_scores : dict
predicting results(AUC) on test set
"""
train_scores = {}
valid_scores = {}
test_scores = {}
assert model in [
'rf', 'tf', 'tf_robust', 'logreg', 'irv', 'graphconv', 'dag', 'xgb',
'weave', 'kernelsvm', 'textcnn', 'mpnn'
]
if hyper_parameters is None:
hyper_parameters = hps[model]
model_name = model
if model_name == 'tf':
layer_sizes = hyper_parameters['layer_sizes']
weight_init_stddevs = hyper_parameters['weight_init_stddevs']
bias_init_consts = hyper_parameters['bias_init_consts']
dropouts = hyper_parameters['dropouts']
penalty = hyper_parameters['penalty']
penalty_type = hyper_parameters['penalty_type']
batch_size = hyper_parameters['batch_size']
nb_epoch = hyper_parameters['nb_epoch']
learning_rate = hyper_parameters['learning_rate']
# Building tensorflow MultitaskDNN model
model = deepchem.models.MultitaskClassifier(
len(tasks),
n_features,
layer_sizes=layer_sizes,
weight_init_stddevs=weight_init_stddevs,
bias_init_consts=bias_init_consts,
dropouts=dropouts,
weight_decay_penalty=penalty,
weight_decay_penalty_type=penalty_type,
batch_size=batch_size,
learning_rate=learning_rate,
random_seed=seed)
elif model_name == 'tf_robust':
layer_sizes = hyper_parameters['layer_sizes']
weight_init_stddevs = hyper_parameters['weight_init_stddevs']
bias_init_consts = hyper_parameters['bias_init_consts']
dropouts = hyper_parameters['dropouts']
bypass_layer_sizes = hyper_parameters['bypass_layer_sizes']
bypass_weight_init_stddevs = hyper_parameters[
'bypass_weight_init_stddevs']
bypass_bias_init_consts = hyper_parameters['bypass_bias_init_consts']
bypass_dropouts = hyper_parameters['bypass_dropouts']
penalty = hyper_parameters['penalty']
penalty_type = hyper_parameters['penalty_type']
batch_size = hyper_parameters['batch_size']
nb_epoch = hyper_parameters['nb_epoch']
learning_rate = hyper_parameters['learning_rate']
# Building tensorflow robust MultitaskDNN model
model = deepchem.models.RobustMultitaskClassifier(
len(tasks),
n_features,
layer_sizes=layer_sizes,
weight_init_stddevs=weight_init_stddevs,
bias_init_consts=bias_init_consts,
dropouts=dropouts,
bypass_layer_sizes=bypass_layer_sizes,
bypass_weight_init_stddevs=bypass_weight_init_stddevs,
bypass_bias_init_consts=bypass_bias_init_consts,
bypass_dropouts=bypass_dropouts,
weight_decay_penalty=penalty,
weight_decay_penalty_type=penalty_type,
batch_size=batch_size,
learning_rate=learning_rate,
random_seed=seed)
elif model_name == 'logreg':
penalty = hyper_parameters['penalty']
penalty_type = hyper_parameters['penalty_type']
nb_epoch = None
# Building scikit logistic regression model
def model_builder(model_dir):
sklearn_model = LogisticRegression(penalty=penalty_type,
C=1. / penalty,
class_weight="balanced",
n_jobs=-1)
return deepchem.models.sklearn_models.SklearnModel(
sklearn_model, model_dir)
model = deepchem.models.multitask.SingletaskToMultitask(
tasks, model_builder)
elif model_name == 'irv':
penalty = hyper_parameters['penalty']
batch_size = hyper_parameters['batch_size']
nb_epoch = hyper_parameters['nb_epoch']
learning_rate = hyper_parameters['learning_rate']
n_K = hyper_parameters['n_K']
# Transform fingerprints to IRV features
transformer = deepchem.trans.IRVTransformer(n_K, len(tasks),
train_dataset)
train_dataset = transformer.transform(train_dataset)
valid_dataset = transformer.transform(valid_dataset)
if test:
test_dataset = transformer.transform(test_dataset)
# Building tensorflow IRV model
model = deepchem.models.TensorflowMultitaskIRVClassifier(
len(tasks),
K=n_K,
penalty=penalty,
batch_size=batch_size,
learning_rate=learning_rate,
random_seed=seed,
mode='classification')
elif model_name == 'graphconv':
batch_size = hyper_parameters['batch_size']
nb_epoch = hyper_parameters['nb_epoch']
learning_rate = hyper_parameters['learning_rate']
n_filters = hyper_parameters['n_filters']
n_fully_connected_nodes = hyper_parameters['n_fully_connected_nodes']
model = deepchem.models.GraphConvModel(
len(tasks),
graph_conv_layers=[n_filters] * 2,
dense_layer_size=n_fully_connected_nodes,
batch_size=batch_size,
learning_rate=learning_rate,
random_seed=seed,
mode='classification')
elif model_name == 'dag':
batch_size = hyper_parameters['batch_size']
nb_epoch = hyper_parameters['nb_epoch']
learning_rate = hyper_parameters['learning_rate']
n_graph_feat = hyper_parameters['n_graph_feat']
default_max_atoms = hyper_parameters['default_max_atoms']
max_atoms_train = max([mol.get_num_atoms() for mol in train_dataset.X])
max_atoms_valid = max([mol.get_num_atoms() for mol in valid_dataset.X])
max_atoms_test = max([mol.get_num_atoms() for mol in test_dataset.X])
max_atoms = max([max_atoms_train, max_atoms_valid, max_atoms_test])
max_atoms = min([max_atoms, default_max_atoms])
print('Maximum number of atoms: %i' % max_atoms)
reshard_size = 256
transformer = deepchem.trans.DAGTransformer(max_atoms=max_atoms)
train_dataset.reshard(reshard_size)
train_dataset = transformer.transform(train_dataset)
valid_dataset.reshard(reshard_size)
valid_dataset = transformer.transform(valid_dataset)
if test:
test_dataset.reshard(reshard_size)
test_dataset = transformer.transform(test_dataset)
model = deepchem.models.DAGModel(len(tasks),
max_atoms=max_atoms,
n_atom_feat=n_features,
n_graph_feat=n_graph_feat,
n_outputs=30,
batch_size=batch_size,
learning_rate=learning_rate,
random_seed=seed,
use_queue=False,
mode='classification')
elif model_name == 'weave':
batch_size = hyper_parameters['batch_size']
nb_epoch = hyper_parameters['nb_epoch']
learning_rate = hyper_parameters['learning_rate']
n_graph_feat = hyper_parameters['n_graph_feat']
n_pair_feat = hyper_parameters['n_pair_feat']
model = deepchem.models.WeaveModel(len(tasks),
n_atom_feat=n_features,
n_pair_feat=n_pair_feat,
n_hidden=50,
n_graph_feat=n_graph_feat,
batch_size=batch_size,
learning_rate=learning_rate,
use_queue=False,
random_seed=seed,
mode='classification')
elif model_name == 'textcnn':
batch_size = hyper_parameters['batch_size']
nb_epoch = hyper_parameters['nb_epoch']
learning_rate = hyper_parameters['learning_rate']
n_embedding = hyper_parameters['n_embedding']
filter_sizes = hyper_parameters['filter_sizes']
num_filters = hyper_parameters['num_filters']
all_data = deepchem.data.DiskDataset.merge(
[train_dataset, valid_dataset, test_dataset])
char_dict, length = deepchem.models.TextCNNModel.build_char_dict(
all_data)
model = deepchem.models.TextCNNModel(len(tasks),
char_dict,
seq_length=length,
n_embedding=n_embedding,
filter_sizes=filter_sizes,
num_filters=num_filters,
learning_rate=learning_rate,
batch_size=batch_size,
use_queue=False,
random_seed=seed,
mode='classification')
elif model_name == 'mpnn':
batch_size = hyper_parameters['batch_size']
nb_epoch = hyper_parameters['nb_epoch']
learning_rate = hyper_parameters['learning_rate']
T = hyper_parameters['T']
M = hyper_parameters['M']
model = deepchem.models.MPNNModel(len(tasks),
n_atom_feat=n_features[0],
n_pair_feat=n_features[1],
n_hidden=n_features[0],
T=T,
M=M,
batch_size=batch_size,
learning_rate=learning_rate,
use_queue=False,
mode="classification")
elif model_name == 'rf':
n_estimators = hyper_parameters['n_estimators']
nb_epoch = None
# Building scikit random forest model
def model_builder(model_dir):
sklearn_model = RandomForestClassifier(class_weight="balanced",
n_estimators=n_estimators,
n_jobs=-1)
return deepchem.models.sklearn_models.SklearnModel(
sklearn_model, model_dir)
model = deepchem.models.multitask.SingletaskToMultitask(
tasks, model_builder)
elif model_name == 'kernelsvm':
C = hyper_parameters['C']
gamma = hyper_parameters['gamma']
nb_epoch = None
# Building scikit learn Kernel SVM model
def model_builder(model_dir):
sklearn_model = SVC(C=C,
gamma=gamma,
class_weight="balanced",
probability=True)
return deepchem.models.SklearnModel(sklearn_model, model_dir)
model = deepchem.models.multitask.SingletaskToMultitask(
tasks, model_builder)
elif model_name == 'xgb':
max_depth = hyper_parameters['max_depth']
learning_rate = hyper_parameters['learning_rate']
n_estimators = hyper_parameters['n_estimators']
gamma = hyper_parameters['gamma']
min_child_weight = hyper_parameters['min_child_weight']
max_delta_step = hyper_parameters['max_delta_step']
subsample = hyper_parameters['subsample']
colsample_bytree = hyper_parameters['colsample_bytree']
colsample_bylevel = hyper_parameters['colsample_bylevel']
reg_alpha = hyper_parameters['reg_alpha']
reg_lambda = hyper_parameters['reg_lambda']
scale_pos_weight = hyper_parameters['scale_pos_weight']
base_score = hyper_parameters['base_score']
seed = hyper_parameters['seed']
early_stopping_rounds = hyper_parameters['early_stopping_rounds']
nb_epoch = None
esr = {'early_stopping_rounds': early_stopping_rounds}
# Building xgboost classification model
def model_builder(model_dir):
import xgboost
xgboost_model = xgboost.XGBClassifier(
max_depth=max_depth,
learning_rate=learning_rate,
n_estimators=n_estimators,
gamma=gamma,
min_child_weight=min_child_weight,
max_delta_step=max_delta_step,
subsample=subsample,
colsample_bytree=colsample_bytree,
colsample_bylevel=colsample_bylevel,
reg_alpha=reg_alpha,
reg_lambda=reg_lambda,
scale_pos_weight=scale_pos_weight,
base_score=base_score,
seed=seed)
return deepchem.models.xgboost_models.XGBoostModel(
xgboost_model, model_dir, **esr)
model = deepchem.models.multitask.SingletaskToMultitask(
tasks, model_builder)
if nb_epoch is None:
model.fit(train_dataset)
else:
model.fit(train_dataset, nb_epoch=nb_epoch)
train_scores[model_name] = model.evaluate(train_dataset, metric,
transformers)
valid_scores[model_name] = model.evaluate(valid_dataset, metric,
transformers)
if test:
test_scores[model_name] = model.evaluate(test_dataset, metric,
transformers)
return train_scores, valid_scores, test_scores
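# Illustrative call (a sketch; assumes datasets/transformers from a MoleculeNet
# loader such as deepchem.molnet.load_tox21 with 1024-bit ECFP features):
#   metric = [deepchem.metrics.Metric(deepchem.metrics.roc_auc_score, np.mean)]
#   train_scores, valid_scores, test_scores = benchmark_classification(
#       train_dataset, valid_dataset, test_dataset, tasks, transformers,
#       n_features=1024, metric=metric, model='tf')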
def benchmark_regression(train_dataset,
valid_dataset,
test_dataset,
tasks,
transformers,
n_features,
metric,
model,
test=False,
hyper_parameters=None,
seed=123):
"""Calculate performance of different models on the specific dataset & tasks
Parameters
----------
train_dataset: dataset struct
dataset used for model training and evaluation
valid_dataset: dataset struct
dataset only used for model evaluation (and hyperparameter tuning)
test_dataset: dataset struct
dataset only used for model evaluation
tasks: list of string
list of targets(tasks, datasets)
transformers: dc.trans.Transformer struct
transformer used for model evaluation
n_features: integer
number of features, or length of binary fingerprints
metric: list of dc.metrics.Metric objects
metrics used for evaluation
model: string, optional
choice of model
'tf_regression', 'tf_regression_ft', 'rf_regression', 'graphconvreg',
'dtnn', 'dag_regression', 'xgb_regression', 'weave_regression',
'textcnn_regression', 'krr', 'ani', 'krr_ft', 'mpnn'
test: boolean, optional
whether to calculate test_set performance
hyper_parameters: dict, optional (default=None)
hyper parameters for designated model, None = use preset values
Returns
-------
train_scores : dict
predicting results(R2) on training set
valid_scores : dict
predicting results(R2) on valid set
test_scores : dict
predicting results(R2) on test set
"""
train_scores = {}
valid_scores = {}
test_scores = {}
assert model in [
'tf_regression', 'tf_regression_ft', 'rf_regression', 'graphconvreg',
'dtnn', 'dag_regression', 'xgb_regression', 'weave_regression',
'textcnn_regression', 'krr', 'ani', 'krr_ft', 'mpnn'
]
if hyper_parameters is None:
hyper_parameters = hps[model]
model_name = model
if model_name == 'tf_regression':
layer_sizes = hyper_parameters['layer_sizes']
weight_init_stddevs = hyper_parameters['weight_init_stddevs']
bias_init_consts = hyper_parameters['bias_init_consts']
dropouts = hyper_parameters['dropouts']
penalty = hyper_parameters['penalty']
penalty_type = hyper_parameters['penalty_type']
batch_size = hyper_parameters['batch_size']
nb_epoch = hyper_parameters['nb_epoch']
learning_rate = hyper_parameters['learning_rate']
model = deepchem.models.MultitaskRegressor(
len(tasks),
n_features,
layer_sizes=layer_sizes,
weight_init_stddevs=weight_init_stddevs,
bias_init_consts=bias_init_consts,
dropouts=dropouts,
weight_decay_penalty=penalty,
weight_decay_penalty_type=penalty_type,
batch_size=batch_size,
learning_rate=learning_rate,
seed=seed)
elif model_name == 'tf_regression_ft':
layer_sizes = hyper_parameters['layer_sizes']
weight_init_stddevs = hyper_parameters['weight_init_stddevs']
bias_init_consts = hyper_parameters['bias_init_consts']
dropouts = hyper_parameters['dropouts']
penalty = hyper_parameters['penalty']
penalty_type = hyper_parameters['penalty_type']
batch_size = hyper_parameters['batch_size']
nb_epoch = hyper_parameters['nb_epoch']
learning_rate = hyper_parameters['learning_rate']
fit_transformers = [hyper_parameters['fit_transformers'](train_dataset)]
model = deepchem.models.MultitaskFitTransformRegressor(
n_tasks=len(tasks),
n_features=n_features,
layer_sizes=layer_sizes,
weight_init_stddevs=weight_init_stddevs,
bias_init_consts=bias_init_consts,
dropouts=dropouts,
weight_decay_penalty=penalty,
weight_decay_penalty_type=penalty_type,
batch_size=batch_size,
learning_rate=learning_rate,
fit_transformers=fit_transformers,
n_eval=10,
seed=seed)
elif model_name == 'graphconvreg':
batch_size = hyper_parameters['batch_size']
nb_epoch = hyper_parameters['nb_epoch']
learning_rate = hyper_parameters['learning_rate']
n_filters = hyper_parameters['n_filters']
n_fully_connected_nodes = hyper_parameters['n_fully_connected_nodes']
model = deepchem.models.GraphConvModel(
len(tasks),
graph_conv_layers=[n_filters] * 2,
dense_layer_size=n_fully_connected_nodes,
batch_size=batch_size,
learning_rate=learning_rate,
random_seed=seed,
mode='regression')
elif model_name == 'dtnn':
batch_size = hyper_parameters['batch_size']
nb_epoch = hyper_parameters['nb_epoch']
learning_rate = hyper_parameters['learning_rate']
n_embedding = hyper_parameters['n_embedding']
n_distance = hyper_parameters['n_distance']
assert len(n_features) == 2, 'DTNN is only applicable to qm datasets'
model = deepchem.models.DTNNModel(len(tasks),
n_embedding=n_embedding,
n_distance=n_distance,
batch_size=batch_size,
learning_rate=learning_rate,
random_seed=seed,
output_activation=False,
use_queue=False,
mode='regression')
elif model_name == 'dag_regression':
batch_size = hyper_parameters['batch_size']
nb_epoch = hyper_parameters['nb_epoch']
learning_rate = hyper_parameters['learning_rate']
n_graph_feat = hyper_parameters['n_graph_feat']
default_max_atoms = hyper_parameters['default_max_atoms']
max_atoms_train = max([mol.get_num_atoms() for mol in train_dataset.X])
max_atoms_valid = max([mol.get_num_atoms() for mol in valid_dataset.X])
max_atoms_test = max([mol.get_num_atoms() for mol in test_dataset.X])
max_atoms = max([max_atoms_train, max_atoms_valid, max_atoms_test])
max_atoms = min([max_atoms, default_max_atoms])
print('Maximum number of atoms: %i' % max_atoms)
reshard_size = 256
transformer = deepchem.trans.DAGTransformer(max_atoms=max_atoms)
train_dataset.reshard(reshard_size)
train_dataset = transformer.transform(train_dataset)
valid_dataset.reshard(reshard_size)
valid_dataset = transformer.transform(valid_dataset)
if test:
test_dataset.reshard(reshard_size)
test_dataset = transformer.transform(test_dataset)
model = deepchem.models.DAGModel(len(tasks),
max_atoms=max_atoms,
n_atom_feat=n_features,
n_graph_feat=n_graph_feat,
n_outputs=30,
batch_size=batch_size,
learning_rate=learning_rate,
random_seed=seed,
use_queue=False,
mode='regression')
elif model_name == 'weave_regression':
batch_size = hyper_parameters['batch_size']
nb_epoch = hyper_parameters['nb_epoch']
learning_rate = hyper_parameters['learning_rate']
n_graph_feat = hyper_parameters['n_graph_feat']
n_pair_feat = hyper_parameters['n_pair_feat']
model = deepchem.models.WeaveModel(len(tasks),
n_atom_feat=n_features,
n_pair_feat=n_pair_feat,
n_hidden=50,
n_graph_feat=n_graph_feat,
batch_size=batch_size,
learning_rate=learning_rate,
use_queue=False,
random_seed=seed,
mode='regression')
elif model_name == 'textcnn_regression':
batch_size = hyper_parameters['batch_size']
nb_epoch = hyper_parameters['nb_epoch']
learning_rate = hyper_parameters['learning_rate']
n_embedding = hyper_parameters['n_embedding']
filter_sizes = hyper_parameters['filter_sizes']
num_filters = hyper_parameters['num_filters']
char_dict, length = deepchem.models.TextCNNModel.build_char_dict(
train_dataset)
model = deepchem.models.TextCNNModel(len(tasks),
char_dict,
seq_length=length,
n_embedding=n_embedding,
filter_sizes=filter_sizes,
num_filters=num_filters,
learning_rate=learning_rate,
batch_size=batch_size,
use_queue=False,
random_seed=seed,
mode='regression')
elif model_name == 'ani':
batch_size = hyper_parameters['batch_size']
nb_epoch = hyper_parameters['nb_epoch']
learning_rate = hyper_parameters['learning_rate']
layer_structures = hyper_parameters['layer_structures']
assert len(n_features) == 2, 'ANI is only applicable to qm datasets'
max_atoms = n_features[0]
atom_number_cases = np.unique(
np.concatenate([
train_dataset.X[:, :, 0], valid_dataset.X[:, :, 0],
test_dataset.X[:, :, 0]
]))
atom_number_cases = atom_number_cases.astype(int).tolist()
try:
# Remove token for paddings
atom_number_cases.remove(0)
except:
pass
ANItransformer = deepchem.trans.ANITransformer(
max_atoms=max_atoms, atom_cases=atom_number_cases)
train_dataset = ANItransformer.transform(train_dataset)
valid_dataset = ANItransformer.transform(valid_dataset)
if test:
test_dataset = ANItransformer.transform(test_dataset)
n_feat = ANItransformer.get_num_feats() - 1
model = deepchem.models.ANIRegression(
len(tasks),
max_atoms,
n_feat,
layer_structures=layer_structures,
atom_number_cases=atom_number_cases,
batch_size=batch_size,
learning_rate=learning_rate,
use_queue=False,
mode="regression",
random_seed=seed)
elif model_name == 'mpnn':
batch_size = hyper_parameters['batch_size']
nb_epoch = hyper_parameters['nb_epoch']
learning_rate = hyper_parameters['learning_rate']
T = hyper_parameters['T']
M = hyper_parameters['M']
model = deepchem.models.MPNNModel(len(tasks),
n_atom_feat=n_features[0],
n_pair_feat=n_features[1],
n_hidden=n_features[0],
T=T,
M=M,
batch_size=batch_size,
learning_rate=learning_rate,
use_queue=False,
mode="regression")
elif model_name == 'rf_regression':
n_estimators = hyper_parameters['n_estimators']
nb_epoch = None
# Building scikit random forest model
def model_builder(model_dir):
sklearn_model = RandomForestRegressor(n_estimators=n_estimators,
n_jobs=-1)
return deepchem.models.sklearn_models.SklearnModel(
sklearn_model, model_dir)
model = deepchem.models.multitask.SingletaskToMultitask(
tasks, model_builder)
elif model_name == 'krr':
alpha = hyper_parameters['alpha']
nb_epoch = None
# Building scikit learn Kernel Ridge Regression model
def model_builder(model_dir):
sklearn_model = KernelRidge(kernel="rbf", alpha=alpha)
return deepchem.models.SklearnModel(sklearn_model, model_dir)
model = deepchem.models.multitask.SingletaskToMultitask(
tasks, model_builder)
elif model_name == 'krr_ft':
alpha = hyper_parameters['alpha']
nb_epoch = None
ft_transformer = deepchem.trans.CoulombFitTransformer(train_dataset)
train_dataset = ft_transformer.transform(train_dataset)
valid_dataset = ft_transformer.transform(valid_dataset)
test_dataset = ft_transformer.transform(test_dataset)
# Building scikit learn Kernel Ridge Regression model
def model_builder(model_dir):
sklearn_model = KernelRidge(kernel="rbf", alpha=alpha)
return deepchem.models.SklearnModel(sklearn_model, model_dir)
model = deepchem.models.multitask.SingletaskToMultitask(
tasks, model_builder)
elif model_name == 'xgb_regression':
max_depth = hyper_parameters['max_depth']
learning_rate = hyper_parameters['learning_rate']
n_estimators = hyper_parameters['n_estimators']
gamma = hyper_parameters['gamma']
min_child_weight = hyper_parameters['min_child_weight']
max_delta_step = hyper_parameters['max_delta_step']
subsample = hyper_parameters['subsample']
colsample_bytree = hyper_parameters['colsample_bytree']
colsample_bylevel = hyper_parameters['colsample_bylevel']
reg_alpha = hyper_parameters['reg_alpha']
reg_lambda = hyper_parameters['reg_lambda']
scale_pos_weight = hyper_parameters['scale_pos_weight']
base_score = hyper_parameters['base_score']
seed = hyper_parameters['seed']
early_stopping_rounds = hyper_parameters['early_stopping_rounds']
nb_epoch = None
esr = {'early_stopping_rounds': early_stopping_rounds}
# Building xgboost regression model
def model_builder(model_dir):
import xgboost
xgboost_model = xgboost.XGBRegressor(
max_depth=max_depth,
learning_rate=learning_rate,
n_estimators=n_estimators,
gamma=gamma,
min_child_weight=min_child_weight,
max_delta_step=max_delta_step,
subsample=subsample,
colsample_bytree=colsample_bytree,
colsample_bylevel=colsample_bylevel,
reg_alpha=reg_alpha,
reg_lambda=reg_lambda,
scale_pos_weight=scale_pos_weight,
base_score=base_score,
seed=seed)
return deepchem.models.xgboost_models.XGBoostModel(
xgboost_model, model_dir, **esr)
model = deepchem.models.multitask.SingletaskToMultitask(
tasks, model_builder)
print('-----------------------------')
print('Start fitting: %s' % model_name)
if nb_epoch is None:
model.fit(train_dataset)
else:
model.fit(train_dataset, nb_epoch=nb_epoch)
train_scores[model_name] = model.evaluate(train_dataset, metric,
transformers)
valid_scores[model_name] = model.evaluate(valid_dataset, metric,
transformers)
if test:
test_scores[model_name] = model.evaluate(test_dataset, metric,
transformers)
return train_scores, valid_scores, test_scores
'''
def low_data_benchmark_classification(train_dataset,
valid_dataset,
n_features,
metric,
model='siamese',
hyper_parameters=None,
seed=123):
"""
Calculate low data benchmark performance
Parameters
----------
train_dataset : dataset struct
loaded dataset, ConvMol struct, used for training
valid_dataset : dataset struct
loaded dataset, ConvMol struct, used for validation
n_features : integer
number of features, or length of binary fingerprints
metric: list of dc.metrics.Metric objects
metrics used for evaluation
model : string, optional (default='siamese')
choice of which model to use, should be: siamese, attn, res
hyper_parameters: dict, optional (default=None)
hyper parameters for designated model, None = use preset values
Returns
-------
valid_scores : dict
predicting results(AUC) on valid set
"""
train_scores = {} # train set not evaluated in low data model
valid_scores = {}
assert model in ['siamese', 'attn', 'res']
if hyper_parameters is None:
hyper_parameters = hps[model]
# Loading hyperparameters
# num positive/negative ligands
n_pos = hyper_parameters['n_pos']
n_neg = hyper_parameters['n_neg']
# Set batch sizes for network
test_batch_size = hyper_parameters['test_batch_size']
support_batch_size = n_pos + n_neg
# Model structure
n_filters = hyper_parameters['n_filters']
n_fully_connected_nodes = hyper_parameters['n_fully_connected_nodes']
  # Training settings
nb_epochs = hyper_parameters['nb_epochs']
n_train_trials = hyper_parameters['n_train_trials']
n_eval_trials = hyper_parameters['n_eval_trials']
learning_rate = hyper_parameters['learning_rate']
tf.set_random_seed(seed)
support_graph = deepchem.nn.SequentialSupportGraph(n_features)
prev_features = n_features
for count, n_filter in enumerate(n_filters):
support_graph.add(
deepchem.nn.GraphConv(int(n_filter), prev_features, activation='relu'))
support_graph.add(deepchem.nn.GraphPool())
prev_features = int(n_filter)
for count, n_fcnode in enumerate(n_fully_connected_nodes):
support_graph.add(
deepchem.nn.Dense(int(n_fcnode), prev_features, activation='tanh'))
prev_features = int(n_fcnode)
support_graph.add_test(
deepchem.nn.GraphGather(test_batch_size, activation='tanh'))
support_graph.add_support(
deepchem.nn.GraphGather(support_batch_size, activation='tanh'))
if model in ['siamese']:
pass
elif model in ['attn']:
max_depth = hyper_parameters['max_depth']
support_graph.join(
deepchem.nn.AttnLSTMEmbedding(test_batch_size, support_batch_size,
prev_features, max_depth))
elif model in ['res']:
max_depth = hyper_parameters['max_depth']
support_graph.join(
deepchem.nn.ResiLSTMEmbedding(test_batch_size, support_batch_size,
prev_features, max_depth))
model_low_data = deepchem.models.SupportGraphClassifier(
support_graph,
test_batch_size=test_batch_size,
support_batch_size=support_batch_size,
learning_rate=learning_rate)
print('-------------------------------------')
print('Start fitting by low data model: ' + model)
# Fit trained model
model_low_data.fit(
train_dataset,
nb_epochs=nb_epochs,
n_episodes_per_epoch=n_train_trials,
n_pos=n_pos,
n_neg=n_neg,
log_every_n_samples=50)
# Evaluating low data model
valid_scores[model] = model_low_data.evaluate(
valid_dataset, metric, n_pos, n_neg, n_trials=n_eval_trials)
return valid_scores
'''
<file_sep>import os
import deepchem as dc
import numpy as np
import tempfile
def test_make_legacy_dataset_from_numpy():
"""Test that legacy DiskDataset objects can be constructed."""
current_dir = os.path.dirname(os.path.abspath(__file__))
# legacy_dataset is a dataset in the legacy format kept around for testing purposes.
data_dir = os.path.join(current_dir, "legacy_dataset")
dataset = dc.data.DiskDataset(data_dir)
assert dataset.legacy_metadata
assert len(dataset.metadata_df.columns) == 4
assert list(dataset.metadata_df.columns) == ['ids', 'X', 'y', 'w']
# Test constructor reload works for legacy format
dataset2 = dc.data.DiskDataset(dataset.data_dir)
assert dataset2.legacy_metadata
assert len(dataset2.metadata_df.columns) == 4
assert list(dataset2.metadata_df.columns) == ['ids', 'X', 'y', 'w']
def test_reshard():
"""Test that resharding updates legacy datasets."""
# legacy_dataset_reshard is a sharded dataset in the legacy format kept
# around for testing resharding.
current_dir = os.path.dirname(os.path.abspath(__file__))
data_dir = os.path.join(current_dir, "legacy_dataset_reshard")
dataset = dc.data.DiskDataset(data_dir)
assert dataset.legacy_metadata
assert len(dataset.metadata_df.columns) == 4
assert list(dataset.metadata_df.columns) == ['ids', 'X', 'y', 'w']
with tempfile.TemporaryDirectory() as tmpdirname:
copy = dataset.copy(tmpdirname)
assert np.all(copy.X == dataset.X)
assert np.all(copy.y == dataset.y)
assert np.all(copy.w == dataset.w)
assert np.all(copy.ids == dataset.ids)
# Reshard copy
copy.reshard(shard_size=10)
assert copy.get_number_shards() == 10
# Check metadata has been updated
assert not copy.legacy_metadata
assert len(copy.metadata_df.columns) == 8
assert list(copy.metadata_df.columns) == [
'ids', 'X', 'y', 'w', 'ids_shape', 'X_shape', 'y_shape', 'w_shape'
]
<file_sep>import tensorflow as tf
from collections.abc import Sequence as SequenceCollection
import logging
import deepchem as dc
from deepchem.metrics import to_one_hot
from deepchem.models import KerasModel
from deepchem.models.layers import Stack
from deepchem.models.losses import SoftmaxCrossEntropy, L2Loss
from typing import Tuple, Iterable, List
logger = logging.getLogger(__name__)
class RobustMultitaskClassifier(KerasModel):
"""Implements a neural network for robust multitasking.
The key idea of this model is to have bypass layers that feed
directly from features to task output. This might provide some
    flexibility to route around challenges in multitasking with
destructive interference.
References
----------
This technique was introduced in [1]_
.. [1] Ramsundar, Bharath, et al. "Is multitask deep learning practical for pharma?." Journal of chemical information and modeling 57.8 (2017): 2068-2076.
"""
def __init__(self,
n_tasks,
n_features,
layer_sizes=[1000],
weight_init_stddevs=0.02,
bias_init_consts=1.0,
weight_decay_penalty=0.0,
weight_decay_penalty_type="l2",
dropouts=0.5,
activation_fns=tf.nn.relu,
n_classes=2,
bypass_layer_sizes=[100],
bypass_weight_init_stddevs=[.02],
bypass_bias_init_consts=[1.],
bypass_dropouts=[.5],
**kwargs):
""" Create a RobustMultitaskClassifier.
Parameters
----------
n_tasks: int
number of tasks
n_features: int
number of features
layer_sizes: list
the size of each dense layer in the network. The length of this list determines the number of layers.
weight_init_stddevs: list or float
the standard deviation of the distribution to use for weight initialization of each layer. The length
of this list should equal len(layer_sizes). Alternatively this may be a single value instead of a list,
in which case the same value is used for every layer.
bias_init_consts: list or float
the value to initialize the biases in each layer to. The length of this list should equal len(layer_sizes).
Alternatively this may be a single value instead of a list, in which case the same value is used for every layer.
weight_decay_penalty: float
the magnitude of the weight decay penalty to use
weight_decay_penalty_type: str
the type of penalty to use for weight decay, either 'l1' or 'l2'
dropouts: list or float
the dropout probability to use for each layer. The length of this list should equal len(layer_sizes).
Alternatively this may be a single value instead of a list, in which case the same value is used for every layer.
activation_fns: list or object
the Tensorflow activation function to apply to each layer. The length of this list should equal
len(layer_sizes). Alternatively this may be a single value instead of a list, in which case the
same value is used for every layer.
n_classes: int
the number of classes
bypass_layer_sizes: list
the size of each dense layer in the bypass network. The length of this list determines the number of bypass layers.
bypass_weight_init_stddevs: list or float
the standard deviation of the distribution to use for weight initialization of bypass layers.
same requirements as weight_init_stddevs
bypass_bias_init_consts: list or float
the value to initialize the biases in bypass layers
same requirements as bias_init_consts
bypass_dropouts: list or float
the dropout probability to use for bypass layers.
same requirements as dropouts
"""
self.n_tasks = n_tasks
self.n_features = n_features
self.n_classes = n_classes
n_layers = len(layer_sizes)
if not isinstance(weight_init_stddevs, SequenceCollection):
weight_init_stddevs = [weight_init_stddevs] * n_layers
if not isinstance(bias_init_consts, SequenceCollection):
bias_init_consts = [bias_init_consts] * n_layers
if not isinstance(dropouts, SequenceCollection):
dropouts = [dropouts] * n_layers
if not isinstance(activation_fns, SequenceCollection):
activation_fns = [activation_fns] * n_layers
if weight_decay_penalty != 0.0:
if weight_decay_penalty_type == 'l1':
regularizer = tf.keras.regularizers.l1(weight_decay_penalty)
else:
regularizer = tf.keras.regularizers.l2(weight_decay_penalty)
else:
regularizer = None
n_bypass_layers = len(bypass_layer_sizes)
if not isinstance(bypass_weight_init_stddevs, SequenceCollection):
bypass_weight_init_stddevs = [bypass_weight_init_stddevs
] * n_bypass_layers
if not isinstance(bypass_bias_init_consts, SequenceCollection):
bypass_bias_init_consts = [bypass_bias_init_consts
] * n_bypass_layers
if not isinstance(bypass_dropouts, SequenceCollection):
bypass_dropouts = [bypass_dropouts] * n_bypass_layers
bypass_activation_fns = [activation_fns[0]] * n_bypass_layers
# Add the input features.
mol_features = tf.keras.Input(shape=(n_features,))
prev_layer = mol_features
# Add the shared dense layers
for size, weight_stddev, bias_const, dropout, activation_fn in zip(
layer_sizes, weight_init_stddevs, bias_init_consts, dropouts,
activation_fns):
layer = tf.keras.layers.Dense(
size,
activation=activation_fn,
kernel_initializer=tf.keras.initializers.TruncatedNormal(
stddev=weight_stddev),
bias_initializer=tf.constant_initializer(value=bias_const),
kernel_regularizer=regularizer)(prev_layer)
if dropout > 0.0:
layer = tf.keras.layers.Dropout(rate=dropout)(layer)
prev_layer = layer
top_multitask_layer = prev_layer
task_outputs = []
for i in range(self.n_tasks):
prev_layer = mol_features
# Add task-specific bypass layers
for size, weight_stddev, bias_const, dropout, activation_fn in zip(
bypass_layer_sizes, bypass_weight_init_stddevs,
bypass_bias_init_consts, bypass_dropouts,
bypass_activation_fns):
layer = tf.keras.layers.Dense(
size,
activation=activation_fn,
kernel_initializer=tf.keras.initializers.TruncatedNormal(
stddev=weight_stddev),
bias_initializer=tf.constant_initializer(value=bias_const),
kernel_regularizer=regularizer)(prev_layer)
if dropout > 0.0:
layer = tf.keras.layers.Dropout(rate=dropout)(layer)
prev_layer = layer
top_bypass_layer = prev_layer
if n_bypass_layers > 0:
task_layer = tf.keras.layers.Concatenate(axis=1)(
[top_multitask_layer, top_bypass_layer])
else:
task_layer = top_multitask_layer
task_out = tf.keras.layers.Dense(n_classes)(task_layer)
task_outputs.append(task_out)
logits = Stack(axis=1)(task_outputs)
output = tf.keras.layers.Softmax()(logits)
model = tf.keras.Model(inputs=mol_features, outputs=[output, logits])
super(RobustMultitaskClassifier,
self).__init__(model,
SoftmaxCrossEntropy(),
output_types=['prediction', 'loss'],
**kwargs)
def default_generator(self,
dataset,
epochs=1,
mode='fit',
deterministic=True,
pad_batches=True):
for epoch in range(epochs):
for (X_b, y_b, w_b,
ids_b) in dataset.iterbatches(batch_size=self.batch_size,
deterministic=deterministic,
pad_batches=pad_batches):
if y_b is not None:
y_b = to_one_hot(y_b.flatten(), self.n_classes).reshape(
-1, self.n_tasks, self.n_classes)
yield ([X_b], [y_b], [w_b])
def create_estimator_inputs(self, feature_columns, weight_column, features,
labels, mode):
tensors = {}
for layer, column in zip(self.features, feature_columns):
tensors[layer] = tf.feature_column.input_layer(features, [column])
if weight_column is not None:
tensors[self.task_weights[0]] = tf.feature_column.input_layer(
features, [weight_column])
if labels is not None:
tensors[self.labels[0]] = tf.one_hot(tf.cast(labels, tf.int32),
self.n_classes)
return tensors
class RobustMultitaskRegressor(KerasModel):
"""Implements a neural network for robust multitasking.
The key idea of this model is to have bypass layers that feed
directly from features to task output. This might provide some
flexibility to route around challenges in multitasking with
destructive interference.
References
----------
.. [1] Ramsundar, Bharath, et al. "Is multitask deep learning practical for pharma?." Journal of chemical information and modeling 57.8 (2017): 2068-2076.
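Examples
--------
A minimal, illustrative regression sketch; the random toy data and layer
sizes are arbitrary assumptions chosen only for demonstration.
>>> import numpy as np
>>> import deepchem as dc
>>> n_samples, n_features, n_tasks = 10, 100, 2
>>> X = np.random.rand(n_samples, n_features)  # toy features (illustrative only)
>>> y = np.random.rand(n_samples, n_tasks)  # toy continuous targets
>>> dataset = dc.data.NumpyDataset(X, y)
>>> model = dc.models.RobustMultitaskRegressor(n_tasks, n_features,
...                                            layer_sizes=[64],
...                                            bypass_layer_sizes=[16])
>>> loss = model.fit(dataset, nb_epoch=1)
>>> predictions = model.predict(dataset)  # per-task predictions for each sample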
"""
def __init__(self,
n_tasks,
n_features,
layer_sizes=[1000],
weight_init_stddevs=0.02,
bias_init_consts=1.0,
weight_decay_penalty=0.0,
weight_decay_penalty_type="l2",
dropouts=0.5,
activation_fns=tf.nn.relu,
bypass_layer_sizes=[100],
bypass_weight_init_stddevs=[.02],
bypass_bias_init_consts=[1.],
bypass_dropouts=[.5],
**kwargs):
""" Create a RobustMultitaskRegressor.
Parameters
----------
n_tasks: int
number of tasks
n_features: int
number of features
layer_sizes: list
the size of each dense layer in the network. The length of this list determines the number of layers.
weight_init_stddevs: list or float
the standard deviation of the distribution to use for weight initialization of each layer. The length
of this list should equal len(layer_sizes). Alternatively this may be a single value instead of a list,
in which case the same value is used for every layer.
bias_init_consts: list or float
the value to initialize the biases in each layer to. The length of this list should equal len(layer_sizes).
Alternatively this may be a single value instead of a list, in which case the same value is used for every layer.
weight_decay_penalty: float
the magnitude of the weight decay penalty to use
weight_decay_penalty_type: str
the type of penalty to use for weight decay, either 'l1' or 'l2'
dropouts: list or float
the dropout probability to use for each layer. The length of this list should equal len(layer_sizes).
Alternatively this may be a single value instead of a list, in which case the same value is used for every layer.
activation_fns: list or object
the Tensorflow activation function to apply to each layer. The length of this list should equal
len(layer_sizes). Alternatively this may be a single value instead of a list, in which case the
same value is used for every layer.
bypass_layer_sizes: list
the size of each dense layer in the bypass network. The length of this list determines the number of bypass layers.
bypass_weight_init_stddevs: list or float
the standard deviation of the distribution to use for weight initialization of bypass layers.
same requirements as weight_init_stddevs
bypass_bias_init_consts: list or float
the value to initialize the biases in bypass layers
same requirements as bias_init_consts
bypass_dropouts: list or float
the dropout probability to use for bypass layers.
same requirements as dropouts
"""
self.n_tasks = n_tasks
self.n_features = n_features
n_layers = len(layer_sizes)
if not isinstance(weight_init_stddevs, SequenceCollection):
weight_init_stddevs = [weight_init_stddevs] * n_layers
if not isinstance(bias_init_consts, SequenceCollection):
bias_init_consts = [bias_init_consts] * n_layers
if not isinstance(dropouts, SequenceCollection):
dropouts = [dropouts] * n_layers
if not isinstance(activation_fns, SequenceCollection):
activation_fns = [activation_fns] * n_layers
if weight_decay_penalty != 0.0:
if weight_decay_penalty_type == 'l1':
regularizer = tf.keras.regularizers.l1(weight_decay_penalty)
else:
regularizer = tf.keras.regularizers.l2(weight_decay_penalty)
else:
regularizer = None
n_bypass_layers = len(bypass_layer_sizes)
if not isinstance(bypass_weight_init_stddevs, SequenceCollection):
bypass_weight_init_stddevs = [bypass_weight_init_stddevs
] * n_bypass_layers
if not isinstance(bypass_bias_init_consts, SequenceCollection):
bypass_bias_init_consts = [bypass_bias_init_consts
] * n_bypass_layers
if not isinstance(bypass_dropouts, SequenceCollection):
bypass_dropouts = [bypass_dropouts] * n_bypass_layers
bypass_activation_fns = [activation_fns[0]] * n_bypass_layers
# Add the input features.
mol_features = tf.keras.Input(shape=(n_features,))
prev_layer = mol_features
# Add the shared dense layers
for size, weight_stddev, bias_const, dropout, activation_fn in zip(
layer_sizes, weight_init_stddevs, bias_init_consts, dropouts,
activation_fns):
layer = tf.keras.layers.Dense(
size,
activation=activation_fn,
kernel_initializer=tf.keras.initializers.TruncatedNormal(
stddev=weight_stddev),
bias_initializer=tf.constant_initializer(value=bias_const),
kernel_regularizer=regularizer)(prev_layer)
if dropout > 0.0:
layer = tf.keras.layers.Dropout(rate=dropout)(layer)
prev_layer = layer
top_multitask_layer = prev_layer
task_outputs = []
for i in range(self.n_tasks):
prev_layer = mol_features
# Add task-specific bypass layers
for size, weight_stddev, bias_const, dropout, activation_fn in zip(
bypass_layer_sizes, bypass_weight_init_stddevs,
bypass_bias_init_consts, bypass_dropouts,
bypass_activation_fns):
layer = tf.keras.layers.Dense(
size,
activation=activation_fn,
kernel_initializer=tf.keras.initializers.TruncatedNormal(
stddev=weight_stddev),
bias_initializer=tf.constant_initializer(value=bias_const),
kernel_regularizer=regularizer)(prev_layer)
if dropout > 0.0:
layer = tf.keras.layers.Dropout(rate=dropout)(layer)
prev_layer = layer
top_bypass_layer = prev_layer
if n_bypass_layers > 0:
task_layer = tf.keras.layers.Concatenate(axis=1)(
[top_multitask_layer, top_bypass_layer])
else:
task_layer = top_multitask_layer
task_out = tf.keras.layers.Dense(1)(task_layer)
task_outputs.append(task_out)
outputs = Stack(axis=1)(task_outputs)
model = tf.keras.Model(inputs=mol_features, outputs=outputs)
super(RobustMultitaskRegressor,
self).__init__(model,
L2Loss(),
output_types=['prediction'],
**kwargs)
def default_generator(
self,
dataset: dc.data.Dataset,
epochs: int = 1,
mode: str = 'fit',
deterministic: bool = True,
pad_batches: bool = True) -> Iterable[Tuple[List, List, List]]:
for epoch in range(epochs):
for (X_b, y_b, w_b,
ids_b) in dataset.iterbatches(batch_size=self.batch_size,
deterministic=deterministic,
pad_batches=pad_batches):
yield ([X_b], [y_b], [w_b])
<file_sep>"""
Tests for random hyperparam optimization.
"""
import unittest
import tempfile
import numpy as np
import pytest
import deepchem as dc
import sklearn
import sklearn.ensemble
import os
from scipy.stats import uniform
class TestRandomHyperparamOpt(unittest.TestCase):
"""
Test random hyperparameter optimization API.
"""
def setUp(self):
"""Set up common resources."""
def rf_model_builder(**model_params):
rf_params = {
k: v for (k, v) in model_params.items() if k != 'model_dir'
}
model_dir = model_params['model_dir']
sklearn_model = sklearn.ensemble.RandomForestRegressor(**rf_params)
return dc.models.SklearnModel(sklearn_model, model_dir)
self.rf_model_builder = rf_model_builder
self.max_iter = 5
self.train_dataset = dc.data.NumpyDataset(X=np.random.rand(50, 5),
y=np.random.rand(50, 1))
self.valid_dataset = dc.data.NumpyDataset(X=np.random.rand(20, 5),
y=np.random.rand(20, 1))
def test_rf_hyperparam(self):
"""Test of hyperparam_opt with singletask RF ECFP regression API."""
optimizer = dc.hyper.RandomHyperparamOpt(self.rf_model_builder,
max_iter=self.max_iter)
sampler = uniform(loc=0, scale=0.001).rvs
params_dict = {
"n_estimators": [10, 100],
"min_impurity_decrease": sampler
}
transformers = []
metric = dc.metrics.Metric(dc.metrics.pearson_r2_score)
best_model, best_hyperparams, all_results = optimizer.hyperparam_search(
params_dict, self.train_dataset, self.valid_dataset, metric,
transformers)
valid_score = best_model.evaluate(self.valid_dataset, [metric],
transformers)
assert valid_score["pearson_r2_score"] == max(all_results.values())
assert valid_score["pearson_r2_score"] > 0
def test_rf_hyperparam_min(self):
"""Test of hyperparam_opt with singletask RF ECFP regression API."""
optimizer = dc.hyper.RandomHyperparamOpt(self.rf_model_builder,
max_iter=self.max_iter)
sampler = uniform(loc=0, scale=0.001).rvs
params_dict = {
"n_estimators": [10, 100],
"min_impurity_decrease": sampler
}
transformers = []
metric = dc.metrics.Metric(dc.metrics.pearson_r2_score)
best_model, best_hyperparams, all_results = optimizer.hyperparam_search(
params_dict,
self.train_dataset,
self.valid_dataset,
metric,
transformers,
use_max=False)
valid_score = best_model.evaluate(self.valid_dataset, [metric],
transformers)
assert valid_score["pearson_r2_score"] == min(all_results.values())
assert valid_score["pearson_r2_score"] > 0
def test_rf_with_logdir(self):
"""Test that using a logdir can work correctly."""
optimizer = dc.hyper.RandomHyperparamOpt(self.rf_model_builder,
max_iter=self.max_iter)
sampler = uniform(loc=0, scale=0.001).rvs
params_dict = {
"n_estimators": [10, 5],
"min_impurity_decrease": sampler
}
transformers = []
metric = dc.metrics.Metric(dc.metrics.pearson_r2_score)
with tempfile.TemporaryDirectory() as tmpdirname:
best_model, best_hyperparams, all_results = optimizer.hyperparam_search(
params_dict,
self.train_dataset,
self.valid_dataset,
metric,
transformers,
logdir=tmpdirname)
# max_iter model variants, 1 results.txt file
assert len(os.listdir(tmpdirname)) == self.max_iter + 1
@pytest.mark.torch
def test_multitask_example(self):
"""Test a simple example of optimizing a multitask model with a random search."""
# Generate dummy dataset
np.random.seed(123)
train_dataset = dc.data.NumpyDataset(np.random.rand(10, 3),
np.zeros((10, 2)), np.ones(
(10, 2)), np.arange(10))
valid_dataset = dc.data.NumpyDataset(np.random.rand(5, 3),
np.zeros((5, 2)), np.ones((5, 2)),
np.arange(5))
optimizer = dc.hyper.RandomHyperparamOpt(
lambda **params: dc.models.MultitaskRegressor(
n_tasks=2,
n_features=3,
dropouts=[0.],
weight_init_stddevs=[np.sqrt(6) / np.sqrt(1000)],
learning_rate=0.003,
**params),
max_iter=self.max_iter)
params_dict = {"batch_size": [10, 20]}
transformers = []
metric = dc.metrics.Metric(dc.metrics.mean_squared_error,
task_averager=np.mean)
best_model, best_hyperparams, all_results = optimizer.hyperparam_search(
params_dict,
train_dataset,
valid_dataset,
metric,
transformers,
use_max=False)
valid_score = best_model.evaluate(valid_dataset, [metric])
assert valid_score["mean-mean_squared_error"] == min(
all_results.values())
assert valid_score["mean-mean_squared_error"] > 0
@pytest.mark.torch
def test_multitask_example_multiple_params(self):
"""Test a simple example of optimizing a multitask model with a random search
with multiple parameters to optimize."""
# Generate dummy dataset
np.random.seed(123)
train_dataset = dc.data.NumpyDataset(np.random.rand(10, 3),
np.zeros((10, 2)), np.ones(
(10, 2)), np.arange(10))
valid_dataset = dc.data.NumpyDataset(np.random.rand(5, 3),
np.zeros((5, 2)), np.ones((5, 2)),
np.arange(5))
optimizer = dc.hyper.RandomHyperparamOpt(
lambda **params: dc.models.MultitaskRegressor(
n_tasks=2,
n_features=3,
dropouts=[0.],
weight_init_stddevs=[np.sqrt(6) / np.sqrt(1000)],
**params),
max_iter=self.max_iter)
sampler = uniform(loc=0.003, scale=10).rvs
params_dict = {"learning_rate": sampler, "batch_size": [10, 50]}
# These are per-example multipliers
transformers = []
metric = dc.metrics.Metric(dc.metrics.mean_squared_error,
task_averager=np.mean)
with tempfile.TemporaryDirectory() as tmpdirname:
best_model, best_hyperparams, all_results = optimizer.hyperparam_search(
params_dict,
train_dataset,
valid_dataset,
metric,
transformers,
logdir=tmpdirname,
use_max=False)
valid_score = best_model.evaluate(valid_dataset, [metric])
# Test that 2 parameters were optimized
for hp_str in all_results.keys():
# Recall that the key is a string of the form _batch_size_39_learning_rate_0.01 for example
assert "batch_size" in hp_str
assert "learning_rate" in hp_str
assert valid_score["mean-mean_squared_error"] == min(
all_results.values())
assert valid_score["mean-mean_squared_error"] > 0
@pytest.mark.torch
def test_multitask_nb_epoch(self):
"""Test a simple example of optimizing a multitask model with a random
search with a different number of training epochs."""
# Generate dummy dataset
np.random.seed(123)
train_dataset = dc.data.NumpyDataset(np.random.rand(10, 3),
np.zeros((10, 2)), np.ones(
(10, 2)), np.arange(10))
valid_dataset = dc.data.NumpyDataset(np.random.rand(5, 3),
np.zeros((5, 2)), np.ones((5, 2)),
np.arange(5))
optimizer = dc.hyper.RandomHyperparamOpt(
lambda **params: dc.models.MultitaskRegressor(
n_tasks=2,
n_features=3,
dropouts=[0.],
weight_init_stddevs=[np.sqrt(6) / np.sqrt(1000)],
learning_rate=0.003,
**params),
max_iter=self.max_iter)
params_dict = {"batch_size": [10, 20]}
transformers = []
metric = dc.metrics.Metric(dc.metrics.mean_squared_error,
task_averager=np.mean)
# Define nb_epoch in hyperparam_search function call
best_model, best_hyperparams, all_results = optimizer.hyperparam_search(
params_dict,
train_dataset,
valid_dataset,
metric,
transformers,
nb_epoch=3,
use_max=False)
valid_score = best_model.evaluate(valid_dataset, [metric])
assert valid_score["mean-mean_squared_error"] == min(
all_results.values())
assert valid_score["mean-mean_squared_error"] > 0
<file_sep>"""
Test Autodock Vina Utility Functions.
"""
import os
import numpy as np
import unittest
from deepchem.utils import docking_utils
from deepchem.utils import rdkit_utils
class TestVinaUtils(unittest.TestCase):
def setUp(self):
# TODO test more formats for ligand
self.current_dir = os.path.dirname(os.path.realpath(__file__))
self.docked_ligands = os.path.join(self.current_dir, 'assets',
'1jld_ligand_docked.pdbqt')
def test_load_docked_ligand(self):
docked_ligands, scores = docking_utils.load_docked_ligands(
self.docked_ligands)
assert len(docked_ligands) == 9
assert len(scores) == 9
for ligand, score in zip(docked_ligands, scores):
xyz = rdkit_utils.get_xyz_from_mol(ligand)
assert score < 0 # This is a binding free energy
assert np.count_nonzero(xyz) > 0
def test_write_gnina_conf(self):
docking_utils.write_gnina_conf(
'protein.pdb',
'ligand.sdf',
'conf.txt',
)
assert os.path.exists('conf.txt')
os.remove('conf.txt')
def test_read_gnina_log(self):
log_file = os.path.join(self.current_dir, 'assets', 'gnina_log.txt')
scores = docking_utils.read_gnina_log(log_file)
assert np.array_equal(
scores, np.array([[-4.37, 0.6392, 4.336], [-3.56, 0.6202, 4.162]]))
def test_prepare_inputs(self):
pdbid = '3cyx'
ligand_smiles = 'CC(C)(C)NC(O)C1CC2CCCCC2C[NH+]1CC(O)C(CC1CCCCC1)NC(O)C(CC(N)O)NC(O)C1CCC2CCCCC2N1'
protein, ligand = docking_utils.prepare_inputs(pdbid,
ligand_smiles,
pdb_name=pdbid)
assert np.isclose(protein.GetNumAtoms(), 1510, atol=3)
assert np.isclose(ligand.GetNumAtoms(), 124, atol=3)
protein, ligand = docking_utils.prepare_inputs(
pdbid + '.pdb', 'ligand_' + pdbid + '.pdb')
assert np.isclose(protein.GetNumAtoms(), 1510, atol=3)
assert np.isclose(ligand.GetNumAtoms(), 124, atol=3)
os.remove(pdbid + '.pdb')
os.remove('ligand_' + pdbid + '.pdb')
<file_sep>"""
Tests for splitter objects.
"""
import os
import unittest
import numpy as np
import deepchem as dc
from deepchem.data import NumpyDataset
from deepchem.splits import IndexSplitter
def load_sparse_multitask_dataset():
"""Load sparse tox multitask data, sample dataset."""
current_dir = os.path.dirname(os.path.abspath(__file__))
featurizer = dc.feat.CircularFingerprint(size=1024)
tasks = [
"task1", "task2", "task3", "task4", "task5", "task6", "task7", "task8",
"task9"
]
input_file = os.path.join(current_dir,
"assets/sparse_multitask_example.csv")
loader = dc.data.CSVLoader(tasks=tasks,
feature_field="smiles",
featurizer=featurizer)
return loader.create_dataset(input_file)
def load_multitask_data():
"""Load example multitask data."""
current_dir = os.path.dirname(os.path.abspath(__file__))
featurizer = dc.feat.CircularFingerprint(size=1024)
tasks = [
"task0", "task1", "task2", "task3", "task4", "task5", "task6", "task7",
"task8", "task9", "task10", "task11", "task12", "task13", "task14",
"task15", "task16"
]
input_file = os.path.join(
current_dir, "../../models/tests/assets/multitask_example.csv")
loader = dc.data.CSVLoader(tasks=tasks,
feature_field="smiles",
featurizer=featurizer)
return loader.create_dataset(input_file)
def load_solubility_data():
"""Loads solubility dataset"""
current_dir = os.path.dirname(os.path.abspath(__file__))
featurizer = dc.feat.CircularFingerprint(size=1024)
tasks = ["log-solubility"]
input_file = os.path.join(current_dir,
"../../models/tests/assets/example.csv")
loader = dc.data.CSVLoader(tasks=tasks,
feature_field="smiles",
featurizer=featurizer)
return loader.create_dataset(input_file)
def load_butina_data():
"""Loads solubility dataset"""
current_dir = os.path.dirname(os.path.abspath(__file__))
featurizer = dc.feat.CircularFingerprint(size=1024)
tasks = ["task"]
# task_type = "regression"
input_file = os.path.join(current_dir, "assets/butina_example.csv")
loader = dc.data.CSVLoader(tasks=tasks,
feature_field="smiles",
featurizer=featurizer)
return loader.create_dataset(input_file)
class TestSplitter(unittest.TestCase):
"""
Test some basic splitters.
"""
def test_random_group_split(self):
solubility_dataset = load_solubility_data()
groups = [0, 4, 1, 2, 3, 7, 0, 3, 1, 0]
# 0 1 2 3 4 5 6 7 8 9
group_splitter = dc.splits.RandomGroupSplitter(groups)
train_idxs, valid_idxs, test_idxs = group_splitter.split(
solubility_dataset, frac_train=0.5, frac_valid=0.25, frac_test=0.25)
class_ind = [-1] * 10
all_idxs = []
for s in train_idxs + valid_idxs + test_idxs:
all_idxs.append(s)
assert sorted(all_idxs) == list(range(10))
for split_idx, split in enumerate([train_idxs, valid_idxs, test_idxs]):
for s in split:
if class_ind[s] == -1:
class_ind[s] = split_idx
else:
assert class_ind[s] == split_idx
def test_singletask_random_split(self):
"""
Test singletask RandomSplitter class.
"""
solubility_dataset = load_solubility_data()
random_splitter = dc.splits.RandomSplitter()
train_data, valid_data, test_data = \
random_splitter.train_valid_test_split(
solubility_dataset, frac_train=0.8, frac_valid=0.1, frac_test=0.1)
assert len(train_data) == 8
assert len(valid_data) == 1
assert len(test_data) == 1
merged_dataset = dc.data.DiskDataset.merge(
[train_data, valid_data, test_data])
assert sorted(merged_dataset.ids) == (sorted(solubility_dataset.ids))
def test_singletask_index_split(self):
"""
Test singletask IndexSplitter class.
"""
solubility_dataset = load_solubility_data()
random_splitter = dc.splits.IndexSplitter()
train_data, valid_data, test_data = \
random_splitter.train_valid_test_split(
solubility_dataset)
assert len(train_data) == 8
assert len(valid_data) == 1
assert len(test_data) == 1
merged_dataset = dc.data.DiskDataset.merge(
[train_data, valid_data, test_data])
assert sorted(merged_dataset.ids) == (sorted(solubility_dataset.ids))
# TODO(rbharath): The IndexSplitter() had a bug with splitting sharded
# data. Make a test for properly splitting of sharded data. Perhaps using
# reshard() to handle this?
def test_singletask_scaffold_split(self):
"""
Test singletask ScaffoldSplitter class.
"""
solubility_dataset = load_solubility_data()
scaffold_splitter = dc.splits.ScaffoldSplitter()
train_data, valid_data, test_data = \
scaffold_splitter.train_valid_test_split(
solubility_dataset, frac_train=0.8, frac_valid=0.1, frac_test=0.1)
assert len(train_data) == 8
assert len(valid_data) == 1
assert len(test_data) == 1
def test_singletask_fingerprint_split(self):
"""
Test singletask Fingerprint class.
"""
solubility_dataset = load_solubility_data()
assert (len(solubility_dataset.X) == 10)
scaffold_splitter = dc.splits.FingerprintSplitter()
train_data, valid_data, test_data = \
scaffold_splitter.train_valid_test_split(
solubility_dataset, frac_train=0.8, frac_valid=0.1, frac_test=0.1)
assert len(train_data) == 8
assert len(valid_data) == 1
assert len(test_data) == 1
s1 = set(train_data.ids)
assert valid_data.ids[0] not in s1
assert test_data.ids[0] not in s1
def test_singletask_stratified_split(self):
"""
Test singletask SingletaskStratifiedSplitter class.
"""
solubility_dataset = load_solubility_data()
stratified_splitter = dc.splits.ScaffoldSplitter()
train_data, valid_data, test_data = \
stratified_splitter.train_valid_test_split(
solubility_dataset, frac_train=0.8, frac_valid=0.1, frac_test=0.1)
assert len(train_data) == 8
assert len(valid_data) == 1
assert len(test_data) == 1
merged_dataset = dc.data.NumpyDataset.merge(
[train_data, valid_data, test_data])
assert sorted(merged_dataset.ids) == (sorted(solubility_dataset.ids))
def test_singletask_maxmin_split(self):
"""
Test singletask MaxMinSplitter class.
"""
solubility_dataset = load_butina_data()
maxmin_splitter = dc.splits.MaxMinSplitter()
train_data, valid_data, test_data = \
maxmin_splitter.train_valid_test_split(
solubility_dataset)
assert len(train_data) == 8
assert len(valid_data) == 1
assert len(test_data) == 1
def test_singletask_butina_split(self):
"""
Test singletask ButinaSplitter class.
"""
solubility_dataset = load_butina_data()
butina_splitter = dc.splits.ButinaSplitter()
train_data, valid_data, test_data = \
butina_splitter.train_valid_test_split(
solubility_dataset)
assert len(train_data) == 8
assert len(valid_data) == 1
assert len(test_data) == 1
def test_k_fold_splitter(self):
"""
Test that we can do a 5-fold, index-wise split over 5 points
"""
ds = NumpyDataset(np.array(range(5)), np.array(range(5)))
index_splitter = IndexSplitter()
K = 5
fold_datasets = index_splitter.k_fold_split(ds, K)
for fold in range(K):
train, cv = fold_datasets[fold][0], fold_datasets[fold][1]
self.assertTrue(cv.X[0] == fold)
train_data = set(list(train.X))
self.assertFalse(fold in train_data)
self.assertEqual(K - 1, len(train))
self.assertEqual(1, len(cv))
def test_singletask_random_k_fold_split(self):
"""
Test singletask RandomSplitter class.
"""
solubility_dataset = load_solubility_data()
random_splitter = dc.splits.RandomSplitter()
ids_set = set(solubility_dataset.ids)
K = 5
fold_datasets = random_splitter.k_fold_split(solubility_dataset, K)
for fold in range(K):
fold_dataset = fold_datasets[fold][1]
# Verify fold length is 10/K == 2
assert len(fold_dataset) == 2
# Verify that compounds in this fold are subset of original compounds
fold_ids_set = set(fold_dataset.ids)
assert fold_ids_set.issubset(ids_set)
# Verify that no two folds have overlapping compounds.
for other_fold in range(K):
if fold == other_fold:
continue
other_fold_dataset = fold_datasets[other_fold][1]
other_fold_ids_set = set(other_fold_dataset.ids)
assert fold_ids_set.isdisjoint(other_fold_ids_set)
def test_singletask_index_k_fold_split(self):
"""
Test singletask IndexSplitter class.
"""
solubility_dataset = load_solubility_data()
index_splitter = dc.splits.IndexSplitter()
ids_set = set(solubility_dataset.ids)
K = 5
fold_datasets = index_splitter.k_fold_split(solubility_dataset, K)
for fold in range(K):
fold_dataset = fold_datasets[fold][1]
# Verify fold length is 10/K == 2
assert len(fold_dataset) == 2
# Verify that compounds in this fold are subset of original compounds
fold_ids_set = set(fold_dataset.ids)
assert fold_ids_set.issubset(ids_set)
# Verify that no two folds have overlapping compounds.
for other_fold in range(K):
if fold == other_fold:
continue
other_fold_dataset = fold_datasets[other_fold][1]
other_fold_ids_set = set(other_fold_dataset.ids)
assert fold_ids_set.isdisjoint(other_fold_ids_set)
merged_dataset = dc.data.DiskDataset.merge(
[x[1] for x in fold_datasets])
assert len(merged_dataset) == len(solubility_dataset)
assert sorted(merged_dataset.ids) == (sorted(solubility_dataset.ids))
def test_singletask_scaffold_k_fold_split(self):
"""
Test singletask ScaffoldSplitter class.
"""
solubility_dataset = load_solubility_data()
scaffold_splitter = dc.splits.ScaffoldSplitter()
ids_set = set(solubility_dataset.ids)
K = 5
fold_datasets = scaffold_splitter.k_fold_split(solubility_dataset, K)
for fold in range(K):
fold_dataset = fold_datasets[fold][1]
# Verify fold length is 10/K == 2
assert len(fold_dataset) == 2
# Verify that compounds in this fold are subset of original compounds
fold_ids_set = set(fold_dataset.ids)
assert fold_ids_set.issubset(ids_set)
# Verify that no two folds have overlapping compounds.
for other_fold in range(K):
if fold == other_fold:
continue
other_fold_dataset = fold_datasets[other_fold][1]
other_fold_ids_set = set(other_fold_dataset.ids)
assert fold_ids_set.isdisjoint(other_fold_ids_set)
merged_dataset = dc.data.DiskDataset.merge(
[x[1] for x in fold_datasets])
assert len(merged_dataset) == len(solubility_dataset)
assert sorted(merged_dataset.ids) == (sorted(solubility_dataset.ids))
def test_singletask_stratified_column_indices(self):
"""
Test RandomStratifiedSplitter's split method on a simple singletask dataset.
"""
# Test singletask case.
n_samples = 100
n_positives = 20
n_tasks = 1
X = np.ones(n_samples)
y = np.zeros((n_samples, n_tasks))
y[:n_positives] = 1
w = np.ones((n_samples, n_tasks))
dataset = dc.data.NumpyDataset(X, y, w)
stratified_splitter = dc.splits.RandomStratifiedSplitter()
train, valid, test = stratified_splitter.split(dataset, 0.5, 0, 0.5)
# The split index should partition dataset in half.
assert len(train) == 50
assert len(valid) == 0
assert len(test) == 50
assert np.count_nonzero(y[train]) == 10
assert np.count_nonzero(y[test]) == 10
def test_singletask_stratified_column_indices_mask(self):
"""
Test RandomStratifiedSplitter's split method on dataset with mask.
"""
# Test singletask case.
n_samples = 100
n_positives = 20
n_tasks = 1
# Test case where some weights are zero (i.e. masked)
X = np.ones(n_samples)
y = np.zeros((n_samples, n_tasks))
y[:n_positives] = 1
w = np.ones((n_samples, n_tasks))
# Set half the positives to have zero weight
w[:n_positives // 2] = 0
dataset = dc.data.NumpyDataset(X, y, w)
stratified_splitter = dc.splits.RandomStratifiedSplitter()
train, valid, test = stratified_splitter.split(dataset, 0.5, 0, 0.5)
# There are 10 nonzero actives.
# The split index should partition this into half, so expect 5
w_present = (w != 0)
y_present = y * w_present
assert np.count_nonzero(y_present[train]) == 5
def test_multitask_stratified_column_indices(self):
"""
Test RandomStratifiedSplitter split on multitask dataset.
"""
n_samples = 100
n_tasks = 10
p = .05 # proportion actives
X = np.ones(n_samples)
y = np.random.binomial(1, p, size=(n_samples, n_tasks))
w = np.ones((n_samples, n_tasks))
dataset = dc.data.NumpyDataset(X, y, w)
stratified_splitter = dc.splits.RandomStratifiedSplitter()
train, valid, test = stratified_splitter.split(dataset, 0.5, 0, 0.5)
for task in range(n_tasks):
task_actives = np.count_nonzero(y[:, task])
# The split index should partition the positives for each task roughly in half.
target = task_actives / 2
assert target - 2 <= np.count_nonzero(y[train, task]) <= target + 2
def test_multitask_stratified_column_indices_masked(self):
"""
Test RandomStratifiedSplitter split on multitask dataset.
"""
n_samples = 200
n_tasks = 10
p = .05 # proportion actives
X = np.ones(n_samples)
y = np.random.binomial(1, p, size=(n_samples, n_tasks))
w = np.ones((n_samples, n_tasks))
# Mask half the examples
w[:n_samples // 2] = 0
dataset = dc.data.NumpyDataset(X, y, w)
stratified_splitter = dc.splits.RandomStratifiedSplitter()
train, valid, test = stratified_splitter.split(dataset, 0.5, 0, 0.5)
w_present = (w != 0)
y_present = y * w_present
for task in range(n_tasks):
task_actives = np.count_nonzero(y_present[:, task])
target = task_actives / 2
# The split index should partition dataset in half.
assert target - 1 <= np.count_nonzero(y_present[train,
task]) <= target + 1
def test_random_stratified_split(self):
"""
Test RandomStratifiedSplitter on a singletask split.
"""
np.random.seed(2314)
# Test singletask case.
n_samples = 20
n_positives = 10
n_features = 10
n_tasks = 1
X = np.random.rand(n_samples, n_features)
y = np.zeros((n_samples, n_tasks))
y[:n_positives] = 1
w = np.ones((n_samples, n_tasks))
ids = np.arange(n_samples)
dataset = dc.data.DiskDataset.from_numpy(X, y, w, ids)
stratified_splitter = dc.splits.RandomStratifiedSplitter()
dataset_1, dataset_2 = stratified_splitter.train_test_split(
dataset, frac_train=.5)
print(dataset_1.get_shape())
print(dataset_2.get_shape())
# Should have split cleanly in half (picked random seed to ensure this)
assert len(dataset_1) == 10
assert len(dataset_2) == 10
# Check positives are correctly distributed
y_1 = dataset_1.y
assert np.count_nonzero(y_1) == n_positives / 2
y_2 = dataset_2.y
assert np.count_nonzero(y_2) == n_positives / 2
def test_singletask_stratified_train_valid_test_split(self):
"""
Test RandomStratifiedSplitter on a singletask train/valid/test split.
"""
np.random.seed(2314)
# Test singletask case.
n_samples = 100
n_positives = 10
n_features = 10
n_tasks = 1
X = np.random.rand(n_samples, n_features)
y = np.zeros((n_samples, n_tasks))
y[:n_positives] = 1
w = np.ones((n_samples, n_tasks))
ids = np.arange(n_samples)
dataset = dc.data.DiskDataset.from_numpy(X, y, w, ids)
stratified_splitter = dc.splits.RandomStratifiedSplitter()
train, valid, test = stratified_splitter.train_valid_test_split(
dataset, frac_train=.8, frac_valid=.1, frac_test=.1)
# Should have made an 80/10/10 train/valid/test split of actives.
self.assertEqual(np.count_nonzero(train.y), 8)
self.assertEqual(np.count_nonzero(valid.y), 1)
self.assertEqual(np.count_nonzero(test.y), 1)
def test_singletask_stratified_k_fold_split(self):
"""
Test RandomStratifiedSplitter k-fold class.
"""
n_samples = 100
n_positives = 20
n_features = 10
X = np.random.rand(n_samples, n_features)
y = np.zeros(n_samples)
y[:n_positives] = 1
w = np.ones(n_samples)
ids = np.arange(n_samples)
dataset = dc.data.DiskDataset.from_numpy(X, y, w, ids)
stratified_splitter = dc.splits.RandomStratifiedSplitter()
ids_set = set(dataset.ids)
K = 5
fold_datasets = stratified_splitter.k_fold_split(dataset, K)
fold_datasets = [f[1] for f in fold_datasets]
for fold in range(K):
fold_dataset = fold_datasets[fold]
# Verify fold length is 100/K == 20
# Note: This wouldn't work for multitask stratification
# assert len(fold_dataset) == n_samples/K
fold_labels = fold_dataset.y
# Verify that each fold has n_positives/K = 4 positive examples.
assert np.count_nonzero(fold_labels == 1) == n_positives / K
# Verify that compounds in this fold are subset of original compounds
fold_ids_set = set(fold_dataset.ids)
assert fold_ids_set.issubset(ids_set)
# Verify that no two folds have overlapping compounds.
for other_fold in range(K):
if fold == other_fold:
continue
other_fold_dataset = fold_datasets[other_fold]
other_fold_ids_set = set(other_fold_dataset.ids)
assert fold_ids_set.isdisjoint(other_fold_ids_set)
merged_dataset = dc.data.DiskDataset.merge(fold_datasets)
assert len(merged_dataset) == len(dataset)
assert sorted(merged_dataset.ids) == (sorted(dataset.ids))
def test_multitask_random_split(self):
"""
Test multitask RandomSplitter class.
"""
multitask_dataset = load_multitask_data()
random_splitter = dc.splits.RandomSplitter()
train_data, valid_data, test_data = \
random_splitter.train_valid_test_split(
multitask_dataset, frac_train=0.8, frac_valid=0.1, frac_test=0.1)
assert len(train_data) == 8
assert len(valid_data) == 1
assert len(test_data) == 1
def test_multitask_index_split(self):
"""
Test multitask IndexSplitter class.
"""
multitask_dataset = load_multitask_data()
index_splitter = dc.splits.IndexSplitter()
train_data, valid_data, test_data = \
index_splitter.train_valid_test_split(
multitask_dataset, frac_train=0.8, frac_valid=0.1, frac_test=0.1)
assert len(train_data) == 8
assert len(valid_data) == 1
assert len(test_data) == 1
def test_multitask_scaffold_split(self):
"""
Test multitask ScaffoldSplitter class.
"""
multitask_dataset = load_multitask_data()
scaffold_splitter = dc.splits.ScaffoldSplitter()
train_data, valid_data, test_data = \
scaffold_splitter.train_valid_test_split(
multitask_dataset, frac_train=0.8, frac_valid=0.1, frac_test=0.1)
assert len(train_data) == 8
assert len(valid_data) == 1
assert len(test_data) == 1
def test_specified_split(self):
solubility_dataset = load_solubility_data()
random_splitter = dc.splits.SpecifiedSplitter(valid_indices=[7],
test_indices=[8])
train_data, valid_data, test_data = \
random_splitter.split(
solubility_dataset)
assert len(train_data) == 8
assert len(valid_data) == 1
assert len(test_data) == 1
def test_random_seed(self):
"""Test that splitters use the random seed correctly."""
dataset = load_solubility_data()
splitter = dc.splits.RandomSplitter()
train1, valid1, test1 = splitter.train_valid_test_split(dataset, seed=1)
train2, valid2, test2 = splitter.train_valid_test_split(dataset, seed=2)
train3, valid3, test3 = splitter.train_valid_test_split(dataset, seed=1)
assert np.array_equal(train1.X, train3.X)
assert np.array_equal(valid1.X, valid3.X)
assert np.array_equal(test1.X, test3.X)
assert not np.array_equal(train1.X, train2.X)
assert not np.array_equal(valid1.X, valid2.X)
assert not np.array_equal(test1.X, test2.X)
def test_fingerprint_split(self):
"""
Test FingerprintSplitter.
"""
multitask_dataset = load_multitask_data()
splitter = dc.splits.FingerprintSplitter()
train_data, valid_data, test_data = \
splitter.train_valid_test_split(
multitask_dataset, frac_train=0.8, frac_valid=0.1, frac_test=0.1)
assert len(train_data) == 8
assert len(valid_data) == 1
assert len(test_data) == 1
def test_fingerprint_k_fold_split(self):
"""
Test FingerprintSplitter.k_fold_split.
"""
multitask_dataset = load_multitask_data()
splitter = dc.splits.FingerprintSplitter()
cv_folds = splitter.k_fold_split(multitask_dataset, k=3)
assert len(multitask_dataset) == len(cv_folds[0][0]) + len(
cv_folds[0][1])
assert len(multitask_dataset) == len(cv_folds[1][0]) + len(
cv_folds[1][1])
assert len(multitask_dataset) == len(cv_folds[2][0]) + len(
cv_folds[2][1])
<file_sep>import pytest
try:
from deepchem.feat.dft_data import DFTEntry
from dqc.qccalc.ks import KS
from deepchem.utils.dftutils import KSCalc
import torch
has_dqc = True
except:
has_dqc = False
import numpy as np
@pytest.mark.dqc
def test_entryDM():
e_type = 'dm'
true_val = 'deepchem/feat/tests/data/dftHF_output.npy'
systems = [{
'moldesc': 'H 0.86625 0 0; F -0.86625 0 0',
'basis': '6-311++G(3df,3pd)'
}]
dm_entry_for_HF = DFTEntry.create(e_type, true_val, systems)
assert dm_entry_for_HF.entry_type == 'dm'
dm_HF_system0 = dm_entry_for_HF.get_systems()[0]
mol_dqc = dm_HF_system0.get_dqc_mol(dm_entry_for_HF)
hf_zs = torch.Tensor([1, 9])
hf_pos = torch.DoubleTensor([[0.86625, 0.0000, 0.0000],
[-0.86625, 0.0000, 0.0000]])
assert (mol_dqc.atomzs == hf_zs).all()
assert (hf_pos.numpy() == mol_dqc.atompos.numpy()).all()
dm0 = dm_entry_for_HF.get_true_val()
assert dm0.shape == (57, 57)
@pytest.mark.dqc
def test_entryAE():
e_type = 'ae'
true_val = '0.09194410469'
weight = 1340
systems = [{
'moldesc': 'Li 1.5070 0 0; H -1.5070 0 0',
'basis': '6-311++G(3df,3pd)'
}, {
'moldesc': 'Li 0 0 0',
'basis': '6-311++G(3df,3pd)',
'spin': 1
}, {
'moldesc': 'H 0 0 0',
'basis': '6-311++G(3df,3pd)',
'spin': 1
}]
ae_entry_for_LiH = DFTEntry.create(e_type, true_val, systems, weight)
assert ae_entry_for_LiH.entry_type == 'ae'
assert ae_entry_for_LiH.get_true_val() == 0.09194410469
assert ae_entry_for_LiH.get_weight() == 1340
def run(syst):
mol_dqc = syst.get_dqc_mol(ae_entry_for_LiH)
qc = KS(mol_dqc, xc='lda_x').run()
return KSCalc(qc)
qcs = [run(syst) for syst in ae_entry_for_LiH.get_systems()]
val = np.array(0.05362133)
calc_val = ae_entry_for_LiH.get_val(qcs)
assert np.allclose(val, calc_val)
<file_sep># This script may work on only Bash and Zsh
# usage: source scripts/flake8_for_ci.sh
items=(
"deepchem/data"
"deepchem/dock"
"deepchem/feat"
"deepchem/hyper"
"deepchem/metalearning"
"deepchem/metrics"
"deepchem/rl"
"deepchem/splits"
"deepchem/trans"
"deepchem/utils"
"deepchem/molnet"
"deepchem/models"
)
for item in "${items[@]}" ; do
echo ${item}; flake8 ${item} --exclude=__init__.py --count --show-source --statistics
done
<file_sep>from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
__author__ = "<NAME>"
__copyright__ = "Copyright 2016, Stanford University"
__license__ = "MIT"
import os
import time
import numpy as np
import tensorflow as tf
from deepchem.data import DiskDataset
from deepchem.nn import model_ops
from legacy import TensorflowGraph, TensorflowGraphModel, TensorflowMultiTaskRegressor
from deepchem.utils.save import log
import atomicnet_ops
class TensorflowFragmentRegressor(TensorflowMultiTaskRegressor):
"""Create atomic convolution neural network potential for binding energy.
Example:
>>> B = 10 # batch_size
>>> N_1 = 6 # frag1_n_atoms
>>> N_2 = 6 # frag2_n_atoms
>>> N = 12 # complex_n_atoms
>>> M = 6 # n_neighbors
>>> n_tasks = 1
>>> C_1 = np.zeros((N_1, 3))
>>> C_2 = np.zeros((N_2, 3))
>>> C = np.zeros((N, 3))
>>> NL_1 = {}
>>> for i in range(N_1): NL_1[i] = [0 for m in range(M)]
>>> NL_2 = {}
>>> for i in range(N_2): NL_2[i] = [0 for m in range(M)]
>>> NL = {}
>>> for i in range(N): NL[i] = [0 for m in range(M)]
>>> Z_1 = np.zeros((N_1,))
>>> Z_2 = np.zeros((N_2,))
>>> Z = np.zeros((N))
>>> X = [(C_1, NL_1, Z_1, C_2, NL_2, Z_2, C, NL, Z) for i in range(B)]
>>> y = np.zeros((B, n_tasks))
>>> w = np.zeros((B, n_tasks))
>>> ids = np.zeros(B,)
>>> dataset = dc.data.NumpyDataset(X, y, w, ids)
>>> rp = [[12.0, 0.0, 0.04]]
>>> at = None
>>> model = TensorflowFragmentRegressor(n_tasks, rp, at, N_1, N_2, N, M)
>>> model.fit(dataset)
"""
def __init__(self,
n_tasks,
radial_params,
atom_types,
frag1_num_atoms,
frag2_num_atoms,
complex_num_atoms,
max_num_neighbors,
logdir=None,
layer_sizes=[100],
weight_init_stddevs=[0.1],
bias_init_consts=[1.],
penalty=0.0,
penalty_type="l2",
dropouts=[0.5],
learning_rate=.001,
momentum=.8,
optimizer="adam",
batch_size=48,
conv_layers=1,
boxsize=None,
verbose=True,
seed=None):
"""Initialize TensorflowFragmentRegressor.
Parameters
----------
n_tasks: int
Number of tasks.
radial_params: list
Of length l, where l is number of radial filters learned.
atom_types: list
Of length a, where a is number of atom_types for filtering.
frag1_num_atoms: int
Maximum number of atoms in fragment 1.
frag2_num_atoms: int
Maximum number of atoms in fragment 2.
complex_num_atoms: int
Maximum number of atoms in complex.
max_num_neighbors: int
Maximum number of neighbors per atom.
logdir: str
Path to model save directory.
layer_sizes: list
List of layer sizes.
weight_init_stddevs: list
List of standard deviations for weights (sampled from zero-mean
gaussians). One for each layer.
bias_init_consts: list
List of bias initializations. One for each layer.
penalty: float
Amount of penalty (l2 or l1 applied)
penalty_type: str
Either "l2" or "l1"
dropouts: list
List of dropout amounts. One for each layer.
learning_rate: float
Learning rate for model.
momentum: float
Momentum. Only applied if optimizer=="momentum"
optimizer: str
Type of optimizer applied.
batch_size: int
Size of minibatches for training.
conv_layers: int
Number of atomic convolution layers (experimental feature).
boxsize: float or None
Simulation box length [Angstrom]. If None, no periodic boundary conditions.
verbose: bool, optional (Default True)
Whether to perform logging.
seed: int, optional (Default None)
If not none, is used as random seed for tensorflow.
"""
self.n_tasks = n_tasks
self.radial_params = radial_params
self.atom_types = atom_types
self.frag1_num_atoms = frag1_num_atoms
self.frag2_num_atoms = frag2_num_atoms
self.complex_num_atoms = complex_num_atoms
self.max_num_neighbors = max_num_neighbors
self.conv_layers = conv_layers
self.boxsize = boxsize
TensorflowGraphModel.__init__(
self,
n_tasks,
None,
logdir,
layer_sizes=layer_sizes,
weight_init_stddevs=weight_init_stddevs,
bias_init_consts=bias_init_consts,
penalty=penalty,
penalty_type=penalty_type,
dropouts=dropouts,
learning_rate=learning_rate,
momentum=momentum,
optimizer=optimizer,
batch_size=batch_size,
pad_batches=True,
verbose=verbose,
seed=seed)
def construct_feed_dict(self, F_b, y_b=None, w_b=None, ids_b=None):
"""Construct a feed dictionary from minibatch data.
B = batch_size, N = max_num_atoms
Parameters
----------
F_b: np.ndarray of B tuples of (X_1, L_1, Z_1, X_2, L_2, Z_2, X, L, Z)
X_1: ndarray shape (N, 3).
Fragment 1 Cartesian coordinates [Angstrom].
L_1: dict with N keys.
Fragment 1 neighbor list.
Z_1: ndarray shape (N,).
Fragment 1 atomic numbers.
X_2: ndarray shape (N, 3).
Fragment 2 Cartesian coordinates [Angstrom].
L_2: dict with N keys.
Fragment 2 neighbor list.
Z_2: ndarray shape (N,).
Fragment 2 atomic numbers.
X: ndarray shape (N, 3).
Complex Cartesian coordinates [Angstrom].
L: dict with N keys.
Complex neighbor list.
Z: ndarray shape (N,).
Complex atomic numbers.
y_b: np.ndarray of shape (B, num_tasks)
Tasks.
w_b: np.ndarray of shape (B, num_tasks)
Task weights.
ids_b: List of length (B,)
Datapoint identifiers. Not currently used.
Returns
-------
retval: dict
Tensorflow feed dict
"""
N = self.complex_num_atoms
N_1 = self.frag1_num_atoms
N_2 = self.frag2_num_atoms
M = self.max_num_neighbors
orig_dict = {}
batch_size = F_b.shape[0]
num_features = F_b[0][0].shape[1]
frag1_X_b = np.zeros((batch_size, N_1, num_features))
for i in range(batch_size):
frag1_X_b[i] = F_b[i][0]
orig_dict["frag1_X_placeholder"] = frag1_X_b
frag2_X_b = np.zeros((batch_size, N_2, num_features))
for i in range(batch_size):
frag2_X_b[i] = F_b[i][3]
orig_dict["frag2_X_placeholder"] = frag2_X_b
complex_X_b = np.zeros((batch_size, N, num_features))
for i in range(batch_size):
complex_X_b[i] = F_b[i][6]
orig_dict["complex_X_placeholder"] = complex_X_b
frag1_Nbrs = np.zeros((batch_size, N_1, M))
frag1_Z_b = np.zeros((batch_size, N_1))
for i in range(batch_size):
frag1_Z_b[i] = F_b[i][2]
frag1_Nbrs_Z = np.zeros((batch_size, N_1, M))
for atom in range(N_1):
for i in range(batch_size):
atom_nbrs = F_b[i][1].get(atom, [])
frag1_Nbrs[i, atom, :len(atom_nbrs)] = np.array(atom_nbrs)
for j, atom_j in enumerate(atom_nbrs):
frag1_Nbrs_Z[i, atom, j] = frag1_Z_b[i, atom_j]
orig_dict["frag1_Z_placeholder"] = frag1_Z_b
orig_dict["frag1_Nbrs_placeholder"] = frag1_Nbrs
orig_dict["frag1_Nbrs_Z_placeholder"] = frag1_Nbrs_Z
frag2_Nbrs = np.zeros((batch_size, N_2, M))
frag2_Z_b = np.zeros((batch_size, N_2))
for i in range(batch_size):
frag2_Z_b[i] = F_b[i][5]
frag2_Nbrs_Z = np.zeros((batch_size, N_2, M))
for atom in range(N_2):
for i in range(batch_size):
atom_nbrs = F_b[i][4].get(atom, [])
frag2_Nbrs[i, atom, :len(atom_nbrs)] = np.array(atom_nbrs)
for j, atom_j in enumerate(atom_nbrs):
frag2_Nbrs_Z[i, atom, j] = frag2_Z_b[i, atom_j]
orig_dict["frag2_Z_placeholder"] = frag2_Z_b
orig_dict["frag2_Nbrs_placeholder"] = frag2_Nbrs
orig_dict["frag2_Nbrs_Z_placeholder"] = frag2_Nbrs_Z
complex_Nbrs = np.zeros((batch_size, N, M))
complex_Z_b = np.zeros((batch_size, N))
for i in range(batch_size):
complex_Z_b[i] = F_b[i][8]
complex_Nbrs_Z = np.zeros((batch_size, N, M))
for atom in range(N):
for i in range(batch_size):
atom_nbrs = F_b[i][7].get(atom, [])
complex_Nbrs[i, atom, :len(atom_nbrs)] = np.array(atom_nbrs)
for j, atom_j in enumerate(atom_nbrs):
complex_Nbrs_Z[i, atom, j] = complex_Z_b[i, atom_j]
orig_dict["complex_Z_placeholder"] = complex_Z_b
orig_dict["complex_Nbrs_placeholder"] = complex_Nbrs
orig_dict["complex_Nbrs_Z_placeholder"] = complex_Nbrs_Z
for task in range(self.n_tasks):
if y_b is not None:
orig_dict["labels_%d" % task] = y_b[:, task]
else:
orig_dict["labels_%d" % task] = np.zeros((self.batch_size,))
if w_b is not None:
orig_dict["weights_%d" % task] = w_b[:, task]
else:
orig_dict["weights_%d" % task] = np.ones((self.batch_size,))
return TensorflowGraph.get_feed_dict(orig_dict)
def build(self, graph, name_scopes, training):
N = self.complex_num_atoms
N_1 = self.frag1_num_atoms
N_2 = self.frag2_num_atoms
M = self.max_num_neighbors
B = self.batch_size
placeholder_scope = TensorflowGraph.get_placeholder_scope(graph,
name_scopes)
with graph.as_default():
with placeholder_scope:
self.frag1_X_placeholder = tf.placeholder(
tf.float32, shape=[B, N_1, 3], name='frag1_X_placeholder')
self.frag1_Z_placeholder = tf.placeholder(
tf.float32, shape=[B, N_1], name='frag1_Z_placeholder')
self.frag1_Nbrs_placeholder = tf.placeholder(
tf.int32, shape=[B, N_1, M], name="frag1_Nbrs_placeholder")
self.frag1_Nbrs_Z_placeholder = tf.placeholder(
tf.float32, shape=[B, N_1, M], name='frag1_Nbrs_Z_placeholder')
self.frag2_X_placeholder = tf.placeholder(
tf.float32, shape=[B, N_2, 3], name='frag2_X_placeholder')
self.frag2_Z_placeholder = tf.placeholder(
tf.float32, shape=[B, N_2], name='frag2_Z_placeholder')
self.frag2_Nbrs_placeholder = tf.placeholder(
tf.int32, shape=[B, N_2, M], name="frag2_Nbrs_placeholder")
self.frag2_Nbrs_Z_placeholder = tf.placeholder(
tf.float32, shape=[B, N_2, M], name='frag2_Nbrs_Z_placeholder')
self.complex_X_placeholder = tf.placeholder(
tf.float32, shape=[B, N, 3], name='complex_X_placeholder')
self.complex_Z_placeholder = tf.placeholder(
tf.float32, shape=[B, N], name='complex_Z_placeholder')
self.complex_Nbrs_placeholder = tf.placeholder(
tf.int32, shape=[B, N, M], name="complex_Nbrs_placeholder")
self.complex_Nbrs_Z_placeholder = tf.placeholder(
tf.float32, shape=[B, N, M], name='complex_Nbrs_Z_placeholder')
layer_sizes = self.layer_sizes
weight_init_stddevs = self.weight_init_stddevs
bias_init_consts = self.bias_init_consts
dropouts = self.dropouts
boxsize = self.boxsize
conv_layers = self.conv_layers
lengths_set = {
len(layer_sizes),
len(weight_init_stddevs),
len(bias_init_consts),
len(dropouts),
}
assert len(lengths_set) == 1, 'All layer params must have same length.'
num_layers = lengths_set.pop()
assert num_layers > 0, 'Must have some layers defined.'
radial_params = self.radial_params
atom_types = self.atom_types
frag1_layer = atomicnet_ops.AtomicConvolutionLayer(
self.frag1_X_placeholder, self.frag1_Nbrs_placeholder,
self.frag1_Nbrs_Z_placeholder, atom_types, radial_params, boxsize, B,
N_1, M, 3)
for x in range(conv_layers - 1):
l = int(frag1_layer.get_shape()[-1])
frag1_layer = atomicnet_ops.AtomicConvolutionLayer(
frag1_layer, self.frag1_Nbrs_placeholder,
self.frag1_Nbrs_Z_placeholder, atom_types, radial_params, boxsize,
B, N_1, M, l)
frag2_layer = atomicnet_ops.AtomicConvolutionLayer(
self.frag2_X_placeholder, self.frag2_Nbrs_placeholder,
self.frag2_Nbrs_Z_placeholder, atom_types, radial_params, boxsize, B,
N_2, M, 3)
for x in range(conv_layers - 1):
l = int(frag2_layer.get_shape()[-1])
frag2_layer = atomicnet_ops.AtomicConvolutionLayer(
frag2_layer, self.frag2_Nbrs_placeholder,
self.frag2_Nbrs_Z_placeholder, atom_types, radial_params, boxsize,
B, N_2, M, l)
complex_layer = atomicnet_ops.AtomicConvolutionLayer(
self.complex_X_placeholder, self.complex_Nbrs_placeholder,
self.complex_Nbrs_Z_placeholder, atom_types, radial_params, boxsize,
B, N, M, 3)
for x in range(conv_layers - 1):
l = int(complex_layer.get_shape()[-1])
complex_layer = atomicnet_ops.AtomicConvolutionLayer(
complex_layer, self.complex_Nbrs_placeholder,
self.complex_Nbrs_Z_placeholder, atom_types, radial_params, boxsize,
B, N, M, l)
weights = []
biases = []
output_weights = []
output_biases = []
output = []
n_features = int(frag1_layer.get_shape()[-1])
for ind, atomtype in enumerate(atom_types):
prev_layer_size = n_features
weights.append([])
biases.append([])
output_weights.append([])
output_biases.append([])
for i in range(num_layers):
weight, bias = atomicnet_ops.InitializeWeightsBiases(
prev_layer_size=prev_layer_size,
size=layer_sizes[i],
weights=tf.truncated_normal(
shape=[prev_layer_size, layer_sizes[i]],
stddev=weight_init_stddevs[i]),
biases=tf.constant(
value=bias_init_consts[i], shape=[layer_sizes[i]]))
weights[ind].append(weight)
biases[ind].append(bias)
prev_layer_size = layer_sizes[i]
weight, bias = atomicnet_ops.InitializeWeightsBiases(prev_layer_size, 1)
output_weights[ind].append(weight)
output_biases[ind].append(bias)
def atomnet(current_input, atomtype):
prev_layer = current_input
for i in range(num_layers):
layer = atomicnet_ops.AtomicNNLayer(
tensor=prev_layer,
size=layer_sizes[i],
weights=weights[atomtype][i],
biases=biases[atomtype][i])
layer = tf.nn.relu(layer)
layer = model_ops.dropout(layer, dropouts[i], training)
prev_layer = layer
prev_layer_size = layer_sizes[i]
output_layer = tf.squeeze(
atomicnet_ops.AtomicNNLayer(
tensor=prev_layer,
size=prev_layer_size,
weights=output_weights[atomtype][0],
biases=output_biases[atomtype][0]))
return output_layer
frag1_zeros = tf.zeros((B, N_1))
frag2_zeros = tf.zeros((B, N_2))
complex_zeros = tf.zeros((B, N))
frag1_atomtype_energy = []
frag2_atomtype_energy = []
complex_atomtype_energy = []
for ind, atomtype in enumerate(atom_types):
frag1_outputs = tf.map_fn(lambda x: atomnet(x, ind), frag1_layer)
frag2_outputs = tf.map_fn(lambda x: atomnet(x, ind), frag2_layer)
complex_outputs = tf.map_fn(lambda x: atomnet(x, ind), complex_layer)
cond = tf.equal(self.frag1_Z_placeholder, atomtype)
frag1_atomtype_energy.append(tf.where(cond, frag1_outputs, frag1_zeros))
cond = tf.equal(self.frag2_Z_placeholder, atomtype)
frag2_atomtype_energy.append(tf.where(cond, frag2_outputs, frag2_zeros))
cond = tf.equal(self.complex_Z_placeholder, atomtype)
complex_atomtype_energy.append(
tf.where(cond, complex_outputs, complex_zeros))
frag1_outputs = tf.add_n(frag1_atomtype_energy)
frag2_outputs = tf.add_n(frag2_atomtype_energy)
complex_outputs = tf.add_n(complex_atomtype_energy)
frag1_energy = tf.reduce_sum(frag1_outputs, 1)
frag2_energy = tf.reduce_sum(frag2_outputs, 1)
complex_energy = tf.reduce_sum(complex_outputs, 1)
binding_energy = complex_energy - (frag1_energy + frag2_energy)
output.append(binding_energy)
return output
<file_sep>import deepchem as dc
import numpy as np
import os
def test_select():
"""Test that dataset select works."""
num_datapoints = 10
num_features = 10
num_tasks = 1
X = np.random.rand(num_datapoints, num_features)
y = np.random.randint(2, size=(num_datapoints, num_tasks))
w = np.ones((num_datapoints, num_tasks))
ids = np.array(["id"] * num_datapoints)
dataset = dc.data.DiskDataset.from_numpy(X, y, w, ids)
indices = [0, 4, 5, 8]
select_dataset = dataset.select(indices)
assert isinstance(select_dataset, dc.data.DiskDataset)
X_sel, y_sel, w_sel, ids_sel = (select_dataset.X, select_dataset.y,
select_dataset.w, select_dataset.ids)
np.testing.assert_array_equal(X[indices], X_sel)
np.testing.assert_array_equal(y[indices], y_sel)
np.testing.assert_array_equal(w[indices], w_sel)
np.testing.assert_array_equal(ids[indices], ids_sel)
def test_image_dataset_select():
"""Test that select works on image datasets."""
path = os.path.join(os.path.dirname(__file__), 'images')
files = [os.path.join(path, f) for f in os.listdir(path)]
dataset = dc.data.ImageDataset(files, np.random.random(10))
indices = [0, 4, 5, 8, 2]
select_dataset = dataset.select(indices)
assert isinstance(select_dataset, dc.data.ImageDataset)
X_sel, y_sel, w_sel, ids_sel = (select_dataset.X, select_dataset.y,
select_dataset.w, select_dataset.ids)
np.testing.assert_array_equal(dataset.X[indices], X_sel)
np.testing.assert_array_equal(dataset.y[indices], y_sel)
np.testing.assert_array_equal(dataset.w[indices], w_sel)
np.testing.assert_array_equal(dataset.ids[indices], ids_sel)
def test_numpy_dataset_select():
"""Test that dataset select works with numpy dataset."""
num_datapoints = 10
num_features = 10
num_tasks = 1
X = np.random.rand(num_datapoints, num_features)
y = np.random.randint(2, size=(num_datapoints, num_tasks))
w = np.ones((num_datapoints, num_tasks))
ids = np.array(["id"] * num_datapoints)
dataset = dc.data.NumpyDataset(X, y, w, ids)
indices = [0, 4, 5, 8, 2]
select_dataset = dataset.select(indices)
assert isinstance(select_dataset, dc.data.NumpyDataset)
X_sel, y_sel, w_sel, ids_sel = (select_dataset.X, select_dataset.y,
select_dataset.w, select_dataset.ids)
np.testing.assert_array_equal(X[indices], X_sel)
np.testing.assert_array_equal(y[indices], y_sel)
np.testing.assert_array_equal(w[indices], w_sel)
np.testing.assert_array_equal(ids[indices], ids_sel)
def test_select_multishard():
"""Test that dataset select works with multiple shards."""
num_datapoints = 100
num_features = 10
num_tasks = 1
X = np.random.rand(num_datapoints, num_features)
y = np.random.randint(2, size=(num_datapoints, num_tasks))
w = np.ones((num_datapoints, num_tasks))
ids = np.array(["id"] * num_datapoints)
dataset = dc.data.DiskDataset.from_numpy(X, y, w, ids)
dataset.reshard(shard_size=10)
indices = [10, 42, 51, 82, 2, 4, 6]
select_dataset = dataset.select(indices)
assert isinstance(select_dataset, dc.data.DiskDataset)
X_sel, y_sel, w_sel, ids_sel = (select_dataset.X, select_dataset.y,
select_dataset.w, select_dataset.ids)
np.testing.assert_array_equal(X[indices], X_sel)
np.testing.assert_array_equal(y[indices], y_sel)
np.testing.assert_array_equal(w[indices], w_sel)
np.testing.assert_array_equal(ids[indices], ids_sel)
def test_select_not_sorted():
"""Test that dataset select with ids not in sorted order."""
num_datapoints = 10
num_features = 10
num_tasks = 1
X = np.random.rand(num_datapoints, num_features)
y = np.random.randint(2, size=(num_datapoints, num_tasks))
w = np.ones((num_datapoints, num_tasks))
ids = np.array(["id"] * num_datapoints)
dataset = dc.data.DiskDataset.from_numpy(X, y, w, ids)
indices = [4, 2, 8, 5, 0]
select_dataset = dataset.select(indices)
assert isinstance(select_dataset, dc.data.DiskDataset)
X_sel, y_sel, w_sel, ids_sel = (select_dataset.X, select_dataset.y,
select_dataset.w, select_dataset.ids)
np.testing.assert_array_equal(X[indices], X_sel)
np.testing.assert_array_equal(y[indices], y_sel)
np.testing.assert_array_equal(w[indices], w_sel)
np.testing.assert_array_equal(ids[indices], ids_sel)
def test_select_to_numpy():
"""Test that dataset select works."""
num_datapoints = 10
num_features = 10
num_tasks = 1
X = np.random.rand(num_datapoints, num_features)
y = np.random.randint(2, size=(num_datapoints, num_tasks))
w = np.ones((num_datapoints, num_tasks))
ids = np.array(["id"] * num_datapoints)
dataset = dc.data.DiskDataset.from_numpy(X, y, w, ids)
indices = [0, 4, 5, 8]
select_dataset = dataset.select(indices, output_numpy_dataset=True)
assert isinstance(select_dataset, dc.data.NumpyDataset)
X_sel, y_sel, w_sel, ids_sel = (select_dataset.X, select_dataset.y,
select_dataset.w, select_dataset.ids)
np.testing.assert_array_equal(X[indices], X_sel)
np.testing.assert_array_equal(y[indices], y_sel)
np.testing.assert_array_equal(w[indices], w_sel)
np.testing.assert_array_equal(ids[indices], ids_sel)
<file_sep>"""
Tests to make sure deepchem models can overfit on tiny datasets.
"""
import os
import numpy as np
import pytest
from flaky import flaky
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import RandomForestRegressor
try:
import tensorflow as tf
from tensorflow.python.framework import test_util # noqa: F401
has_tensorflow = True
except:
has_tensorflow = False
import deepchem as dc
from deepchem.models.optimizers import Adam
def test_sklearn_regression_overfit():
"""Test that sklearn models can overfit simple regression datasets."""
n_samples = 10
n_features = 3
n_tasks = 1
# Generate dummy dataset
np.random.seed(123)
ids = np.arange(n_samples)
X = np.random.rand(n_samples, n_features)
y = np.random.rand(n_samples, n_tasks)
w = np.ones((n_samples, n_tasks))
dataset = dc.data.NumpyDataset(X, y, w, ids)
regression_metric = dc.metrics.Metric(dc.metrics.r2_score)
sklearn_model = RandomForestRegressor()
model = dc.models.SklearnModel(sklearn_model)
# Fit trained model
model.fit(dataset)
model.save()
# Eval model on train
scores = model.evaluate(dataset, [regression_metric])
assert scores[regression_metric.name] > .7
def test_sklearn_classification_overfit():
"""Test that sklearn models can overfit simple classification datasets."""
n_samples = 10
n_features = 3
n_tasks = 1
# Generate dummy dataset
np.random.seed(123)
ids = np.arange(n_samples)
X = np.random.rand(n_samples, n_features)
y = np.random.randint(2, size=(n_samples, n_tasks))
w = np.ones((n_samples, n_tasks))
dataset = dc.data.NumpyDataset(X, y, w, ids)
classification_metric = dc.metrics.Metric(dc.metrics.roc_auc_score)
sklearn_model = RandomForestClassifier()
model = dc.models.SklearnModel(sklearn_model)
# Fit trained model
model.fit(dataset)
model.save()
# Eval model on train
scores = model.evaluate(dataset, [classification_metric])
assert scores[classification_metric.name] > .9
def test_sklearn_skewed_classification_overfit():
"""Test sklearn models can overfit 0/1 datasets with few actives."""
n_samples = 100
n_features = 3
n_tasks = 1
# Generate dummy dataset
np.random.seed(123)
p = .05
ids = np.arange(n_samples)
X = np.random.rand(n_samples, n_features)
y = np.random.binomial(1, p, size=(n_samples, n_tasks))
w = np.ones((n_samples, n_tasks))
dataset = dc.data.NumpyDataset(X, y, w, ids)
classification_metric = dc.metrics.Metric(dc.metrics.roc_auc_score)
sklearn_model = RandomForestClassifier()
model = dc.models.SklearnModel(sklearn_model)
# Fit trained model
model.fit(dataset)
model.save()
# Eval model on train
scores = model.evaluate(dataset, [classification_metric])
assert scores[classification_metric.name] > .9
@pytest.mark.torch
def test_regression_overfit():
"""Test that MultitaskRegressor can overfit simple regression datasets."""
n_samples = 10
n_features = 3
n_tasks = 1
# Generate dummy dataset
np.random.seed(123)
ids = np.arange(n_samples)
X = np.random.rand(n_samples, n_features)
y = np.zeros((n_samples, n_tasks))
w = np.ones((n_samples, n_tasks))
dataset = dc.data.NumpyDataset(X, y, w, ids)
regression_metric = dc.metrics.Metric(dc.metrics.mean_squared_error)
# TODO(rbharath): This breaks with optimizer="momentum". Why?
model = dc.models.MultitaskRegressor(
n_tasks,
n_features,
dropouts=[0.],
weight_init_stddevs=[np.sqrt(6) / np.sqrt(1000)],
batch_size=n_samples,
learning_rate=0.003)
# Fit trained model
model.fit(dataset, nb_epoch=100)
# Eval model on train
scores = model.evaluate(dataset, [regression_metric])
assert scores[regression_metric.name] < 0.007
@pytest.mark.torch
def test_classification_overfit():
"""Test that MultitaskClassifier can overfit simple classification datasets."""
n_samples = 10
n_features = 3
n_tasks = 1
# Generate dummy dataset
np.random.seed(123)
ids = np.arange(n_samples)
X = np.random.rand(n_samples, n_features)
y = np.zeros((n_samples, n_tasks))
w = np.ones((n_samples, n_tasks))
dataset = dc.data.NumpyDataset(X, y, w, ids)
classification_metric = dc.metrics.Metric(dc.metrics.accuracy_score)
model = dc.models.MultitaskClassifier(n_tasks,
n_features,
dropouts=[0.],
weight_init_stddevs=[.1],
batch_size=n_samples,
optimizer=Adam(learning_rate=0.0003,
beta1=0.9,
beta2=0.999))
# Fit trained model
model.fit(dataset, nb_epoch=100)
# Eval model on train
scores = model.evaluate(dataset, [classification_metric])
assert scores[classification_metric.name] > .9
@pytest.mark.torch
def test_residual_classification_overfit():
"""Test that a residual network can overfit simple classification datasets."""
n_samples = 10
n_features = 5
n_tasks = 1
# Generate dummy dataset
np.random.seed(123)
ids = np.arange(n_samples)
X = np.random.rand(n_samples, n_features)
y = np.random.randint(2, size=(n_samples, n_tasks))
w = np.ones((n_samples, n_tasks))
dataset = dc.data.NumpyDataset(X, y, w, ids)
classification_metric = dc.metrics.Metric(dc.metrics.accuracy_score)
model = dc.models.MultitaskClassifier(n_tasks,
n_features,
layer_sizes=[20] * 10,
dropouts=0.0,
batch_size=n_samples,
residual=True)
# Fit trained model
model.fit(dataset, nb_epoch=500)
# Eval model on train
scores = model.evaluate(dataset, [classification_metric])
assert scores[classification_metric.name] > .9
@flaky
@pytest.mark.torch
def test_fittransform_regression_overfit():
"""Test that MultitaskFitTransformRegressor can overfit simple regression datasets."""
n_samples = 10
n_features = 3
n_tasks = 1
# Generate dummy dataset
np.random.seed(123)
ids = np.arange(n_samples)
X = np.random.rand(n_samples, n_features, n_features)
y = np.zeros((n_samples, n_tasks))
w = np.ones((n_samples, n_tasks))
dataset = dc.data.NumpyDataset(X, y, w, ids)
fit_transformers = [dc.trans.CoulombFitTransformer(dataset)]
regression_metric = dc.metrics.Metric(dc.metrics.mean_squared_error)
model = dc.models.MultitaskFitTransformRegressor(
n_tasks, [n_features, n_features],
dropouts=[0.01],
weight_init_stddevs=[np.sqrt(6) / np.sqrt(1000)],
batch_size=n_samples,
fit_transformers=fit_transformers,
n_evals=1,
optimizer=Adam(learning_rate=0.003, beta1=0.9, beta2=0.999))
# Fit trained model
model.fit(dataset, nb_epoch=100)
# Eval model on train
scores = model.evaluate(dataset, [regression_metric])
assert scores[regression_metric.name] < .1
@pytest.mark.torch
def test_skewed_classification_overfit():
"""Test MultitaskClassifier can overfit 0/1 datasets with few actives."""
# n_samples = 100
n_samples = 100
n_features = 3
n_tasks = 1
# Generate dummy dataset
np.random.seed(123)
p = .05
ids = np.arange(n_samples)
X = np.random.rand(n_samples, n_features)
y = np.random.binomial(1, p, size=(n_samples, n_tasks))
w = np.ones((n_samples, n_tasks))
dataset = dc.data.NumpyDataset(X, y, w, ids)
classification_metric = dc.metrics.Metric(dc.metrics.roc_auc_score)
model = dc.models.MultitaskClassifier(n_tasks,
n_features,
dropouts=[0.],
weight_init_stddevs=[.1],
batch_size=n_samples,
learning_rate=0.003)
# Fit trained model
model.fit(dataset, nb_epoch=100)
# Eval model on train
scores = model.evaluate(dataset, [classification_metric])
assert scores[classification_metric.name] > .75
@pytest.mark.torch
def test_skewed_missing_classification_overfit():
"""MultitaskClassifier, skewed data, few actives
Test that MultitaskClassifier can overfit 0/1 datasets with missing data and few
actives. This is intended to be as close to singletask MUV datasets as
possible.
"""
n_samples = 5120
n_features = 6
n_tasks = 1
# Generate dummy dataset
np.random.seed(123)
p = .002
ids = np.arange(n_samples)
X = np.random.rand(n_samples, n_features)
y = np.random.binomial(1, p, size=(n_samples, n_tasks))
w = np.ones((n_samples, n_tasks))
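# The next few lines upweight the rare actives (weight = n_samples / num_actives)
# so that the positive class carries total weight comparable to the negatives.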
y_flat, w_flat = np.squeeze(y), np.squeeze(w)
y_nonzero = y_flat[w_flat != 0]
num_nonzero = np.count_nonzero(y_nonzero)
weight_nonzero = len(y_nonzero) / num_nonzero
w_flat[y_flat != 0] = weight_nonzero
w = np.reshape(w_flat, (n_samples, n_tasks))
dataset = dc.data.DiskDataset.from_numpy(X, y, w, ids)
classification_metric = dc.metrics.Metric(dc.metrics.roc_auc_score)
model = dc.models.MultitaskClassifier(n_tasks,
n_features,
dropouts=[0.],
weight_init_stddevs=[1.],
batch_size=n_samples,
learning_rate=0.003)
# Fit trained model
model.fit(dataset, nb_epoch=100)
# Eval model on train
scores = model.evaluate(dataset, [classification_metric])
assert scores[classification_metric.name] > .7
def test_sklearn_multitask_classification_overfit():
"""Test SKLearn singletask-to-multitask overfits tiny data."""
n_tasks = 10
tasks = ["task%d" % task for task in range(n_tasks)]
n_samples = 10
n_features = 3
# Generate dummy dataset
np.random.seed(123)
ids = np.arange(n_samples)
X = np.random.rand(n_samples, n_features)
y = np.random.randint(2, size=(n_samples, n_tasks))
w = np.ones((n_samples, n_tasks))
dataset = dc.data.DiskDataset.from_numpy(X, y, w, ids)
classification_metric = dc.metrics.Metric(dc.metrics.roc_auc_score,
task_averager=np.mean)
def model_builder(model_dir):
sklearn_model = RandomForestClassifier()
return dc.models.SklearnModel(sklearn_model, model_dir)
model = dc.models.SingletaskToMultitask(tasks, model_builder)
# Fit trained model
model.fit(dataset)
model.save()
# Eval model on train
scores = model.evaluate(dataset, [classification_metric])
assert scores[classification_metric.name] > .9
@flaky
@pytest.mark.torch
def test_multitask_classification_overfit():
"""Test MultitaskClassifier overfits tiny data."""
n_tasks = 10
n_samples = 10
n_features = 3
# Generate dummy dataset
np.random.seed(123)
ids = np.arange(n_samples)
X = np.random.rand(n_samples, n_features)
y = np.zeros((n_samples, n_tasks))
w = np.ones((n_samples, n_tasks))
dataset = dc.data.NumpyDataset(X, y, w, ids)
classification_metric = dc.metrics.Metric(dc.metrics.accuracy_score,
task_averager=np.mean,
n_tasks=n_tasks)
model = dc.models.MultitaskClassifier(n_tasks,
n_features,
dropouts=[0.],
weight_init_stddevs=[.1],
batch_size=n_samples,
optimizer=Adam(learning_rate=0.0003,
beta1=0.9,
beta2=0.999))
# Fit trained model
model.fit(dataset)
# Eval model on train
scores = model.evaluate(dataset, [classification_metric])
assert scores[classification_metric.name] > .9
@flaky
@pytest.mark.torch
def test_multitask_classification_regularization():
"""Test regularizing a MultitaskClassifier."""
n_tasks = 10
n_samples = 10
n_features = 3
# Generate dummy dataset
np.random.seed(123)
ids = np.arange(n_samples)
X = np.random.rand(n_samples, n_features)
y = np.zeros((n_samples, n_tasks))
w = np.ones((n_samples, n_tasks))
dataset = dc.data.NumpyDataset(X, y, w, ids)
classification_metric = dc.metrics.Metric(dc.metrics.accuracy_score,
task_averager=np.mean,
n_tasks=n_tasks)
model = dc.models.MultitaskClassifier(n_tasks,
n_features,
layer_sizes=[1000],
dropouts=0,
weight_decay_penalty=1.0,
weight_decay_penalty_type='l1',
batch_size=n_samples,
learning_rate=0.0003)
# Fit trained model
model.fit(dataset, nb_epoch=500)
# Eval model on train
scores = model.evaluate(dataset, [classification_metric])
assert scores[classification_metric.name] > .9
# Most weights should be close to zero.
elements = 0.0
num_nonzero = 0.0
for p in model.model.parameters():
if len(p.shape) == 2 and p.shape[0] == 1000:
elements += p.numel()
num_nonzero += (p.abs() > 1e-3).sum()
assert num_nonzero / elements < 0.1
@pytest.mark.tensorflow
def test_robust_multitask_classification_overfit():
"""Test robust multitask overfits tiny data."""
n_tasks = 10
n_samples = 10
n_features = 3
# Generate dummy dataset
np.random.seed(123)
ids = np.arange(n_samples)
X = np.random.rand(n_samples, n_features)
y = np.zeros((n_samples, n_tasks))
w = np.ones((n_samples, n_tasks))
dataset = dc.data.NumpyDataset(X, y, w, ids)
classification_metric = dc.metrics.Metric(dc.metrics.accuracy_score,
task_averager=np.mean)
model = dc.models.RobustMultitaskClassifier(n_tasks,
n_features,
layer_sizes=[50],
bypass_layer_sizes=[10],
dropouts=[0.],
learning_rate=0.003,
weight_init_stddevs=[.1],
batch_size=n_samples)
# Fit trained model
model.fit(dataset, nb_epoch=25)
# Eval model on train
scores = model.evaluate(dataset, [classification_metric])
assert scores[classification_metric.name] > .9
@pytest.mark.tensorflow
def test_IRV_multitask_classification_overfit():
"""Test IRV classifier overfits tiny data."""
n_tasks = 5
n_samples = 10
n_features = 128
# Generate dummy dataset
np.random.seed(123)
ids = np.arange(n_samples)
X = np.random.randint(2, size=(n_samples, n_features))
y = np.ones((n_samples, n_tasks))
w = np.ones((n_samples, n_tasks))
dataset = dc.data.NumpyDataset(X, y, w, ids)
IRV_transformer = dc.trans.IRVTransformer(5, n_tasks, dataset)
dataset_trans = IRV_transformer.transform(dataset)
classification_metric = dc.metrics.Metric(dc.metrics.accuracy_score,
task_averager=np.mean)
model = dc.models.MultitaskIRVClassifier(n_tasks,
K=5,
learning_rate=0.01,
batch_size=n_samples)
# Fit trained model
model.fit(dataset_trans)
# Eval model on train
scores = model.evaluate(dataset_trans, [classification_metric])
assert scores[classification_metric.name] > .9
def test_sklearn_multitask_regression_overfit():
"""Test SKLearn singletask-to-multitask overfits tiny regression data."""
n_tasks = 2
tasks = ["task%d" % task for task in range(n_tasks)]
n_samples = 10
n_features = 3
# Generate dummy dataset
np.random.seed(123)
ids = np.arange(n_samples)
X = np.random.rand(n_samples, n_features)
y = np.random.rand(n_samples, n_tasks)
w = np.ones((n_samples, n_tasks))
dataset = dc.data.DiskDataset.from_numpy(X, y, w, ids)
regression_metric = dc.metrics.Metric(dc.metrics.r2_score,
task_averager=np.mean)
def model_builder(model_dir):
sklearn_model = RandomForestRegressor()
return dc.models.SklearnModel(sklearn_model, model_dir)
model = dc.models.SingletaskToMultitask(tasks, model_builder)
# Fit trained model
model.fit(dataset)
model.save()
# Eval model on train
scores = model.evaluate(dataset, [regression_metric])
assert scores[regression_metric.name] > .7
@pytest.mark.torch
def test_multitask_regression_overfit():
"""Test MultitaskRegressor overfits tiny data."""
n_tasks = 10
n_samples = 10
n_features = 10
# Generate dummy dataset
np.random.seed(123)
ids = np.arange(n_samples)
X = np.random.rand(n_samples, n_features)
y = np.random.rand(n_samples, n_tasks)
w = np.ones((n_samples, n_tasks))
dataset = dc.data.NumpyDataset(X, y, w, ids)
regression_metric = dc.metrics.Metric(dc.metrics.mean_squared_error,
task_averager=np.mean,
mode="regression")
model = dc.models.MultitaskRegressor(n_tasks,
n_features,
dropouts=0.0,
batch_size=n_samples)
# Fit trained model
model.fit(dataset, nb_epoch=1000)
# Eval model on train
scores = model.evaluate(dataset, [regression_metric])
assert scores[regression_metric.name] < .02
@pytest.mark.torch
def test_multitask_regression_regularization():
"""Test regularizing a MultitaskRegressor."""
n_tasks = 10
n_samples = 10
n_features = 10
# Generate dummy dataset
np.random.seed(123)
ids = np.arange(n_samples)
X = np.random.rand(n_samples, n_features)
y = np.random.rand(n_samples, n_tasks)
w = np.ones((n_samples, n_tasks))
dataset = dc.data.NumpyDataset(X, y, w, ids)
regression_metric = dc.metrics.Metric(dc.metrics.mean_squared_error,
task_averager=np.mean,
mode="regression")
model = dc.models.MultitaskRegressor(n_tasks,
n_features,
dropouts=0.0,
batch_size=n_samples,
weight_decay_penalty=0.01,
weight_decay_penalty_type='l1')
# Fit trained model
model.fit(dataset, nb_epoch=1000)
# Eval model on train
scores = model.evaluate(dataset, [regression_metric])
assert scores[regression_metric.name] < 0.1
# Most weights should be close to zero.
elements = 0.0
num_nonzero = 0.0
for p in model.model.parameters():
if len(p.shape) == 2 and p.shape[0] == 1000:
elements += p.numel()
num_nonzero += (p.abs() > 1e-3).sum()
assert num_nonzero / elements < 0.1
@pytest.mark.torch
def test_residual_regression_overfit():
"""Test that a residual multitask network can overfit tiny data."""
n_tasks = 10
n_samples = 10
n_features = 10
# Generate dummy dataset
np.random.seed(123)
ids = np.arange(n_samples)
X = np.random.rand(n_samples, n_features)
y = np.random.rand(n_samples, n_tasks)
w = np.ones((n_samples, n_tasks))
dataset = dc.data.NumpyDataset(X, y, w, ids)
regression_metric = dc.metrics.Metric(dc.metrics.mean_squared_error,
task_averager=np.mean,
mode="regression")
model = dc.models.MultitaskRegressor(n_tasks,
n_features,
layer_sizes=[20] * 10,
dropouts=0.0,
batch_size=n_samples,
residual=True)
# Fit trained model
model.fit(dataset, nb_epoch=1000)
# Eval model on train
scores = model.evaluate(dataset, [regression_metric])
assert scores[regression_metric.name] < .02
@pytest.mark.tensorflow
def test_robust_multitask_regression_overfit():
"""Test robust multitask overfits tiny data."""
np.random.seed(123)
tf.random.set_seed(123)
n_tasks = 10
n_samples = 10
n_features = 3
# Generate dummy dataset
np.random.seed(123)
ids = np.arange(n_samples)
X = np.random.rand(n_samples, n_features)
y = np.zeros((n_samples, n_tasks))
w = np.ones((n_samples, n_tasks))
dataset = dc.data.NumpyDataset(X, y, w, ids)
regression_metric = dc.metrics.Metric(dc.metrics.mean_squared_error,
task_averager=np.mean,
mode="regression")
model = dc.models.RobustMultitaskRegressor(n_tasks,
n_features,
layer_sizes=[50],
bypass_layer_sizes=[10],
dropouts=[0.],
learning_rate=0.003,
weight_init_stddevs=[.1],
batch_size=n_samples)
# Fit trained model
model.fit(dataset, nb_epoch=25)
# Eval model on train
scores = model.evaluate(dataset, [regression_metric])
assert scores[regression_metric.name] < .2
@pytest.mark.tensorflow
def test_progressive_classification_overfit():
"""Test progressive multitask overfits tiny data."""
np.random.seed(123)
n_tasks = 5
n_samples = 10
n_features = 6
# Generate dummy dataset
np.random.seed(123)
ids = np.arange(n_samples)
X = np.random.rand(n_samples, n_features)
y = np.random.randint(2, size=(n_samples, n_tasks))
w = np.ones((n_samples, n_tasks))
dataset = dc.data.NumpyDataset(X, y, w, ids)
metric = dc.metrics.Metric(dc.metrics.accuracy_score, task_averager=np.mean)
model = dc.models.ProgressiveMultitaskClassifier(n_tasks,
n_features,
layer_sizes=[50],
bypass_layer_sizes=[10],
dropouts=[0.],
learning_rate=0.001,
weight_init_stddevs=[.1],
alpha_init_stddevs=[.02],
batch_size=n_samples)
# Fit trained model
model.fit(dataset, nb_epoch=300)
# Eval model on train
scores = model.evaluate(dataset, [metric])
assert scores[metric.name] > .9
@pytest.mark.tensorflow
def test_progressive_regression_overfit():
"""Test progressive multitask overfits tiny data."""
np.random.seed(123)
n_tasks = 5
n_samples = 10
n_features = 6
# Generate dummy dataset
np.random.seed(123)
ids = np.arange(n_samples)
X = np.random.rand(n_samples, n_features)
y = np.random.rand(n_samples, n_tasks)
w = np.ones((n_samples, n_tasks))
dataset = dc.data.NumpyDataset(X, y, w, ids)
metric = dc.metrics.Metric(dc.metrics.rms_score, task_averager=np.mean)
model = dc.models.ProgressiveMultitaskRegressor(n_tasks,
n_features,
layer_sizes=[50],
bypass_layer_sizes=[10],
dropouts=[0.],
learning_rate=0.002,
weight_init_stddevs=[.1],
alpha_init_stddevs=[.02],
batch_size=n_samples)
# Fit trained model
model.fit(dataset, nb_epoch=200)
# Eval model on train
scores = model.evaluate(dataset, [metric])
assert scores[metric.name] < .2
@pytest.mark.torch
def test_multitask_regressor_uncertainty():
"""Test computing uncertainty for a MultitaskRegressor."""
n_tasks = 1
n_samples = 30
n_features = 1
noise = 0.1
# Generate dummy dataset
X = np.random.rand(n_samples, n_features, 1)
y = 10 * X + np.random.normal(scale=noise, size=(n_samples, n_tasks, 1))
dataset = dc.data.NumpyDataset(X, y)
model = dc.models.MultitaskRegressor(n_tasks,
n_features,
layer_sizes=[200],
weight_init_stddevs=[.1],
batch_size=n_samples,
dropouts=0.1,
learning_rate=0.003,
uncertainty=True)
# Fit trained model
model.fit(dataset, nb_epoch=2500)
# Predict the output and uncertainty.
pred, std = model.predict_uncertainty(dataset)
assert np.mean(np.abs(y - pred)) < 1.0
assert noise < np.mean(std) < 1.0
@pytest.mark.torch
def test_multitask_regressor_delaney_uncertainty():
"""Test computing uncertainty on a larger dataset."""
tasks, datasets, transformers = dc.molnet.load_delaney('ECFP')
train_dataset, valid_dataset, test_dataset = datasets
model = dc.models.MultitaskRegressor(len(tasks), 1024, uncertainty=True)
model.fit(train_dataset, nb_epoch=20)
metric = dc.metrics.Metric(dc.metrics.pearsonr)
scores = model.evaluate(test_dataset, [metric], transformers)
assert scores['pearsonr'] > 0.5
@pytest.mark.slow
@pytest.mark.tensorflow
def test_DAG_singletask_regression_overfit():
"""Test DAG regressor multitask overfits tiny data."""
np.random.seed(123)
tf.random.set_seed(123)
n_tasks = 1
current_dir = os.path.dirname(os.path.abspath(__file__))
# Load mini log-solubility dataset.
featurizer = dc.feat.ConvMolFeaturizer()
tasks = ["outcome"]
input_file = os.path.join(current_dir, "assets/example_regression.csv")
loader = dc.data.CSVLoader(tasks=tasks,
feature_field="smiles",
featurizer=featurizer)
dataset = loader.create_dataset(input_file)
regression_metric = dc.metrics.Metric(dc.metrics.pearson_r2_score,
task_averager=np.mean)
n_feat = 75
batch_size = 10
transformer = dc.trans.DAGTransformer(max_atoms=50)
dataset = transformer.transform(dataset)
model = dc.models.DAGModel(n_tasks,
max_atoms=50,
n_atom_feat=n_feat,
batch_size=batch_size,
learning_rate=0.001,
use_queue=False,
mode="regression")
# Fit trained model
model.fit(dataset, nb_epoch=1200)
# Eval model on train
scores = model.evaluate(dataset, [regression_metric])
assert scores[regression_metric.name] > .8
@pytest.mark.tensorflow
def test_weave_singletask_classification_overfit():
"""Test weave model overfits tiny data."""
np.random.seed(123)
tf.random.set_seed(123)
n_tasks = 1
current_dir = os.path.dirname(os.path.abspath(__file__))
# Load mini log-solubility dataset.
featurizer = dc.feat.WeaveFeaturizer()
tasks = ["outcome"]
input_file = os.path.join(current_dir, "assets/example_classification.csv")
loader = dc.data.CSVLoader(tasks=tasks,
feature_field="smiles",
featurizer=featurizer)
dataset = loader.create_dataset(input_file)
classification_metric = dc.metrics.Metric(dc.metrics.accuracy_score)
batch_size = 10
model = dc.models.WeaveModel(n_tasks,
batch_size=batch_size,
learning_rate=0.0003,
dropout=0.0,
mode="classification")
# Fit trained model
model.fit(dataset, nb_epoch=100)
# Eval model on train
scores = model.evaluate(dataset, [classification_metric])
assert scores[classification_metric.name] > .65
@pytest.mark.slow
@pytest.mark.tensorflow
def test_weave_singletask_regression_overfit():
"""Test weave model overfits tiny data."""
np.random.seed(123)
tf.random.set_seed(123)
n_tasks = 1
current_dir = os.path.dirname(os.path.abspath(__file__))
# Load mini log-solubility dataset.
featurizer = dc.feat.WeaveFeaturizer()
tasks = ["outcome"]
input_file = os.path.join(current_dir, "assets/example_regression.csv")
loader = dc.data.CSVLoader(tasks=tasks,
feature_field="smiles",
featurizer=featurizer)
dataset = loader.create_dataset(input_file)
regression_metric = dc.metrics.Metric(dc.metrics.pearson_r2_score,
task_averager=np.mean)
batch_size = 10
model = dc.models.WeaveModel(n_tasks,
batch_size=batch_size,
learning_rate=0.0003,
dropout=0.0,
mode="regression")
# Fit trained model
model.fit(dataset, nb_epoch=200)
# Eval model on train
scores = model.evaluate(dataset, [regression_metric])
assert scores[regression_metric.name] > .8
@pytest.mark.slow
@pytest.mark.tensorflow
def test_MPNN_singletask_regression_overfit():
"""Test MPNN overfits tiny data."""
np.random.seed(123)
tf.random.set_seed(123)
n_tasks = 1
current_dir = os.path.dirname(os.path.abspath(__file__))
# Load mini log-solubility dataset.
featurizer = dc.feat.WeaveFeaturizer()
tasks = ["outcome"]
input_file = os.path.join(current_dir, "assets/example_regression.csv")
loader = dc.data.CSVLoader(tasks=tasks,
feature_field="smiles",
featurizer=featurizer)
dataset = loader.create_dataset(input_file)
regression_metric = dc.metrics.Metric(dc.metrics.pearson_r2_score,
task_averager=np.mean)
n_atom_feat = 75
n_pair_feat = 14
batch_size = 10
model = dc.models.MPNNModel(n_tasks,
n_atom_feat=n_atom_feat,
n_pair_feat=n_pair_feat,
T=2,
M=3,
batch_size=batch_size,
learning_rate=0.001,
use_queue=False,
mode="regression")
# Fit trained model
model.fit(dataset, nb_epoch=50)
# Eval model on train
scores = model.evaluate(dataset, [regression_metric])
assert scores[regression_metric.name] > .8
@pytest.mark.tensorflow
def test_textCNN_singletask_classification_overfit():
"""Test textCNN model overfits tiny data."""
np.random.seed(123)
tf.random.set_seed(123)
n_tasks = 1
current_dir = os.path.dirname(os.path.abspath(__file__))
featurizer = dc.feat.RawFeaturizer()
tasks = ["outcome"]
input_file = os.path.join(current_dir, "assets/example_classification.csv")
loader = dc.data.CSVLoader(tasks=tasks,
feature_field="smiles",
featurizer=featurizer)
dataset = loader.create_dataset(input_file)
classification_metric = dc.metrics.Metric(dc.metrics.accuracy_score)
char_dict, length = dc.models.TextCNNModel.build_char_dict(dataset)
batch_size = 10
model = dc.models.TextCNNModel(n_tasks,
char_dict,
seq_length=length,
batch_size=batch_size,
learning_rate=0.001,
use_queue=False,
mode="classification")
# Fit trained model
model.fit(dataset, nb_epoch=200)
# Eval model on train
scores = model.evaluate(dataset, [classification_metric])
assert scores[classification_metric.name] > .8
@flaky()
@pytest.mark.tensorflow
def test_textCNN_singletask_regression_overfit():
"""Test textCNN model overfits tiny data."""
np.random.seed(123)
tf.random.set_seed(123)
n_tasks = 1
current_dir = os.path.dirname(os.path.abspath(__file__))
# Load mini log-solubility dataset.
featurizer = dc.feat.RawFeaturizer()
tasks = ["outcome"]
input_file = os.path.join(current_dir, "assets/example_regression.csv")
loader = dc.data.CSVLoader(tasks=tasks,
feature_field="smiles",
featurizer=featurizer)
dataset = loader.create_dataset(input_file)
regression_metric = dc.metrics.Metric(dc.metrics.pearson_r2_score,
task_averager=np.mean)
char_dict, length = dc.models.TextCNNModel.build_char_dict(dataset)
batch_size = 10
model = dc.models.TextCNNModel(n_tasks,
char_dict,
seq_length=length,
batch_size=batch_size,
learning_rate=0.001,
use_queue=False,
mode="regression")
# Fit trained model
model.fit(dataset, nb_epoch=200)
# Eval model on train
scores = model.evaluate(dataset, [regression_metric])
assert scores[regression_metric.name] > .9
@pytest.mark.torch
def test_dtnn_singletask_regression_overfit():
current_dir = os.path.dirname(os.path.abspath(__file__))
dataset_file = os.path.join(current_dir, "assets/qm9_mini.sdf")
TASKS = ["alpha"]
loader = dc.data.SDFLoader(tasks=TASKS,
featurizer=dc.feat.CoulombMatrix(29),
sanitize=True)
data = loader.create_dataset(dataset_file, shard_size=100)
dataset = data.select(range(10))
regression_metric = dc.metrics.Metric(dc.metrics.pearson_r2_score,
task_averager=np.mean)
model = dc.models.torch_models.DTNNModel(dataset.y.shape[1])
model.fit(dataset, nb_epoch=100)
scores = model.evaluate(dataset, [regression_metric])
assert scores[regression_metric.name] > .7
@flaky()
@pytest.mark.torch
def test_dtnn_multitask_regression_overfit():
current_dir = os.path.dirname(os.path.abspath(__file__))
dataset_file = os.path.join(current_dir, "assets/qm9_mini.sdf")
TASKS = ["alpha", "u0_atom"]
loader = dc.data.SDFLoader(tasks=TASKS,
featurizer=dc.feat.CoulombMatrix(29),
sanitize=True)
data = loader.create_dataset(dataset_file, shard_size=100)
dataset = data.select(range(10))
regression_metric = dc.metrics.Metric(dc.metrics.pearson_r2_score,
task_averager=np.mean)
model = dc.models.torch_models.DTNNModel(dataset.y.shape[1])
model.fit(dataset, nb_epoch=100)
scores = model.evaluate(dataset, [regression_metric])
assert scores[regression_metric.name] > .7
<file_sep>"""
Utility functions to evaluate models on datasets.
"""
import csv
import logging
import warnings
from typing import Any, Callable, Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
import deepchem as dc
from deepchem.metrics import Metric
logger = logging.getLogger(__name__)
Score = Dict[str, float]
Metric_Func = Callable[..., Any]
Metrics = Union[Metric, Metric_Func, List[Metric], List[Metric_Func]]
def output_statistics(scores: Score, stats_out: str) -> None:
"""Write computed stats to file.
Statistics are written to the specified `stats_out` file.
Parameters
----------
scores: dict
Dictionary mapping names of metrics to scores.
stats_out: str
Name of file to write scores to.
"""
logger.warning("output_statistics is deprecated.")
with open(stats_out, "w") as statsfile:
statsfile.write(str(scores) + "\n")
def output_predictions(dataset: "dc.data.Dataset", y_preds: np.ndarray,
csv_out: str) -> None:
"""Writes predictions to file.
Writes predictions made on `dataset` to a specified file on
disk. `dataset.ids` are used to format predictions. The produced CSV file
has the following format:
| ID | Task1Name | Task2Name |
| ----------- | ------------ | ------------ |
| identifier1 | prediction11 | prediction12 |
| identifier2 | prediction21 | prediction22 |
Parameters
----------
dataset: dc.data.Dataset
Dataset on which predictions have been made.
y_preds: np.ndarray
Predictions to output
csv_out: str
Name of file to write predictions to.
"""
data_ids = dataset.ids
n_tasks = len(dataset.get_task_names())
y_preds = np.reshape(y_preds, (len(y_preds), n_tasks))
assert len(y_preds) == len(data_ids)
with open(csv_out, "w") as csvfile:
csvwriter = csv.writer(csvfile)
csvwriter.writerow(["ID"] + dataset.get_task_names())
for mol_id, y_pred in zip(data_ids, y_preds):
csvwriter.writerow([mol_id] + list(y_pred))
def _process_metric_input(metrics: Metrics) -> List[Metric]:
"""A private helper method which processes metrics correctly.
Metrics can be input as `dc.metrics.Metric` objects, lists of
`dc.metrics.Metric` objects, or as raw metric functions or lists of
raw metric functions. Metric functions are functions which accept
two arguments `y_true, y_pred` both of which must be `np.ndarray`
objects and return a float value. This function normalizes these
different types of input into a `list[dc.metrics.Metric]`
for ease of later processing.
Note that raw metric functions which don't have names attached will
simply be named "metric-#", where # is their position in the provided
metric list, for example "metric-1" or "metric-7".
Parameters
----------
metrics: dc.metrics.Metric/list[dc.metrics.Metric]/metric function/ list[metric function]
Input metrics to process.
Returns
-------
final_metrics: list[dc.metrics.Metric]
Converts all input metrics and outputs a list of
`dc.metrics.Metric` objects.
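Examples
--------
A small sketch of the wrapping behaviour, using `dc.metrics.mae_score` as
one convenient raw metric function (any callable with the same signature
would do):
>>> import deepchem as dc
>>> processed = _process_metric_input(dc.metrics.mae_score)
>>> len(processed)
1
>>> isinstance(processed[0], dc.metrics.Metric)
True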
"""
# Make sure input is a list
if not isinstance(metrics, list):
# FIXME: Incompatible types in assignment
metrics = [metrics] # type: ignore
final_metrics = []
# FIXME: Argument 1 to "enumerate" has incompatible type
for i, metric in enumerate(metrics): # type: ignore
# Ensure that metric is wrapped in a list.
if isinstance(metric, Metric):
final_metrics.append(metric)
# This case checks if input is a function then wraps a
# dc.metrics.Metric object around it
elif callable(metric):
wrap_metric = Metric(metric, name="metric-%d" % (i + 1))
final_metrics.append(wrap_metric)
else:
raise ValueError(
"metrics must be one of metric function / dc.metrics.Metric object /"
"list of dc.metrics.Metric or metric functions.")
return final_metrics
def relative_difference(x: np.ndarray, y: np.ndarray) -> np.ndarray:
"""Compute the relative difference between x and y
The two argument arrays must have the same shape.
Parameters
----------
x: np.ndarray
First input array
y: np.ndarray
Second input array
Returns
-------
z: np.ndarray
We will have `z == (x - y) / np.abs(y)`.
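Examples
--------
A quick numeric sketch of the definition above:
>>> import numpy as np
>>> float(relative_difference(np.array([3.0]), np.array([2.0]))[0])
0.5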
"""
warnings.warn(
"Directly use `(x - y) / np.abs(y)` or `np.isclose`, `np.allclose` for testing tolerance",
FutureWarning)
z = (x - y) / abs(y)
return z
class Evaluator(object):
"""Class that evaluates a model on a given dataset.
The evaluator class is used to evaluate a `dc.models.Model` class on
a given `dc.data.Dataset` object. The evaluator is aware of
`dc.trans.Transformer` objects so will automatically undo any
transformations which have been applied.
Examples
--------
Evaluators allow a model to be evaluated directly on a metric
from `sklearn`. Let's do a bit of setup, constructing our dataset and
model.
>>> import deepchem as dc
>>> import numpy as np
>>> X = np.random.rand(10, 5)
>>> y = np.random.rand(10, 1)
>>> dataset = dc.data.NumpyDataset(X, y)
>>> model = dc.models.MultitaskRegressor(1, 5)
>>> transformers = []
Then you can evaluate this model as follows
>>> import sklearn
>>> evaluator = Evaluator(model, dataset, transformers)
>>> multitask_scores = evaluator.compute_model_performance(
... sklearn.metrics.mean_absolute_error)
Evaluators can also be used with `dc.metrics.Metric` objects
in case you want to customize your metric further.
>>> evaluator = Evaluator(model, dataset, transformers)
>>> metric = dc.metrics.Metric(dc.metrics.mae_score)
>>> multitask_scores = evaluator.compute_model_performance(metric)
"""
def __init__(self, model, dataset: "dc.data.Dataset",
transformers: List["dc.trans.Transformer"]):
"""Initialize this evaluator
Parameters
----------
model: Model
Model to evaluate. Note that this must be a regression or
classification model and not a generative model.
dataset: Dataset
Dataset object to evaluate `model` on.
transformers: List[Transformer]
List of `dc.trans.Transformer` objects. These transformations
must have been applied to `dataset` previously. The dataset will
be untransformed for metric evaluation.
"""
self.model = model
self.dataset = dataset
self.output_transformers = [
transformer for transformer in transformers
if transformer.transform_y
]
def output_statistics(self, scores: Score, stats_out: str):
""" Write computed stats to file.
Parameters
----------
scores: dict
Dictionary mapping names of metrics to scores.
stats_out: str
Name of file to write scores to.
"""
logger.warning(
"Evaluator.output_statistics is deprecated."
"Please use dc.utils.evaluate.output_statistics instead."
"This method will be removed in a future version of DeepChem.")
with open(stats_out, "w") as statsfile:
statsfile.write(str(scores) + "\n")
def output_predictions(self, y_preds: np.ndarray, csv_out: str):
"""Writes predictions to file.
Writes predictions made on `self.dataset` to a specified file on
disk. `self.dataset.ids` are used to format predictions.
Parameters
----------
y_preds: np.ndarray
Predictions to output
csv_out: str
Name of file to write predictions to.
"""
logger.warning(
"Evaluator.output_predictions is deprecated."
"Please use dc.utils.evaluate.output_predictions instead."
"This method will be removed in a future version of DeepChem.")
data_ids = self.dataset.ids
n_tasks = len(self.dataset.get_task_names())
y_preds = np.reshape(y_preds, (len(y_preds), n_tasks))
assert len(y_preds) == len(data_ids)
with open(csv_out, "w") as csvfile:
csvwriter = csv.writer(csvfile)
csvwriter.writerow(["ID"] + self.dataset.get_task_names())
for mol_id, y_pred in zip(data_ids, y_preds):
csvwriter.writerow([mol_id] + list(y_pred))
def compute_model_performance(
self,
metrics: Metrics,
csv_out: Optional[str] = None,
stats_out: Optional[str] = None,
per_task_metrics: bool = False,
use_sample_weights: bool = False,
n_classes: int = 2) -> Union[Score, Tuple[Score, Score]]:
"""
Computes statistics of model on test data and saves results to csv.
Parameters
----------
metrics: dc.metrics.Metric/list[dc.metrics.Metric]/function
The set of metrics provided. This class attempts to do some
intelligent handling of input. If a single `dc.metrics.Metric`
object is provided or a list is provided, it will evaluate
`self.model` on these metrics. If a function is provided, it is
assumed to be a metric function that this method will attempt to
wrap in a `dc.metrics.Metric` object. A metric function must
accept two arguments, `y_true, y_pred` both of which are
`np.ndarray` objects and return a floating point score. The
metric function may also accept a keyword argument
`sample_weight` to account for per-sample weights.
csv_out: str, optional (DEPRECATED)
Filename to write CSV of model predictions.
stats_out: str, optional (DEPRECATED)
Filename to write computed statistics.
per_task_metrics: bool, optional
If true, return computed metric for each task on multitask dataset.
use_sample_weights: bool, optional (default False)
If set, use per-sample weights `w`.
n_classes: int, optional (default 2)
If specified, will use `n_classes` as the number of unique classes
in `self.dataset`. Note that this argument will be ignored for
regression metrics.
Returns
-------
multitask_scores: dict
Dictionary mapping names of metrics to metric scores.
all_task_scores: dict, optional
If `per_task_metrics == True`, then returns a second dictionary
of scores for each task separately.
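Examples
--------
A self-contained sketch of per-task evaluation; the random data and the
`MultitaskRegressor` settings below are placeholders chosen only for brevity:
>>> import numpy as np
>>> import deepchem as dc
>>> dataset = dc.data.NumpyDataset(np.random.rand(10, 5), np.random.rand(10, 1))
>>> model = dc.models.MultitaskRegressor(1, 5)
>>> evaluator = Evaluator(model, dataset, [])
>>> metric = dc.metrics.Metric(dc.metrics.mae_score)
>>> scores, per_task_scores = evaluator.compute_model_performance(
...     metric, per_task_metrics=True)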
"""
if csv_out is not None:
logger.warning(
"csv_out is deprecated as an argument and will be removed in a future version of DeepChem."
"Output is not written to CSV; manually write output instead.")
if stats_out is not None:
logger.warning(
"stats_out is deprecated as an argument and will be removed in a future version of DeepChem."
"Stats output is not written; please manually write output instead"
)
# Process input metrics
metrics = _process_metric_input(metrics)
y = self.dataset.y
y = dc.trans.undo_transforms(y, self.output_transformers)
w = self.dataset.w
y_pred = self.model.predict(self.dataset, self.output_transformers)
n_tasks = len(self.dataset.get_task_names())
multitask_scores = {}
all_task_scores = {}
# Compute multitask metrics
for metric in metrics:
results = metric.compute_metric(
y,
y_pred,
w,
per_task_metrics=per_task_metrics,
n_tasks=n_tasks,
n_classes=n_classes,
use_sample_weights=use_sample_weights)
if per_task_metrics:
multitask_scores[metric.name], computed_metrics = results
all_task_scores[metric.name] = computed_metrics
else:
multitask_scores[metric.name] = results
if not per_task_metrics:
return multitask_scores
else:
return multitask_scores, all_task_scores
class GeneratorEvaluator(object):
"""Evaluate models on a stream of data.
This class is a partner class to `Evaluator`. Instead of operating
over datasets this class operates over a generator which yields
batches of data to feed into provided model.
Examples
--------
>>> import deepchem as dc
>>> import numpy as np
>>> X = np.random.rand(10, 5)
>>> y = np.random.rand(10, 1)
>>> dataset = dc.data.NumpyDataset(X, y)
>>> model = dc.models.MultitaskRegressor(1, 5)
>>> generator = model.default_generator(dataset, pad_batches=False)
>>> transformers = []
Then you can evaluate this model as follows
>>> import sklearn
>>> evaluator = GeneratorEvaluator(model, generator, transformers)
>>> multitask_scores = evaluator.compute_model_performance(
... sklearn.metrics.mean_absolute_error)
Evaluators can also be used with `dc.metrics.Metric` objects
in case you want to customize your metric further. (Note that a given
generator can only be used once so we have to redefine the generator here.)
>>> generator = model.default_generator(dataset, pad_batches=False)
>>> evaluator = GeneratorEvaluator(model, generator, transformers)
>>> metric = dc.metrics.Metric(dc.metrics.mae_score)
>>> multitask_scores = evaluator.compute_model_performance(metric)
"""
def __init__(self,
model,
generator: Iterable[Tuple[Any, Any, Any]],
transformers: List["dc.trans.Transformer"],
labels: Optional[List] = None,
weights: Optional[List] = None):
"""
Parameters
----------
model: Model
Model to evaluate.
generator: generator
Generator which yields batches to feed into the model. For a
KerasModel, it should be a tuple of the form (inputs, labels,
weights). The "correct" way to create this generator is to use
`model.default_generator` as shown in the example above.
transformers: List[Transformer]
Transformers to "undo" when applied to the model's outputs
labels: list of Layer
layers which are keys in the generator to compare to outputs
weights: list of Layer
layers which are keys in the generator for weight matrices
"""
self.model = model
self.generator = generator
self.output_transformers = [
transformer for transformer in transformers
if transformer.transform_y
]
self.label_keys = labels
self.weights = weights
if labels is not None and len(labels) != 1:
raise ValueError(
"GeneratorEvaluator currently only supports one label")
def compute_model_performance(
self,
metrics: Metrics,
per_task_metrics: bool = False,
use_sample_weights: bool = False,
n_classes: int = 2) -> Union[Score, Tuple[Score, Score]]:
"""
Computes statistics of model on test data and saves results to csv.
Parameters
----------
metrics: dc.metrics.Metric/list[dc.metrics.Metric]/function
The set of metrics provided. This class attempts to do some
intelligent handling of input. If a single `dc.metrics.Metric`
object is provided or a list is provided, it will evaluate
`self.model` on these metrics. If a function is provided, it is
assumed to be a metric function that this method will attempt to
wrap in a `dc.metrics.Metric` object. A metric function must
accept two arguments, `y_true, y_pred` both of which are
`np.ndarray` objects and return a floating point score.
per_task_metrics: bool, optional
If true, return computed metric for each task on multitask
dataset.
use_sample_weights: bool, optional (default False)
If set, use per-sample weights `w`.
n_classes: int, optional (default 2)
If specified, will assume that all `metrics` are classification
metrics and will use `n_classes` as the number of unique classes
in `self.dataset`.
Returns
-------
multitask_scores: dict
Dictionary mapping names of metrics to metric scores.
all_task_scores: dict, optional
If `per_task_metrics == True`, then returns a second dictionary
of scores for each task separately.
"""
metrics = _process_metric_input(metrics)
# We use y/w to aggregate labels/weights across generator.
y = []
w = []
def generator_closure():
"""This function is used to pull true labels/weights out as we iterate over the generator."""
if self.label_keys is None:
weights = None
# This is a KerasModel.
for batch in self.generator:
# Some datasets have weights
try:
inputs, labels, weights = batch
except ValueError:
try:
inputs, labels, weights, ids = batch
except ValueError:
raise ValueError(
"Generator must yield values of form (input, labels, weights) or (input, labels, weights, ids)"
)
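# Collect the first label/weight array from each batch; these are
# concatenated below and compared against the model's predictions.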
y.append(labels[0])
if len(weights) > 0:
w.append(weights[0])
yield (inputs, labels, weights)
# Process predictions and populate y/w lists
y_pred = self.model.predict_on_generator(generator_closure())
# Combine labels/weights
y = np.concatenate(y, axis=0)
w = np.concatenate(w, axis=0)
multitask_scores = {}
all_task_scores = {}
# Undo data transformations.
y_true = dc.trans.undo_transforms(y, self.output_transformers)
y_pred = dc.trans.undo_transforms(y_pred, self.output_transformers)
# Compute multitask metrics
for metric in metrics:
results = metric.compute_metric(
y_true,
y_pred,
w,
per_task_metrics=per_task_metrics,
n_classes=n_classes,
use_sample_weights=use_sample_weights)
if per_task_metrics:
multitask_scores[metric.name], computed_metrics = results
all_task_scores[metric.name] = computed_metrics
else:
multitask_scores[metric.name] = results
if not per_task_metrics:
return multitask_scores
else:
return multitask_scores, all_task_scores
<file_sep>from typing import TYPE_CHECKING
from deepchem.feat.base_classes import Featurizer
if TYPE_CHECKING:
import transformers
class HuggingFaceFeaturizer(Featurizer):
"""Wrapper class that wraps HuggingFace tokenizers as DeepChem featurizers
The `HuggingFaceFeaturizer` class provides a thin wrapper
around Hugging Face tokenizers, allowing them to be used as DeepChem
featurizers. This might be useful in scenarios where a user needs to use
a Hugging Face tokenizer when loading a dataset.
Example
-------
>>> from deepchem.feat import HuggingFaceFeaturizer
>>> from transformers import RobertaTokenizerFast
>>> hf_tokenizer = RobertaTokenizerFast.from_pretrained("seyonec/PubChem10M_SMILES_BPE_60k")
>>> featurizer = HuggingFaceFeaturizer(tokenizer=hf_tokenizer)
>>> result = featurizer.featurize(['CC(=O)C'])
"""
def __init__(
self,
tokenizer: 'transformers.tokenization_utils_fast.PreTrainedTokenizerFast'
):
"""Initializes a tokenizer wrapper
Parameters
----------
tokenizer: transformers.tokenization_utils_fast.PreTrainedTokenizerFast
The tokenizer to use for featurization
"""
self.tokenizer = tokenizer
def _featurize(self, datapoint):
"""Featurizes a single datapoint using the tokenizer"""
return self.tokenizer(datapoint).data
<file_sep>"""
Tests for getting featurizer, transformer, and splitter classes.
"""
import unittest
from deepchem.feat.base_classes import Featurizer
from deepchem.trans.transformers import Transformer
from deepchem.splits.splitters import Splitter
from deepchem.molnet.defaults import get_defaults
class TestDefaults(unittest.TestCase):
"""Tests for getting featurizer, transformer, and splitter classes."""
def test_defaults(self):
"""Test getting defaults for MolNet loaders."""
feats = get_defaults("feat")
trans = get_defaults("trans")
splits = get_defaults("splits")
fkey = next(iter(feats))
assert isinstance(fkey, str)
assert issubclass(feats[fkey], Featurizer)
tkey = next(iter(trans))
assert isinstance(tkey, str)
assert issubclass(trans[tkey], Transformer)
skey = next(iter(splits))
assert isinstance(skey, str)
assert issubclass(splits[skey], Splitter)
<file_sep>"""
Tests for splitter objects.
"""
import unittest
import numpy as np
import deepchem as dc
class TestTaskSplitters(unittest.TestCase):
"""Test some basic splitters."""
def test_multitask_train_valid_test_split(self):
"""Test TaskSplitter train/valid/test split on multitask dataset."""
n_samples = 100
n_features = 10
n_tasks = 10
X = np.random.rand(n_samples, n_features)
p = .05 # proportion actives
y = np.random.binomial(1, p, size=(n_samples, n_tasks))
dataset = dc.data.NumpyDataset(X, y)
task_splitter = dc.splits.TaskSplitter()
train, valid, test = task_splitter.train_valid_test_split(dataset,
frac_train=.4,
frac_valid=.3,
frac_test=.3)
assert len(train.get_task_names()) == 4
assert len(valid.get_task_names()) == 3
assert len(test.get_task_names()) == 3
def test_multitask_K_fold_split(self):
"""Test TaskSplitter K-fold split on multitask dataset."""
n_samples = 100
n_features = 10
n_tasks = 10
X = np.random.rand(n_samples, n_features)
p = .05 # proportion actives
y = np.random.binomial(1, p, size=(n_samples, n_tasks))
dataset = dc.data.NumpyDataset(X, y)
K = 5
task_splitter = dc.splits.TaskSplitter()
fold_datasets = task_splitter.k_fold_split(dataset, K)
for fold_dataset in fold_datasets:
assert len(fold_dataset.get_task_names()) == 2
def test_uneven_k_fold_split(self):
"""Test k-fold-split works when K does not divide n_tasks."""
n_samples = 100
n_features = 10
n_tasks = 17
X = np.random.rand(n_samples, n_features)
p = .05 # proportion actives
y = np.random.binomial(1, p, size=(n_samples, n_tasks))
dataset = dc.data.NumpyDataset(X, y)
K = 4
task_splitter = dc.splits.TaskSplitter()
fold_datasets = task_splitter.k_fold_split(dataset, K)
for fold in range(K - 1):
fold_dataset = fold_datasets[fold]
assert len(fold_dataset.get_task_names()) == 4
assert len(fold_datasets[-1].get_task_names()) == 5
def test_uneven_train_valid_test_split(self):
"""Test train/valid/test split works when proportions don't divide n_tasks."""
n_samples = 100
n_features = 10
n_tasks = 11
X = np.random.rand(n_samples, n_features)
p = .05 # proportion actives
y = np.random.binomial(1, p, size=(n_samples, n_tasks))
dataset = dc.data.NumpyDataset(X, y)
task_splitter = dc.splits.TaskSplitter()
train, valid, test = task_splitter.train_valid_test_split(dataset,
frac_train=.4,
frac_valid=.3,
frac_test=.3)
assert len(train.get_task_names()) == 4
assert len(valid.get_task_names()) == 3
# Note that the extra task goes to test
assert len(test.get_task_names()) == 4
def test_merge_fold_datasets(self):
"""Test that (K-1) folds can be merged into train dataset."""
n_samples = 100
n_features = 10
n_tasks = 10
X = np.random.rand(n_samples, n_features)
p = .05 # proportion actives
y = np.random.binomial(1, p, size=(n_samples, n_tasks))
w = np.ones((n_samples, n_tasks))
dataset = dc.data.NumpyDataset(X, y, w)
K = 5
task_splitter = dc.splits.TaskSplitter()
fold_datasets = task_splitter.k_fold_split(dataset, K)
# Number tasks per fold
n_per_fold = 2
for fold in range(K):
train_inds = list(set(range(K)) - set([fold]))
train_fold_datasets = [fold_datasets[ind] for ind in train_inds]
train_dataset = dc.splits.merge_fold_datasets(train_fold_datasets)
# Find the tasks that correspond to the training folds (all folds except this one)
train_tasks = list(
set(range(10)) -
set(range(fold * n_per_fold, (fold + 1) * n_per_fold)))
# Assert that all arrays look like they should
np.testing.assert_array_equal(train_dataset.X, X)
np.testing.assert_array_equal(train_dataset.y, y[:, train_tasks])
np.testing.assert_array_equal(train_dataset.w, w[:, train_tasks])
np.testing.assert_array_equal(train_dataset.X, X)
<file_sep>"""
Normalizing flows for transforming probability distributions.
"""
import logging
from typing import List, Optional, Sequence, Callable
import tensorflow as tf
from deepchem.models.keras_model import KerasModel
from deepchem.utils.typing import OneOrMany
from deepchem.utils.data_utils import load_from_disk, save_to_disk
logger = logging.getLogger(__name__)
class NormalizingFlow(tf.keras.models.Model):
"""Base class for normalizing flow.
The purpose of a normalizing flow is to map a simple distribution (that is
easy to sample from and evaluate probability densities for) to a more
complex distribution that is learned from data. The base distribution
p(x) is transformed by the associated normalizing flow y=g(x) to model the
distribution p(y).
Normalizing flows combine the advantages of autoregressive models
(which provide likelihood estimation but do not learn features) and
variational autoencoders (which learn feature representations but
do not provide marginal likelihoods).
"""
def __init__(self, base_distribution, flow_layers: Sequence,
**kwargs) -> None:
"""Create a new NormalizingFlow.
Parameters
----------
base_distribution: tfd.Distribution
Probability distribution to be transformed.
Typically an N dimensional multivariate Gaussian.
flow_layers: Sequence[tfb.Bijector]
An iterable of bijectors that comprise the flow.
**kwargs
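Examples
--------
A construction sketch in the same non-executed style as the
`NormalizingFlowModel` example in this module; the RealNVP settings are
illustrative only:
>> import tensorflow_probability as tfp
>> tfd, tfb = tfp.distributions, tfp.bijectors
>> flow_layers = [
.. tfb.RealNVP(
.. num_masked=2,
.. shift_and_log_scale_fn=tfb.real_nvp_default_template(
.. hidden_layers=[8, 8]))
.. ]
>> base_distribution = tfd.MultivariateNormalDiag(loc=[0., 0., 0.])
>> nf = NormalizingFlow(base_distribution, flow_layers)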
"""
try:
import tensorflow_probability as tfp
tfd = tfp.distributions
tfb = tfp.bijectors
except ModuleNotFoundError:
raise ImportError(
"This class requires tensorflow-probability to be installed.")
self.base_distribution = base_distribution
self.flow_layers = flow_layers
# Chain of flows is also a normalizing flow
bijector = tfb.Chain(list(reversed(self.flow_layers)))
# An instance of tfd.TransformedDistribution
self.flow = tfd.TransformedDistribution(
distribution=self.base_distribution, bijector=bijector)
super(NormalizingFlow, self).__init__(**kwargs)
def __call__(self, *inputs, training=True):
return self.flow.bijector.forward(*inputs)
class NormalizingFlowModel(KerasModel):
"""A base distribution and normalizing flow for applying transformations.
Normalizing flows are effective for any application requiring
a probabilistic model that can both sample from a distribution and
compute marginal likelihoods, e.g. generative modeling,
unsupervised learning, or probabilistic inference. For a thorough review
of normalizing flows, see [1]_.
A distribution implements two main operations:
1. Sampling from the transformed distribution
2. Calculating log probabilities
A normalizing flow implements three main operations:
1. Forward transformation
2. Inverse transformation
3. Calculating the Jacobian
Deep Normalizing Flow models require normalizing flow layers where
input and output dimensions are the same, the transformation is invertible,
and the determinant of the Jacobian is efficient to compute and
differentiable. The determinant of the Jacobian of the transformation
gives a factor that preserves the probability volume to 1 when transforming
between probability densities of different random variables.
References
----------
.. [1] Papamakarios, George et al. "Normalizing Flows for Probabilistic Modeling and Inference." (2019). https://arxiv.org/abs/1912.02762.
"""
def __init__(self, model: NormalizingFlow, **kwargs) -> None:
"""Creates a new NormalizingFlowModel.
In addition to the following arguments, this class also accepts all the keyword arguments from KerasModel.
Parameters
----------
model: NormalizingFlow
An instance of NormalizingFlow.
Examples
--------
>> import tensorflow_probability as tfp
>> tfd = tfp.distributions
>> tfb = tfp.bijectors
>> flow_layers = [
.. tfb.RealNVP(
.. num_masked=2,
.. shift_and_log_scale_fn=tfb.real_nvp_default_template(
.. hidden_layers=[8, 8]))
..]
>> base_distribution = tfd.MultivariateNormalDiag(loc=[0., 0., 0.])
>> nf = NormalizingFlow(base_distribution, flow_layers)
>> nfm = NormalizingFlowModel(nf)
>> dataset = NumpyDataset(
.. X=np.random.rand(5, 3).astype(np.float32),
.. y=np.random.rand(5,),
.. ids=np.arange(5))
>> nfm.fit(dataset)
"""
try:
import tensorflow_probability as tfp
_ = tfp.distributions
_ = tfp.bijectors
except ModuleNotFoundError:
raise ImportError(
"This class requires tensorflow-probability to be installed.")
self.nll_loss_fn = lambda input, labels, weights: self.create_nll(input)
super(NormalizingFlowModel, self).__init__(model=model,
loss=self.nll_loss_fn,
**kwargs)
self.flow = self.model.flow # normalizing flow
# TODO: Incompatibility between TF and TFP means that TF doesn't track
# trainable variables in the flow; must override `_create_gradient_fn`
# self._variables = self.flow.trainable_variables
def create_nll(self, input: OneOrMany[tf.Tensor]) -> tf.Tensor:
"""Create the negative log likelihood loss function.
The default implementation is appropriate for most cases. Subclasses can
override this if there is a need to customize it.
Parameters
----------
input: OneOrMany[tf.Tensor]
A batch of data.
Returns
-------
A Tensor equal to the loss function to use for optimization.
"""
return -tf.reduce_mean(self.flow.log_prob(input, training=True))
def save(self):
"""Saves model to disk using joblib."""
save_to_disk(self.model, self.get_model_filename(self.model_dir))
def reload(self):
"""Loads model from joblib file on disk."""
self.model = load_from_disk(self.get_model_filename(self.model_dir))
def _create_gradient_fn(self,
variables: Optional[List[tf.Variable]]) -> Callable:
"""Create a function that computes gradients and applies them to the model.
Because of the way TensorFlow function tracing works, we need to create a
separate function for each new set of variables.
Parameters
----------
variables: Optional[List[tf.Variable]]
Variables to track during training.
Returns
-------
Callable function that applies gradients for batch of training data.
"""
@tf.function(experimental_relax_shapes=True)
def apply_gradient_for_batch(inputs, labels, weights, loss):
with tf.GradientTape() as tape:
tape.watch(self.flow.trainable_variables)
if isinstance(inputs, tf.Tensor):
inputs = [inputs]
if self._loss_outputs is not None:
inputs = [inputs[i] for i in self._loss_outputs]
batch_loss = loss(inputs, labels, weights)
if variables is None:
vars = self.flow.trainable_variables
else:
vars = variables
grads = tape.gradient(batch_loss, vars)
self._tf_optimizer.apply_gradients(zip(grads, vars))
self._global_step.assign_add(1)
return batch_loss
return apply_gradient_for_batch
class NormalizingFlowLayer(object):
"""Base class for normalizing flow layers.
This is an abstract base class for implementing new normalizing flow
layers that are not available in tfb. It should not be called directly.
A normalizing flow transforms random variables into new random variables.
Each learnable layer is a bijection, an invertible
transformation between two probability distributions. A simple initial
density is pushed through the normalizing flow to produce a richer,
more multi-modal distribution. Normalizing flows have three main operations:
1. Forward
Transform a distribution. Useful for generating new samples.
2. Inverse
Reverse a transformation, useful for computing conditional probabilities.
3. Log(|det(Jacobian)|) [LDJ]
Compute the determinant of the Jacobian of the transformation,
which is a scaling that conserves the probability "volume" to equal 1.
For examples of customized normalizing flows applied to toy problems,
see [1]_.
References
----------
.. [1] <NAME>. "Normalizing Flows." (2020). https://github.com/bsaund/normalizing_flows.
Notes
-----
- A sequence of normalizing flows is a normalizing flow.
- The Jacobian is the matrix of first-order derivatives of the transform.
"""
def __init__(self, **kwargs):
"""Create a new NormalizingFlowLayer."""
pass
def _forward(self, x: tf.Tensor) -> tf.Tensor:
"""Forward transformation.
x = g(y)
Parameters
----------
x: tf.Tensor
Input tensor.
Returns
-------
fwd_x: tf.Tensor
Transformed tensor.
"""
raise NotImplementedError("Forward transform must be defined.")
def _inverse(self, y: tf.Tensor) -> tf.Tensor:
"""Inverse transformation.
x = g^{-1}(y)
Parameters
----------
y: tf.Tensor
Input tensor.
Returns
-------
inv_y: tf.Tensor
Inverted tensor.
"""
raise NotImplementedError("Inverse transform must be defined.")
def _forward_log_det_jacobian(self, x: tf.Tensor) -> tf.Tensor:
"""Log |Determinant(Jacobian(x)|
Note x = g^{-1}(y)
Parameters
----------
x: tf.Tensor
Input tensor.
Returns
-------
ldj: tf.Tensor
Log of absolute value of determinant of Jacobian of x.
"""
raise NotImplementedError("LDJ must be defined.")
def _inverse_log_det_jacobian(self, y: tf.Tensor) -> tf.Tensor:
"""Inverse LDJ.
The ILDJ = -LDJ.
Note x = g^{-1}(y)
Parameters
----------
y: tf.Tensor
Input tensor.
Returns
-------
ildj: tf.Tensor
Log of absolute value of determinant of Jacobian of y.
"""
return -self._forward_log_det_jacobian(self._inverse(y))
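# --- Illustrative sketch (not part of the DeepChem API) ---
# The abstract methods above spell out the three operations a flow layer
# needs. As a minimal, hedged example, the hypothetical layer below realizes
# them for an elementwise affine map y = exp(log_scale) * x + shift, whose
# log|det(Jacobian)| is just log_scale summed over the event dimension. The
# class name and its parameters are illustrative assumptions, not part of
# this module's public interface; it relies on the module-level `tf` import.
class _ExampleAffineFlowLayer(NormalizingFlowLayer):
    """Minimal affine flow layer: y = exp(log_scale) * x + shift."""

    def __init__(self, shift: float = 0.0, log_scale: float = 0.0, **kwargs):
        super(_ExampleAffineFlowLayer, self).__init__(**kwargs)
        self.shift = shift
        self.log_scale = log_scale

    def _forward(self, x: tf.Tensor) -> tf.Tensor:
        # Scale then shift each element.
        return tf.exp(self.log_scale) * x + self.shift

    def _inverse(self, y: tf.Tensor) -> tf.Tensor:
        # Undo the shift, then undo the scaling.
        return (y - self.shift) * tf.exp(-self.log_scale)

    def _forward_log_det_jacobian(self, x: tf.Tensor) -> tf.Tensor:
        # The Jacobian of an elementwise affine map is diagonal, so the
        # log-determinant is log_scale summed over the last dimension.
        return tf.reduce_sum(self.log_scale * tf.ones_like(x), axis=-1)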
<file_sep>import unittest
import numpy as np
from deepchem.feat import OneHotFeaturizer
from deepchem.feat.molecule_featurizers.one_hot_featurizer import ZINC_CHARSET
class TestOneHotFeaturizer(unittest.TestCase):
"""
Test OneHotFeaturizer.
"""
def test_onehot_featurizer_arbitrary(self):
"""
Test simple one hot encoding for arbitrary string.
"""
string = "abcdefghijklmnopqrstuvwxyzwebhasw"
charset = "abcdefghijklmnopqrstuvwxyz"
length = len(charset) + 1
defaultMaxLength = 100
featurizer = OneHotFeaturizer(charset)
feature = featurizer([string]) # Implicit call to featurize()
assert feature.shape == (1, defaultMaxLength, length)
# untransform
undo_string = featurizer.untransform(feature[0])
assert string == undo_string
def test_onehot_featurizer_SMILES(self):
"""
Test simple one hot encoding for SMILES strings.
"""
from rdkit import Chem
length = len(ZINC_CHARSET) + 1
smiles = 'CC(=O)Oc1ccccc1C(=O)O'
mol = Chem.MolFromSmiles(smiles)
featurizer = OneHotFeaturizer()
feature = featurizer([mol])
defaultMaxLength = 100
assert feature.shape == (1, defaultMaxLength, length)
# untranform
undo_smiles = featurizer.untransform(feature[0])
assert smiles == undo_smiles
def test_onehot_featurizer_arbitrary_with_max_length(self):
"""
Test one hot encoding with max_length.
"""
string = "abcdefghijklmnopqrstuvwxyzvewqmc"
charset = "abcdefghijklmnopqrstuvwxyz"
length = len(charset) + 1
featurizer = OneHotFeaturizer(charset, max_length=120)
feature = featurizer([string])
assert feature.shape == (1, 120, length)
# untranform
undo_string = featurizer.untransform(feature[0])
assert string == undo_string
def test_onehot_featurizer_SMILES_with_max_length(self):
"""
Test one hot encoding with max_length.
"""
from rdkit import Chem
length = len(ZINC_CHARSET) + 1
smiles = 'CC(=O)Oc1ccccc1C(=O)O'
mol = Chem.MolFromSmiles(smiles)
featurizer = OneHotFeaturizer(max_length=120)
feature = featurizer([mol])
assert feature.shape == (1, 120, length)
# untranform
undo_smiles = featurizer.untransform(feature[0])
assert smiles == undo_smiles
def test_correct_transformation_SMILES(self):
"""
Test correct one hot encoding.
"""
from rdkit import Chem
charset = ['C', 'N', '=', ')', '(', 'O']
smiles = 'CN=C=O'
mol = Chem.MolFromSmiles(smiles)
featurizer = OneHotFeaturizer(charset=charset, max_length=100)
feature = featurizer([mol])
assert np.allclose(feature[0][0], np.array([1, 0, 0, 0, 0, 0, 0]))
assert np.allclose(feature[0][1], np.array([0, 1, 0, 0, 0, 0, 0]))
assert np.allclose(feature[0][2], np.array([0, 0, 1, 0, 0, 0, 0]))
assert np.allclose(feature[0][3], np.array([1, 0, 0, 0, 0, 0, 0]))
assert np.allclose(feature[0][4], np.array([0, 0, 1, 0, 0, 0, 0]))
assert np.allclose(feature[0][5], np.array([0, 0, 0, 0, 0, 1, 0]))
# untranform
undo_smiles = featurizer.untransform(feature[0])
assert smiles == undo_smiles
def test_correct_transformation_arbitrary(self):
"""
Test correct one hot encoding.
"""
charset = "1234567890"
string = "12345"
featurizer = OneHotFeaturizer(charset=charset, max_length=100)
feature = featurizer([string])
assert np.allclose(feature[0][0],
np.array([1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]))
assert np.allclose(feature[0][1],
np.array([0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0]))
assert np.allclose(feature[0][2],
np.array([0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0]))
assert np.allclose(feature[0][3],
np.array([0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0]))
assert np.allclose(feature[0][4],
np.array([0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0]))
assert "This test case has not yet been written."
<file_sep>import pytest
import deepchem as dc
import numpy as np
try:
from deepchem.models.torch_models.modular import ModularTorchModel
import torch
import torch.nn as nn
class ExampleTorchModel(ModularTorchModel):
"""Example TorchModel for testing pretraining."""
def __init__(self, input_dim, d_hidden, n_layers, d_output, **kwargs):
self.input_dim = input_dim
self.d_hidden = d_hidden
self.n_layers = n_layers
self.d_output = d_output
self.components = self.build_components()
self.model = self.build_model()
super().__init__(self.model, self.components, **kwargs)
def build_components(self):
return {
'encoder': self.encoder(),
'FF1': self.FF1(),
'FF2': self.FF2()
}
def loss_func(self, inputs, labels, weights):
preds1 = self.components['FF2'](self.components['encoder'](inputs))
labels = labels[0]
loss1 = torch.nn.functional.mse_loss(preds1, labels)
preds2 = self.components['FF1'](inputs)
loss2 = torch.nn.functional.smooth_l1_loss(preds2, labels)
total_loss = loss1 + loss2
return (total_loss * weights[0]).mean()
def encoder(self):
embedding = []
for i in range(self.n_layers):
if i == 0:
embedding.append(nn.Linear(self.input_dim, self.d_hidden))
embedding.append(nn.ReLU())
else:
embedding.append(nn.Linear(self.d_hidden, self.d_hidden))
embedding.append(nn.ReLU())
return nn.Sequential(*embedding)
def FF1(self):
linear = nn.Linear(self.input_dim, self.d_output)
af = nn.Sigmoid()
return nn.Sequential(linear, af)
def FF2(self):
linear = nn.Linear(self.d_hidden, self.d_output)
af = nn.ReLU()
return nn.Sequential(linear, af)
def build_model(self):
return nn.Sequential(self.components['encoder'],
self.components['FF2'])
class ExamplePretrainer(ModularTorchModel):
def __init__(self, model, pt_tasks, **kwargs):
self.source_model = model # the pretrainer takes the original model as input in order to modify it
self.pt_tasks = pt_tasks
self.components = self.build_components()
self.model = self.build_model()
super().__init__(self.model, self.components, **kwargs)
def FF_pt(self):
linear = nn.Linear(self.source_model.d_hidden, self.pt_tasks)
af = nn.ReLU()
return nn.Sequential(linear, af)
def loss_func(self, inputs, labels, weights):
inputs = inputs[0]
labels = labels[0]
weights = weights[0]
preds = self.components['FF_pt'](self.components['encoder'](inputs))
loss = torch.nn.functional.mse_loss(preds, labels)
loss = loss * weights
loss = loss.mean()
return loss
def build_components(self):
pt_components = self.source_model.build_components()
pt_components.update({'FF_pt': self.FF_pt()})
return pt_components
def build_model(self):
return nn.Sequential(self.components['encoder'],
self.components['FF_pt'])
except:
pass
@pytest.mark.torch
def test_overfit_modular():
"""Overfit test the pretrainer to ensure it can learn a simple task."""
np.random.seed(123)
torch.manual_seed(10)
n_samples = 6
n_feat = 3
d_hidden = 3
n_layers = 1
n_tasks = 6
X = np.random.rand(n_samples, n_feat)
y = np.zeros((n_samples, n_tasks)).astype(np.float32)
dataset = dc.data.NumpyDataset(X, y)
example_model = ExampleTorchModel(n_feat, d_hidden, n_layers, n_tasks)
example_model.fit(dataset, nb_epoch=1000)
prediction = np.round(np.squeeze(example_model.predict_on_batch(X)))
assert np.array_equal(y, prediction)
@pytest.mark.torch
def test_fit_restore():
"""Test that the pretrainer can be restored and continue training."""
np.random.seed(123)
torch.manual_seed(10)
n_samples = 6
n_feat = 3
d_hidden = 3
n_layers = 1
n_tasks = 6
X = np.random.rand(n_samples, n_feat)
y = np.zeros((n_samples, n_tasks)).astype(np.float32)
dataset = dc.data.NumpyDataset(X, y)
example_model = ExampleTorchModel(n_feat, d_hidden, n_layers, n_tasks)
example_model.fit(dataset, nb_epoch=1000)
# Create an identical model, do a single step of fitting with restore=True and make sure it got restored correctly.
example_model2 = ExampleTorchModel(n_feat,
d_hidden,
n_layers,
n_tasks,
model_dir=example_model.model_dir)
example_model2.fit(dataset, nb_epoch=1, restore=True)
prediction = np.squeeze(example_model2.predict_on_batch(X))
assert np.array_equal(y, np.round(prediction))
@pytest.mark.torch
def test_load_freeze_unfreeze():
np.random.seed(123)
torch.manual_seed(10)
n_samples = 60
n_feat = 3
d_hidden = 3
n_layers = 1
ft_tasks = 6
pt_tasks = 6
X_ft = np.random.rand(n_samples, n_feat)
y_ft = np.random.rand(n_samples, ft_tasks).astype(np.float32)
dataset_ft = dc.data.NumpyDataset(X_ft, y_ft)
X_ft2 = np.random.rand(n_samples, n_feat)
y_ft2 = np.zeros((n_samples, ft_tasks)).astype(np.float32)
dataset_ft2 = dc.data.NumpyDataset(X_ft2, y_ft2)
X_pt = np.random.rand(n_samples, n_feat)
y_pt = np.random.rand(n_samples, pt_tasks).astype(np.float32)
dataset_pt = dc.data.NumpyDataset(X_pt, y_pt)
example_model = ExampleTorchModel(n_feat, d_hidden, n_layers, ft_tasks)
example_pretrainer = ExamplePretrainer(example_model, pt_tasks)
example_pretrainer.fit(dataset_pt, nb_epoch=1000)
example_model.load_from_pretrained(model_dir=example_pretrainer.model_dir,
components=['encoder'])
example_model.freeze_components(['encoder'])
example_model.fit(dataset_ft, nb_epoch=100)
# check that the first layer is still the same between the two models
assert np.array_equal(
example_pretrainer.components['encoder'][0].weight.data.cpu().numpy(),
example_model.components['encoder'][0].weight.data.cpu().numpy())
# check that the predictions are different because of the fine tuning
assert not np.array_equal(
np.round(np.squeeze(example_pretrainer.predict_on_batch(X_ft))),
np.round(np.squeeze(example_model.predict_on_batch(X_ft))))
example_model.unfreeze_components(['encoder'])
example_model.fit(dataset_ft2, nb_epoch=100)
# check that the first layer is different between the two models
assert not np.array_equal(
example_pretrainer.components['encoder'][0].weight.data.cpu().numpy(),
example_model.components['encoder'][0].weight.data.cpu().numpy())
<file_sep>import logging
from typing import List
import numpy as np
from deepchem.utils.typing import RDKitMol
from deepchem.utils.molecule_feature_utils import one_hot_encode
from deepchem.feat.base_classes import Featurizer
from typing import Any, Iterable, Optional
logger = logging.getLogger(__name__)
ZINC_CHARSET = [
'#', ')', '(', '+', '-', '/', '1', '3', '2', '5', '4', '7', '6', '8', '=',
'@', 'C', 'B', 'F', 'I', 'H', 'O', 'N', 'S', '[', ']', '\\', 'c', 'l', 'o',
'n', 'p', 's', 'r'
]
class OneHotFeaturizer(Featurizer):
"""Encodes any arbitrary string or molecule as a one-hot array.
This featurizer encodes the characters within any given string as a one-hot
array. It also works with RDKit molecules: it can convert RDKit molecules to
SMILES strings and then one-hot encode the characters in said strings.
Standalone Usage:
>>> import deepchem as dc
>>> featurizer = dc.feat.OneHotFeaturizer()
>>> smiles = ['CCC']
>>> encodings = featurizer.featurize(smiles)
>>> type(encodings[0])
<class 'numpy.ndarray'>
>>> encodings[0].shape
(100, 35)
>>> featurizer.untransform(encodings[0])
'CCC'
Note
----
This class needs RDKit to be installed in order to accept RDKit molecules as
inputs.
It does not need RDKit to be installed to work with arbitrary strings.
"""
def __init__(self,
charset: List[str] = ZINC_CHARSET,
max_length: Optional[int] = 100):
"""Initialize featurizer.
Parameters
----------
charset: List[str] (default ZINC_CHARSET)
A list of strings, where each string is length 1 and unique.
max_length: Optional[int], optional (default 100)
The max length for string. If the length of string is shorter than
max_length, the string is padded using space.
If max_length is None, no padding is performed and arbitrary length
strings are allowed.
"""
if len(charset) != len(set(charset)):
raise ValueError("All values in charset must be unique.")
self.charset = charset
        self.max_length: Optional[int]
if max_length is not None:
self.max_length = int(max_length)
else:
self.max_length = None
def featurize(self,
datapoints: Iterable[Any],
log_every_n: int = 1000,
**kwargs) -> np.ndarray:
"""Featurize strings or mols.
Parameters
----------
datapoints: list
A list of either strings (str or numpy.str_) or RDKit molecules.
log_every_n: int, optional (default 1000)
How many elements are featurized every time a featurization is logged.
"""
datapoints = list(datapoints)
if (len(datapoints) < 1):
return np.array([])
# Featurize data using featurize() in parent class
return Featurizer.featurize(self, datapoints, log_every_n)
def _featurize(self, datapoint: Any, **kwargs):
# Featurize str data
if isinstance(datapoint, (str, np.str_)):
return self._featurize_string(datapoint)
# Featurize mol data
else:
return self._featurize_mol(datapoint)
def _featurize_string(self, string: str) -> np.ndarray:
"""Compute one-hot featurization of string.
Parameters
----------
string: str
An arbitrary string to be featurized.
Returns
-------
np.ndarray
An one hot vector encoded from arbitrary input string.
The shape is `(max_length, len(charset) + 1)`.
The index of unknown character is `len(charset)`.
"""
if isinstance(self.max_length, int):
if (len(string) > self.max_length): # Validation
                raise ValueError(
                    "The length of {} is longer than `max_length`.".format(
                        string))
string = self.pad_string(string) # Padding
return np.array([
one_hot_encode(val, self.charset, include_unknown_set=True)
for val in string
])
def _featurize_mol(self, mol: RDKitMol) -> np.ndarray:
"""Compute one-hot featurization of this molecule.
Parameters
----------
mol: rdKit.Chem.rdchem.Mol
RDKit Mol object
Returns
-------
np.ndarray
An one hot vector encoded from SMILES.
The shape is '(max_length, len(charset) + 1)'
The index of unknown character is 'len(charset)'.
"""
try:
from rdkit import Chem
except ModuleNotFoundError:
raise ImportError("This class requires RDKit to be installed.")
smiles = Chem.MolToSmiles(mol) # Convert mol to SMILES string.
return self._featurize_string(smiles) # Use string featurization.
def pad_smile(self, smiles: str) -> str:
"""Pad SMILES string to `self.pad_length`
Parameters
----------
smiles: str
The SMILES string to be padded.
Returns
-------
str
            SMILES string space padded to self.max_length
"""
return self.pad_string(smiles)
def pad_string(self, string: str) -> str:
"""Pad string to `self.pad_length`
Parameters
----------
string: str
The string to be padded.
Returns
-------
str
            String space padded to self.max_length
"""
if isinstance(self.max_length, int):
return string.ljust(self.max_length)
else:
return string
def untransform(self, one_hot_vectors: np.ndarray) -> str:
"""Convert from one hot representation back to original string
Parameters
----------
one_hot_vectors: np.ndarray
An array of one hot encoded features.
Returns
-------
str
Original string for an one hot encoded array.
"""
string = ""
for one_hot in one_hot_vectors:
try:
idx = np.argmax(one_hot)
string += self.charset[idx]
except IndexError:
string += ""
return string
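# --- Usage sketch (illustrative only) ---
# A small, hedged demo of the custom-charset path described in __init__:
# featurize an arbitrary string against a user-supplied charset, then recover
# it with untransform(). The charset, string, and max_length below are
# arbitrary values chosen for illustration; the block is guarded so importing
# this module stays side-effect free.
if __name__ == "__main__":
    demo_featurizer = OneHotFeaturizer(charset=list("abcde"), max_length=10)
    demo_encoding = demo_featurizer.featurize(["bad"])
    # Shape is (1, max_length, len(charset) + 1); the extra column marks
    # unknown characters such as the padding spaces.
    print(demo_encoding.shape)  # (1, 10, 6)
    print(demo_featurizer.untransform(demo_encoding[0]))  # bad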
<file_sep>"""
Script that trains multitask models on HOPV dataset.
"""
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
import numpy as np
import deepchem as dc
from deepchem.molnet import load_hopv
# Only for debug!
np.random.seed(123)
# Load HOPV dataset
n_features = 1024
hopv_tasks, hopv_datasets, transformers = load_hopv()
train_dataset, valid_dataset, test_dataset = hopv_datasets
# Fit models
metric = [
dc.metrics.Metric(dc.metrics.pearson_r2_score, np.mean, mode="regression"),
dc.metrics.Metric(
dc.metrics.mean_absolute_error, np.mean, mode="regression")
]
model = dc.models.MultitaskRegressor(
len(hopv_tasks),
n_features,
layer_sizes=[1000],
dropouts=[.25],
learning_rate=0.001,
batch_size=50)
# Fit trained model
model.fit(train_dataset, nb_epoch=25)
print("Evaluating model")
train_scores = model.evaluate(train_dataset, metric, transformers)
valid_scores = model.evaluate(valid_dataset, metric, transformers)
print("Train scores")
print(train_scores)
print("Validation scores")
print(valid_scores)
<file_sep>"""
Script that trains Sklearn RF models on PDBbind Pockets dataset.
"""
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
__author__ = "<NAME>"
__copyright__ = "Copyright 2017, Stanford University"
__license__ = "MIT"
import os
import deepchem as dc
import numpy as np
from sklearn.ensemble import RandomForestClassifier
from binding_pocket_datasets import load_pdbbind_pockets
# For stable runs
np.random.seed(123)
split = "random"
subset = "full"
pdbbind_tasks, pdbbind_datasets, transformers = load_pdbbind_pockets(
split=split, subset=subset)
train_dataset, valid_dataset, test_dataset = pdbbind_datasets
metric = dc.metrics.Metric(dc.metrics.roc_auc_score)
current_dir = os.path.dirname(os.path.realpath(__file__))
model_dir = os.path.join(current_dir, "pocket_%s_%s_RF" % (split, subset))
sklearn_model = RandomForestClassifier(n_estimators=500)
model = dc.models.SklearnModel(sklearn_model, model_dir=model_dir)
# Fit trained model
print("Fitting model on train dataset")
model.fit(train_dataset)
model.save()
print("Evaluating model")
train_scores = model.evaluate(train_dataset, [metric], transformers)
valid_scores = model.evaluate(valid_dataset, [metric], transformers)
print("Train scores")
print(train_scores)
print("Validation scores")
print(valid_scores)
<file_sep>"""
PCBA dataset loader.
"""
import os
import deepchem as dc
from deepchem.molnet.load_function.molnet_loader import TransformerGenerator, _MolnetLoader
from deepchem.data import Dataset
from typing import List, Optional, Tuple, Union
PCBA_URL = "https://deepchemdata.s3-us-west-1.amazonaws.com/datasets/%s"
PCBA_TASKS = [
'PCBA-1030', 'PCBA-1379', 'PCBA-1452', 'PCBA-1454', 'PCBA-1457',
'PCBA-1458', 'PCBA-1460', 'PCBA-1461', 'PCBA-1468', 'PCBA-1469',
'PCBA-1471', 'PCBA-1479', 'PCBA-1631', 'PCBA-1634', 'PCBA-1688',
'PCBA-1721', 'PCBA-2100', 'PCBA-2101', 'PCBA-2147', 'PCBA-2242',
'PCBA-2326', 'PCBA-2451', 'PCBA-2517', 'PCBA-2528', 'PCBA-2546',
'PCBA-2549', 'PCBA-2551', 'PCBA-2662', 'PCBA-2675', 'PCBA-2676', 'PCBA-411',
'PCBA-463254', 'PCBA-485281', 'PCBA-485290', 'PCBA-485294', 'PCBA-485297',
'PCBA-485313', 'PCBA-485314', 'PCBA-485341', 'PCBA-485349', 'PCBA-485353',
'PCBA-485360', 'PCBA-485364', 'PCBA-485367', 'PCBA-492947', 'PCBA-493208',
'PCBA-504327', 'PCBA-504332', 'PCBA-504333', 'PCBA-504339', 'PCBA-504444',
'PCBA-504466', 'PCBA-504467', 'PCBA-504706', 'PCBA-504842', 'PCBA-504845',
'PCBA-504847', 'PCBA-504891', 'PCBA-540276', 'PCBA-540317', 'PCBA-588342',
'PCBA-588453', 'PCBA-588456', 'PCBA-588579', 'PCBA-588590', 'PCBA-588591',
'PCBA-588795', 'PCBA-588855', 'PCBA-602179', 'PCBA-602233', 'PCBA-602310',
'PCBA-602313', 'PCBA-602332', 'PCBA-624170', 'PCBA-624171', 'PCBA-624173',
'PCBA-624202', 'PCBA-624246', 'PCBA-624287', 'PCBA-624288', 'PCBA-624291',
'PCBA-624296', 'PCBA-624297', 'PCBA-624417', 'PCBA-651635', 'PCBA-651644',
'PCBA-651768', 'PCBA-651965', 'PCBA-652025', 'PCBA-652104', 'PCBA-652105',
'PCBA-652106', 'PCBA-686970', 'PCBA-686978', 'PCBA-686979', 'PCBA-720504',
'PCBA-720532', 'PCBA-720542', 'PCBA-720551', 'PCBA-720553', 'PCBA-720579',
'PCBA-720580', 'PCBA-720707', 'PCBA-720708', 'PCBA-720709', 'PCBA-720711',
'PCBA-743255', 'PCBA-743266', 'PCBA-875', 'PCBA-881', 'PCBA-883',
'PCBA-884', 'PCBA-885', 'PCBA-887', 'PCBA-891', 'PCBA-899', 'PCBA-902',
'PCBA-903', 'PCBA-904', 'PCBA-912', 'PCBA-914', 'PCBA-915', 'PCBA-924',
'PCBA-925', 'PCBA-926', 'PCBA-927', 'PCBA-938', 'PCBA-995'
]
class _PCBALoader(_MolnetLoader):
def __init__(self, assay_file_name: str,
featurizer: Union[dc.feat.Featurizer,
str], splitter: Union[dc.splits.Splitter,
str, None],
transformer_generators: List[Union[TransformerGenerator,
str]], tasks: List[str],
data_dir: Optional[str], save_dir: Optional[str], **kwargs):
super(_PCBALoader,
self).__init__(featurizer, splitter, transformer_generators,
tasks, data_dir, save_dir)
self.assay_file_name = assay_file_name
def create_dataset(self) -> Dataset:
dataset_file = os.path.join(self.data_dir, self.assay_file_name)
if not os.path.exists(dataset_file):
dc.utils.data_utils.download_url(url=PCBA_URL %
self.assay_file_name,
dest_dir=self.data_dir)
loader = dc.data.CSVLoader(tasks=self.tasks,
feature_field="smiles",
featurizer=self.featurizer)
return loader.create_dataset(dataset_file)
def load_pcba(
featurizer: Union[dc.feat.Featurizer, str] = 'ECFP',
splitter: Union[dc.splits.Splitter, str, None] = 'scaffold',
transformers: List[Union[TransformerGenerator, str]] = ['balancing'],
reload: bool = True,
data_dir: Optional[str] = None,
save_dir: Optional[str] = None,
**kwargs
) -> Tuple[List[str], Tuple[Dataset, ...], List[dc.trans.Transformer]]:
"""Load PCBA dataset
PubChem BioAssay (PCBA) is a database consisting of biological activities of
small molecules generated by high-throughput screening. We use a subset of
PCBA, containing 128 bioassays measured over 400 thousand compounds,
used by previous work to benchmark machine learning methods.
Random splitting is recommended for this dataset.
The raw data csv file contains columns below:
- "mol_id" - PubChem CID of the compound
- "smiles" - SMILES representation of the molecular structure
- "PCBA-XXX" - Measured results (Active/Inactive) for bioassays:
search for the assay ID at
https://pubchem.ncbi.nlm.nih.gov/search/#collection=bioassays
for details
Parameters
----------
featurizer: Featurizer or str
the featurizer to use for processing the data. Alternatively you can pass
one of the names from dc.molnet.featurizers as a shortcut.
splitter: Splitter or str
the splitter to use for splitting the data into training, validation, and
test sets. Alternatively you can pass one of the names from
dc.molnet.splitters as a shortcut. If this is None, all the data
will be included in a single dataset.
transformers: list of TransformerGenerators or strings
the Transformers to apply to the data. Each one is specified by a
TransformerGenerator or, as a shortcut, one of the names from
dc.molnet.transformers.
reload: bool
if True, the first call for a particular featurizer and splitter will cache
the datasets to disk, and subsequent calls will reload the cached datasets.
data_dir: str
a directory to save the raw data in
save_dir: str
a directory to save the dataset in
References
----------
.. [1] Wang, Yanli, et al. "PubChem's BioAssay database."
Nucleic acids research 40.D1 (2011): D400-D412.
"""
loader = _PCBALoader('pcba.csv.gz', featurizer, splitter, transformers,
PCBA_TASKS, data_dir, save_dir, **kwargs)
return loader.load_dataset('pcba', reload)
# def load_pcba_146(featurizer='ECFP',
# split='random',
# reload=True,
# data_dir=None,
# save_dir=None,
# **kwargs):
# return load_pcba_dataset(
# featurizer=featurizer,
# split=split,
# reload=reload,
# assay_file_name="pcba_146.csv.gz",
# data_dir=data_dir,
# save_dir=save_dir,
# **kwargs)
# def load_pcba_2475(featurizer='ECFP',
# split='random',
# reload=True,
# data_dir=None,
# save_dir=None,
# **kwargs):
# return load_pcba_dataset(
# featurizer=featurizer,
# split=split,
# reload=reload,
# assay_file_name="pcba_2475.csv.gz",
# data_dir=data_dir,
# save_dir=save_dir,
# **kwargs)
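# --- Usage sketch (illustrative only) ---
# A minimal, hedged example of calling load_pcba with the shortcut strings
# documented above. The first call downloads and featurizes the full PCBA csv
# (a large job), so the demo is guarded and only runs when this module is
# executed directly; the featurizer/splitter choices are illustrative.
if __name__ == "__main__":
    tasks, (train, valid, test), transformers = load_pcba(
        featurizer='ECFP', splitter='random')
    print(len(tasks), len(train), len(valid), len(test))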
<file_sep>import os
import json
import logging
import numpy as np
from typing import Tuple
from urllib.error import URLError
from deepchem.utils.data_utils import download_url, get_data_dir
from deepchem.utils.typing import PymatgenStructure
from deepchem.feat import MaterialStructureFeaturizer
from deepchem.feat.graph_data import GraphData
ATOM_INIT_JSON_URL = 'https://deepchemdata.s3-us-west-1.amazonaws.com/datasets/atom_init.json'
class CGCNNFeaturizer(MaterialStructureFeaturizer):
"""
Calculate structure graph features for crystals.
Based on the implementation in Crystal Graph Convolutional
Neural Networks (CGCNN). The method constructs a crystal graph
representation including atom features and bond features (neighbor
distances). Neighbors are determined by searching in a sphere around
atoms in the unit cell. A Gaussian filter is applied to neighbor distances.
All units are in angstrom.
This featurizer requires the optional dependency pymatgen. It may
be useful when 3D coordinates are available and when using graph
network models and crystal graph convolutional networks.
See [1]_ for more details.
References
----------
.. [1] <NAME> and <NAME>, "Crystal graph convolutional
neural networks for an accurate and interpretable prediction
of material properties", Phys. Rev. Lett. 120, 2018,
https://arxiv.org/abs/1710.10324
Examples
--------
>>> import deepchem as dc
>>> import pymatgen as mg
>>> featurizer = dc.feat.CGCNNFeaturizer()
>>> lattice = mg.core.Lattice.cubic(4.2)
>>> structure = mg.core.Structure(lattice, ["Cs", "Cl"], [[0, 0, 0], [0.5, 0.5, 0.5]])
>>> features = featurizer.featurize([structure])
>>> feature = features[0]
>>> print(type(feature))
<class 'deepchem.feat.graph_data.GraphData'>
Note
----
This class requires Pymatgen to be installed.
"""
def __init__(self,
radius: float = 8.0,
max_neighbors: float = 12,
step: float = 0.2):
"""
Parameters
----------
radius: float (default 8.0)
Radius of sphere for finding neighbors of atoms in unit cell.
max_neighbors: int (default 12)
Maximum number of neighbors to consider when constructing graph.
step: float (default 0.2)
Step size for Gaussian filter. This value is used when building edge features.
"""
self.radius = radius
self.max_neighbors = int(max_neighbors)
self.step = step
# load atom_init.json
data_dir = get_data_dir()
try:
download_url(ATOM_INIT_JSON_URL, data_dir)
except URLError:
logging.warning(
"Skipping CGCNNFeaturizer initialization due to network error.")
return
atom_init_json_path = os.path.join(data_dir, 'atom_init.json')
with open(atom_init_json_path, 'r') as f:
atom_init_json = json.load(f)
self.atom_features = {
int(key): np.array(value, dtype=np.float32)
for key, value in atom_init_json.items()
}
self.valid_atom_number = set(self.atom_features.keys())
def _featurize(self, datapoint: PymatgenStructure, **kwargs) -> GraphData:
"""
Calculate crystal graph features from pymatgen structure.
Parameters
----------
datapoint: pymatgen.core.Structure
A periodic crystal composed of a lattice and a sequence of atomic
sites with 3D coordinates and elements.
Returns
-------
graph: GraphData
A crystal graph with CGCNN style features.
"""
if 'struct' in kwargs and datapoint is None:
datapoint = kwargs.get("struct")
raise DeprecationWarning(
'Struct is being phased out as a parameter, please pass "datapoint" instead.'
)
node_features = self._get_node_features(datapoint)
edge_index, edge_features = self._get_edge_features_and_index(datapoint)
graph = GraphData(node_features, edge_index, edge_features)
return graph
def _get_node_features(self, struct: PymatgenStructure) -> np.ndarray:
"""
Get the node feature from `atom_init.json`. The `atom_init.json` was collected
from `data/sample-regression/atom_init.json` in the CGCNN repository.
Parameters
----------
struct: pymatgen.core.Structure
A periodic crystal composed of a lattice and a sequence of atomic
sites with 3D coordinates and elements.
Returns
-------
node_features: np.ndarray
A numpy array of shape `(num_nodes, 92)`.
"""
node_features = []
for site in struct:
# check whether the atom feature exists or not
assert site.specie.number in self.valid_atom_number
node_features.append(self.atom_features[site.specie.number])
return np.vstack(node_features).astype(float)
def _get_edge_features_and_index(
self, struct: PymatgenStructure) -> Tuple[np.ndarray, np.ndarray]:
"""
Calculate the edge feature and edge index from pymatgen structure.
Parameters
----------
struct: pymatgen.core.Structure
A periodic crystal composed of a lattice and a sequence of atomic
sites with 3D coordinates and elements.
Returns
-------
edge_idx np.ndarray, dtype int
A numpy array of shape with `(2, num_edges)`.
edge_features: np.ndarray
A numpy array of shape with `(num_edges, filter_length)`. The `filter_length` is
(self.radius / self.step) + 1. The edge features were built by applying gaussian
filter to the distance between nodes.
"""
neighbors = struct.get_all_neighbors(self.radius, include_index=True)
neighbors = [sorted(n, key=lambda x: x[1]) for n in neighbors]
# construct bi-directed graph
src_idx, dest_idx = [], []
edge_distances = []
for node_idx, neighbor in enumerate(neighbors):
neighbor = neighbor[:self.max_neighbors]
src_idx.extend([node_idx] * len(neighbor))
dest_idx.extend([site[2] for site in neighbor])
edge_distances.extend([site[1] for site in neighbor])
edge_idx = np.array([src_idx, dest_idx], dtype=int)
edge_features = self._gaussian_filter(
np.array(edge_distances, dtype=float))
return edge_idx, edge_features
def _gaussian_filter(self, distances: np.ndarray) -> np.ndarray:
"""
Apply Gaussian filter to an array of interatomic distances.
Parameters
----------
distances : np.ndarray
A numpy array of the shape `(num_edges, )`.
Returns
-------
expanded_distances: np.ndarray
Expanded distance tensor after Gaussian filtering.
The shape is `(num_edges, filter_length)`. The `filter_length` is
(self.radius / self.step) + 1.
"""
filt = np.arange(0, self.radius + self.step, self.step)
# Increase dimension of distance tensor and apply filter
expanded_distances = np.exp(-(distances[..., np.newaxis] - filt)**2 /
self.step**2)
return expanded_distances
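# --- Worked sketch of the Gaussian filter above (illustrative only) ---
# For the default radius=8.0 and step=0.2 the filter grid
# np.arange(0, 8.2, 0.2) has 41 bucket centers, so every neighbor distance is
# expanded into a 41-dimensional soft one-hot vector peaked at the nearest
# bucket. The toy distances below are arbitrary, and the block mirrors the
# body of _gaussian_filter without instantiating the featurizer (which would
# try to download atom_init.json).
if __name__ == "__main__":
    _step, _radius = 0.2, 8.0
    _filt = np.arange(0, _radius + _step, _step)  # 41 bucket centers
    _demo_distances = np.array([1.0, 2.5, 4.0])
    _expanded = np.exp(-(_demo_distances[..., np.newaxis] - _filt)**2 /
                       _step**2)
    print(_expanded.shape)  # (3, 41)
    print(int(np.argmax(_expanded[0])))  # 5, the bucket centered at 1.0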
<file_sep>"""
Script that trains Tensorflow multitask models on the QM7b dataset.
"""
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
import os
import deepchem as dc
import numpy as np
from deepchem.molnet import load_qm7b_from_mat
np.random.seed(123)
qm7_tasks, datasets, transformers = load_qm7b_from_mat(split='stratified')
train_dataset, valid_dataset, test_dataset = datasets
fit_transformers = [dc.trans.CoulombFitTransformer(train_dataset)]
regression_metric = [
dc.metrics.Metric(dc.metrics.mean_absolute_error, mode="regression"),
dc.metrics.Metric(dc.metrics.pearson_r2_score, mode="regression")
]
model = dc.models.MultitaskFitTransformRegressor(
n_tasks=len(qm7_tasks),
n_features=[23, 23],
learning_rate=0.001,
momentum=.8,
batch_size=25,
weight_init_stddevs=[1 / np.sqrt(400), 1 / np.sqrt(100), 1 / np.sqrt(100)],
bias_init_consts=[0., 0., 0.],
layer_sizes=[400, 100, 100],
dropouts=[0.01, 0.01, 0.01],
fit_transformers=fit_transformers,
seed=123)
# Fit trained model
model.fit(train_dataset, nb_epoch=50)
train_scores = model.evaluate(train_dataset, regression_metric, transformers)
print("Train scores [kcal/mol]")
print(train_scores)
valid_scores = model.evaluate(valid_dataset, regression_metric, transformers)
print("Valid scores [kcal/mol]")
print(valid_scores)
test_scores = model.evaluate(test_dataset, regression_metric, transformers)
print("Test scores [kcal/mol]")
print(test_scores)
<file_sep># Sweetlead example
Sweetlead is a dataset of chemical structures for approved
drugs, chemical isolates from traditional medicinal herbs, and
regulated chemicals. The resulting structures are filtered for the
active pharmaceutical ingredient, standardized, and differing
formulations of the same drug are combined in the final
database.
<NAME>., et al. "SWEETLEAD: an in silico database of approved drugs, regulated chemicals, and herbal isolates for computer-aided drug discovery." PLoS One 8.11 (2013).
<file_sep>"""
Tests for ImageDataset class
"""
import unittest
import numpy as np
import deepchem as dc
import os
class TestImageDataset(unittest.TestCase):
"""
Test ImageDataset class.
"""
def test_load_images(self):
"""Test that ImageDataset loads images."""
path = os.path.join(os.path.dirname(__file__), 'images')
files = [os.path.join(path, f) for f in os.listdir(path)]
# First try using images for X.
ds1 = dc.data.ImageDataset(files, np.random.random(10))
x_shape, y_shape, w_shape, ids_shape = ds1.get_shape()
np.testing.assert_array_equal([10, 28, 28], x_shape)
np.testing.assert_array_equal([10], y_shape)
np.testing.assert_array_equal([10], w_shape)
np.testing.assert_array_equal([10], ids_shape)
np.testing.assert_array_equal(ds1.X.shape, x_shape)
np.testing.assert_array_equal(ds1.y.shape, y_shape)
np.testing.assert_array_equal(ds1.w.shape, w_shape)
np.testing.assert_array_equal(ds1.ids.shape, ids_shape)
# Now try using images for y.
ds2 = dc.data.ImageDataset(np.random.random(10), files)
x_shape, y_shape, w_shape, ids_shape = ds2.get_shape()
np.testing.assert_array_equal([10], x_shape)
np.testing.assert_array_equal([10, 28, 28], y_shape)
np.testing.assert_array_equal([10, 1], w_shape)
np.testing.assert_array_equal([10], ids_shape)
np.testing.assert_array_equal(ds2.X.shape, x_shape)
np.testing.assert_array_equal(ds2.y.shape, y_shape)
np.testing.assert_array_equal(ds2.w.shape, w_shape)
np.testing.assert_array_equal(ds2.ids.shape, ids_shape)
np.testing.assert_array_equal(ds1.X, ds2.y)
def test_itersamples(self):
"""Test iterating samples of an ImageDataset."""
path = os.path.join(os.path.dirname(__file__), 'images')
files = [os.path.join(path, f) for f in os.listdir(path)]
ds = dc.data.ImageDataset(files, np.random.random(10))
X = ds.X
i = 0
for x, y, w, id in ds.itersamples():
np.testing.assert_array_equal(x, X[i])
assert y == ds.y[i]
assert w == ds.w[i]
assert id == ds.ids[i]
i += 1
assert i == 10
def test_iterbatches(self):
"""Test iterating batches of an ImageDataset."""
path = os.path.join(os.path.dirname(__file__), 'images')
files = [os.path.join(path, f) for f in os.listdir(path)]
ds = dc.data.ImageDataset(files, np.random.random(10))
X = ds.X
iterated_ids = set()
for x, y, w, ids in ds.iterbatches(2, epochs=2):
np.testing.assert_array_equal([2, 28, 28], x.shape)
np.testing.assert_array_equal([2], y.shape)
np.testing.assert_array_equal([2], w.shape)
np.testing.assert_array_equal([2], ids.shape)
for i in (0, 1):
assert ids[i] in files
if len(iterated_ids) < 10:
assert ids[i] not in iterated_ids
iterated_ids.add(ids[i])
else:
assert ids[i] in iterated_ids
index = files.index(ids[i])
np.testing.assert_array_equal(x[i], X[index])
assert len(iterated_ids) == 10
if __name__ == "__main__":
unittest.main()
<file_sep>"""
Tests for Coulomb matrix calculation.
"""
import numpy as np
import unittest
from deepchem.feat import CoulombMatrix, CoulombMatrixEig
from deepchem.utils import conformers
class TestCoulombMatrix(unittest.TestCase):
"""
Tests for CoulombMatrix.
"""
def setUp(self):
"""
Set up tests.
"""
from rdkit import Chem
from rdkit.Chem import AllChem
smiles = 'CC(=O)OC1=CC=CC=C1C(=O)O'
mol = Chem.MolFromSmiles(smiles)
self.mol_with_no_conf = mol
# with one conformer
mol_with_one_conf = Chem.AddHs(mol)
AllChem.EmbedMolecule(mol_with_one_conf, AllChem.ETKDG())
self.mol_with_one_conf = mol_with_one_conf
# with multiple conformers
self.num_confs = 4
engine = conformers.ConformerGenerator(max_conformers=self.num_confs)
self.mol_with_multi_conf = engine.generate_conformers(mol)
# include explicit hydrogens
self.num_atoms = mol_with_one_conf.GetNumAtoms()
assert self.num_atoms == 21
assert self.mol_with_one_conf.GetNumConformers() == 1
assert self.mol_with_multi_conf.GetNumConformers() == self.num_confs
def test_coulomb_matrix(self):
"""
Test CoulombMatrix.
"""
f = CoulombMatrix(self.num_atoms)
rval = f([self.mol_with_no_conf])
assert rval.shape == (1, self.num_atoms, self.num_atoms)
rval = f([self.mol_with_one_conf])
assert rval.shape == (1, self.num_atoms, self.num_atoms)
rval = f([self.mol_with_multi_conf])
assert rval.shape == (1, self.num_confs, self.num_atoms, self.num_atoms)
def test_coulomb_matrix_padding(self):
"""
Test CoulombMatrix with padding.
"""
max_atoms = self.num_atoms * 2
f = CoulombMatrix(max_atoms=max_atoms)
rval = f([self.mol_with_no_conf])
assert rval.shape == (1, max_atoms, max_atoms)
rval = f([self.mol_with_one_conf])
assert rval.shape == (1, max_atoms, max_atoms)
rval = f([self.mol_with_multi_conf])
assert rval.shape == (1, self.num_confs, max_atoms, max_atoms)
def test_upper_tri_coulomb_matrix(self):
"""
Test upper triangular CoulombMatrix.
"""
f = CoulombMatrix(self.num_atoms, upper_tri=True)
size = np.triu_indices(self.num_atoms)[0].size
rval = f([self.mol_with_no_conf])
assert rval.shape == (1, size)
rval = f([self.mol_with_one_conf])
assert rval.shape == (1, size)
rval = f([self.mol_with_multi_conf])
assert rval.shape == (1, self.num_confs, size)
def test_upper_tri_coulomb_matrix_padding(self):
"""
Test upper triangular CoulombMatrix with padding.
"""
max_atoms = self.num_atoms * 2
f = CoulombMatrix(max_atoms=max_atoms, upper_tri=True)
size = np.triu_indices(max_atoms)[0].size
rval = f([self.mol_with_no_conf])
assert rval.shape == (1, size)
rval = f([self.mol_with_one_conf])
assert rval.shape == (1, size)
rval = f([self.mol_with_multi_conf])
assert rval.shape == (1, self.num_confs, size)
def test_coulomb_matrix_no_hydrogens(self):
"""
Test hydrogen removal.
"""
num_atoms_with_no_H = self.mol_with_no_conf.GetNumAtoms()
assert num_atoms_with_no_H < self.num_atoms
f = CoulombMatrix(max_atoms=num_atoms_with_no_H,
remove_hydrogens=True,
upper_tri=True)
size = np.triu_indices(num_atoms_with_no_H)[0].size
rval = f([self.mol_with_no_conf])
assert rval.shape == (1, size)
rval = f([self.mol_with_one_conf])
assert rval.shape == (1, size)
rval = f([self.mol_with_multi_conf])
assert rval.shape == (1, self.num_confs, size)
def test_coulomb_matrix_hydrogens(self):
"""
Test no hydrogen removal.
"""
f = CoulombMatrix(max_atoms=self.num_atoms,
remove_hydrogens=False,
upper_tri=True)
size = np.triu_indices(self.num_atoms)[0].size
rval = f([self.mol_with_no_conf])
assert rval.shape == (1, size)
rval = f([self.mol_with_one_conf])
assert rval.shape == (1, size)
rval = f([self.mol_with_multi_conf])
assert rval.shape == (1, self.num_confs, size)
class TestCoulombMatrixEig(unittest.TestCase):
"""
Tests for CoulombMatrixEig.
"""
def setUp(self):
"""
Set up tests.
"""
from rdkit import Chem
from rdkit.Chem import AllChem
smiles = 'CC(=O)OC1=CC=CC=C1C(=O)O'
mol = Chem.MolFromSmiles(smiles)
self.mol_with_no_conf = mol
# with one conformer
mol_with_one_conf = Chem.AddHs(mol)
AllChem.EmbedMolecule(mol_with_one_conf, AllChem.ETKDG())
self.mol_with_one_conf = mol_with_one_conf
# with multiple conformers
self.num_confs = 4
engine = conformers.ConformerGenerator(max_conformers=self.num_confs)
self.mol_with_multi_conf = engine.generate_conformers(mol)
# include explicit hydrogens
self.num_atoms = mol_with_one_conf.GetNumAtoms()
assert self.num_atoms == 21
assert self.mol_with_one_conf.GetNumConformers() == 1
assert self.mol_with_multi_conf.GetNumConformers() == self.num_confs
def test_coulomb_matrix_eig(self):
"""
Test CoulombMatrixEig.
"""
f = CoulombMatrixEig(self.num_atoms)
rval = f([self.mol_with_one_conf])
assert rval.shape == (1, self.num_atoms)
rval = f([self.mol_with_one_conf])
assert rval.shape == (1, self.num_atoms)
rval = f([self.mol_with_multi_conf])
assert rval.shape == (1, self.num_confs, self.num_atoms)
def test_coulomb_matrix_eig_padding(self):
"""
Test padding of CoulombMatixEig
"""
max_atoms = 2 * self.num_atoms
f = CoulombMatrixEig(max_atoms=max_atoms)
rval = f([self.mol_with_one_conf])
assert rval.shape == (1, max_atoms)
rval = f([self.mol_with_one_conf])
assert rval.shape == (1, max_atoms)
rval = f([self.mol_with_multi_conf])
assert rval.shape == (1, self.num_confs, max_atoms)
<file_sep>import deepchem as dc
import numpy as np
import pytest
try:
import torch
import deepchem.models.torch_models.layers as torch_layers
has_torch = True
except:
has_torch = False
@pytest.mark.torch
def test_weave_gather_gaussian_histogram():
"""Test invoking the torch equivalent of Gaussian Histograms."""
from rdkit import Chem
n_atoms = 4 # In CCC and C, there are 4 atoms
raw_smiles = ['CCC', 'C']
mols = [Chem.MolFromSmiles(s) for s in raw_smiles]
featurizer = dc.feat.WeaveFeaturizer()
mols = featurizer.featurize(mols)
gather = torch_layers.WeaveGather(batch_size=2, n_input=75)
atom_feat = []
atom_split = []
for im, mol in enumerate(mols):
n_atoms = mol.get_num_atoms()
atom_split.extend([im] * n_atoms)
# atom features
atom_feat.append(mol.get_atom_features())
inputs = [
np.array(np.concatenate(atom_feat, axis=0), dtype=np.float32),
np.array(atom_split)
]
torch.set_printoptions(precision=8)
outputs = gather.gaussian_histogram(inputs[0])
# Gaussian histograms expands into 11 Gaussian buckets.
assert np.array(outputs).shape == (
4,
11 * 75,
)
assert np.allclose(
outputs.numpy(),
np.load("deepchem/models/tests/assets/gaussian_histogram_outputs.npy"),
atol=1e-4)
<file_sep>"""
Script that trains Tensorflow Progressive Multitask models on UV datasets.
"""
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
import os
import tempfile
import shutil
import numpy as np
import deepchem as dc
# Set numpy seed
np.random.seed(123)
###Load data###
shard_size = 2000
print("About to load MERCK data.")
UV_tasks, datasets, transformers = dc.molnet.load_uv(shard_size=shard_size)
train_dataset, valid_dataset, test_dataset = datasets
print("Number of compounds in train set")
print(len(train_dataset))
print("Number of compounds in validation set")
print(len(valid_dataset))
print("Number of compounds in test set")
print(len(test_dataset))
###Create model###
n_layers = 3
nb_epoch = 50
model = dc.models.ProgressiveMultitaskRegressor(
len(UV_tasks),
train_dataset.get_data_shape()[0],
layer_sizes=[25] * n_layers,
dropouts=[.25] * n_layers,
alpha_init_stddevs=[.02] * n_layers,
weight_init_stddevs=[.02] * n_layers,
bias_init_consts=[1.] * n_layers,
learning_rate=.0003,
batch_size=100,
random_seed=123)
#Use R2 classification metric
metric = dc.metrics.Metric(dc.metrics.pearson_r2_score, task_averager=np.mean)
print("Training model")
model.fit(train_dataset, nb_epoch=nb_epoch)
train_scores = model.evaluate(train_dataset, [metric], transformers)
valid_scores = model.evaluate(valid_dataset, [metric], transformers)
#Only use for final evaluation
test_scores = model.evaluate(test_dataset, [metric], transformers)
print("Train scores")
print(train_scores)
print("Validation scores")
print(valid_scores)
print("Test scores")
print(test_scores)
<file_sep>"""
Conformer generation.
"""
import numpy as np
from typing import Any, List, Optional
from deepchem.utils.typing import RDKitMol
class ConformerGenerator(object):
"""
Generate molecule conformers.
Notes
-----
Procedure
1. Generate a pool of conformers.
2. Minimize conformers.
3. Prune conformers using an RMSD threshold.
Note that pruning is done _after_ minimization, which differs from the
protocol described in the references [1]_ [2]_.
References
----------
.. [1] http://rdkit.org/docs/GettingStartedInPython.html#working-with-3d-molecules
.. [2] http://pubs.acs.org/doi/full/10.1021/ci2004658
Notes
-----
This class requires RDKit to be installed.
"""
def __init__(self,
max_conformers: int = 1,
rmsd_threshold: float = 0.5,
force_field: str = 'uff',
pool_multiplier: int = 10):
"""
Parameters
----------
max_conformers: int, optional (default 1)
Maximum number of conformers to generate (after pruning).
rmsd_threshold: float, optional (default 0.5)
RMSD threshold for pruning conformers. If None or negative, no
pruning is performed.
force_field: str, optional (default 'uff')
Force field to use for conformer energy calculation and
minimization. Options are 'uff', 'mmff94', and 'mmff94s'.
pool_multiplier: int, optional (default 10)
Factor to multiply by max_conformers to generate the initial
conformer pool. Since conformers are pruned after energy
minimization, increasing the size of the pool increases the chance
of identifying max_conformers unique conformers.
"""
self.max_conformers = max_conformers
if rmsd_threshold is None or rmsd_threshold < 0:
rmsd_threshold = -1.
self.rmsd_threshold = rmsd_threshold
self.force_field = force_field
self.pool_multiplier = pool_multiplier
def __call__(self, mol: RDKitMol) -> RDKitMol:
"""
Generate conformers for a molecule.
Parameters
----------
mol: rdkit.Chem.rdchem.Mol
RDKit Mol object
Returns
-------
mol: rdkit.Chem.rdchem.Mol
A new RDKit Mol object containing the chosen conformers, sorted by
increasing energy.
"""
return self.generate_conformers(mol)
def generate_conformers(self, mol: RDKitMol) -> RDKitMol:
"""
Generate conformers for a molecule.
This function returns a copy of the original molecule with embedded
conformers.
Parameters
----------
mol: rdkit.Chem.rdchem.Mol
RDKit Mol object
Returns
-------
mol: rdkit.Chem.rdchem.Mol
A new RDKit Mol object containing the chosen conformers, sorted by
increasing energy.
"""
# initial embedding
mol = self.embed_molecule(mol)
if not mol.GetNumConformers():
msg = 'No conformers generated for molecule'
if mol.HasProp('_Name'):
name = mol.GetProp('_Name')
msg += ' "{}".'.format(name)
else:
msg += '.'
raise RuntimeError(msg)
# minimization and pruning
self.minimize_conformers(mol)
mol = self.prune_conformers(mol)
return mol
def embed_molecule(self, mol: RDKitMol) -> RDKitMol:
"""
Generate conformers, possibly with pruning.
Parameters
----------
mol: rdkit.Chem.rdchem.Mol
RDKit Mol object
Returns
-------
mol: rdkit.Chem.rdchem.Mol
RDKit Mol object with embedded multiple conformers.
"""
try:
from rdkit import Chem
from rdkit.Chem import AllChem
except ModuleNotFoundError:
raise ImportError("This function requires RDKit to be installed.")
mol = Chem.AddHs(mol) # add hydrogens
n_confs = self.max_conformers * self.pool_multiplier
AllChem.EmbedMultipleConfs(mol, numConfs=n_confs, pruneRmsThresh=-1.)
return mol
def get_molecule_force_field(self,
mol: RDKitMol,
conf_id: Optional[int] = None,
**kwargs) -> Any:
"""
Get a force field for a molecule.
Parameters
----------
mol: rdkit.Chem.rdchem.Mol
RDKit Mol object with embedded conformers.
conf_id: int, optional
ID of the conformer to associate with the force field.
kwargs: dict, optional
Keyword arguments for force field constructor.
Returns
-------
ff: rdkit.ForceField.rdForceField.ForceField
RDKit force field instance for a molecule.
"""
try:
from rdkit.Chem import AllChem
except ModuleNotFoundError:
raise ImportError("This function requires RDKit to be installed.")
if self.force_field == 'uff':
ff = AllChem.UFFGetMoleculeForceField(mol, confId=conf_id, **kwargs)
elif self.force_field.startswith('mmff'):
AllChem.MMFFSanitizeMolecule(mol)
mmff_props = AllChem.MMFFGetMoleculeProperties(
mol, mmffVariant=self.force_field)
ff = AllChem.MMFFGetMoleculeForceField(mol,
mmff_props,
confId=conf_id,
**kwargs)
else:
raise ValueError("Invalid force_field " +
"'{}'.".format(self.force_field))
return ff
def minimize_conformers(self, mol: RDKitMol) -> None:
"""
Minimize molecule conformers.
Parameters
----------
mol: rdkit.Chem.rdchem.Mol
RDKit Mol object with embedded conformers.
"""
for conf in mol.GetConformers():
ff = self.get_molecule_force_field(mol, conf_id=conf.GetId())
ff.Minimize()
def get_conformer_energies(self, mol: RDKitMol) -> np.ndarray:
"""
Calculate conformer energies.
Parameters
----------
mol: rdkit.Chem.rdchem.Mol
RDKit Mol object with embedded conformers.
Returns
-------
energies : np.ndarray
Minimized conformer energies.
"""
energies = []
for conf in mol.GetConformers():
ff = self.get_molecule_force_field(mol, conf_id=conf.GetId())
energy = ff.CalcEnergy()
energies.append(energy)
return np.asarray(energies, dtype=float)
def prune_conformers(self, mol: RDKitMol) -> RDKitMol:
"""
Prune conformers from a molecule using an RMSD threshold, starting
with the lowest energy conformer.
Parameters
----------
mol: rdkit.Chem.rdchem.Mol
RDKit Mol object
Returns
-------
new_mol: rdkit.Chem.rdchem.Mol
A new rdkit.Chem.rdchem.Mol containing the chosen conformers, sorted by
increasing energy.
"""
try:
from rdkit import Chem
except ModuleNotFoundError:
raise ImportError("This function requires RDKit to be installed.")
if self.rmsd_threshold < 0 or mol.GetNumConformers() <= 1:
return mol
energies = self.get_conformer_energies(mol)
rmsd = self.get_conformer_rmsd(mol)
sort = np.argsort(energies) # sort by increasing energy
        keep: List[int] = []  # indices of conformers to keep
discard = []
for i in sort:
# always keep lowest-energy conformer
if len(keep) == 0:
keep.append(i)
continue
# discard conformers after max_conformers is reached
if len(keep) >= self.max_conformers:
discard.append(i)
continue
# get RMSD to selected conformers
this_rmsd = rmsd[i][np.asarray(keep, dtype=int)]
# discard conformers within the RMSD threshold
if np.all(this_rmsd >= self.rmsd_threshold):
keep.append(i)
else:
discard.append(i)
# create a new molecule to hold the chosen conformers
# this ensures proper conformer IDs and energy-based ordering
new_mol = Chem.Mol(mol)
new_mol.RemoveAllConformers()
conf_ids = [conf.GetId() for conf in mol.GetConformers()]
for i in keep:
conf = mol.GetConformer(conf_ids[i])
new_mol.AddConformer(conf, assignId=True)
return new_mol
@staticmethod
def get_conformer_rmsd(mol: RDKitMol) -> np.ndarray:
"""
Calculate conformer-conformer RMSD.
Parameters
----------
mol: rdkit.Chem.rdchem.Mol
RDKit Mol object
Returns
-------
rmsd: np.ndarray
A conformer-conformer RMSD value. The shape is `(NumConformers, NumConformers)`
"""
try:
from rdkit.Chem import AllChem
except ModuleNotFoundError:
raise ImportError("This function requires RDKit to be installed.")
rmsd = np.zeros((mol.GetNumConformers(), mol.GetNumConformers()),
dtype=float)
for i, ref_conf in enumerate(mol.GetConformers()):
for j, fit_conf in enumerate(mol.GetConformers()):
if i >= j:
continue
rmsd[i, j] = AllChem.GetBestRMS(mol, mol, ref_conf.GetId(),
fit_conf.GetId())
rmsd[j, i] = rmsd[i, j]
return rmsd
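# --- Usage sketch (illustrative only) ---
# A minimal, hedged example of the generate -> minimize -> prune procedure
# described in the class docstring. It needs RDKit, so the block is guarded
# and only runs when this module is executed directly; the SMILES string and
# settings are arbitrary choices for illustration.
if __name__ == "__main__":
    from rdkit import Chem
    engine = ConformerGenerator(max_conformers=3, rmsd_threshold=0.5)
    demo_mol = engine(Chem.MolFromSmiles("CCO"))  # __call__ wraps generate_conformers
    print(demo_mol.GetNumConformers())
    print(engine.get_conformer_energies(demo_mol))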
<file_sep>"""
Featurizers, transformers, and splitters for MolNet.
"""
import importlib
import inspect
import logging
from typing import Optional, Dict, Any
from deepchem.feat.base_classes import Featurizer
from deepchem.trans.transformers import Transformer
from deepchem.splits.splitters import Splitter
logger = logging.getLogger(__name__)
def get_defaults(module_name: Optional[str] = None) -> Dict[str, Any]:
"""Get featurizers, transformers, and splitters.
This function returns a dictionary with class names as keys and classes
as values. All MolNet ``load_x`` functions should specify which
featurizers, transformers, and splitters the dataset supports and
provide sensible defaults.
Parameters
----------
module_name : {"feat", "trans", "splits"}
Default classes from deepchem.`module_name` will be returned.
Returns
-------
defaults : Dict[str, Any]
Keys are class names and values are class constructors.
Examples
--------
>> splitter = get_defaults('splits')['RandomSplitter']()
>> transformer = get_defaults('trans')['BalancingTransformer'](dataset, {"transform_X": True})
>> featurizer = get_defaults('feat')["CoulombMatrix"](max_atoms=5)
"""
if module_name not in ["feat", "trans", "splits"]:
raise ValueError(
"Input argument must be either 'feat', 'trans', or 'splits'.")
if module_name == "feat":
sc: Any = Featurizer
elif module_name == "trans":
sc = Transformer
elif module_name == "splits":
sc = Splitter
module_name = "deepchem." + module_name
module = importlib.import_module(module_name, package="deepchem")
defaults = {
x[0]: x[1]
for x in inspect.getmembers(module, inspect.isclass)
if issubclass(x[1], sc)
}
return defaults
<file_sep>"""
FACTORS dataset loader.
"""
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
import os
import shutil
import time
import numpy as np
import deepchem as dc
from factors_features import factors_descriptors
def remove_missing_entries(dataset):
"""Remove missing entries.
Some of the datasets have missing entries that sneak in as zero'd out
feature vectors. Get rid of them.
"""
for i, (X, y, w, ids) in enumerate(dataset.itershards()):
available_rows = X.any(axis=1)
print("Shard %d has %d missing entries."
% (i, np.count_nonzero(~available_rows)))
X = X[available_rows]
y = y[available_rows]
w = w[available_rows]
ids = ids[available_rows]
dataset.set_shard(i, X, y, w, ids)
def get_transformers(train_dataset):
"""Get transformers applied to datasets."""
transformers = []
return transformers
def gen_factors(FACTORS_tasks, raw_train_dir, train_dir, valid_dir, test_dir,
shard_size=10000):
"""Load Factor datasets."""
train_files = ("FACTORS_training_disguised_combined_full.csv.gz")
valid_files = ("FACTORS_test1_disguised_combined_full.csv.gz")
test_files = ("FACTORS_test2_disguised_combined_full.csv.gz")
# Featurize FACTORS dataset
print("About to featurize FACTORS dataset.")
featurizer = dc.feat.UserDefinedFeaturizer(factors_descriptors)
loader = dc.data.UserCSVLoader(
tasks=FACTORS_tasks, id_field="Molecule", featurizer=featurizer)
train_datasets, valid_datasets, test_datasets = [], [], []
print("Featurizing train datasets")
train_dataset = loader.featurize(train_files, shard_size=shard_size)
print("Featurizing valid datasets")
valid_dataset = loader.featurize(valid_files, shard_size=shard_size)
print("Featurizing test datasets")
test_dataset = loader.featurize(test_files, shard_size=shard_size)
print("Remove missing entries from datasets.")
remove_missing_entries(train_dataset)
remove_missing_entries(valid_dataset)
remove_missing_entries(test_dataset)
print("Transforming datasets with transformers.")
transformers = get_transformers(train_dataset)
raw_train_dataset = train_dataset
for transformer in transformers:
print("Performing transformations with %s"
% transformer.__class__.__name__)
print("Transforming datasets")
train_dataset = transformer.transform(train_dataset)
valid_dataset = transformer.transform(valid_dataset)
test_dataset = transformer.transform(test_dataset)
print("Shuffling order of train dataset.")
train_dataset.sparse_shuffle()
print("Moving directories")
raw_train_dataset.move(raw_train_dir)
train_dataset.move(train_dir)
valid_dataset.move(valid_dir)
test_dataset.move(test_dir)
return (raw_train_dataset, train_dataset, valid_dataset, test_dataset)
def load_factors(shard_size):
"""Loads factors datasets. Generates if not stored already."""
FACTORS_tasks = (['T_0000%d' % i for i in range(1, 10)]
+ ['T_000%d' % i for i in range(10, 13)])
current_dir = os.path.dirname(os.path.realpath(__file__))
raw_train_dir = os.path.join(current_dir, "raw_train_dir")
train_dir = os.path.join(current_dir, "train_dir")
valid_dir = os.path.join(current_dir, "valid_dir")
test_dir = os.path.join(current_dir, "test_dir")
if (os.path.exists(raw_train_dir) and
os.path.exists(train_dir) and
os.path.exists(valid_dir) and
os.path.exists(test_dir)):
print("Reloading existing datasets")
raw_train_dataset = dc.data.DiskDataset(raw_train_dir)
train_dataset = dc.data.DiskDataset(train_dir)
valid_dataset = dc.data.DiskDataset(valid_dir)
test_dataset = dc.data.DiskDataset(test_dir)
else:
print("Featurizing datasets")
(raw_train_dataset, train_dataset, valid_dataset, test_dataset) = \
gen_factors(FACTORS_tasks, raw_train_dir, train_dir, valid_dir, test_dir,
shard_size=shard_size)
transformers = get_transformers(raw_train_dataset)
return FACTORS_tasks, (train_dataset, valid_dataset, test_dataset), transformers
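if __name__ == "__main__":
    # Minimal usage sketch (not part of the original script): shard_size=2000 is
    # an illustrative value, and the gzipped FACTORS CSV files referenced above
    # must be present in the working directory for this to run.
    FACTORS_tasks, datasets, transformers = load_factors(shard_size=2000)
    train_dataset, valid_dataset, test_dataset = datasets
    print("Loaded %d FACTORS tasks." % len(FACTORS_tasks))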
<file_sep>"""Test that MAML models can be reloaded."""
import deepchem as dc
import numpy as np
import pytest
try:
import tensorflow as tf
class SineLearner(dc.metalearning.MetaLearner):
def __init__(self):
self.batch_size = 10
self.w1 = tf.Variable(np.random.normal(size=[1, 40], scale=1.0))
self.w2 = tf.Variable(
np.random.normal(size=[40, 40], scale=np.sqrt(1 / 40)))
self.w3 = tf.Variable(
np.random.normal(size=[40, 1], scale=np.sqrt(1 / 40)))
self.b1 = tf.Variable(np.zeros(40))
self.b2 = tf.Variable(np.zeros(40))
self.b3 = tf.Variable(np.zeros(1))
def compute_model(self, inputs, variables, training):
x, y = inputs
w1, w2, w3, b1, b2, b3 = variables
dense1 = tf.nn.relu(tf.matmul(x, w1) + b1)
dense2 = tf.nn.relu(tf.matmul(dense1, w2) + b2)
output = tf.matmul(dense2, w3) + b3
loss = tf.reduce_mean(tf.square(output - y))
return loss, [output]
@property
def variables(self):
return [self.w1, self.w2, self.w3, self.b1, self.b2, self.b3]
def select_task(self):
self.amplitude = 5.0 * np.random.random()
self.phase = np.pi * np.random.random()
def get_batch(self):
x = np.random.uniform(-5.0, 5.0, (self.batch_size, 1))
return [x, self.amplitude * np.sin(x + self.phase)]
has_tensorflow = True
except:
has_tensorflow = False
@pytest.mark.tensorflow
def test_reload():
"""Test that a Metalearner can be reloaded."""
learner = SineLearner()
optimizer = dc.models.optimizers.Adam(learning_rate=5e-3)
maml = dc.metalearning.MAML(learner, meta_batch_size=4, optimizer=optimizer)
maml.fit(900)
learner.select_task()
batch = learner.get_batch()
loss, outputs = maml.predict_on_batch(batch)
reloaded = dc.metalearning.MAML(SineLearner(), model_dir=maml.model_dir)
reloaded.restore()
reloaded_loss, reloaded_outputs = reloaded.predict_on_batch(batch)
assert loss == reloaded_loss
assert len(outputs) == len(reloaded_outputs)
for output, reloaded_output in zip(outputs, reloaded_outputs):
assert np.all(output == reloaded_output)
<file_sep>"""
Test rdkit_grid_featurizer module.
"""
import os
import unittest
import numpy as np
from deepchem.feat import RdkitGridFeaturizer
np.random.seed(123)
class TestRdkitGridFeaturizer(unittest.TestCase):
"""
Test RdkitGridFeaturizer class defined in rdkit_grid_featurizer module.
"""
def setUp(self):
current_dir = os.path.dirname(os.path.realpath(__file__))
package_dir = os.path.dirname(os.path.dirname(current_dir))
self.protein_file = os.path.join(package_dir, 'dock', 'tests',
'1jld_protein.pdb')
self.ligand_file = os.path.join(package_dir, 'dock', 'tests',
'1jld_ligand.sdf')
def test_default_featurizer(self):
# test if default parameters work
featurizer = RdkitGridFeaturizer()
self.assertIsInstance(featurizer, RdkitGridFeaturizer)
feature_tensor = featurizer.featurize([(self.ligand_file,
self.protein_file)])
self.assertIsInstance(feature_tensor, np.ndarray)
def test_example_featurizer(self):
# check if use-case from examples works
featurizer = RdkitGridFeaturizer(
voxel_width=1.0,
box_width=75.0,
feature_types=['ecfp', 'splif', 'hbond', 'salt_bridge'],
ecfp_power=9,
splif_power=9,
flatten=True)
feature_tensor = featurizer.featurize([(self.ligand_file,
self.protein_file)])
self.assertIsInstance(feature_tensor, np.ndarray)
def test_force_flatten(self):
# test if input is flattened when flat features are used
featurizer = RdkitGridFeaturizer(feature_types=['ecfp_hashed'],
flatten=False)
featurizer.flatten = True # False should be ignored with ecfp_hashed
feature_tensor = featurizer.featurize([(self.ligand_file,
self.protein_file)])
self.assertIsInstance(feature_tensor, np.ndarray)
self.assertEqual(feature_tensor.shape,
(1, 2 * 2**featurizer.ecfp_power))
def test_combined(self):
ecfp_power = 5
splif_power = 5
box_width = 75.0
voxel_width = 1.0
voxels_per_edge = int(box_width / voxel_width)
# test voxel features
featurizer = RdkitGridFeaturizer(voxel_width=voxel_width,
box_width=box_width,
feature_types=['voxel_combined'],
ecfp_power=ecfp_power,
splif_power=splif_power,
flatten=False,
sanitize=True)
feature_tensor = featurizer.featurize([(self.ligand_file,
self.protein_file)])
self.assertIsInstance(feature_tensor, np.ndarray)
voxel_total_len = (
2**ecfp_power +
len(featurizer.cutoffs['splif_contact_bins']) * 2**splif_power +
len(featurizer.cutoffs['hbond_dist_bins']) + 5)
self.assertEqual(feature_tensor.shape,
(1, voxels_per_edge, voxels_per_edge, voxels_per_edge,
voxel_total_len))
# test flat features
featurizer = RdkitGridFeaturizer(voxel_width=1.0,
box_width=75.0,
feature_types=['flat_combined'],
ecfp_power=ecfp_power,
splif_power=splif_power,
sanitize=True)
feature_tensor = featurizer.featurize([(self.ligand_file,
self.protein_file)])
self.assertIsInstance(feature_tensor, np.ndarray)
flat_total_len = (
3 * 2**ecfp_power +
len(featurizer.cutoffs['splif_contact_bins']) * 2**splif_power +
len(featurizer.cutoffs['hbond_dist_bins']))
self.assertEqual(feature_tensor.shape, (1, flat_total_len))
# check if aromatic features are ignored if sanitize=False
featurizer = RdkitGridFeaturizer(voxel_width=1.0,
box_width=75.0,
feature_types=['all_combined'],
ecfp_power=ecfp_power,
splif_power=splif_power,
flatten=True,
sanitize=False)
self.assertTrue('pi_stack' not in featurizer.feature_types)
self.assertTrue('cation_pi' not in featurizer.feature_types)
feature_tensor = featurizer.featurize([(self.ligand_file,
self.protein_file)])
self.assertIsInstance(feature_tensor, np.ndarray)
self.assertEqual(feature_tensor.shape, (1, 56109538))
def test_custom_cutoffs(self):
custom_cutoffs = {
'hbond_dist_bins': [(2., 3.), (3., 3.5)],
'hbond_angle_cutoffs': [5, 90],
'splif_contact_bins': [(0, 3.5), (3.5, 6.0)],
'ecfp_cutoff': 5.0,
'sybyl_cutoff': 3.0,
'salt_bridges_cutoff': 4.0,
'pi_stack_dist_cutoff': 5.0,
'pi_stack_angle_cutoff': 15.0,
'cation_pi_dist_cutoff': 5.5,
'cation_pi_angle_cutoff': 20.0,
}
rgf_featurizer = RdkitGridFeaturizer(**custom_cutoffs)
self.assertEqual(rgf_featurizer.cutoffs, custom_cutoffs)
def test_rotations(self):
featurizer = RdkitGridFeaturizer(nb_rotations=3,
box_width=75.,
voxel_width=1.,
feature_types=['voxel_combined'],
flatten=False,
sanitize=True)
feature_tensors = featurizer.featurize([(self.ligand_file,
self.protein_file)])
self.assertEqual(feature_tensors.shape, (1, 300, 75, 75, 40))
featurizer = RdkitGridFeaturizer(nb_rotations=3,
box_width=75.,
voxel_width=1.,
feature_types=['flat_combined'],
flatten=True,
sanitize=True)
feature_tensors = featurizer.featurize([(self.ligand_file,
self.protein_file)])
self.assertEqual(feature_tensors.shape, (1, 204))
def test_failures(self):
# test flattened voxel features
featurizer = RdkitGridFeaturizer(nb_rotations=0,
box_width=75.,
voxel_width=1.,
feature_types=['voxel_combined'],
flatten=True,
sanitize=True)
features = featurizer.featurize([(self.ligand_file, self.protein_file),
('nan', 'nan')])
self.assertEqual(features.shape, (2, 16875000))
# test voxel features
featurizer = RdkitGridFeaturizer(nb_rotations=0,
box_width=75.,
voxel_width=1.,
feature_types=['voxel_combined'],
flatten=False,
sanitize=True)
features = featurizer.featurize([(self.ligand_file, self.protein_file),
('nan', 'nan')])
self.assertEqual(features.shape, (2, 75, 75, 75, 40))
# test flat features
featurizer = RdkitGridFeaturizer(nb_rotations=0,
box_width=75.,
voxel_width=1.,
feature_types=['flat_combined'],
flatten=True,
sanitize=True)
features = featurizer.featurize([(self.ligand_file, self.protein_file),
('nan', 'nan')])
self.assertEqual(features.shape, (2, 51))
# test rotations
featurizer = RdkitGridFeaturizer(nb_rotations=5,
box_width=75.,
voxel_width=1.,
feature_types=['flat_combined'],
flatten=True,
sanitize=True)
features = featurizer.featurize([(self.ligand_file, self.protein_file),
('nan', 'nan')])
self.assertEqual(features.shape, (2, 306))
<file_sep>import os
import subprocess
import tempfile
import nbformat
def _notebook_read(path):
"""
Parameters
----------
path: str
path to ipython notebook
Returns
-------
nb: notebook object
errors: list of Exceptions
"""
with tempfile.NamedTemporaryFile(suffix=".ipynb") as fout:
args = [
"jupyter-nbconvert", "--to", "notebook", "--execute",
"--ExecutePreprocessor.timeout=600", "--output", fout.name, path
]
subprocess.check_call(args)
fout.seek(0)
nb = nbformat.read(fout, nbformat.current_nbformat)
errors = [output for cell in nb.cells if "outputs" in cell
for output in cell["outputs"] \
if output.output_type == "error"]
return nb, errors
def test_protein_ligand_complex_notebook():
nb, errors = _notebook_read("protein_ligand_complex_notebook.ipynb")
assert errors == []
def test_bace():
nb, errors = _notebook_read("BACE.ipynb")
assert errors == []
def test_multitask_networks_on_muv():
nb, errors = _notebook_read("Multitask_Networks_on_MUV.ipynb")
assert errors == []
def test_mnist():
nb, errors = _notebook_read("mnist.ipynb")
assert errors == []
def test_solubility():
nb, errors = _notebook_read("solubility.ipynb")
assert errors == []
def test_quantum():
nb, errors = _notebook_read("quantum_machine_gdb1k.ipynb")
assert errors == []
def test_pong():
nb, errors = _notebook_read("pong.ipynb")
assert errors == []
def test_graph_conv():
nb, errors = _notebook_read("graph_convolutional_networks_for_tox21.ipynb")
assert errors == []
def test_tg_mechanics():
nb, errors = _notebook_read("TensorGraph_Mechanics.ipynb")
assert errors == []
def test_seqtoseq_fingerprint():
nb, errors = _notebook_read("seqtoseq_fingerprint.ipynb")
assert errors == []
def test_dataset_preparation():
nb, errors = _notebook_read("dataset_preparation.ipynb")
assert errors == []
def test_uncertainty():
nb, errors = _notebook_read("Uncertainty.ipynb")
assert errors == []
<file_sep>"""
Utilities to score protein-ligand poses using DeepChem.
"""
import numpy as np
def pairwise_distances(coords1: np.ndarray, coords2: np.ndarray) -> np.ndarray:
"""Returns matrix of pairwise Euclidean distances.
Parameters
----------
coords1: np.ndarray
A numpy array of shape `(N, 3)`
coords2: np.ndarray
A numpy array of shape `(M, 3)`
Returns
-------
np.ndarray
A `(N,M)` array with pairwise distances.
"""
return np.sum((coords1[:, None] - coords2[None, :])**2, -1)**0.5
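# Shape sketch (illustrative comment, not part of the original module): with
# coords1 of shape (N, 3) and coords2 of shape (M, 3) the result has shape (N, M),
# e.g. pairwise_distances(np.zeros((4, 3)), np.ones((6, 3))) has shape (4, 6)
# and every entry equals sqrt(3), since each pair of points differs by (1, 1, 1).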
def cutoff_filter(d: np.ndarray, x: np.ndarray, cutoff=8.0) -> np.ndarray:
"""Applies a cutoff filter on pairwise distances
Parameters
----------
d: np.ndarray
Pairwise distances matrix. A numpy array of shape `(N, M)`
x: np.ndarray
Matrix of shape `(N, M)`
cutoff: float, optional (default 8)
Cutoff for selection in Angstroms
Returns
-------
np.ndarray
A `(N,M)` array in which entries whose distance exceeds the cutoff are set to 0.
"""
return np.where(d < cutoff, x, np.zeros_like(x))
def vina_nonlinearity(c: np.ndarray, w: float, Nrot: int) -> np.ndarray:
"""Computes non-linearity used in Vina.
Parameters
----------
c: np.ndarray
A numpy array of shape `(N, M)`
w: float
Weighting term
Nrot: int
Number of rotatable bonds in this molecule
Returns
-------
np.ndarray
A `(N, M)` array with activations under a nonlinearity.
"""
out_tensor = c / (1 + w * Nrot)
return out_tensor
def vina_repulsion(d: np.ndarray) -> np.ndarray:
"""Computes Autodock Vina's repulsion interaction term.
Parameters
----------
d: np.ndarray
A numpy array of shape `(N, M)`.
Returns
-------
np.ndarray
A `(N, M)` array with repulsion terms.
"""
return np.where(d < 0, d**2, np.zeros_like(d))
def vina_hydrophobic(d: np.ndarray) -> np.ndarray:
"""Computes Autodock Vina's hydrophobic interaction term.
Here, d is the set of surface distances as defined in [1]_
Parameters
----------
d: np.ndarray
A numpy array of shape `(N, M)`.
Returns
-------
np.ndarray
A `(N, M)` array of hydrophobic interactions in a piecewise linear curve.
References
----------
.. [1] Jain, <NAME>. "Scoring noncovalent protein-ligand interactions:
a continuous differentiable function tuned to compute binding affinities."
Journal of computer-aided molecular design 10.5 (1996): 427-440.
"""
out_tensor = np.where(d < 0.5, np.ones_like(d),
np.where(d < 1.5, 1.5 - d, np.zeros_like(d)))
return out_tensor
def vina_hbond(d: np.ndarray) -> np.ndarray:
"""Computes Autodock Vina's hydrogen bond interaction term.
Here, d is the set of surface distances as defined in [1]_
Parameters
----------
d: np.ndarray
A numpy array of shape `(N, M)`.
Returns
-------
np.ndarray
A `(N, M)` array of hydrogen bond interactions in a piecewise linear curve.
References
----------
.. [1] Jain, <NAME>. "Scoring noncovalent protein-ligand interactions:
a continuous differentiable function tuned to compute binding affinities."
Journal of computer-aided molecular design 10.5 (1996): 427-440.
"""
out_tensor = np.where(
d < -0.7, np.ones_like(d),
np.where(d < 0, (1.0 / 0.7) * (0 - d), np.zeros_like(d)))
return out_tensor
def vina_gaussian_first(d: np.ndarray) -> np.ndarray:
"""Computes Autodock Vina's first Gaussian interaction term.
Here, d is the set of surface distances as defined in [1]_
Parameters
----------
d: np.ndarray
A numpy array of shape `(N, M)`.
Returns
-------
np.ndarray
A `(N, M)` array of gaussian interaction terms.
References
----------
.. [1] Jain, <NAME>. "Scoring noncovalent protein-ligand interactions:
a continuous differentiable function tuned to compute binding affinities."
Journal of computer-aided molecular design 10.5 (1996): 427-440.
"""
out_tensor = np.exp(-(d / 0.5)**2)
return out_tensor
def vina_gaussian_second(d: np.ndarray) -> np.ndarray:
"""Computes Autodock Vina's second Gaussian interaction term.
Here, d is the set of surface distances as defined in [1]_
Parameters
----------
d: np.ndarray
A numpy array of shape `(N, M)`.
Returns
-------
np.ndarray
A `(N, M)` array of gaussian interaction terms.
References
----------
.. [1] Jain, <NAME>. "Scoring noncovalent protein-ligand interactions:
a continuous differentiable function tuned to compute binding affinities."
Journal of computer-aided molecular design 10.5 (1996): 427-440.
"""
out_tensor = np.exp(-((d - 3) / 2)**2)
return out_tensor
def weighted_linear_sum(w: np.ndarray, x: np.ndarray) -> np.ndarray:
"""Computes weighted linear sum.
Parameters
----------
w: np.ndarray
A numpy array of shape `(N,)`
x: np.ndarray
A numpy array of shape `(N, M, L)`
Returns
-------
np.ndarray
A numpy array of shape `(M, L)`
"""
return np.tensordot(w, x, axes=1)
def vina_energy_term(coords1: np.ndarray, coords2: np.ndarray,
weights: np.ndarray, wrot: float, Nrot: int) -> np.ndarray:
"""Computes the Vina Energy function for two molecular conformations
Parameters
----------
coords1: np.ndarray
Molecular coordinates of shape `(N, 3)`
coords2: np.ndarray
Molecular coordinates of shape `(M, 3)`
weights: np.ndarray
A numpy array of shape `(5,)`. The 5 values are weights for repulsion interaction term,
hydrophobic interaction term, hydrogen bond interaction term,
first Gaussian interaction term and second Gaussian interaction term.
wrot: float
The scaling factor for nonlinearity
Nrot: int
Number of rotatable bonds in this calculation
Returns
-------
np.ndarray
A scalar value with free energy
"""
# TODO(rbharath): The autodock vina source computes surface distances
# which take into account the van der Waals radius of each atom type.
dists = pairwise_distances(coords1, coords2)
repulsion = vina_repulsion(dists)
hydrophobic = vina_hydrophobic(dists)
hbond = vina_hbond(dists)
gauss_1 = vina_gaussian_first(dists)
gauss_2 = vina_gaussian_second(dists)
# Shape (N, M)
interactions = weighted_linear_sum(
weights, np.array([repulsion, hydrophobic, hbond, gauss_1, gauss_2]))
# Shape (N, M)
thresholded = cutoff_filter(dists, interactions)
free_energies = vina_nonlinearity(thresholded, wrot, Nrot)
return np.sum(free_energies)
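if __name__ == "__main__":
    # Minimal sketch (not part of the original module): the weights and rotatable
    # bond count below are illustrative placeholders rather than calibrated
    # Autodock Vina parameters, so the printed number is only a smoke test.
    ligand_coords = np.random.rand(10, 3) * 10
    protein_coords = np.random.rand(50, 3) * 10
    weights = np.array([0.1, 0.1, 0.1, 0.1, 0.1])  # assumed, not Vina's fitted weights
    energy = vina_energy_term(ligand_coords, protein_coords, weights, wrot=0.05, Nrot=3)
    print("Toy Vina-style energy:", energy)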
<file_sep># DeepChem
[](https://anaconda.org/conda-forge/deepchem)
[](https://pypi.org/project/deepchem/)
[](https://deepchem.readthedocs.io/en/latest/?badge=latest)
[](https://github.com/deepchem/deepchem/actions?query=workflow%3A%22Test+for+DeepChem+Core%22)
[](https://github.com/deepchem/deepchem/actions?query=workflow%3A%22Test+for+documents%22)
[](https://github.com/deepchem/deepchem/actions?query=workflow%3A%22Test+for+build+scripts%22)
[](https://codecov.io/gh/deepchem/deepchem)
[Website](https://deepchem.io/) | [Documentation](https://deepchem.readthedocs.io/en/latest/) | [Colab Tutorial](https://github.com/deepchem/deepchem/tree/master/examples/tutorials) | [Discussion Forum](https://forum.deepchem.io/) | [Gitter](https://gitter.im/deepchem/Lobby)
DeepChem aims to provide a high quality open-source toolchain
that democratizes the use of deep-learning in drug discovery,
materials science, quantum chemistry, and biology.
### Table of contents:
- [Requirements](#requirements)
- [Installation](#installation)
- [Stable version](#stable-version)
- [Nightly build version](#nightly-build-version)
- [Docker](#docker)
- [From source](#from-source)
- [From source lightweight](#from-source-lightweight)
- [Getting Started](#getting-started)
- [Gitter](#gitter)
- [About Us](#about-us)
- [Contributing to DeepChem](/CONTRIBUTING.md)
- [Citing DeepChem](#citing-deepchem)
## Requirements
DeepChem currently supports Python 3.7 through 3.10 and requires these packages in all cases.
- [joblib](https://pypi.python.org/pypi/joblib)
- [NumPy](https://numpy.org/)
- [pandas](http://pandas.pydata.org/)
- [scikit-learn](https://scikit-learn.org/stable/)
- [SciPy](https://www.scipy.org/)
- [rdkit](https://www.rdkit.org/)
### Soft Requirements
DeepChem has a number of "soft" requirements.
If you face some errors like `ImportError: This class requires XXXX`, you may need to install some packages.
Please check [the document](https://deepchem.readthedocs.io/en/latest/requirements.html#soft-requirements) about soft requirements.
## Installation
### Stable version
DeepChem stable version can be installed using pip or conda as
```bash
pip install deepchem
```
or
```
conda install -c conda-forge deepchem
```
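After installing, you can quickly sanity-check the setup by importing the package (a minimal check; the version string printed depends on the release you installed).
```python
import deepchem
print(deepchem.__version__)
```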
DeepChem provides support for TensorFlow, PyTorch, and JAX, and each requires
an individual pip installation.
To use models with tensorflow dependencies, install using
```bash
pip install deepchem[tensorflow]
```
To use models with torch dependencies, install using
```bash
pip install deepchem[torch]
```
To use models with jax dependencies, install using
```bash
pip install deepchem[jax]
```
If GPU support is required, make sure CUDA is installed, then install the desired deep learning framework using the links below before installing deepchem.
1. tensorflow - only CUDA needs to be installed
2. pytorch - https://pytorch.org/get-started/locally/#start-locally
3. jax - https://github.com/google/jax#pip-installation-gpu-cuda
In `zsh` square brackets are used for globbing/pattern matching. This means you
need to escape the square brackets in the above installation. You can do so
by including the dependencies in quotes like `pip install --pre 'deepchem[jax]'`
### Nightly build version
The nightly version is built from the HEAD of DeepChem. It can be installed using
```bash
pip install --pre deepchem
```
### Docker
If you want to install deepchem using Docker, you can pull two kinds of images.
DockerHub: https://hub.docker.com/repository/docker/deepchemio/deepchem
- `deepchemio/deepchem:x.x.x`
- Image built using conda (x.x.x is a version of deepchem)
- The x.x.x image is built when we push the x.x.x tag
- The Dockerfile is in the `docker/tag` directory
- `deepchemio/deepchem:latest`
- Image built from source code
- The latest image is built every time we commit to the master branch
- The Dockerfile is in the `docker/nightly` directory
You can pull the image like this:
```bash
docker pull deepchemio/deepchem:2.4.0
```
If you want to know docker usages with deepchem in more detail, please check [the document](https://deepchem.readthedocs.io/en/latest/installation.html#docker).
### From source
If you want to install all soft dependencies at once or contribute to deepchem, we recommend installing deepchem from source.
Please check [this introduction](https://deepchem.readthedocs.io/en/latest/installation.html#from-source-with-conda).
## Getting Started
The DeepChem project maintains an extensive collection of [tutorials](https://github.com/deepchem/deepchem/tree/master/examples/tutorials). All tutorials are designed to be run on Google colab (or locally if you prefer). Tutorials are arranged in a suggested learning sequence which will take you from beginner to proficient at molecular machine learning and computational biology more broadly.
After working through the tutorials, you can also go through other [examples](https://github.com/deepchem/deepchem/tree/master/examples). To apply `deepchem` to a new problem, try starting from one of the existing examples or tutorials and modifying it step by step to work with your new use-case. If you have questions or comments you can raise them on our [gitter](https://gitter.im/deepchem/Lobby).
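As a rough sketch of what a minimal workflow can look like (assuming the tensorflow extras are installed; the Delaney solubility dataset and `MultitaskRegressor` below are just one possible choice of loader and model):
```python
import deepchem as dc

# Load a small MoleculeNet dataset with ECFP (circular fingerprint) features.
tasks, datasets, transformers = dc.molnet.load_delaney(featurizer='ECFP')
train_dataset, valid_dataset, test_dataset = datasets

# Fit a simple fully-connected regressor and evaluate it on the test split.
model = dc.models.MultitaskRegressor(n_tasks=len(tasks),
                                     n_features=train_dataset.X.shape[1])
model.fit(train_dataset, nb_epoch=10)
metric = dc.metrics.Metric(dc.metrics.pearson_r2_score)
print(model.evaluate(test_dataset, [metric], transformers))
```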
### Supported Integrations
- [Weights & Biases](https://docs.wandb.ai/guides/integrations/other/deepchem): Track your DeepChem model's training and evaluation metrics.
### Gitter
Join us on gitter at [https://gitter.im/deepchem/Lobby](https://gitter.im/deepchem/Lobby). Probably the easiest place to ask simple questions or float requests for new features.
## About Us
DeepChem is managed by a team of open source contributors. Anyone is free to join and contribute!
## Citing DeepChem
If you have used DeepChem in the course of your research, we ask that you cite the "Deep Learning for the Life Sciences" book by the DeepChem core team.
To cite this book, please use this bibtex entry:
```
@book{Ramsundar-et-al-2019,
title={Deep Learning for the Life Sciences},
author={<NAME> and <NAME> and <NAME> and <NAME> and <NAME> and <NAME>},
publisher={O'Reilly Media},
note={\url{https://www.amazon.com/Deep-Learning-Life-Sciences-Microscopy/dp/1492039837}},
year={2019}
}
```
<file_sep>"""
TODO(LESWING) Remove h5py dependency
TODO(LESWING) Remove keras dependency and replace with functional keras API
"""
import warnings
from deepchem.models import Model
from deepchem.models.autoencoder_models.model import MoleculeVAE
from deepchem.feat.one_hot import zinc_charset
from deepchem.utils import download_url
import os
from subprocess import call
class TensorflowMoleculeEncoder(Model):
"""
Transform molecules from one hot encoding into a latent vector
representation.
https://arxiv.org/abs/1610.02415
"""
def __init__(self,
model_dir=None,
weights_file="model.h5",
verbose=True,
charset_length=len(zinc_charset),
latent_rep_size=292):
"""
Parameters
----------
model_dir: str
Folder to store cached weights
weights_file: str
File to store cached weights in model_dir
verbose: bool
True for more logging
charset_length: int
Length of one_hot_encoded vectors
latent_rep_size: int
How large a 1D Vector for latent representation
"""
warnings.warn("TensorflowMoleculeEncoder Deprecated. "
"Will be removed in DeepChem 1.4.", DeprecationWarning)
super(TensorflowMoleculeEncoder, self).__init__(
model_dir=model_dir, verbose=verbose)
weights_file = os.path.join(model_dir, weights_file)
if os.path.isfile(weights_file):
m = MoleculeVAE()
m.load(charset_length, weights_file, latent_rep_size=latent_rep_size)
self.model = m
else:
# TODO (LESWING) Lazy Load
raise ValueError("Model file %s doesn't exist" % weights_file)
@staticmethod
def zinc_encoder():
"""
Returns
-------
obj
An Encoder with weights that were trained on the zinc dataset
"""
current_dir = os.path.dirname(os.path.realpath(__file__))
weights_filename = "zinc_model.h5"
weights_file = os.path.join(current_dir, weights_filename)
if not os.path.exists(weights_file):
download_url("http://karlleswing.com/misc/keras-molecule/model.h5",
current_dir)
mv_cmd = "mv model.h5 %s" % weights_file
call(mv_cmd.split())
return TensorflowMoleculeEncoder(
model_dir=current_dir, weights_file=weights_filename)
def fit(self, dataset, nb_epoch=10, batch_size=50, **kwargs):
raise ValueError("Only can read in pre-trained models")
def predict_on_batch(self, X):
x_latent = self.model.encoder.predict(X)
return x_latent
class TensorflowMoleculeDecoder(Model):
"""
Transform molecules from a latent space feature vector into
a one hot encoding.
https://arxiv.org/abs/1610.02415
"""
def __init__(self,
model_dir=None,
weights_file="model.h5",
verbose=True,
charset_length=len(zinc_charset),
latent_rep_size=292):
"""
Parameters
----------
model_dir: str
Folder to store cached weights
weights_file: str
File to store cached weights in model_dir
verbose: bool
True for more logging
charset_length: int
Length of one_hot_encoded vectors
latent_rep_size: int
How large a 1D Vector for latent representation
"""
warnings.warn("TensorflowMoleculeDecoder Deprecated. "
"Will be removed in DeepChem 1.4.", DeprecationWarning)
super(TensorflowMoleculeDecoder, self).__init__(
model_dir=model_dir, verbose=verbose)
weights_file = os.path.join(model_dir, weights_file)
if os.path.isfile(weights_file):
m = MoleculeVAE()
m.load(charset_length, weights_file, latent_rep_size=latent_rep_size)
self.model = m
else:
# TODO (LESWING) Lazy Load
raise ValueError("Model file %s doesn't exist" % weights_file)
def fit(self, dataset, nb_epoch=10, batch_size=50, **kwargs):
raise ValueError("Only can read in pre-trained models")
@staticmethod
def zinc_decoder():
"""
Returns
-------
obj
A Decoder with weights that were trained on the zinc dataset
"""
current_dir = os.path.dirname(os.path.realpath(__file__))
weights_filename = "zinc_model.h5"
weights_file = os.path.join(current_dir, weights_filename)
if not os.path.exists(weights_file):
download_url("http://karlleswing.com/misc/keras-molecule/model.h5",
current_dir)
mv_cmd = "mv model.h5 %s" % weights_file
call(mv_cmd.split())
return TensorflowMoleculeDecoder(
model_dir=current_dir, weights_file=weights_filename)
def predict_on_batch(self, X):
x_latent = self.model.decoder.predict(X)
return x_latent
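# Usage sketch (illustrative comment, not part of the original module): the
# pretrained ZINC weights are downloaded on first use, and `one_hot_batch`
# below stands in for a batch of SMILES one-hot encoded with the ZINC charset.
#
#   encoder = TensorflowMoleculeEncoder.zinc_encoder()
#   latent = encoder.predict_on_batch(one_hot_batch)
#   decoder = TensorflowMoleculeDecoder.zinc_decoder()
#   reconstructed = decoder.predict_on_batch(latent)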
<file_sep>"""
Density Functional Theory Data
Derived from: https://github.com/mfkasim1/xcnn/blob/f2cb9777da2961ac553f256ecdcca3e314a538ca/xcdnn2/entry.py
"""
from __future__ import annotations
from abc import abstractmethod, abstractproperty
from typing import List, Dict, Optional
import numpy as np
# dqc depend
import dqc
from dqc.system.mol import Mol
from dqc.system.base_system import BaseSystem
from deepchem.utils.dftutils import KSCalc, BaseGrid
class DFTSystem():
"""
The DFTSystem class creates and returns the various systems in an entry object as dictionaries.
Examples
--------
>>> from deepchem.feat.dft_data import DFTSystem
>>> systems = {'moldesc': 'Li 1.5070 0 0; H -1.5070 0 0','basis': '6-311++G(3df,3pd)'}
>>> output = DFTSystem(systems)
Returns
-------
DFTSystem object for all the individual atoms/ions/molecules in an entry object.
References
----------
Kasim, <NAME>., and <NAME>. "Learning the exchange-correlation functional from nature with fully differentiable density functional theory." Physical Review Letters 127.12 (2021): 126403.
https://github.com/diffqc/dqc/blob/0fe821fc92cb3457fb14f6dff0c223641c514ddb/dqc/system/base_system.py
"""
def __init__(self, system: Dict):
self.system = system
self.moldesc = system["moldesc"]
self.basis = system["basis"]
self.spin = 0
self.charge = 0
self.no = 1
if 'spin' in system.keys():
self.spin = int(system["spin"])
if 'charge' in system.keys():
self.charge = int(system["charge"])
if 'number' in system.keys():
self.no = int(system["number"])
"""
Parameters
----------
system: Dict
system is a dictionary containing information on the atomic positions,
atomic numbers, and basis set used for the DFT calculation.
"""
def get_dqc_mol(self, pos_reqgrad: bool = False) -> BaseSystem:
"""
This method converts the system dictionary to a DQC system and returns it.
Parameters
----------
pos_reqgrad: bool
decides if the atomic position require gradient calculation.
Returns
-------
mol
DQC mol object
"""
atomzs, atomposs = dqc.parse_moldesc(self.moldesc)
if pos_reqgrad:
atomposs.requires_grad_()
mol = Mol(self.moldesc, self.basis, spin=self.spin, charge=self.charge)
return mol
class DFTEntry():
"""
Handles creating and initialising DFTEntry objects from the dataset. This object contains information about the various systems in the datapoint (atoms, molecules and ions) along with the ground truth
values.
Notes
-----
DFTEntry objects should not be initialized directly, but created through
``DFTEntry.create``
Example
-------
>>> from deepchem.feat.dft_data import DFTEntry
>>> e_type= 'dm'
>>> true_val= 'deepchem/data/tests/dftHF_output.npy'
>>> systems = [{'moldesc': 'H 0.86625 0 0; F -0.86625 0 0','basis': '6-311++G(3df,3pd)'}]
>>> dm_entry_for_HF = DFTEntry.create(e_type, true_val, systems)
"""
@classmethod
def create(cls,
e_type: str,
true_val: Optional[str],
systems: List[Dict],
weight: Optional[int] = 1):
"""
This method is used to initialise the DFTEntry class. The entry objects are created
based on their entry type.
Parameters
----------
e_type: str
Determines the type of calculation to be carried out on the entry
object. Accepts the following values: "ae", "ie", "dm", "dens", that stand for atomization energy,
ionization energy, density matrix and density profile respectively.
true_val: str
Ground state energy values for the entry object as a string (for ae
and ie), or a .npy file containing a matrix ( for dm and dens).
systems: List[Dict]
List of dictionaries contains "moldesc", "basis" and "spin"
of all the atoms/molecules. These values are to be entered in
the DQC or PYSCF format. The systems needs to be entered in a
specific order, i.e ; the main atom/molecule needs to be the
first element. (This is for objects containing equations, such
as ae and ie entry objects). Spin and charge of the system are
optional parameters and are considered '0' if not specified.
The system number refers to the number of times the systems is
present in the molecule - this is for polyatomic molecules and the
default value is 1. For example ; system number of Hydrogen in water
is 2.
weight: int
Weight of the entry object.
Returns
-------
DFTEntry object based on entry type
"""
if true_val is None:
true_val = '0.0'
if e_type == "ae":
return _EntryAE(e_type, true_val, systems, weight)
elif e_type == "ie":
return _EntryIE(e_type, true_val, systems, weight)
elif e_type == "dm":
return _EntryDM(e_type, true_val, systems, weight)
elif e_type == "dens":
return _EntryDens(e_type, true_val, systems, weight)
else:
raise NotImplementedError("Unknown entry type: %s" % e_type)
def __init__(self,
e_type: str,
true_val: Optional[str],
systems: List[Dict],
weight: Optional[int] = 1):
self._systems = [DFTSystem(p) for p in systems]
self._weight = weight
def get_systems(self) -> List[DFTSystem]:
"""
Parameters
----------
systems: List[DFTSystem]
Returns
-------
List of systems in the entry
"""
return self._systems
@abstractproperty
def entry_type(self) -> str:
"""
Returns
-------
The type of entry ;
1) Atomic Ionization Potential (IP/IE)
2) Atomization Energy (AE)
3) Density Profile (DENS)
4) Density Matrix (DM)
"""
pass
def get_true_val(self) -> np.ndarray:
"""
Get the true value of the DFTEntry.
For the AE and IP entry types, the experimental values are collected from the NIST CCCBDB/ASD
databases.
The true values of density profiles are calculated using PYSCF-CCSD calculations. This method simply loads the value; no calculation is performed.
"""
return np.array(0)
@abstractmethod
def get_val(self, qcs: List[KSCalc]) -> np.ndarray:
"""
Return the energy value of the entry, using a DQC-DFT calculation, where the XC has been
replaced by the trained neural network. This method does not carry out any calculations, it is
an interface to the KSCalc utility.
"""
pass
def get_weight(self):
"""
Returns
-------
Weight of the entry object
"""
return self._weight
class _EntryDM(DFTEntry):
"""
Entry for Density Matrix (DM)
The density matrix is used to express total energy of both non-interacting and
interacting systems.
Notes
-----
dm entry can only have 1 system
"""
def __init__(self, e_type, true_val, systems, weight):
"""
Parameters
----------
e_type: str
true_val: str
must be a .npy file containing the pre-calculated density matrix
systems: List[Dict]
"""
super().__init__(e_type, true_val, systems)
self.true_val = true_val
assert len(self.get_systems()) == 1
self._weight = weight
@property
def entry_type(self) -> str:
return "dm"
def get_true_val(self) -> np.ndarray:
# get the density matrix from PySCF's CCSD calculation
dm = np.load(self.true_val)
return dm
def get_val(self, qcs: List[KSCalc]) -> np.ndarray:
val = qcs[0].aodmtot()
return np.array([val.tolist()])
class _EntryDens(DFTEntry):
"""
Entry for density profile (dens), compared with CCSD calculation
"""
def __init__(self, e_type, true_val, systems, weight):
"""
Parameters
----------
e_type: str
true_val: str
must be a .npy file containing the pre-calculated density profile.
systems: List[Dict]
"""
super().__init__(e_type, true_val, systems)
self.true_val = true_val
assert len(self.get_systems()) == 1
self._grid: Optional[BaseGrid] = None
self._weight = weight
@property
def entry_type(self) -> str:
return "dens"
def get_true_val(self) -> np.ndarray:
dens = np.load(self.true_val)
return dens
def get_val(self, qcs: List[KSCalc]) -> np.ndarray:
"""
This method calculates the integration grid which is then used to calculate the
density profile of an entry object.
Parameters
----------
qcs: List[KSCalc]
Returns
-------
Numpy array containing calculated density profile
"""
qc = qcs[0]
grid = self.get_integration_grid()
rgrid = grid.get_rgrid()
val = qc.dens(rgrid)
return np.array(val.tolist())
def get_integration_grid(self) -> BaseGrid:
"""
This method is used to calculate the integration grid required for a system
in order to calculate its density profile using Differentiable DFT.
Returns
-------
grid: BaseGrid
References
----------
https://github.com/diffqc/dqc/blob/0fe821fc92cb3457fb14f6dff0c223641c514ddb/dqc/grid/base_grid.py
"""
if self._grid is None:
system = self.get_systems()[0]
dqc_mol = system.get_dqc_mol()
dqc_mol.setup_grid()
grid = dqc_mol.get_grid()
assert grid.coord_type == "cart"
self._grid = grid
return self._grid
class _EntryIE(DFTEntry):
"""
Entry for Ionization Energy (IE)
"""
def __init__(self, e_type, true_val, systems, weight):
"""
Parameters
----------
e_type: str
true_val: str
systems: List[Dict]
"""
super().__init__(e_type, true_val, systems)
self.true_val = float(true_val)
self._weight = weight
@property
def entry_type(self) -> str:
return "ie"
def get_true_val(self) -> np.ndarray:
return np.array([self.true_val])
def get_val(self, qcs: List[KSCalc]) -> np.ndarray:
"""
This method calculates the energy of an entry based on the systems and command present
in the data object. For example, for a lithium hydride molecule the total energy
would be: energy(Li) + energy(H) - energy(LiH)
Parameters
----------
qcs: List[KSCalc]
Returns
-------
Total Energy of a data object for entry types IE and AE
"""
systems = [i.no for i in self.get_systems()]
e_1 = [m.energy() for m in qcs]
e = [item1 * item2 for item1, item2 in zip(systems, e_1)]
val = sum(e) - 2 * e[0]
return np.array([val.tolist()])
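# Worked sketch (illustrative comment): for lithium hydride with systems ordered
# [LiH, Li, H] and per-system counts [1, 1, 1], get_val above computes
#   e = [E(LiH), E(Li), E(H)]
#   sum(e) - 2 * e[0] = E(Li) + E(H) - E(LiH)
# which is the energy difference described in the docstring.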
class _EntryAE(_EntryIE):
"""
Entry for Atomization Energy (AE)
"""
@property
def entry_type(self) -> str:
return "ae"
<file_sep>"""
Testing singletask/multitask data loading capabilities.
"""
__author__ = "<NAME>"
__copyright__ = "Copyright 2016, Stanford University"
__license__ = "MIT"
import os
import shutil
import logging
import unittest
import tempfile
import deepchem as dc
import numpy as np
logger = logging.getLogger(__name__)
class TestLoad(unittest.TestCase):
"""
Test singletask/multitask data loading.
"""
def test_move_load(self):
"""Test that datasets can be moved and loaded."""
current_dir = os.path.dirname(os.path.realpath(__file__))
base_dir = tempfile.mkdtemp()
data_dir = os.path.join(base_dir, "data")
moved_data_dir = os.path.join(base_dir, "moved_data")
dataset_file = os.path.join(current_dir,
"../../models/tests/assets/example.csv")
featurizer = dc.feat.CircularFingerprint(size=1024)
tasks = ["log-solubility"]
loader = dc.data.CSVLoader(tasks=tasks,
feature_field="smiles",
featurizer=featurizer)
dataset = loader.create_dataset(dataset_file, data_dir)
X, y, w, ids = (dataset.X, dataset.y, dataset.w, dataset.ids)
shutil.move(data_dir, moved_data_dir)
moved_dataset = dc.data.DiskDataset(moved_data_dir)
X_moved, y_moved, w_moved, ids_moved = (moved_dataset.X,
moved_dataset.y,
moved_dataset.w,
moved_dataset.ids)
np.testing.assert_allclose(X, X_moved)
np.testing.assert_allclose(y, y_moved)
np.testing.assert_allclose(w, w_moved)
np.testing.assert_array_equal(ids, ids_moved)
def test_multiload(self):
"""Check can re-use featurization for multiple task selections.
"""
# Only for debug!
np.random.seed(123)
current_dir = os.path.dirname(os.path.realpath(__file__))
# Make directories to store the raw and featurized datasets.
data_dir = tempfile.mkdtemp()
# Load dataset
logger.info("About to load dataset.")
dataset_file = os.path.join(
current_dir, "../../models/tests/assets/multitask_example.csv")
# Featurize tox21 dataset
logger.info("About to featurize dataset.")
featurizer = dc.feat.CircularFingerprint(size=1024)
all_tasks = ["task%d" % i for i in range(17)]
# featurization
loader = dc.data.CSVLoader(tasks=all_tasks,
feature_field="smiles",
featurizer=featurizer)
dataset = loader.create_dataset(dataset_file, data_dir)
# train/valid split.
_, y_multi, w_multi, _ = (dataset.X, dataset.y, dataset.w, dataset.ids)
# singletask load
y_tasks, w_tasks, = [], []
dataset = dc.data.DiskDataset(data_dir)
for ind, task in enumerate(all_tasks):
logger.info("Processing task %s" % task)
_, y_task, w_task, _ = (dataset.X, dataset.y, dataset.w,
dataset.ids)
y_tasks.append(y_task[:, ind])
w_tasks.append(w_task[:, ind])
# comparison
for ind, task in enumerate(all_tasks):
y_multi_task = y_multi[:, ind]
w_multi_task = w_multi[:, ind]
y_task = y_tasks[ind]
w_task = w_tasks[ind]
np.testing.assert_allclose(y_multi_task.flatten(), y_task.flatten())
np.testing.assert_allclose(w_multi_task.flatten(), w_task.flatten())
def test_singletask_matches_multitask_load(self):
"""Check that singletask load and multitask load of dataset are same."""
# Only for debug!
np.random.seed(123)
current_dir = os.path.dirname(os.path.realpath(__file__))
# Make directories to store the raw and featurized datasets.
data_dir = tempfile.mkdtemp()
# Load dataset
logger.info("About to load dataset.")
dataset_file = os.path.join(
current_dir, "../../models/tests/assets/multitask_example.csv")
# Featurize tox21 dataset
logger.info("About to featurize dataset.")
featurizer = dc.feat.CircularFingerprint(size=1024)
all_tasks = ["task%d" % i for i in range(17)]
# For debugging purposes
n_tasks = 17
tasks = all_tasks[0:n_tasks]
# multitask load
loader = dc.data.CSVLoader(tasks=tasks,
feature_field="smiles",
featurizer=featurizer)
dataset = loader.create_dataset(dataset_file, data_dir)
# Do train/valid split.
_, y_multi, w_multi, _ = (dataset.X, dataset.y, dataset.w, dataset.ids)
# singletask load
y_tasks, w_tasks, ids_tasks = [], [], []
for task in tasks:
logger.info("Processing task %s" % task)
if os.path.exists(data_dir):
shutil.rmtree(data_dir)
loader = dc.data.CSVLoader(tasks=[task],
feature_field="smiles",
featurizer=featurizer)
dataset = loader.create_dataset(dataset_file, data_dir)
_, y_task, w_task, ids_task = (dataset.X, dataset.y, dataset.w,
dataset.ids)
y_tasks.append(y_task)
w_tasks.append(w_task)
ids_tasks.append(ids_task)
# comparison
for ind, task in enumerate(tasks):
y_multi_task = y_multi[:, ind]
w_multi_task = w_multi[:, ind]
y_task = y_tasks[ind]
w_task = w_tasks[ind]
ids_task = ids_tasks[ind]
np.testing.assert_allclose(y_multi_task.flatten(), y_task.flatten())
np.testing.assert_allclose(w_multi_task.flatten(), w_task.flatten())
<file_sep>import unittest
from deepchem.feat import PubChemFingerprint
class TestPubChemFingerprint(unittest.TestCase):
"""
Test PubChemFingerprint.
"""
def setUp(self):
"""
Set up tests.
"""
from rdkit import Chem
smiles = 'CC(=O)OC1=CC=CC=C1C(=O)O'
self.mol = Chem.MolFromSmiles(smiles)
def test_pubchem_fingerprint(self):
"""
Test simple fingerprint.
"""
featurizer = PubChemFingerprint()
feature_sum = featurizer([self.mol])
assert feature_sum.shape == (1, 881)
<file_sep>"""
Generates protein-ligand docked poses.
"""
import platform
import logging
import os
import tempfile
import numpy as np
from subprocess import Popen, PIPE
from typing import List, Optional, Tuple, Union
from deepchem.dock.binding_pocket import BindingPocketFinder
from deepchem.utils.data_utils import download_url, get_data_dir
from deepchem.utils.typing import RDKitMol
from deepchem.utils.geometry_utils import compute_centroid, compute_protein_range
from deepchem.utils.rdkit_utils import load_molecule, write_molecule
from deepchem.utils.docking_utils import load_docked_ligands, write_vina_conf, write_gnina_conf, read_gnina_log
logger = logging.getLogger(__name__)
DOCKED_POSES = List[Tuple[RDKitMol, RDKitMol]]
class PoseGenerator(object):
"""A Pose Generator computes low energy conformations for molecular complexes.
Many questions in structural biophysics reduce to computing
the binding free energy of molecular complexes. A key step towards
computing the binding free energy of two complexes is to find low
energy "poses", that is energetically favorable conformations of
molecules with respect to each other. One application of this
technique is to find low energy poses for protein-ligand
interactions.
"""
def generate_poses(self,
molecular_complex: Tuple[str, str],
centroid: Optional[np.ndarray] = None,
box_dims: Optional[np.ndarray] = None,
exhaustiveness: int = 10,
num_modes: int = 9,
num_pockets: Optional[int] = None,
out_dir: Optional[str] = None,
generate_scores: bool = False):
"""Generates a list of low energy poses for molecular complex
Parameters
----------
molecular_complex: Tuple[str, str]
A representation of a molecular complex. This tuple is
(protein_file, ligand_file).
centroid: np.ndarray, optional (default None)
The centroid to dock against. Is computed if not specified.
box_dims: np.ndarray, optional (default None)
A numpy array of shape `(3,)` holding the size of the box to dock.
If not specified, it is set to the size of the molecular complex plus 5 angstroms.
exhaustiveness: int, optional (default 10)
Tells pose generator how exhaustive it should be with pose
generation.
num_modes: int, optional (default 9)
Tells pose generator how many binding modes it should generate at
each invocation.
num_pockets: int, optional (default None)
If specified, `self.pocket_finder` must be set. Will only
generate poses for the first `num_pockets` returned by
`self.pocket_finder`.
out_dir: str, optional (default None)
If specified, write generated poses to this directory.
generate_scores: bool, optional (default False)
If `True`, the pose generator will return scores for complexes.
This is used typically when invoking external docking programs
that compute scores.
Returns
-------
A list of molecular complexes in energetically favorable poses.
"""
raise NotImplementedError
class GninaPoseGenerator(PoseGenerator):
"""Use GNINA to generate binding poses.
This class uses GNINA (a deep learning framework for molecular
docking) to generate binding poses. It downloads the GNINA
executable to DEEPCHEM_DATA_DIR (an environment variable you set)
and invokes the executable to perform pose generation.
GNINA uses pre-trained convolutional neural network (CNN) scoring
functions to rank binding poses based on learned representations of
3D protein-ligand interactions. It has been shown to outperform
AutoDock Vina in virtual screening applications [1]_.
If you use the GNINA molecular docking engine, please cite the relevant
papers: https://github.com/gnina/gnina#citation
The primary citation for GNINA is [1]_.
References
----------
.. [1] <NAME>, <NAME>, <NAME>, <NAME>, <NAME>.
"Protein–Ligand Scoring with Convolutional Neural Networks."
Journal of chemical information and modeling (2017).
Note
----
* GNINA currently only works on Linux operating systems.
* GNINA requires CUDA >= 10.1 for fast CNN scoring.
* Almost all dependencies are included in the most compatible way
possible, which reduces performance. Build GNINA from source
for production use.
"""
def __init__(self):
"""Initialize GNINA pose generator."""
data_dir = get_data_dir()
if platform.system() == 'Linux':
url = "https://github.com/gnina/gnina/releases/download/v1.0/gnina"
filename = 'gnina'
self.gnina_dir = data_dir
self.gnina_cmd = os.path.join(self.gnina_dir, filename)
else:
raise ValueError(
"GNINA currently only runs on Linux. Try using a cloud platform to run this code instead."
)
if not os.path.exists(self.gnina_cmd):
logger.info("GNINA not available. Downloading...")
download_url(url, data_dir)
downloaded_file = os.path.join(data_dir, filename)
os.chmod(downloaded_file, 0o755)
logger.info("Downloaded GNINA.")
def generate_poses(
self,
molecular_complex: Tuple[str, str],
centroid: Optional[np.ndarray] = None,
box_dims: Optional[np.ndarray] = None,
exhaustiveness: int = 10,
num_modes: int = 9,
num_pockets: Optional[int] = None,
out_dir: Optional[str] = None,
generate_scores: bool = True,
**kwargs) -> Union[Tuple[DOCKED_POSES, np.ndarray], DOCKED_POSES]:
"""Generates the docked complex and outputs files for docked complex.
Parameters
----------
molecular_complex: Tuple[str, str]
A representation of a molecular complex. This tuple is
(protein_file, ligand_file).
centroid: np.ndarray, optional (default None)
The centroid to dock against. Is computed if not specified.
box_dims: np.ndarray, optional (default None)
A numpy array of shape `(3,)` holding the size of the box to dock.
If not specified, it is set to the size of the molecular complex plus 4 angstroms.
exhaustiveness: int (default 10)
Tells GNINA how exhaustive it should be with pose
generation.
num_modes: int (default 9)
Tells GNINA how many binding modes it should generate at
each invocation.
out_dir: str, optional
If specified, write generated poses to this directory.
generate_scores: bool, optional (default True)
If `True`, the pose generator will return scores for complexes.
This is used typically when invoking external docking programs
that compute scores.
kwargs:
Any args supported by GNINA as documented
https://github.com/gnina/gnina#usage
Returns
-------
Tuple[`docked_poses`, `scores`] or `docked_poses`
Tuple of `(docked_poses, scores)` or `docked_poses`. `docked_poses`
is a list of docked molecular complexes. Each entry in this list
contains a `(protein_mol, ligand_mol)` pair of RDKit molecules.
`scores` is an array of binding affinities (kcal/mol),
CNN pose scores, and CNN affinities predicted by GNINA.
"""
if out_dir is None:
out_dir = tempfile.mkdtemp()
if not os.path.exists(out_dir):
os.makedirs(out_dir)
# Parse complex
if len(molecular_complex) > 2:
raise ValueError(
"GNINA can only dock protein-ligand complexes and not more general molecular complexes."
)
(protein_file, ligand_file) = molecular_complex
# check filetypes
if not protein_file.endswith('.pdb'):
raise ValueError('Protein file must be in .pdb format.')
if not ligand_file.endswith('.sdf'):
raise ValueError('Ligand file must be in .sdf format.')
protein_mol = load_molecule(protein_file,
calc_charges=True,
add_hydrogens=True)
ligand_name = os.path.basename(ligand_file).split(".")[0]
# Define locations of log and output files
log_file = os.path.join(out_dir, "%s_log.txt" % ligand_name)
out_file = os.path.join(out_dir, "%s_docked.pdbqt" % ligand_name)
logger.info("About to call GNINA.")
# Write GNINA conf file
conf_file = os.path.join(out_dir, "conf.txt")
write_gnina_conf(protein_filename=protein_file,
ligand_filename=ligand_file,
conf_filename=conf_file,
num_modes=num_modes,
exhaustiveness=exhaustiveness,
**kwargs)
# Run GNINA
args = [
self.gnina_cmd, "--config", conf_file, "--log", log_file, "--out",
out_file
]
process = Popen(args, stdout=PIPE, stderr=PIPE)
stdout, stderr = process.communicate()
# read output and log
ligands, _ = load_docked_ligands(out_file)
docked_complexes = [(protein_mol[1], ligand) for ligand in ligands]
scores = read_gnina_log(log_file)
if generate_scores:
return docked_complexes, scores
else:
return docked_complexes
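# Usage sketch for GninaPoseGenerator (illustrative comment, not part of the
# original module): '1jld_protein.pdb' and '1jld_ligand.sdf' are placeholder
# file names, and Linux plus the downloaded GNINA binary are required.
#
#   gpg = GninaPoseGenerator()
#   poses, scores = gpg.generate_poses(('1jld_protein.pdb', '1jld_ligand.sdf'),
#                                      exhaustiveness=1, num_modes=1)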
class VinaPoseGenerator(PoseGenerator):
"""Uses Autodock Vina to generate binding poses.
This class uses Autodock Vina to make predictions of
binding poses.
Example
-------
>> import deepchem as dc
>> vpg = dc.dock.VinaPoseGenerator(pocket_finder=None)
>> protein_file = '1jld_protein.pdb'
>> ligand_file = '1jld_ligand.sdf'
>> poses, scores = vpg.generate_poses(
.. (protein_file, ligand_file),
.. exhaustiveness=1,
.. num_modes=1,
.. out_dir=tmp,
.. generate_scores=True)
Note
----
This class requires RDKit and vina to be installed. As of 9-March-22,
Vina is not available on Windows. Hence, this utility is currently
available only on Ubuntu and MacOS.
"""
def __init__(self, pocket_finder: Optional[BindingPocketFinder] = None):
"""Initializes Vina Pose Generator
Parameters
----------
pocket_finder: BindingPocketFinder, optional (default None)
If specified should be an instance of
`dc.dock.BindingPocketFinder`.
"""
self.pocket_finder = pocket_finder
def generate_poses(
self,
molecular_complex: Tuple[str, str],
centroid: Optional[np.ndarray] = None,
box_dims: Optional[np.ndarray] = None,
exhaustiveness: int = 10,
num_modes: int = 9,
num_pockets: Optional[int] = None,
out_dir: Optional[str] = None,
generate_scores: Optional[bool] = False,
**kwargs) -> Union[Tuple[DOCKED_POSES, List[float]], DOCKED_POSES]:
"""Generates the docked complex and outputs files for docked complex.
Parameters
----------
molecular_complex: Tuple[str, str]
A representation of a molecular complex. This tuple is
(protein_file, ligand_file). The protein should be a pdb file
and the ligand should be an sdf file.
centroid: np.ndarray, optional
The centroid to dock against. Is computed if not specified.
box_dims: np.ndarray, optional
A numpy array of shape `(3,)` holding the size of the box to dock. If not
specified, it is set to the size of the molecular complex plus 5 angstroms.
exhaustiveness: int, optional (default 10)
Tells Autodock Vina how exhaustive it should be with pose generation. A
higher value of exhaustiveness implies more computation effort for the
docking experiment.
num_modes: int, optional (default 9)
Tells Autodock Vina how many binding modes it should generate at
each invocation.
num_pockets: int, optional (default None)
If specified, `self.pocket_finder` must be set. Will only
generate poses for the first `num_pockets` returned by
`self.pocket_finder`.
out_dir: str, optional
If specified, write generated poses to this directory.
generate_scores: bool, optional (default False)
If `True`, the pose generator will return scores for complexes.
This is used typically when invoking external docking programs
that compute scores.
kwargs:
The kwargs - cpu, min_rmsd, max_evals, energy_range supported by VINA
are as documented in https://autodock-vina.readthedocs.io/en/latest/vina.html
Returns
-------
Tuple[`docked_poses`, `scores`] or `docked_poses`
Tuple of `(docked_poses, scores)` or `docked_poses`. `docked_poses`
is a list of docked molecular complexes. Each entry in this list
contains a `(protein_mol, ligand_mol)` pair of RDKit molecules.
`scores` is a list of binding free energies predicted by Vina.
Raises
------
`ValueError` if `num_pockets` is set but `self.pocket_finder is None`.
"""
if "cpu" in kwargs:
cpu = kwargs["cpu"]
else:
cpu = 0
if "min_rmsd" in kwargs:
min_rmsd = kwargs["min_rmsd"]
else:
min_rmsd = 1.0
if "max_evals" in kwargs:
max_evals = kwargs["max_evals"]
else:
max_evals = 0
if "energy_range" in kwargs:
energy_range = kwargs["energy_range"]
else:
energy_range = 3.0
try:
from vina import Vina
except ModuleNotFoundError:
raise ImportError("This function requires vina to be installed")
if out_dir is None:
out_dir = tempfile.mkdtemp()
if num_pockets is not None and self.pocket_finder is None:
raise ValueError(
"If num_pockets is specified, pocket_finder must have been provided at construction time."
)
# Parse complex
if len(molecular_complex) > 2:
raise ValueError(
"Autodock Vina can only dock protein-ligand complexes and not more general molecular complexes."
)
(protein_file, ligand_file) = molecular_complex
# Prepare protein
protein_name = os.path.basename(protein_file).split(".")[0]
protein_hyd = os.path.join(out_dir, "%s_hyd.pdb" % protein_name)
protein_pdbqt = os.path.join(out_dir, "%s.pdbqt" % protein_name)
protein_mol = load_molecule(protein_file,
calc_charges=True,
add_hydrogens=True)
write_molecule(protein_mol[1], protein_hyd, is_protein=True)
write_molecule(protein_mol[1], protein_pdbqt, is_protein=True)
# Get protein centroid and range
if centroid is not None and box_dims is not None:
centroids = [centroid]
dimensions = [box_dims]
else:
if self.pocket_finder is None:
logger.info(
"Pockets not specified. Will use whole protein to dock")
centroids = [compute_centroid(protein_mol[0])]
dimensions = [compute_protein_range(protein_mol[0]) + 5.0]
else:
logger.info("About to find putative binding pockets")
pockets = self.pocket_finder.find_pockets(protein_file)
logger.info("%d pockets found in total" % len(pockets))
logger.info("Computing centroid and size of proposed pockets.")
centroids, dimensions = [], []
for pocket in pockets:
(x_min, x_max), (y_min, y_max), (
z_min,
z_max) = pocket.x_range, pocket.y_range, pocket.z_range
# TODO(rbharath): Does vina divide box dimensions by 2?
x_box = (x_max - x_min) / 2.
y_box = (y_max - y_min) / 2.
z_box = (z_max - z_min) / 2.
centroids.append(pocket.center())
dimensions.append(np.array((x_box, y_box, z_box)))
if num_pockets is not None:
logger.info(
"num_pockets = %d so selecting this many pockets for docking." %
num_pockets)
centroids = centroids[:num_pockets]
dimensions = dimensions[:num_pockets]
# Prepare ligand
ligand_name = os.path.basename(ligand_file).split(".")[0]
ligand_pdbqt = os.path.join(out_dir, "%s.pdbqt" % ligand_name)
ligand_mol = load_molecule(ligand_file,
calc_charges=True,
add_hydrogens=True)
write_molecule(ligand_mol[1], ligand_pdbqt)
docked_complexes = []
all_scores = []
vpg = Vina(sf_name='vina',
cpu=cpu,
seed=0,
no_refine=False,
verbosity=1)
for i, (protein_centroid,
box_dims) in enumerate(zip(centroids, dimensions)):
logger.info("Docking in pocket %d/%d" % (i + 1, len(centroids)))
logger.info("Docking with center: %s" % str(protein_centroid))
logger.info("Box dimensions: %s" % str(box_dims))
# Write Vina conf file
conf_file = os.path.join(out_dir, "conf.txt")
write_vina_conf(protein_pdbqt,
ligand_pdbqt,
protein_centroid,
box_dims,
conf_file,
num_modes=num_modes,
exhaustiveness=exhaustiveness)
# Define locations of output files
out_pdbqt = os.path.join(out_dir, "%s_docked.pdbqt" % ligand_name)
logger.info("About to call Vina")
vpg.set_receptor(protein_pdbqt)
vpg.set_ligand_from_file(ligand_pdbqt)
vpg.compute_vina_maps(center=protein_centroid, box_size=box_dims)
vpg.dock(exhaustiveness=exhaustiveness,
n_poses=num_modes,
min_rmsd=min_rmsd,
max_evals=max_evals)
vpg.write_poses(out_pdbqt,
n_poses=num_modes,
energy_range=energy_range,
overwrite=True)
ligands, scores = load_docked_ligands(out_pdbqt)
docked_complexes += [(protein_mol[1], ligand) for ligand in ligands]
all_scores += scores
if generate_scores:
return docked_complexes, all_scores
else:
return docked_complexes
<file_sep>"""
Implementation of the Ferminet class in pytorch
"""
from typing import List, Optional
# import torch.nn as nn
from rdkit import Chem
import numpy as np
from deepchem.utils.molecule_feature_utils import ALLEN_ELECTRONEGATIVTY
from deepchem.utils.geometry_utils import compute_pairwise_distances
from deepchem.models.torch_models import TorchModel
from deepchem.models.losses import L2Loss
import deepchem.models.optimizers as optimizers
import torch
from deepchem.utils.electron_sampler import ElectronSampler
# TODO: look for the loss function (Hamiltonian)
def test_f(x: np.ndarray) -> np.ndarray:
# dummy function which can be passed as the parameter f. f gives the log probability
# TODO replace this function with forward pass of the model in future
return 2 * np.log(np.random.uniform(low=0, high=1.0, size=np.shape(x)[0]))
class Ferminet(torch.nn.Module):
"""Approximates the log probability of the wave function of a molecule system using DNNs.
"""
def __init__(self,
nucleon_pos: torch.Tensor,
nuclear_charge: torch.Tensor,
spin: tuple,
inter_atom: torch.Tensor,
n_one: List = [256, 256, 256, 256],
n_two: List = [32, 32, 32, 32],
determinant: int = 16) -> None:
"""
Parameters:
-----------
nucleon_pos: torch.Tensor
Torch tensor containing nucleus information of the molecule
nuclear_charge: torch.Tensor
Torch tensor containing the number of electron for each atom in the molecule
spin: tuple
Tuple in the format of (up_spin, down_spin)
inter_atom: torch.Tensor
Torch tensor containing the pairwise distances between the atoms in the molecule
n_one: List
List of hidden units for the one-electron stream in each layer
n_two: List
List of hidden units for the two-electron stream in each layer
determinant: int
Number of determinants for the final solution
"""
super(Ferminet, self).__init__()
if len(n_one) != len(n_two):
raise ValueError(
"The number of layers in one-electron and two-electron stream should be equal"
)
else:
self.layers = len(n_one)
self.nucleon_pos = nucleon_pos
self.determinant = determinant
self.spin = spin
self.inter_atom = inter_atom
self.n_one = n_one
self.n_two = n_two
self.determinant = determinant
class FerminetModel(TorchModel):
"""A deep-learning based Variational Monte Carlo method [1]_ for calculating the ab-initio
solution of a many-electron system.
    This model aims to calculate the ground state energy of a multi-electron system
    using the Hartree-Fock solution as a baseline. An MCMC technique is used to sample
    electrons and DNNs are used to calculate the square magnitude of the wavefunction,
    with electron-electron repulsions included in the calculation (in the
    form of Jastrow factor envelopes). The model requires only the nuclear coordinates
    as input.
This method is based on the following paper:
References
----------
.. [1] <NAME>., et al. Better, Faster Fermionic Neural Networks. arXiv:2011.07125, arXiv, 13 Nov. 2020. arXiv.org, http://arxiv.org/abs/2011.07125.
Note
----
This class requires pySCF to be installed.
"""
def __init__(
self,
nucleon_coordinates: List[List],
spin: int,
ion_charge: int,
seed: Optional[int] = None,
batch_no: int = 10,
pretrain=True,
):
"""
Parameters:
-----------
nucleon_coordinates: List[List]
            A nested list where each entry contains an element's symbol followed by its nucleon coordinates.
spin: int
The total spin of the molecule system.
ion_charge: int
The total charge of the molecule system.
        seed: int, optional (default None)
Random seed to use for electron initialization.
batch_no: int, optional (default 10)
Number of batches of the electron's positions to be initialized.
Attributes:
-----------
nucleon_pos: np.ndarray
numpy array value of nucleon_coordinates
electron_no: np.ndarray
            numpy array containing the number of electrons for each atom in the molecule
molecule: ElectronSampler
ElectronSampler object which performs MCMC and samples electrons
"""
self.nucleon_coordinates = nucleon_coordinates
self.seed = seed
self.batch_no = batch_no
self.spin = spin
self.ion_charge = ion_charge
self.batch_no = batch_no
no_electrons = []
nucleons = []
electronegativity = []
table = Chem.GetPeriodicTable()
index = 0
for i in self.nucleon_coordinates:
atomic_num = table.GetAtomicNumber(i[0])
electronegativity.append([index, ALLEN_ELECTRONEGATIVTY[i[0]]])
no_electrons.append([atomic_num])
nucleons.append(i[1])
index += 1
self.electron_no: np.ndarray = np.array(no_electrons)
charge: np.ndarray = self.electron_no.reshape(
np.shape(self.electron_no)[0])
self.nucleon_pos: np.ndarray = np.array(nucleons)
electro_neg = np.array(electronegativity)
        # Sanity check: the requested ionic charge cannot exceed the total electron count
if np.sum(self.electron_no) < self.ion_charge:
raise ValueError("Given charge is not initializable")
# Initialization for ionic molecules
if self.ion_charge != 0:
if len(nucleons
) == 1: # for an atom, directly the charge is applied
self.electron_no[0][0] -= self.ion_charge
            else:  # for a multiatomic molecule, electrons are removed from the least electronegative atoms first (cationic charge) or added to the most electronegative atoms first (anionic charge), following the sorted electronegativity order.
electro_neg = electro_neg[electro_neg[:, 1].argsort()]
if self.ion_charge > 0:
for iter in range(self.ion_charge):
self.electron_no[int(electro_neg[iter][0])][0] -= 1
else:
for iter in range(-self.ion_charge):
self.electron_no[int(electro_neg[-1 - iter][0])][0] += 1
total_electrons = np.sum(self.electron_no)
if self.spin >= 0:
self.up_spin = (total_electrons + 2 * self.spin) // 2
self.down_spin = total_electrons - self.up_spin
else:
self.down_spin = (total_electrons - 2 * self.spin) // 2
self.up_spin = total_electrons - self.down_spin
if self.up_spin - self.down_spin != self.spin:
raise ValueError("Given spin is not feasible")
nucl = torch.from_numpy(self.nucleon_pos)
model = Ferminet(nucl,
spin=(self.up_spin, self.down_spin),
nuclear_charge=torch.tensor(charge),
inter_atom=torch.tensor(
compute_pairwise_distances(self.nucleon_pos,
self.nucleon_pos)))
self.molecule: ElectronSampler = ElectronSampler(
batch_no=self.batch_no,
central_value=self.nucleon_pos,
seed=self.seed,
f=lambda x: test_f(x), # Will be replaced in successive PR
steps=1000,
steps_per_update=20
) # sample the electrons using the electron sampler
self.molecule.gauss_initialize_position(
self.electron_no) # initialize the position of the electrons
adam = optimizers.AdamW()
super(FerminetModel, self).__init__(
model, optimizer=adam,
loss=L2Loss()) # will update the loss in successive PR
def prepare_hf_solution(self, x: np.ndarray) -> np.ndarray:
"""Prepares the HF solution for the molecule system which is to be used in pretraining
Parameters
----------
x: np.ndarray
Numpy array of shape (number of electrons,3), which indicates the sampled electron's positions
Returns
-------
hf_value: np.ndarray
Numpy array of shape (number of electrons, number of electrons ) where ith row & jth value corresponds to the ith hartree fock orbital at the jth electron's coordinate
"""
try:
import pyscf
except ModuleNotFoundError:
raise ImportError("This module requires pySCF")
molecule = ""
for i in range(len(self.nucleon_pos)):
molecule = molecule + self.nucleon_coordinates[i][0] + " " + str(
self.nucleon_coordinates[i][1][0]) + " " + str(
self.nucleon_coordinates[i][1][1]) + " " + str(
self.nucleon_coordinates[i][1][2]) + ";"
mol = pyscf.gto.Mole(atom=molecule, basis='sto-3g')
mol.parse_arg = False
mol.unit = 'Bohr'
mol.spin = (self.up_spin - self.down_spin)
mol.charge = self.ion_charge
mol.build(parse_arg=False)
mf = pyscf.scf.RHF(mol)
mf.kernel()
coefficients_all = mf.mo_coeff[:, :mol.nelectron]
        # Evaluate all occupied molecular orbitals at the sampled electron
        # positions passed in as `x` (shape: (number of electrons, 3)).
        orbital_values = np.dot(mol.eval_gto("GTOval", x), coefficients_all)
return orbital_values
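# Usage sketch (hedged): a minimal FerminetModel construction for a LiH-like
# system using the constructor defined above. The coordinates are illustrative
# placeholders, and the loss and sampling function are still dummies in this
# version of the class, so no training is attempted here.
#
#   mol = FerminetModel(
#       nucleon_coordinates=[["Li", [0.0, 0.0, 0.0]], ["H", [0.0, 0.0, 1.6]]],
#       spin=0,
#       ion_charge=0,
#       seed=0,
#       batch_no=8)
#   # prepare_hf_solution requires pySCF; `sampled_positions` is a placeholder
#   # for an array of electron coordinates of shape (number of electrons, 3).
#   # hf_orbitals = mol.prepare_hf_solution(sampled_positions)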
<file_sep>"""
ChEMBL dataset loader, for training ChemNet
"""
import os
import deepchem as dc
from deepchem.molnet.load_function.molnet_loader import TransformerGenerator, _MolnetLoader
from deepchem.data import Dataset
from typing import List, Optional, Tuple, Union
CHEMBL25_URL = "https://deepchemdata.s3-us-west-1.amazonaws.com/datasets/chembl_25.csv.gz"
CHEMBL25_TASKS = [
"MolWt", "HeavyAtomMolWt", "MolLogP", "MolMR", "TPSA", "LabuteASA",
"HeavyAtomCount", "NHOHCount", "NOCount", "NumHAcceptors", "NumHDonors",
"NumHeteroatoms", "NumRotatableBonds", "NumRadicalElectrons",
"NumValenceElectrons", "NumAromaticRings", "NumSaturatedRings",
"NumAliphaticRings", "NumAromaticCarbocycles", "NumSaturatedCarbocycles",
"NumAliphaticCarbocycles", "NumAromaticHeterocycles",
"NumSaturatedHeterocycles", "NumAliphaticHeterocycles", "PEOE_VSA1",
"PEOE_VSA2", "PEOE_VSA3", "PEOE_VSA4", "PEOE_VSA5", "PEOE_VSA6",
"PEOE_VSA7", "PEOE_VSA8", "PEOE_VSA9", "PEOE_VSA10", "PEOE_VSA11",
"PEOE_VSA12", "PEOE_VSA13", "PEOE_VSA14", "SMR_VSA1", "SMR_VSA2",
"SMR_VSA3", "SMR_VSA4", "SMR_VSA5", "SMR_VSA6", "SMR_VSA7", "SMR_VSA8",
"SMR_VSA9", "SMR_VSA10", "SlogP_VSA1", "SlogP_VSA2", "SlogP_VSA3",
"SlogP_VSA4", "SlogP_VSA5", "SlogP_VSA6", "SlogP_VSA7", "SlogP_VSA8",
"SlogP_VSA9", "SlogP_VSA10", "SlogP_VSA11", "SlogP_VSA12", "EState_VSA1",
"EState_VSA2", "EState_VSA3", "EState_VSA4", "EState_VSA5", "EState_VSA6",
"EState_VSA7", "EState_VSA8", "EState_VSA9", "EState_VSA10", "EState_VSA11",
"VSA_EState1", "VSA_EState2", "VSA_EState3", "VSA_EState4", "VSA_EState5",
"VSA_EState6", "VSA_EState7", "VSA_EState8", "VSA_EState9", "VSA_EState10",
"BalabanJ", "BertzCT", "Ipc", "Kappa1", "Kappa2", "Kappa3", "HallKierAlpha",
"Chi0", "Chi1", "Chi0n", "Chi1n", "Chi2n", "Chi3n", "Chi4n", "Chi0v",
"Chi1v", "Chi2v", "Chi3v", "Chi4v"
]
class _Chembl25Loader(_MolnetLoader):
def create_dataset(self) -> Dataset:
dataset_file = os.path.join(self.data_dir, "chembl_25.csv.gz")
if not os.path.exists(dataset_file):
dc.utils.data_utils.download_url(url=CHEMBL25_URL,
dest_dir=self.data_dir)
loader = dc.data.CSVLoader(tasks=self.tasks,
feature_field="smiles",
featurizer=self.featurizer)
return loader.create_dataset(dataset_file, shard_size=8192)
def load_chembl25(
featurizer: Union[dc.feat.Featurizer, str] = 'ECFP',
splitter: Union[dc.splits.Splitter, str, None] = 'scaffold',
transformers: List[Union[TransformerGenerator, str]] = ['normalization'],
reload: bool = True,
data_dir: Optional[str] = None,
save_dir: Optional[str] = None,
**kwargs
) -> Tuple[List[str], Tuple[Dataset, ...], List[dc.trans.Transformer]]:
"""Loads the ChEMBL25 dataset, featurizes it, and does a split.
Parameters
----------
featurizer: Featurizer or str
the featurizer to use for processing the data. Alternatively you can pass
one of the names from dc.molnet.featurizers as a shortcut.
splitter: Splitter or str
the splitter to use for splitting the data into training, validation, and
test sets. Alternatively you can pass one of the names from
dc.molnet.splitters as a shortcut. If this is None, all the data
will be included in a single dataset.
transformers: list of TransformerGenerators or strings
the Transformers to apply to the data. Each one is specified by a
TransformerGenerator or, as a shortcut, one of the names from
dc.molnet.transformers.
reload: bool
if True, the first call for a particular featurizer and splitter will cache
the datasets to disk, and subsequent calls will reload the cached datasets.
data_dir: str
a directory to save the raw data in
save_dir: str
a directory to save the dataset in
"""
loader = _Chembl25Loader(featurizer, splitter, transformers, CHEMBL25_TASKS,
data_dir, save_dir, **kwargs)
return loader.load_dataset('chembl25', reload)
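# Usage sketch (hedged): loading the dataset with the defaults defined above.
# The first call downloads chembl_25.csv.gz, so it is shown as a comment
# rather than executable code.
#
#   tasks, (train, valid, test), transformers = load_chembl25(
#       featurizer='ECFP', splitter='scaffold')
#   assert tasks == CHEMBL25_TASKS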
<file_sep>"""
Test featurizers for inorganic crystals.
"""
import unittest
import numpy as np
from deepchem.feat import ElementPropertyFingerprint, SineCoulombMatrix, CGCNNFeaturizer, ElemNetFeaturizer
class TestMaterialFeaturizers(unittest.TestCase):
"""
Test material featurizers.
"""
def setUp(self):
"""
Set up tests.
"""
self.formula = 'MoS2'
self.struct_dict = {
'@module':
'pymatgen.core.structure',
'@class':
'Structure',
'charge':
None,
'lattice': {
'matrix': [[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]],
'a': 1.0,
'b': 1.0,
'c': 1.0,
'alpha': 90.0,
'beta': 90.0,
'gamma': 90.0,
'volume': 1.0
},
'sites': [{
'species': [{
'element': 'Fe',
'occu': 1
}],
'abc': [0.0, 0.0, 0.0],
'xyz': [0.0, 0.0, 0.0],
'label': 'Fe',
'properties': {}
}]
}
def test_element_property_fingerprint(self):
"""
Test Element Property featurizer.
"""
featurizer = ElementPropertyFingerprint(data_source='matminer')
features = featurizer.featurize([self.formula])
assert len(features[0]) == 65
assert np.allclose(features[0][:5],
[2.16, 2.58, 0.42, 2.44, 0.29698485],
atol=0.1)
def test_sine_coulomb_matrix(self):
"""
Test SCM featurizer.
"""
featurizer = SineCoulombMatrix(max_atoms=3)
features = featurizer.featurize([self.struct_dict])
assert len(features) == 1
assert features.shape == (1, 3)
assert np.isclose(features[0][0], 1244, atol=.5)
def test_cgcnn_featurizer(self):
"""
Test CGCNNFeaturizer.
"""
featurizer = CGCNNFeaturizer(radius=3.0, max_neighbors=6, step=0.3)
graph_features = featurizer.featurize([self.struct_dict])
assert graph_features[0].num_nodes == 1
assert graph_features[0].num_edges == 6
assert graph_features[0].node_features.shape == (1, 92)
assert graph_features[0].edge_index.shape == (2, 6)
assert graph_features[0].edge_features.shape == (6, 11)
def test_elemnet_featurizer(self):
"""
Test ElemNetFeaturizer.
"""
featurizer = ElemNetFeaturizer()
features = featurizer.featurize([self.formula])
assert features.shape[1] == 86
assert np.isclose(features[0][13], 0.6666667, atol=0.01)
assert np.isclose(features[0][38], 0.33333334, atol=0.01)
assert np.isclose(features.sum(), 1.0, atol=0.01)
<file_sep>"""
Train low-data Sider models with random forests. Test last fold only.
"""
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
import tempfile
import numpy as np
import deepchem as dc
from datasets import load_sider_ecfp
from sklearn.ensemble import RandomForestClassifier
# 4-fold splits
K = 4
# num positive/negative ligands
n_pos = 10
n_neg = 10
n_trials = 20
sider_tasks, dataset, transformers = load_sider_ecfp()
# Define metric
metric = dc.metrics.Metric(dc.metrics.roc_auc_score, mode="classification")
task_splitter = dc.splits.TaskSplitter()
fold_datasets = task_splitter.k_fold_split(dataset, K)
train_folds = fold_datasets[:-1]
train_dataset = dc.splits.merge_fold_datasets(train_folds)
test_dataset = fold_datasets[-1]
# Get supports on test-set
support_generator = dc.data.SupportGenerator(
test_dataset, n_pos, n_neg, n_trials)
# Compute accuracies
task_scores = {task: [] for task in range(len(test_dataset.get_task_names()))}
for (task, support) in support_generator:
# Train model on support
sklearn_model = RandomForestClassifier(
class_weight="balanced", n_estimators=100)
model = dc.models.SklearnModel(sklearn_model)
model.fit(support)
# Test model
task_dataset = dc.data.get_task_dataset_minus_support(
test_dataset, support, task)
y_pred = model.predict_proba(task_dataset)
score = metric.compute_metric(
task_dataset.y, y_pred, task_dataset.w)
print("Score on task %s is %s" % (str(task), str(score)))
task_scores[task].append(score)
# Join information for all tasks.
mean_task_scores = {}
std_task_scores = {}
for task in range(len(test_dataset.get_task_names())):
mean_task_scores[task] = np.mean(np.array(task_scores[task]))
std_task_scores[task] = np.std(np.array(task_scores[task]))
print("Mean scores")
print(mean_task_scores)
print("Standard Deviations")
print(std_task_scores)
print("Median of Mean Scores")
print(np.median(np.array(list(mean_task_scores.values()))))
<file_sep># flake8: noqa
try:
from deepchem.metalearning.maml import MAML, MetaLearner
except ModuleNotFoundError:
pass
<file_sep>"""
KAGGLE dataset loader.
"""
import os
import logging
import time
import deepchem
from deepchem.molnet.load_function.kaggle_features import merck_descriptors
from deepchem.utils import remove_missing_entries
logger = logging.getLogger(__name__)
def get_transformers(train_dataset):
"""Get transformers applied to datasets."""
transformers = []
# transformers = [
# deepchem.trans.LogTransformer(transform_X=True),
# deepchem.trans.NormalizationTransformer(transform_y=True,
# dataset=train_dataset)]
return transformers
# Set shard size low to avoid memory problems.
def gen_kaggle(KAGGLE_tasks,
train_dir,
valid_dir,
test_dir,
data_dir,
shard_size=2000):
"""Load KAGGLE datasets. Does not do train/test split"""
# TIMING
time1 = time.time()
# TIMING
# Set some global variables up top
train_files = os.path.join(
data_dir, "KAGGLE_training_disguised_combined_full.csv.gz")
valid_files = os.path.join(data_dir,
"KAGGLE_test1_disguised_combined_full.csv.gz")
test_files = os.path.join(data_dir,
"KAGGLE_test2_disguised_combined_full.csv.gz")
if not os.path.exists(train_files):
deepchem.utils.data_utils.download_url(
"https://deepchemdata.s3-us-west-1.amazonaws.com/datasets/KAGGLE_training_disguised_combined_full.csv.gz",
dest_dir=data_dir)
deepchem.utils.data_utils.download_url(
"https://deepchemdata.s3-us-west-1.amazonaws.com/datasets/KAGGLE_test1_disguised_combined_full.csv.gz",
dest_dir=data_dir)
deepchem.utils.data_utils.download_url(
"https://deepchemdata.s3-us-west-1.amazonaws.com/datasets/KAGGLE_test2_disguised_combined_full.csv.gz",
dest_dir=data_dir)
# Featurize KAGGLE dataset
logger.info("About to featurize KAGGLE dataset.")
featurizer = deepchem.feat.UserDefinedFeaturizer(merck_descriptors)
loader = deepchem.data.UserCSVLoader(tasks=KAGGLE_tasks,
id_field="Molecule",
featurizer=featurizer)
logger.info("Featurizing train datasets")
train_dataset = loader.featurize(train_files, shard_size=shard_size)
logger.info("Featurizing valid datasets")
valid_dataset = loader.featurize(valid_files, shard_size=shard_size)
logger.info("Featurizing test datasets")
test_dataset = loader.featurize(test_files, shard_size=shard_size)
logger.info("Remove missing entries from datasets.")
remove_missing_entries(train_dataset)
remove_missing_entries(valid_dataset)
remove_missing_entries(test_dataset)
logger.info("Shuffling order of train dataset.")
train_dataset.sparse_shuffle()
logger.info("Transforming datasets with transformers.")
transformers = get_transformers(train_dataset)
for transformer in transformers:
logger.info("Performing transformations with %s" %
transformer.__class__.__name__)
logger.info("Transforming datasets")
train_dataset = transformer.transform(train_dataset)
valid_dataset = transformer.transform(valid_dataset)
test_dataset = transformer.transform(test_dataset)
logger.info("Moving directories")
train_dataset.move(train_dir)
valid_dataset.move(valid_dir)
test_dataset.move(test_dir)
# TIMING
time2 = time.time()
logger.info("TIMING: KAGGLE fitting took %0.3f s" % (time2 - time1))
# TIMING
return train_dataset, valid_dataset, test_dataset
def load_kaggle(shard_size=2000, featurizer=None, split=None, reload=True):
"""Loads kaggle datasets. Generates if not stored already.
The Kaggle dataset is an in-house dataset from Merck that was first introduced in the following paper:
<NAME>, et al. "Deep neural nets as a method for quantitative structure–activity relationships." Journal of chemical information and modeling 55.2 (2015): 263-274.
It contains 100,000 unique Merck in-house compounds that were
measured on 15 enzyme inhibition and ADME/TOX datasets.
Unlike most of the other datasets featured in MoleculeNet,
the Kaggle collection does not have structures for the
compounds tested since they were proprietary Merck compounds.
However, the collection does feature pre-computed descriptors
for these compounds.
Note that the original train/valid/test split from the source
data was preserved here, so this function doesn't allow for
alternate modes of splitting. Similarly, since the source data
came pre-featurized, it is not possible to apply alternative
featurizations.
Parameters
----------
shard_size: int, optional
Size of the DiskDataset shards to write on disk
featurizer: optional
Ignored since featurization pre-computed
split: optional
Ignored since split pre-computed
reload: bool, optional
Whether to automatically re-load from disk
"""
KAGGLE_tasks = [
'3A4', 'CB1', 'DPP4', 'HIVINT', 'HIV_PROT', 'LOGD', 'METAB', 'NK1',
'OX1', 'OX2', 'PGP', 'PPB', 'RAT_F', 'TDI', 'THROMBIN'
]
data_dir = deepchem.utils.data_utils.get_data_dir()
data_dir = os.path.join(data_dir, "kaggle")
if not os.path.exists(data_dir):
os.mkdir(data_dir)
train_dir = os.path.join(data_dir, "train_dir")
valid_dir = os.path.join(data_dir, "valid_dir")
test_dir = os.path.join(data_dir, "test_dir")
if (os.path.exists(train_dir) and os.path.exists(valid_dir) and
os.path.exists(test_dir)):
logger.info("Reloading existing datasets")
train_dataset = deepchem.data.DiskDataset(train_dir)
valid_dataset = deepchem.data.DiskDataset(valid_dir)
test_dataset = deepchem.data.DiskDataset(test_dir)
else:
logger.info("Featurizing datasets")
train_dataset, valid_dataset, test_dataset = \
gen_kaggle(KAGGLE_tasks, train_dir, valid_dir, test_dir, data_dir,
shard_size=shard_size)
transformers = get_transformers(train_dataset)
return KAGGLE_tasks, (train_dataset, valid_dataset,
test_dataset), transformers
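# Usage sketch (hedged): the loader preserves the original Merck
# train/valid/test split, so only ``shard_size`` and ``reload`` matter in
# practice. Shown as a comment because the first call downloads the datasets.
#
#   tasks, (train, valid, test), transformers = load_kaggle(shard_size=2000)
#   print(len(tasks))  # the 15 enzyme-inhibition and ADME/Tox tasks listed above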
<file_sep>Layers
======
Deep learning models are often said to be made up of "layers".
Intuitively, a "layer" is a function which transforms some
tensor into another tensor. DeepChem maintains an extensive
collection of layers which perform various useful scientific
transformations. For now, most layers are Keras only but over
time we expect this support to expand to other types of models
and layers.
.. include:: layers_cheatsheet.rst
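
As a quick illustration of the "tensor in, tensor out" idea, the sketch
below uses a plain PyTorch layer rather than any specific DeepChem API;
the layers documented on this page follow the same construct-then-call
pattern.

.. code-block:: python

    import torch

    # A generic layer: maps a batch of 16-dimensional vectors to 8 dimensions.
    layer = torch.nn.Linear(16, 8)
    x = torch.randn(4, 16)   # input tensor (batch of 4)
    y = layer(x)             # output tensor of shape (4, 8)
    print(y.shape)
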
Keras Layers
------------
.. autoclass:: deepchem.models.layers.InteratomicL2Distances
:members:
.. autoclass:: deepchem.models.layers.GraphConv
:members:
.. autoclass:: deepchem.models.layers.GraphPool
:members:
.. autoclass:: deepchem.models.layers.GraphGather
:members:
.. autoclass:: deepchem.models.layers.MolGANConvolutionLayer
:members:
.. autoclass:: deepchem.models.layers.MolGANAggregationLayer
:members:
.. autoclass:: deepchem.models.layers.MolGANMultiConvolutionLayer
:members:
.. autoclass:: deepchem.models.layers.MolGANEncoderLayer
:members:
.. autoclass:: deepchem.models.layers.LSTMStep
:members:
.. autoclass:: deepchem.models.layers.AttnLSTMEmbedding
:members:
.. autoclass:: deepchem.models.layers.IterRefLSTMEmbedding
:members:
.. autoclass:: deepchem.models.layers.SwitchedDropout
:members:
.. autoclass:: deepchem.models.layers.WeightedLinearCombo
:members:
.. autoclass:: deepchem.models.layers.CombineMeanStd
:members:
.. autoclass:: deepchem.models.layers.Stack
:members:
.. autoclass:: deepchem.models.layers.VinaFreeEnergy
:members:
.. autoclass:: deepchem.models.layers.NeighborList
:members:
.. autoclass:: deepchem.models.layers.AtomicConvolution
:members:
.. autoclass:: deepchem.models.layers.AlphaShareLayer
:members:
.. autoclass:: deepchem.models.layers.SluiceLoss
:members:
.. autoclass:: deepchem.models.layers.BetaShare
:members:
.. autoclass:: deepchem.models.layers.ANIFeat
:members:
.. autoclass:: deepchem.models.layers.GraphEmbedPoolLayer
:members:
.. autoclass:: deepchem.models.layers.GraphCNN
:members:
.. autoclass:: deepchem.models.layers.Highway
:members:
.. autoclass:: deepchem.models.layers.WeaveLayer
:members:
.. autoclass:: deepchem.models.layers.WeaveGather
:members:
.. autoclass:: deepchem.models.layers.DTNNEmbedding
:members:
.. autoclass:: deepchem.models.layers.DTNNStep
:members:
.. autoclass:: deepchem.models.layers.DTNNGather
:members:
.. autoclass:: deepchem.models.layers.DAGLayer
:members:
.. autoclass:: deepchem.models.layers.DAGGather
:members:
.. autoclass:: deepchem.models.layers.MessagePassing
:members:
.. autoclass:: deepchem.models.layers.EdgeNetwork
:members:
.. autoclass:: deepchem.models.layers.GatedRecurrentUnit
:members:
.. autoclass:: deepchem.models.layers.SetGather
:members:
Torch Layers
------------
.. autoclass:: deepchem.models.torch_models.layers.MultilayerPerceptron
:members:
.. autoclass:: deepchem.models.torch_models.layers.CNNModule
:members:
.. autoclass:: deepchem.models.torch_models.layers.ScaleNorm
:members:
.. autoclass:: deepchem.models.torch_models.layers.MATEncoderLayer
:members:
.. autoclass:: deepchem.models.torch_models.layers.MultiHeadedMATAttention
:members:
.. autoclass:: deepchem.models.torch_models.layers.SublayerConnection
:members:
.. autoclass:: deepchem.models.torch_models.layers.PositionwiseFeedForward
:members:
.. autoclass:: deepchem.models.torch_models.layers.MATEmbedding
:members:
.. autoclass:: deepchem.models.torch_models.layers.MATGenerator
:members:
.. autofunction:: deepchem.models.layers.cosine_dist
.. autoclass:: deepchem.models.torch_models.layers.GraphNetwork
:members:
.. autoclass:: deepchem.models.torch_models.layers.Affine
:members:
.. autoclass:: deepchem.models.torch_models.layers.RealNVPLayer
:members:
.. autoclass:: deepchem.models.torch_models.layers.DMPNNEncoderLayer
:members:
.. autoclass:: deepchem.models.torch_models.InfoGraphEncoder
:members:
.. autoclass:: deepchem.models.torch_models.GINEncoder
:members:
.. autoclass:: deepchem.models.torch_models.layers.SetGather
:members:
.. autoclass:: deepchem.models.torch_models.gnn.GNN
:members:
.. autoclass:: deepchem.models.torch_models.gnn.GNNHead
:members:
.. autoclass:: deepchem.models.torch_models.gnn.LocalGlobalDiscriminator
:members:
.. autoclass:: deepchem.models.torch_models.pna_gnn.AtomEncoder
:members:
.. autoclass:: deepchem.models.torch_models.pna_gnn.BondEncoder
:members:
.. autoclass:: deepchem.models.torch_models.pna_gnn.PNALayer
:members:
.. autoclass:: deepchem.models.torch_models.pna_gnn.PNAGNN
:members:
.. autoclass:: deepchem.models.torch_models.PNA
:members:
.. autoclass:: deepchem.models.torch_models.gnn3d.Net3DLayer
:members:
.. autoclass:: deepchem.models.torch_models.gnn3d.Net3D
:members:
.. autoclass:: deepchem.models.torch_models.layers.DTNNEmbedding
:members:
.. autoclass:: deepchem.models.torch_models.layers.DTNNStep
:members:
.. autoclass:: deepchem.models.torch_models.layers.DTNNGather
:members:
.. autoclass:: deepchem.models.torch_models.layers.MolGANConvolutionLayer
:members:
.. autoclass:: deepchem.models.torch_models.layers.MolGANAggregationLayer
:members:
.. autoclass:: deepchem.models.torch_models.layers.MolGANMultiConvolutionLayer
:members:
.. autoclass:: deepchem.models.torch_models.layers.MolGANEncoderLayer
:members:
.. autoclass:: deepchem.models.torch_models.layers.EdgeNetwork
:members:
.. autoclass:: deepchem.models.torch_models.layers.WeaveLayer
:members:
.. autoclass:: deepchem.models.torch_models.layers.WeaveGather
:members:
.. autoclass:: deepchem.models.torch_models.layers.MXMNetGlobalMessagePassing
:members:
.. autoclass:: deepchem.models.torch_models.layers.MXMNetBesselBasisLayer
:members:
.. autoclass:: deepchem.models.torch_models.dtnn.DTNN
:members:
.. autoclass:: deepchem.models.torch_models.layers.EncoderRNN
:members:
.. autoclass:: deepchem.models.torch_models.layers.FerminetElectronFeature
:members:
.. autoclass:: deepchem.models.torch_models.layers.FerminetEnvelope
:members:
.. autoclass:: deepchem.models.torch_models.layers.MXMNetLocalMessagePassing
:members:
Grover Layers
^^^^^^^^^^^^^
The following layers are used for implementing the GROVER model as described in the paper `Self-Supervised Graph Transformer on Large-Scale Molecular Data <https://drug.ai.tencent.com/publications/GROVER.pdf>`_.
.. autoclass:: deepchem.models.torch_models.grover_layers.GroverMPNEncoder
:members:
.. autoclass:: deepchem.models.torch_models.grover_layers.GroverAttentionHead
:members:
.. autoclass:: deepchem.models.torch_models.grover_layers.GroverMTBlock
:members:
.. autoclass:: deepchem.models.torch_models.grover_layers.GroverTransEncoder
:members:
.. autoclass:: deepchem.models.torch_models.grover_layers.GroverEmbedding
:members:
.. autoclass:: deepchem.models.torch_models.grover_layers.GroverAtomVocabPredictor
:members:
.. autoclass:: deepchem.models.torch_models.grover_layers.GroverBondVocabPredictor
:members:
.. autoclass:: deepchem.models.torch_models.grover_layers.GroverFunctionalGroupPredictor
:members:
.. autoclass:: deepchem.models.torch_models.grover.GroverPretrain
:members:
.. autoclass:: deepchem.models.torch_models.grover.GroverFinetune
:members:
Attention Layers
^^^^^^^^^^^^^^^^
.. autoclass:: deepchem.models.torch_models.attention.ScaledDotProductAttention
:members:
.. autoclass:: deepchem.models.torch_models.attention.SelfAttention
:members:
Readout Layers
^^^^^^^^^^^^^^
.. autoclass:: deepchem.models.torch_models.readout.GroverReadout
:members:
Jax Layers
----------
.. autoclass:: deepchem.models.jax_models.layers.Linear
:members:
Density Functional Theory Layers
--------------------------------
.. autoclass:: deepchem.models.dft.nnxc.BaseNNXC
:members:
.. autoclass:: deepchem.models.dft.nnxc.NNLDA
:members:
.. autoclass:: deepchem.models.dft.nnxc.NNPBE
:members:
.. autoclass:: deepchem.models.dft.nnxc.HybridXC
:members:
.. autoclass:: deepchem.models.dft.scf.XCNNSCF
:members:
.. autoclass:: deepchem.models.dft.dftxc.DFTXC
:members:
<file_sep>"""
UV Dataset loader
"""
import os
import logging
import time
import deepchem
from deepchem.molnet.load_function.kaggle_features import merck_descriptors
from deepchem.molnet.load_function.uv_tasks import UV_tasks
from deepchem.utils import remove_missing_entries
logger = logging.getLogger(__name__)
TRAIN_URL = "https://deepchemdata.s3-us-west-1.amazonaws.com/datasets/UV_training_disguised_combined_full.csv.gz"
VALID_URL = "https://deepchemdata.s3-us-west-1.amazonaws.com/datasets/UV_test1_disguised_combined_full.csv.gz"
TEST_URL = "https://deepchemdata.s3-us-west-1.amazonaws.com/datasets/UV_test2_disguised_combined_full.csv.gz"
TRAIN_FILENAME = "UV_training_disguised_combined_full.csv.gz"
VALID_FILENAME = "UV_test1_disguised_combined_full.csv.gz"
TEST_FILENAME = "UV_test2_disguised_combined_full.csv.gz"
def get_transformers(train_dataset):
"Gets transformations applied on the dataset"
transformers = list()
return transformers
def gen_uv(UV_tasks, data_dir, train_dir, valid_dir, test_dir, shard_size=2000):
"""Loading the UV dataset; does not do train/test split"""
time1 = time.time()
train_files = os.path.join(data_dir, TRAIN_FILENAME)
valid_files = os.path.join(data_dir, VALID_FILENAME)
test_files = os.path.join(data_dir, TEST_FILENAME)
# Download files if they don't exist
if not os.path.exists(train_files):
logger.info("Downloading training file...")
deepchem.utils.data_utils.download_url(url=TRAIN_URL, dest_dir=data_dir)
logger.info("Training file download complete.")
logger.info("Downloading validation file...")
deepchem.utils.data_utils.download_url(url=VALID_URL, dest_dir=data_dir)
logger.info("Validation file download complete.")
logger.info("Downloading test file...")
deepchem.utils.data_utils.download_url(url=TEST_URL, dest_dir=data_dir)
logger.info("Test file download complete")
# Featurizing datasets
logger.info("About to featurize UV dataset.")
featurizer = deepchem.feat.UserDefinedFeaturizer(merck_descriptors)
loader = deepchem.data.UserCSVLoader(tasks=UV_tasks,
id_field="Molecule",
featurizer=featurizer)
logger.info("Featurizing train datasets...")
train_dataset = loader.featurize(train_files, shard_size=shard_size)
logger.info("Featurizing validation datasets...")
valid_dataset = loader.featurize(valid_files, shard_size=shard_size)
logger.info("Featurizing test datasets....")
test_dataset = loader.featurize(test_files, shard_size=shard_size)
# Missing entry removal
logger.info("Removing missing entries from dataset.")
remove_missing_entries(train_dataset)
remove_missing_entries(valid_dataset)
remove_missing_entries(test_dataset)
# Shuffle the training data
logger.info("Shuffling the training dataset")
train_dataset.sparse_shuffle()
# Apply transformations
logger.info("Starting transformations")
transformers = get_transformers(train_dataset)
for transformer in transformers:
logger.info("Performing transformations with {}".format(
transformer.__class__.__name__))
logger.info("Transforming the training dataset...")
train_dataset = transformer.transform(train_dataset)
logger.info("Transforming the validation dataset...")
valid_dataset = transformer.transform(valid_dataset)
logger.info("Transforming the test dataset...")
test_dataset = transformer.transform(test_dataset)
logger.info("Transformations complete.")
logger.info("Moving datasets to corresponding directories")
train_dataset.move(train_dir)
logger.info("Train dataset moved.")
valid_dataset.move(valid_dir)
logger.info("Validation dataset moved.")
test_dataset.move(test_dir)
logger.info("Test dataset moved.")
time2 = time.time()
# TIMING
logger.info("TIMING: UV fitting took %0.3f s" % (time2 - time1))
return train_dataset, valid_dataset, test_dataset
def load_uv(shard_size=2000, featurizer=None, split=None, reload=True):
"""Load UV dataset; does not do train/test split
The UV dataset is an in-house dataset from Merck that was first introduced in the following paper:
Ramsundar, Bharath, et al. "Is multitask deep learning practical for pharma?." Journal of chemical information and modeling 57.8 (2017): 2068-2076.
The UV dataset tests 10,000 of Merck's internal compounds on
190 absorption wavelengths between 210 and 400 nm. Unlike
most of the other datasets featured in MoleculeNet, the UV
collection does not have structures for the compounds tested
since they were proprietary Merck compounds. However, the
collection does feature pre-computed descriptors for these
compounds.
Note that the original train/valid/test split from the source
data was preserved here, so this function doesn't allow for
alternate modes of splitting. Similarly, since the source data
came pre-featurized, it is not possible to apply alternative
featurizations.
Parameters
----------
shard_size: int, optional
Size of the DiskDataset shards to write on disk
featurizer: optional
Ignored since featurization pre-computed
split: optional
Ignored since split pre-computed
reload: bool, optional
Whether to automatically re-load from disk
"""
data_dir = deepchem.utils.data_utils.get_data_dir()
data_dir = os.path.join(data_dir, "UV")
if not os.path.exists(data_dir):
os.mkdir(data_dir)
train_dir = os.path.join(data_dir, "train_dir")
valid_dir = os.path.join(data_dir, "valid_dir")
test_dir = os.path.join(data_dir, "test_dir")
if (os.path.exists(train_dir) and os.path.exists(valid_dir) and
os.path.exists(test_dir)):
logger.info("Reloading existing datasets")
train_dataset = deepchem.data.DiskDataset(train_dir)
valid_dataset = deepchem.data.DiskDataset(valid_dir)
test_dataset = deepchem.data.DiskDataset(test_dir)
else:
logger.info("Featurizing datasets")
train_dataset, valid_dataset, test_dataset = gen_uv(
UV_tasks=UV_tasks,
data_dir=data_dir,
train_dir=train_dir,
valid_dir=valid_dir,
test_dir=test_dir,
shard_size=shard_size)
transformers = get_transformers(train_dataset)
return UV_tasks, (train_dataset, valid_dataset, test_dataset), transformers
<file_sep>import torch
import torch.nn as nn
import numpy as np
from deepchem.models.torch_models import layers
from deepchem.models.torch_models.torch_model import TorchModel
from deepchem.models.losses import L2Loss
from typing import Any
class MAT(nn.Module):
"""An internal TorchModel class.
In this class, we define the various layers and establish a sequential model for the Molecular Attention Transformer.
We also define the forward call of this model in the forward function.
References
----------
.. [1] <NAME> et al. "Molecule Attention Transformer" Graph Representation Learning workshop and Machine Learning and the Physical Sciences workshop at NeurIPS 2019. 2020. https://arxiv.org/abs/2002.08264
Examples
--------
>>> import deepchem as dc
>>> import pandas as pd
>>> import numpy as np
>>> smiles = ['CC', 'CCC', 'CCCC', 'CCCCC', 'CCCCCCC']
>>> vals = [1.35, 6.72, 5.67, 1.23, 1.76]
>>> df = pd.DataFrame(list(zip(smiles, vals)), columns = ['smiles', 'y'])
>>> loader = dc.data.CSVLoader(tasks=['y'], feature_field='smiles', featurizer=dc.feat.MATFeaturizer())
>>> df.to_csv('test.csv')
>>> dataset = loader.create_dataset('test.csv')
>>> model = dc.models.torch_models.MAT()
>>> # To simulate input data, we will generate matrices for a single molecule.
>>> vals = dataset.X[0]
>>> node = vals.node_features
>>> adj = vals.adjacency_matrix
>>> dist = vals.distance_matrix
>>> # We will now utilize a helper function defined in MATModel to get our matrices ready, and convert them into a batch consisting of a single molecule.
>>> helper = dc.models.torch_models.MATModel()
>>> node_features = helper.pad_sequence(torch.tensor(node).unsqueeze(0).float())
>>> adjacency = helper.pad_sequence(torch.tensor(adj).unsqueeze(0).float())
>>> distance = helper.pad_sequence(torch.tensor(dist).unsqueeze(0).float())
>>> inputs = [node_features, adjacency, distance]
>>> inputs = [x.astype(np.float32) if x.dtype == np.float64 else x for x in inputs]
>>> # Get the forward call of the model for this batch.
>>> output = model(inputs)
"""
def __init__(self,
dist_kernel: str = 'softmax',
n_encoders=8,
lambda_attention: float = 0.33,
lambda_distance: float = 0.33,
h: int = 16,
sa_hsize: int = 1024,
sa_dropout_p: float = 0.0,
output_bias: bool = True,
d_input: int = 1024,
d_hidden: int = 1024,
d_output: int = 1024,
activation: str = 'leakyrelu',
n_layers: int = 1,
ff_dropout_p: float = 0.0,
encoder_hsize: int = 1024,
encoder_dropout_p: float = 0.0,
embed_input_hsize: int = 36,
embed_dropout_p: float = 0.0,
gen_aggregation_type: str = 'mean',
gen_dropout_p: float = 0.0,
gen_n_layers: int = 1,
gen_attn_hidden: int = 128,
gen_attn_out: int = 4,
gen_d_output: int = 1,
**kwargs):
"""
Initialization for the internal MAT class.
Parameters
----------
dist_kernel: str
Kernel activation to be used. Can be either 'softmax' for softmax or 'exp' for exponential, for the self-attention layer.
n_encoders: int
Number of encoder layers in the encoder block.
lambda_attention: float
Constant to be multiplied with the attention matrix in the self-attention layer.
lambda_distance: float
Constant to be multiplied with the distance matrix in the self-attention layer.
h: int
Number of attention heads for the self-attention layer.
sa_hsize: int
Size of dense layer in the self-attention layer.
sa_dropout_p: float
Dropout probability for the self-attention layer.
output_bias: bool
If True, dense layers will use bias vectors in the self-attention layer.
d_input: int
Size of input layer in the feed-forward layer.
d_hidden: int
Size of hidden layer in the feed-forward layer. Will also be used as d_output for the MATEmbedding layer.
d_output: int
Size of output layer in the feed-forward layer.
activation: str
Activation function to be used in the feed-forward layer.
Can choose between 'relu' for ReLU, 'leakyrelu' for LeakyReLU, 'prelu' for PReLU,
'tanh' for TanH, 'selu' for SELU, 'elu' for ELU and 'linear' for linear activation.
n_layers: int
Number of layers in the feed-forward layer.
ff_dropout_p: float
            Dropout probability in the feed-forward layer.
encoder_hsize: int
Size of Dense layer for the encoder itself.
encoder_dropout_p: float
Dropout probability for connections in the encoder layer.
embed_input_hsize: int
Size of input layer for the MATEmbedding layer.
embed_dropout_p: float
Dropout probability for the MATEmbedding layer.
gen_aggregation_type: str
Type of aggregation to be used. Can be 'grover', 'mean' or 'contextual'.
gen_dropout_p: float
Dropout probability for the MATGenerator layer.
gen_n_layers: int
Number of layers in MATGenerator.
gen_attn_hidden: int
Size of hidden attention layer in the MATGenerator layer.
gen_attn_out: int
Size of output attention layer in the MATGenerator layer.
gen_d_output: int
Size of output layer in the MATGenerator layer.
"""
super(MAT, self).__init__()
self.embedding = layers.MATEmbedding(d_input=embed_input_hsize,
d_output=d_hidden,
dropout_p=embed_dropout_p)
self.encoder = nn.ModuleList([
layers.MATEncoderLayer(dist_kernel=dist_kernel,
lambda_attention=lambda_attention,
lambda_distance=lambda_distance,
h=h,
sa_hsize=sa_hsize,
sa_dropout_p=sa_dropout_p,
output_bias=output_bias,
d_input=d_input,
d_hidden=d_hidden,
d_output=d_output,
activation=activation,
n_layers=n_layers,
ff_dropout_p=ff_dropout_p,
encoder_hsize=encoder_hsize,
encoder_dropout_p=encoder_dropout_p)
for _ in range(n_encoders)
])
self.generator = layers.MATGenerator(
hsize=d_input,
aggregation_type=gen_aggregation_type,
d_output=gen_d_output,
n_layers=gen_n_layers,
dropout_p=gen_dropout_p,
attn_hidden=gen_attn_hidden,
attn_out=gen_attn_out)
def forward(self, data: np.ndarray, **kwargs):
node_features = torch.tensor(data[0]).float()
adjacency_matrix = torch.tensor(data[1]).float()
distance_matrix = torch.tensor(data[2]).float()
mask = torch.sum(torch.abs(node_features), dim=-1) != 0
output = self.embedding(node_features)
for layer in self.encoder:
output = layer(output, mask, adjacency_matrix, distance_matrix)
output = self.generator(output, mask)
return output
class MATModel(TorchModel):
"""Molecular Attention Transformer.
This class implements the Molecular Attention Transformer [1]_.
The MATFeaturizer (deepchem.feat.MATFeaturizer) is intended to work with this class.
The model takes a batch of MATEncodings (from MATFeaturizer) as input, and returns an array of size Nx1, where N is the number of molecules in the batch.
    Each molecule is broken down into its node features matrix, adjacency matrix and distance matrix.
    A mask tensor is calculated for the batch. All of this goes as input to the MATEmbedding, MATEncoderLayer and MATGenerator layers, which are defined in deepchem/models/torch_models/layers.py.
Currently, MATModel is intended to be a regression model for the freesolv dataset.
References
----------
.. [1] <NAME> et al. "Molecule Attention Transformer" Graph Representation Learning workshop and Machine Learning and the Physical Sciences workshop at NeurIPS 2019. 2020. https://arxiv.org/abs/2002.08264
Examples
--------
>>> import deepchem as dc
>>> import pandas as pd
>>> smiles = ['CC', 'CCC', 'CCCC', 'CCCCC', 'CCCCCCC']
>>> vals = [1.35, 6.72, 5.67, 1.23, 1.76]
>>> df = pd.DataFrame(list(zip(smiles, vals)), columns = ['smiles', 'y'])
>>> loader = dc.data.CSVLoader(tasks=['y'], feature_field='smiles', featurizer=dc.feat.MATFeaturizer())
>>> df.to_csv('test.csv')
>>> dataset = loader.create_dataset('test.csv')
>>> model = dc.models.torch_models.MATModel(batch_size = 2)
>>> out = model.fit(dataset, nb_epoch = 1)
"""
def __init__(self,
dist_kernel: str = 'softmax',
n_encoders=8,
lambda_attention: float = 0.33,
lambda_distance: float = 0.33,
h: int = 16,
sa_hsize: int = 1024,
sa_dropout_p: float = 0.0,
output_bias: bool = True,
d_input: int = 1024,
d_hidden: int = 1024,
d_output: int = 1024,
activation: str = 'leakyrelu',
n_layers: int = 1,
ff_dropout_p: float = 0.0,
encoder_hsize: int = 1024,
encoder_dropout_p: float = 0.0,
embed_input_hsize: int = 36,
embed_dropout_p: float = 0.0,
gen_aggregation_type: str = 'mean',
gen_dropout_p: float = 0.0,
gen_n_layers: int = 1,
gen_attn_hidden: int = 128,
gen_attn_out: int = 4,
gen_d_output: int = 1,
**kwargs):
"""The wrapper class for the Molecular Attention Transformer.
Since we are using a custom data class as input (MATEncoding), we have overriden the default_generator function from DiskDataset and customized it to work with a batch of MATEncoding classes.
Parameters
----------
dist_kernel: str
Kernel activation to be used. Can be either 'softmax' for softmax or 'exp' for exponential, for the self-attention layer.
n_encoders: int
Number of encoder layers in the encoder block.
lambda_attention: float
Constant to be multiplied with the attention matrix in the self-attention layer.
lambda_distance: float
Constant to be multiplied with the distance matrix in the self-attention layer.
h: int
Number of attention heads for the self-attention layer.
sa_hsize: int
Size of dense layer in the self-attention layer.
sa_dropout_p: float
Dropout probability for the self-attention layer.
output_bias: bool
If True, dense layers will use bias vectors in the self-attention layer.
d_input: int
Size of input layer in the feed-forward layer.
d_hidden: int
Size of hidden layer in the feed-forward layer. Will also be used as d_output for the MATEmbedding layer.
d_output: int
Size of output layer in the feed-forward layer.
activation: str
Activation function to be used in the feed-forward layer.
Can choose between 'relu' for ReLU, 'leakyrelu' for LeakyReLU, 'prelu' for PReLU,
'tanh' for TanH, 'selu' for SELU, 'elu' for ELU and 'linear' for linear activation.
n_layers: int
Number of layers in the feed-forward layer.
ff_dropout_p: float
            Dropout probability in the feed-forward layer.
encoder_hsize: int
Size of Dense layer for the encoder itself.
encoder_dropout_p: float
Dropout probability for connections in the encoder layer.
embed_input_hsize: int
Size of input layer for the MATEmbedding layer.
embed_dropout_p: float
Dropout probability for the MATEmbedding layer.
gen_aggregation_type: str
Type of aggregation to be used. Can be 'grover', 'mean' or 'contextual'.
gen_dropout_p: float
Dropout probability for the MATGenerator layer.
gen_n_layers: int
Number of layers in MATGenerator.
gen_attn_hidden: int
Size of hidden attention layer in the MATGenerator layer.
gen_attn_out: int
Size of output attention layer in the MATGenerator layer.
gen_d_output: int
Size of output layer in the MATGenerator layer.
"""
model = MAT(dist_kernel=dist_kernel,
n_encoders=n_encoders,
lambda_attention=lambda_attention,
lambda_distance=lambda_distance,
h=h,
sa_hsize=sa_hsize,
sa_dropout_p=sa_dropout_p,
output_bias=output_bias,
d_input=d_input,
d_hidden=d_hidden,
d_output=d_output,
activation=activation,
n_layers=n_layers,
ff_dropout_p=ff_dropout_p,
encoder_hsize=encoder_hsize,
encoder_dropout_p=encoder_dropout_p,
embed_input_hsize=embed_input_hsize,
embed_dropout_p=embed_dropout_p,
gen_aggregation_type=gen_aggregation_type,
gen_dropout_p=gen_dropout_p,
gen_n_layers=gen_n_layers,
gen_attn_hidden=gen_attn_hidden,
gen_attn_out=gen_attn_out,
gen_d_output=gen_d_output)
loss = L2Loss()
output_types = ['prediction']
super(MATModel, self).__init__(model,
loss=loss,
output_types=output_types,
**kwargs)
def pad_array(self, array: np.ndarray, shape: Any) -> np.ndarray:
"""
Pads an array to the desired shape.
Parameters
----------
array: np.ndarray
Array to be padded.
shape: int or Tuple
Shape the array is padded to.
Returns
----------
array: np.ndarray
Array padded to input shape.
"""
result = np.zeros(shape=shape)
slices = tuple(slice(s) for s in array.shape)
result[slices] = array
return result
def pad_sequence(self, sequence: np.ndarray) -> np.ndarray:
"""
Pads a given sequence using the pad_array function.
Parameters
----------
sequence: np.ndarray
Arrays in this sequence are padded to the largest shape in the sequence.
Returns
----------
array: np.ndarray
Sequence with padded arrays.
"""
shapes = np.stack([np.array(t.shape) for t in sequence])
max_shape = tuple(np.max(shapes, axis=0))
return np.stack([self.pad_array(t, shape=max_shape) for t in sequence])
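    # Padding sketch (illustrative): given arrays a.shape == (3, 3) and
    # b.shape == (5, 5), ``self.pad_sequence([a, b])`` returns a single array
    # of shape (2, 5, 5) in which ``a`` is zero-padded along its trailing rows
    # and columns. default_generator below relies on this when batching
    # molecules of different sizes.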
def default_generator(self,
dataset,
epochs=1,
mode='fit',
deterministic=True,
pad_batches=True,
**kwargs):
for epoch in range(epochs):
for (X_b, y_b, w_b,
ids_b) in dataset.iterbatches(batch_size=self.batch_size,
deterministic=deterministic,
pad_batches=pad_batches):
node_features = self.pad_sequence(
[torch.tensor(data.node_features).float() for data in X_b])
adjacency_matrix = self.pad_sequence([
torch.tensor(data.adjacency_matrix).float() for data in X_b
])
distance_matrix = self.pad_sequence([
torch.tensor(data.distance_matrix).float() for data in X_b
])
inputs = [node_features, adjacency_matrix, distance_matrix]
yield (inputs, [y_b], [w_b])
<file_sep># Requirements - transformers, tokenizers
import os
import unittest
from unittest import TestCase
import pytest
try:
from transformers import RobertaForMaskedLM
from deepchem.feat.smiles_tokenizer import SmilesTokenizer
has_transformers = True
except:
has_transformers = False
class TestSmilesTokenizer(TestCase):
"""Tests the SmilesTokenizer to load the USPTO vocab file and a ChemBERTa Masked LM model with pre-trained weights.."""
@unittest.skipIf(not has_transformers, 'transformers are not installed')
@pytest.mark.torch
def test_tokenize(self):
current_dir = os.path.dirname(os.path.realpath(__file__))
vocab_path = os.path.join(current_dir, 'data', 'vocab.txt')
tokenized_smiles = [
12, 16, 16, 16, 17, 16, 16, 18, 16, 19, 16, 17, 22, 19, 18, 33, 17,
16, 18, 23, 181, 17, 22, 19, 18, 17, 19, 16, 33, 20, 19, 55, 17, 16,
38, 23, 18, 17, 33, 17, 19, 18, 35, 20, 19, 18, 16, 20, 22, 16, 16,
22, 16, 21, 23, 20, 23, 22, 16, 23, 22, 16, 21, 23, 18, 19, 16, 20,
22, 16, 16, 22, 16, 16, 22, 16, 20, 13
]
model = RobertaForMaskedLM.from_pretrained(
'seyonec/SMILES_tokenized_PubChem_shard00_50k')
model.num_parameters()
tokenizer = SmilesTokenizer(
vocab_path, max_len=model.config.max_position_embeddings)
assert tokenized_smiles == tokenizer.encode(
"CCC(CC)COC(=O)[C@H](C)N[P@](=O)(OC[C@H]1O[C@](C#N)([C@H](O)[C@@H]1O)C1=CC=C2N1N=CN=C2N)OC1=CC=CC=C1"
)
<file_sep>"""
Contains implementations of layers used in ChemCeption and Smiles2Vec models.
"""
__author__ = "<NAME>"
__license__ = "MIT"
import tensorflow as tf
from tensorflow.keras.layers import Conv2D, Concatenate, ReLU, Add, MaxPool2D
class Stem(tf.keras.layers.Layer):
"""
Stem Layer as defined in https://arxiv.org/abs/1710.02238. The structure is
significantly altered from the original Inception-ResNet architecture,
(https://arxiv.org/abs/1602.07261) but the idea behind this layer is to
downsample the image as a preprocessing step for the Inception-ResNet layers,
and reduce computational complexity.
"""
def __init__(self, num_filters, **kwargs):
"""
Parameters
----------
num_filters: int,
Number of convolutional filters
"""
self.num_filters = num_filters
self._build_layer_components()
super(Stem, self).__init__(**kwargs)
def _build_layer_components(self):
"""Builds the layers components and set _layers attribute."""
self.conv_layer = Conv2D(filters=self.num_filters,
kernel_size=4,
strides=2,
activation=tf.nn.relu)
self.activation_layer = ReLU()
self._layers = [self.conv_layer, self.activation_layer]
def call(self, inputs):
"""Invoked when __call__ method of the layer is used."""
conv1 = self.conv_layer(inputs)
return self.activation_layer(conv1)
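# Usage sketch (hedged): the Stem layer downsamples an image batch before the
# Inception-ResNet blocks defined below. The shapes are illustrative only.
#
#   stem = Stem(num_filters=16)
#   images = tf.random.uniform((8, 80, 80, 4))  # (batch, height, width, channels)
#   out = stem(images)                          # shape (8, 39, 39, 16) with these settings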
class InceptionResnetA(tf.keras.layers.Layer):
"""
Variant A of the three InceptionResNet layers described in
https://arxiv.org/abs/1710.02238. All variants use multiple
convolutional blocks with varying kernel sizes and number of filters. This
allows capturing patterns over different scales in the inputs. Residual
connections are additionally used and have been shown previously to improve
convergence and training in deep networks. A 1x1 convolution is used on the
concatenated feature maps from the different convolutional blocks, to ensure
shapes of inputs and feature maps are same for the residual connection.
"""
def __init__(self, num_filters, input_dim, **kwargs):
"""
Parameters
----------
num_filters: int,
Number of convolutional filters
input_dim: int,
Number of channels in the input.
"""
self.num_filters = num_filters
self.input_dim = input_dim
self._build_layer_components()
super(InceptionResnetA, self).__init__(**kwargs)
def _build_layer_components(self):
"""Builds the layers components and set _layers attribute."""
self.conv_block1 = [
Conv2D(self.num_filters,
kernel_size=(1, 1),
strides=1,
padding="same",
activation=tf.nn.relu)
]
self.conv_block2 = [
Conv2D(filters=self.num_filters,
kernel_size=(1, 1),
strides=1,
activation=tf.nn.relu,
padding="same")
]
self.conv_block2.append(
Conv2D(filters=self.num_filters,
kernel_size=(3, 3),
strides=1,
activation=tf.nn.relu,
padding="same"))
self.conv_block3 = [
Conv2D(filters=self.num_filters,
kernel_size=1,
strides=1,
activation=tf.nn.relu,
padding="same")
]
self.conv_block3.append(
Conv2D(filters=int(self.num_filters * 1.5),
kernel_size=(3, 3),
strides=1,
activation=tf.nn.relu,
padding="same"))
self.conv_block3.append(
Conv2D(filters=self.num_filters * 2,
kernel_size=(3, 3),
strides=1,
activation=tf.nn.relu,
padding="same"))
self.conv_block4 = [
Conv2D(filters=self.input_dim,
kernel_size=(1, 1),
strides=1,
padding="same")
]
self.concat_layer = Concatenate()
self.add_layer = Add()
self.activation_layer = ReLU()
self._layers = self.conv_block1 + self.conv_block2 + self.conv_block3 + self.conv_block4
self._layers.extend(
[self.concat_layer, self.add_layer, self.activation_layer])
def call(self, inputs):
"""Invoked when __call__ method of the layer is used."""
conv1 = inputs
for layer in self.conv_block1:
conv1 = layer(conv1)
conv2 = inputs
for layer in self.conv_block2:
conv2 = layer(conv2)
conv3 = inputs
for layer in self.conv_block3:
conv3 = layer(conv3)
concat_conv = self.concat_layer([conv1, conv2, conv3])
conv4 = concat_conv
for layer in self.conv_block4:
conv4 = layer(conv4)
output = self.add_layer([conv4, inputs])
return self.activation_layer(output)
class InceptionResnetB(tf.keras.layers.Layer):
"""
Variant B of the three InceptionResNet layers described in
https://arxiv.org/abs/1710.02238. All variants use multiple
convolutional blocks with varying kernel sizes and number of filters. This
allows capturing patterns over different scales in the inputs. Residual
connections are additionally used and have been shown previously to improve
convergence and training in deep networks. A 1x1 convolution is used on the
concatenated feature maps from the different convolutional blocks, to ensure
shapes of inputs and feature maps are same for the residual connection.
"""
def __init__(self, num_filters, input_dim, **kwargs):
"""
Parameters
----------
num_filters: int,
Number of convolutional filters
input_dim: int,
Number of channels in the input.
"""
self.num_filters = num_filters
self.input_dim = input_dim
self._build_layer_components()
super(InceptionResnetB, self).__init__(**kwargs)
def _build_layer_components(self):
"""Builds the layers components and set _layers attribute."""
self.conv_block1 = [
Conv2D(self.num_filters,
kernel_size=1,
strides=1,
padding="same",
activation=tf.nn.relu)
]
self.conv_block2 = [
Conv2D(filters=self.num_filters,
kernel_size=(1, 1),
strides=1,
activation=tf.nn.relu,
padding="same")
]
self.conv_block2.append(
Conv2D(filters=int(self.num_filters * 1.25),
kernel_size=(1, 7),
strides=1,
activation=tf.nn.relu,
padding="same"))
self.conv_block2.append(
Conv2D(filters=int(self.num_filters * 1.5),
kernel_size=(7, 1),
strides=1,
activation=tf.nn.relu,
padding="same"))
self.conv_block3 = [
Conv2D(filters=self.input_dim,
kernel_size=1,
strides=1,
padding="same")
]
self.concat_layer = Concatenate()
self.add_layer = Add()
self.activation_layer = ReLU()
self._layers = self.conv_block1 + self.conv_block2 + self.conv_block3
self._layers.extend(
[self.concat_layer, self.add_layer, self.activation_layer])
def call(self, inputs):
"""Invoked when __call__ method of the layer is used."""
conv1 = inputs
for layer in self.conv_block1:
conv1 = layer(conv1)
conv2 = inputs
for layer in self.conv_block2:
conv2 = layer(conv2)
concat_conv = self.concat_layer([conv1, conv2])
conv3 = concat_conv
for layer in self.conv_block3:
conv3 = layer(conv3)
output = self.add_layer([conv3, inputs])
output = self.activation_layer(output)
return output
class InceptionResnetC(tf.keras.layers.Layer):
"""
Variant C of the three InceptionResNet layers described in
https://arxiv.org/abs/1710.02238. All variants use multiple
convolutional blocks with varying kernel sizes and number of filters. This
allows capturing patterns over different scales in the inputs. Residual
connections are additionally used and have been shown previously to improve
convergence and training in deep networks. A 1x1 convolution is used on the
concatenated feature maps from the different convolutional blocks, to ensure
shapes of inputs and feature maps are same for the residual connection.
"""
def __init__(self, num_filters, input_dim, **kwargs):
"""
Parameters
----------
num_filters: int,
Number of convolutional filters
input_dim: int,
Number of channels in the input.
"""
self.num_filters = num_filters
self.input_dim = input_dim
self._build_layer_components()
super(InceptionResnetC, self).__init__(**kwargs)
def _build_layer_components(self):
"""Builds the layers components and set _layers attribute."""
self.conv_block1 = [
Conv2D(self.num_filters,
kernel_size=(1, 1),
strides=1,
padding="same",
activation=tf.nn.relu)
]
self.conv_block2 = [
Conv2D(filters=self.num_filters,
kernel_size=1,
strides=1,
activation=tf.nn.relu,
padding="same")
]
self.conv_block2.append(
Conv2D(filters=int(self.num_filters * 1.16),
kernel_size=(1, 3),
strides=1,
activation=tf.nn.relu,
padding="same"))
self.conv_block2.append(
Conv2D(filters=int(self.num_filters * 1.33),
kernel_size=(3, 1),
strides=1,
activation=tf.nn.relu,
padding="same"))
self.conv_block3 = [
Conv2D(filters=self.input_dim,
kernel_size=(1, 1),
strides=1,
padding="same")
]
self.concat_layer = Concatenate()
self.add_layer = Add()
self.activation_layer = ReLU()
self._layers = self.conv_block1 + self.conv_block2 + self.conv_block3
self._layers.extend(
[self.concat_layer, self.add_layer, self.activation_layer])
def call(self, inputs):
"""Invoked when __call__ method of the layer is used."""
conv1 = inputs
for layer in self.conv_block1:
conv1 = layer(conv1)
conv2 = inputs
for layer in self.conv_block2:
conv2 = layer(conv2)
concat_conv = self.concat_layer([conv1, conv2])
conv3 = concat_conv
for layer in self.conv_block3:
conv3 = layer(conv3)
output = self.add_layer([conv3, inputs])
output = self.activation_layer(output)
return output
class ReductionA(tf.keras.layers.Layer):
"""
Variant A of the two Reduction layers described in
https://arxiv.org/abs/1710.02238. All variants use multiple convolutional
blocks with varying kernel sizes and number of filters, to reduce the spatial
extent of the image and reduce computational complexity for downstream layers.
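
Examples
--------
A minimal shape-check sketch (illustrative only; the input size and filter
count are arbitrary). With a 16x16 input and ``num_filters=32``, the spatial
extent shrinks to 7x7 and the three branches concatenate to 160 channels:

>>> import tensorflow as tf
>>> layer = ReductionA(num_filters=32)
>>> x = tf.random.uniform((1, 16, 16, 64))
>>> out = layer(x)
>>> out.shape == (1, 7, 7, 160)
True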
"""
def __init__(self, num_filters, **kwargs):
"""
Parameters
----------
num_filters: int,
Number of convolutional filters
"""
self.num_filters = num_filters
self._build_layer_components()
super(ReductionA, self).__init__(**kwargs)
def _build_layer_components(self):
"""Builds the layers components and set _layers attribute."""
self.max_pool1 = MaxPool2D(pool_size=(3, 3), strides=2, padding="valid")
self.conv_block1 = [
Conv2D(int(self.num_filters * 1.5),
kernel_size=(3, 3),
strides=2,
padding="valid",
activation=tf.nn.relu)
]
self.conv_block2 = [
Conv2D(filters=self.num_filters,
kernel_size=1,
strides=1,
activation=tf.nn.relu,
padding="same")
]
self.conv_block2.append(
Conv2D(filters=self.num_filters,
kernel_size=3,
strides=1,
activation=tf.nn.relu,
padding="same"))
self.conv_block2.append(
Conv2D(filters=int(self.num_filters * 1.5),
kernel_size=3,
strides=2,
activation=tf.nn.relu,
padding="valid"))
self.concat_layer = Concatenate()
self.activation_layer = ReLU()
self._layers = self.conv_block1 + self.conv_block2
self._layers.extend(
[self.max_pool1, self.concat_layer, self.activation_layer])
def call(self, inputs):
"""Invoked when __call__ method of the layer is used."""
maxpool1 = self.max_pool1(inputs)
conv1 = inputs
for layer in self.conv_block1:
conv1 = layer(conv1)
conv2 = inputs
for layer in self.conv_block2:
conv2 = layer(conv2)
output = self.concat_layer([maxpool1, conv1, conv2])
output = self.activation_layer(output)
return output
class ReductionB(tf.keras.layers.Layer):
"""
Variant B of the two Reduction layers described in
https://arxiv.org/abs/1710.02238. All variants use multiple convolutional
blocks with varying kernel sizes and number of filters, to reduce the spatial
extent of the image and reduce computational complexity for downstream layers.
"""
def __init__(self, num_filters, **kwargs):
"""
Parameters
----------
num_filters: int,
Number of convolutional filters
"""
self.num_filters = num_filters
self._build_layer_components()
super(ReductionB, self).__init__(**kwargs)
def _build_layer_components(self):
"""Builds the layers components and set _layers attribute."""
self.max_pool1 = MaxPool2D(pool_size=(3, 3), strides=2, padding="valid")
self.conv_block1 = [
Conv2D(self.num_filters,
kernel_size=1,
strides=1,
padding="same",
activation=tf.nn.relu)
]
self.conv_block1.append(
Conv2D(int(self.num_filters * 1.5),
kernel_size=3,
strides=2,
padding="valid",
activation=tf.nn.relu))
self.conv_block2 = [
Conv2D(filters=self.num_filters,
kernel_size=1,
strides=1,
activation=tf.nn.relu,
padding="same")
]
self.conv_block2.append(
Conv2D(filters=int(self.num_filters * 1.125),
kernel_size=3,
strides=2,
activation=tf.nn.relu,
padding="valid"))
self.conv_block3 = [
Conv2D(filters=self.num_filters,
kernel_size=1,
strides=1,
activation=tf.nn.relu,
padding="same")
]
self.conv_block3.append(
Conv2D(filters=int(self.num_filters * 1.125),
kernel_size=(3, 1),
strides=1,
activation=tf.nn.relu,
padding="same"))
self.conv_block3.append(
Conv2D(filters=int(self.num_filters * 1.25),
kernel_size=(3, 3),
strides=2,
activation=tf.nn.relu,
padding="valid"))
self.concat_layer = Concatenate()
self.activation_layer = ReLU()
self._layers = self.conv_block1 + self.conv_block2 + self.conv_block3
self._layers.extend(
[self.max_pool1, self.concat_layer, self.activation_layer])
def call(self, inputs):
"""Invoked when __call__ method of the layer is used."""
maxpool1 = self.max_pool1(inputs)
conv1 = inputs
for layer in self.conv_block1:
conv1 = layer(conv1)
conv2 = inputs
for layer in self.conv_block2:
conv2 = layer(conv2)
conv3 = inputs
for layer in self.conv_block3:
conv3 = layer(conv3)
concat = self.concat_layer([maxpool1, conv1, conv2, conv3])
output = self.activation_layer(concat)
return output
<file_sep>The DeepChem Project
====================
.. raw:: html
<embed>
<a href="https://github.com/deepchem/deepchem">
<img style="position: absolute; top: 0; right: 0; border: 0;" src="https://camo.githubusercontent.com/365986a132ccd6a44c23a9169022c0b5c890c387/68747470733a2f2f73332e616d617a6f6e6177732e636f6d2f6769746875622f726962626f6e732f666f726b6d655f72696768745f7265645f6161303030302e706e67" alt="Fork me on GitHub" data-canonical-src="https://s3.amazonaws.com/github/ribbons/forkme_right_red_aa0000.png">
</a>
</embed>
**The DeepChem project aims to democratize deep learning for science.**
What is DeepChem?
-----------------
The DeepChem project aims to build high quality tools to democratize
the use of deep learning in the sciences. The origin of DeepChem
focused on applications of deep learning to chemistry, but the project
has slowly evolved past its roots to broader applications of deep
learning to the sciences.
The core `DeepChem Repo`_ serves as a monorepo that organizes the DeepChem suite of scientific tools.
As the project matures, smaller, more focused tools will be surfaced in more targeted repos.
DeepChem is primarily developed in Python, but we are experimenting with adding support for other languages.
What are some of the things you can use DeepChem to do? Here's a few examples:
- Predict the solubility of small drug-like molecules
- Predict binding affinity for small molecule to protein targets
- Predict physical properties of simple materials
- Analyze protein structures and extract useful descriptors
- Count the number of cells in a microscopy image
- More coming soon...
We should clarify one thing up front though. DeepChem is a machine
learning library, so it gives you the tools to solve each of the
applications mentioned above yourself. DeepChem may or may not have
prebaked models which can solve these problems out of the box.
Over time, we hope to grow the set of scientific applications DeepChem
can address. This means we need lots of help! If you're a scientist
who's interested in open source, please pitch in on building DeepChem.
.. _`DeepChem Repo`: https://github.com/deepchem/deepchem
Quick Start
-----------
The fastest way to get up and running with DeepChem is to run it on
Google Colab. Check out one of the `DeepChem Tutorials`_.
If you'd like to install DeepChem locally,
.. code-block:: bash
pip install deepchem
Then open your IDE or text editor of choice and try running the following code with python.
.. code-block:: python
import deepchem
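
If the import succeeds, you can try a small end-to-end sketch. The snippet
below is illustrative only: it assumes network access, since
``dc.molnet.load_delaney`` downloads the ESOL solubility dataset the first
time it is called.

.. code-block:: python

    import deepchem as dc

    # Load the Delaney (ESOL) solubility dataset with ECFP fingerprints
    tasks, datasets, transformers = dc.molnet.load_delaney(featurizer='ECFP')
    train, valid, test = datasets

    # Fit a simple multitask regressor and report Pearson R^2 on the test split
    model = dc.models.MultitaskRegressor(n_tasks=len(tasks), n_features=1024)
    model.fit(train, nb_epoch=10)
    metric = dc.metrics.Metric(dc.metrics.pearson_r2_score)
    print(model.evaluate(test, [metric], transformers))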
.. _`DeepChem Tutorials`: https://github.com/deepchem/deepchem/tree/master/examples/tutorials
.. _`forum post`: https://forum.deepchem.io/t/getting-deepchem-running-in-colab/81/7
About Us
--------
DeepChem is managed by a team of open source contributors. Anyone is free to join and contribute!
DeepChem has weekly developer calls. You can find `meeting minutes`_ on our `forums`_.
DeepChem developer calls are open to the public!
To listen in, please email <EMAIL>, where X=bharath and Y=ramsundar to introduce yourself and ask for an invite.
.. important::
| Join our `community gitter <https://gitter.im/deepchem/Lobby>`_ to discuss DeepChem.
| Sign up for our `forums <https://forum.deepchem.io/>`_ to talk about research, development, and general questions.
.. _`meeting minutes`: https://forum.deepchem.io/search?q=Minutes%20order%3Alatest
.. _`forums`: https://forum.deepchem.io/
.. toctree::
:glob:
:maxdepth: 1
:caption: Get Started
get_started/installation
get_started/requirements
get_started/tutorials
get_started/examples
get_started/issues
get_started/Docker-tutorial
.. toctree::
:glob:
:maxdepth: 1
:caption: API Reference
api_reference/data
api_reference/moleculenet
api_reference/featurizers
api_reference/splitters
api_reference/transformers
api_reference/models
api_reference/layers
api_reference/metrics
api_reference/hyper
api_reference/metalearning
api_reference/rl
api_reference/docking
api_reference/utils
.. toctree::
:glob:
:maxdepth: 1
:caption: Development Guide
development_guide/licence
development_guide/scientists
development_guide/coding
development_guide/ci
development_guide/infra
<file_sep>import deepchem as dc
import tensorflow as tf
import numpy as np
import os
import json
import time
def input_fn(dataset, epochs):
x, y, weights = dataset.make_iterator(batch_size=100, epochs=epochs).get_next()
return {'x': x, 'weights': weights}, y
def mean_auc(labels, predictions, weights):
metric_ops = []
update_ops = []
# `n_tasks` is only defined inside run(), so infer the task count from the
# static shape of the labels tensor instead.
for i in range(int(labels.shape[1])):
metric, update = tf.metrics.auc(labels[:,i], predictions[:,i], weights[:,i])
metric_ops.append(metric)
update_ops.append(update)
mean_metric = tf.reduce_mean(tf.stack(metric_ops))
update_all = tf.group(*update_ops)
return mean_metric, update_all
def run():
os.environ['GRPC_POLL_STRATEGY'] = 'poll'
tf.logging.set_verbosity(tf.logging.DEBUG)
try:
task_type = os.environ['JOB_NAME']
task_index = int(os.environ['TASK_INDEX'])
ps_hosts = os.environ['PS_HOSTS'].split(',')
worker_hosts = os.environ['WORKER_HOSTS'].split(',')
TF_CONFIG = {
'task': {'type': task_type, 'index': task_index},
'cluster': {
'chief': [worker_hosts[0]],
'worker': worker_hosts,
'ps': ps_hosts
},
'environment': 'cloud'
}
local_ip = 'localhost:' + TF_CONFIG['cluster'][task_type][task_index].split(':')[1]
TF_CONFIG['cluster'][task_type][task_index] = local_ip
if (task_type in ('chief', 'master')) or (task_type == 'worker' and task_index == 0):
TF_CONFIG['cluster']['worker'][task_index] = local_ip
TF_CONFIG['task']['type'] = 'chief'
os.environ['TF_CONFIG'] = json.dumps(TF_CONFIG)
except KeyError as ex:
print(ex)
job_name = None
task_index = 0
ps_hosts = None
worker_hosts = None
tasks, datasets, transformers = dc.molnet.load_tox21()
train_dataset, valid_dataset, test_dataset = datasets
n_tasks = len(tasks)
n_features = train_dataset.X.shape[1]
model = dc.models.MultitaskClassifier(n_tasks, n_features, layer_sizes=[1000], dropouts=0.25)
print "featurizing columns"
x_col = tf.feature_column.numeric_column('x', shape=(n_features,))
weight_col = tf.feature_column.numeric_column('weights', shape=(n_tasks,))
print "entering estimator"
estimator = model.make_estimator(feature_columns=[x_col], weight_column=weight_col, metrics={'mean_auc': mean_auc},
model_dir='/logs')
# following lines added to run train_and_evaluate function of deepchem which is compatible for distributed training
train_spec = tf.estimator.TrainSpec(input_fn=lambda: input_fn(train_dataset, 100), max_steps=100000)
eval_spec = tf.estimator.EvalSpec(input_fn=lambda: input_fn(test_dataset, 1), steps=None, start_delay_secs=0,
throttle_secs=30)
tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec)
if __name__ == "__main__":
run()
<file_sep>"""
checking jax imports for new CI build
"""
import deepchem as dc
import pytest
try:
import jax.numpy as jnp
from jax import random
import numpy as np
has_jax = True
except:
has_jax = False
@pytest.mark.jax
def test_jax_import():
"""Used to check if Jax is imported correctly. Will be useful in Mac and Windows build"""
key = random.PRNGKey(0)
x = random.normal(key, (10, 10), dtype=jnp.float32)
y = random.normal(key, (10, 10), dtype=jnp.float32)
assert jnp.all(x == y)
n_data_points = 10
n_features = 2
np.random.seed(1234)
X = np.random.rand(n_data_points, n_features)
y = (X[:, 0] > X[:, 1]).astype(np.float32)
dataset = dc.data.NumpyDataset(X, y)
assert dataset.X.shape == (10, 2)
<file_sep>import numpy as np
from deepchem.utils.typing import PymatgenComposition
from deepchem.feat import MaterialCompositionFeaturizer
from typing import Any
class ElementPropertyFingerprint(MaterialCompositionFeaturizer):
"""
Fingerprint of elemental properties from composition.
Based on the data source chosen, returns properties and statistics
(min, max, range, mean, standard deviation, mode) for a compound
based on elemental stoichiometry. E.g., the average electronegativity
of atoms in a crystal structure. The chemical fingerprint is a
vector of these statistics. For a full list of properties and statistics,
see ``matminer.featurizers.composition.ElementProperty(data_source).feature_labels()``.
This featurizer requires the optional dependencies pymatgen and
matminer. It may be useful when only crystal compositions are available
(and not 3D coordinates).
See references [1]_, [2]_, [3]_, [4]_ for more details.
References
----------
.. [1] MagPie data: Ward, L. et al. npj Comput Mater 2, 16028 (2016).
https://doi.org/10.1038/npjcompumats.2016.28
.. [2] Deml data: Deml, A. et al. Physical Review B 93, 085142 (2016).
10.1103/PhysRevB.93.085142
.. [3] Matminer: Ward, L. et al. Comput. Mater. Sci. 152, 60-69 (2018).
.. [4] Pymatgen: Ong, S.P. et al. Comput. Mater. Sci. 68, 314-319 (2013).
Examples
--------
>>> import deepchem as dc
>>> import pymatgen as mg
>>> comp = mg.core.Composition("Fe2O3")
>>> featurizer = dc.feat.ElementPropertyFingerprint()
>>> features = featurizer.featurize([comp])
>>> type(features[0])
<class 'numpy.ndarray'>
>>> features[0].shape
(65,)
Note
----
This class requires matminer and Pymatgen to be installed.
`NaN` feature values are automatically converted to 0 by this featurizer.
"""
def __init__(self, data_source: str = 'matminer'):
"""
Parameters
----------
data_source: str of "matminer", "magpie" or "deml" (default "matminer")
Source for element property data.
"""
self.data_source = data_source
self.ep_featurizer: Any = None
def _featurize(self, datapoint: PymatgenComposition,
**kwargs) -> np.ndarray:
"""
Calculate chemical fingerprint from crystal composition.
Parameters
----------
datapoint: pymatgen.core.Composition object
Composition object.
Returns
-------
feats: np.ndarray
Vector of properties and statistics derived from chemical
stoichiometry. Some values may be NaN.
"""
if 'composition' in kwargs and datapoint is None:
datapoint = kwargs.get("composition")
raise DeprecationWarning(
'Composition is being phased out as a parameter, please pass "datapoint" instead.'
)
if self.ep_featurizer is None:
try:
from matminer.featurizers.composition import ElementProperty
self.ep_featurizer = ElementProperty.from_preset(
self.data_source)
except ModuleNotFoundError:
raise ImportError(
"This class requires matminer to be installed.")
try:
feats = self.ep_featurizer.featurize(datapoint)
except:
feats = []
return np.nan_to_num(np.array(feats))
<file_sep>"""
DGL-based GAT for graph property prediction.
"""
import torch.nn as nn
import torch.nn.functional as F
from deepchem.models.losses import Loss, L2Loss, SparseSoftmaxCrossEntropy
from deepchem.models.torch_models.torch_model import TorchModel
from typing import Optional
class GAT(nn.Module):
"""Model for Graph Property Prediction Based on Graph Attention Networks (GAT).
This model proceeds as follows:
* Update node representations in graphs with a variant of GAT
* For each graph, compute its representation by 1) a weighted sum of the node
representations in the graph, where the weights are computed by applying a
gating function to the node representations 2) a max pooling of the node
representations 3) concatenating the output of 1) and 2)
* Perform the final prediction using an MLP
Examples
--------
>>> import deepchem as dc
>>> import dgl
>>> from deepchem.models import GAT
>>> smiles = ["C1CCC1", "C1=CC=CN=C1"]
>>> featurizer = dc.feat.MolGraphConvFeaturizer()
>>> graphs = featurizer.featurize(smiles)
>>> print(type(graphs[0]))
<class 'deepchem.feat.graph_data.GraphData'>
>>> dgl_graphs = [graphs[i].to_dgl_graph(self_loop=True) for i in range(len(graphs))]
>>> # Batch two graphs into a graph of two connected components
>>> batch_dgl_graph = dgl.batch(dgl_graphs)
>>> model = GAT(n_tasks=1, mode='regression')
>>> preds = model(batch_dgl_graph)
>>> print(type(preds))
<class 'torch.Tensor'>
>>> preds.shape == (2, 1)
True
References
----------
.. [1] <NAME>, <NAME>, <NAME>, <NAME>, <NAME>,
and <NAME>. "Graph Attention Networks." ICLR 2018.
Notes
-----
This class requires DGL (https://github.com/dmlc/dgl) and DGL-LifeSci
(https://github.com/awslabs/dgl-lifesci) to be installed.
"""
def __init__(self,
n_tasks: int,
graph_attention_layers: Optional[list] = None,
n_attention_heads: int = 8,
agg_modes: Optional[list] = None,
activation=F.elu,
residual: bool = True,
dropout: float = 0.,
alpha: float = 0.2,
predictor_hidden_feats: int = 128,
predictor_dropout: float = 0.,
mode: str = 'regression',
number_atom_features: int = 30,
n_classes: int = 2,
nfeat_name: str = 'x'):
"""
Parameters
----------
n_tasks: int
Number of tasks.
graph_attention_layers: list of int
Width of channels per attention head for GAT layers. graph_attention_layers[i]
gives the width of channel for each attention head for the i-th GAT layer. If
both ``graph_attention_layers`` and ``agg_modes`` are specified, they should have
equal length. If not specified, the default value will be [8, 8].
n_attention_heads: int
Number of attention heads in each GAT layer.
agg_modes: list of str
The way to aggregate multi-head attention results for each GAT layer, which can be
either 'flatten' for concatenating all-head results or 'mean' for averaging all-head
results. ``agg_modes[i]`` gives the way to aggregate multi-head attention results for
the i-th GAT layer. If both ``graph_attention_layers`` and ``agg_modes`` are
specified, they should have equal length. If not specified, the model will flatten
multi-head results for intermediate GAT layers and compute mean of multi-head results
for the last GAT layer.
activation: activation function or None
The activation function to apply to the aggregated multi-head results for each GAT
layer. If not specified, the default value will be ELU.
residual: bool
Whether to add a residual connection within each GAT layer. Default to True.
dropout: float
The dropout probability within each GAT layer. Default to 0.
alpha: float
A hyperparameter in LeakyReLU, which is the slope for negative values. Default to 0.2.
predictor_hidden_feats: int
The size for hidden representations in the output MLP predictor. Default to 128.
predictor_dropout: float
The dropout probability in the output MLP predictor. Default to 0.
mode: str
The model type, 'classification' or 'regression'. Default to 'regression'.
number_atom_features: int
The length of the initial atom feature vectors. Default to 30.
n_classes: int
The number of classes to predict per task
(only used when ``mode`` is 'classification'). Default to 2.
nfeat_name: str
For an input graph ``g``, the model assumes that it stores node features in
``g.ndata[nfeat_name]`` and will retrieve input node features from that.
Default to 'x'.
"""
try:
import dgl # noqa: F401
except:
raise ImportError('This class requires dgl.')
try:
import dgllife # noqa: F401
except:
raise ImportError('This class requires dgllife.')
if mode not in ['classification', 'regression']:
raise ValueError(
"mode must be either 'classification' or 'regression'")
super(GAT, self).__init__()
self.n_tasks = n_tasks
self.mode = mode
self.n_classes = n_classes
self.nfeat_name = nfeat_name
if mode == 'classification':
out_size = n_tasks * n_classes
else:
out_size = n_tasks
from dgllife.model import GATPredictor as DGLGATPredictor
if isinstance(graph_attention_layers, list) and isinstance(
agg_modes, list):
assert len(graph_attention_layers) == len(agg_modes), \
'Expect graph_attention_layers and agg_modes to have equal length, ' \
'got {:d} and {:d}'.format(len(graph_attention_layers), len(agg_modes))
# Decide first number of GAT layers
if graph_attention_layers is not None:
num_gnn_layers = len(graph_attention_layers)
elif agg_modes is not None:
num_gnn_layers = len(agg_modes)
else:
num_gnn_layers = 2
if graph_attention_layers is None:
graph_attention_layers = [8] * num_gnn_layers
if agg_modes is None:
agg_modes = ['flatten' for _ in range(num_gnn_layers - 1)]
agg_modes.append('mean')
if activation is not None:
activation = [activation] * num_gnn_layers
self.model = DGLGATPredictor(
in_feats=number_atom_features,
hidden_feats=graph_attention_layers,
num_heads=[n_attention_heads] * num_gnn_layers,
feat_drops=[dropout] * num_gnn_layers,
attn_drops=[dropout] * num_gnn_layers,
alphas=[alpha] * num_gnn_layers,
residuals=[residual] * num_gnn_layers,
agg_modes=agg_modes,
activations=activation,
n_tasks=out_size,
predictor_hidden_feats=predictor_hidden_feats,
predictor_dropout=predictor_dropout)
def forward(self, g):
"""Predict graph labels
Parameters
----------
g: DGLGraph
A DGLGraph for a batch of graphs. It stores the node features in
``dgl_graph.ndata[self.nfeat_name]``.
Returns
-------
torch.Tensor
The model output.
* When self.mode = 'regression',
its shape will be ``(dgl_graph.batch_size, self.n_tasks)``.
* When self.mode = 'classification', the output consists of probabilities
for classes. Its shape will be
``(dgl_graph.batch_size, self.n_tasks, self.n_classes)`` if self.n_tasks > 1;
its shape will be ``(dgl_graph.batch_size, self.n_classes)`` if self.n_tasks is 1.
torch.Tensor, optional
This is only returned when self.mode = 'classification', the output consists of the
logits for classes before softmax.
"""
node_feats = g.ndata[self.nfeat_name]
out = self.model(g, node_feats)
if self.mode == 'classification':
if self.n_tasks == 1:
logits = out.view(-1, self.n_classes)
softmax_dim = 1
else:
logits = out.view(-1, self.n_tasks, self.n_classes)
softmax_dim = 2
proba = F.softmax(logits, dim=softmax_dim)
return proba, logits
else:
return out
class GATModel(TorchModel):
"""Model for Graph Property Prediction Based on Graph Attention Networks (GAT).
This model proceeds as follows:
* Update node representations in graphs with a variant of GAT
* For each graph, compute its representation by 1) a weighted sum of the node
representations in the graph, where the weights are computed by applying a
gating function to the node representations 2) a max pooling of the node
representations 3) concatenating the output of 1) and 2)
* Perform the final prediction using an MLP
Examples
--------
>>> import deepchem as dc
>>> from deepchem.models import GATModel
>>> # preparing dataset
>>> smiles = ["C1CCC1", "C1=CC=CN=C1"]
>>> labels = [0., 1.]
>>> featurizer = dc.feat.MolGraphConvFeaturizer()
>>> X = featurizer.featurize(smiles)
>>> dataset = dc.data.NumpyDataset(X=X, y=labels)
>>> # training model
>>> model = GATModel(mode='classification', n_tasks=1,
... batch_size=16, learning_rate=0.001)
>>> loss = model.fit(dataset, nb_epoch=5)
References
----------
.. [1] <NAME>, <NAME>, <NAME>, <NAME>, <NAME>,
and <NAME>. "Graph Attention Networks." ICLR 2018.
Notes
-----
This class requires DGL (https://github.com/dmlc/dgl) and DGL-LifeSci
(https://github.com/awslabs/dgl-lifesci) to be installed.
"""
def __init__(self,
n_tasks: int,
graph_attention_layers: Optional[list] = None,
n_attention_heads: int = 8,
agg_modes: Optional[list] = None,
activation=F.elu,
residual: bool = True,
dropout: float = 0.,
alpha: float = 0.2,
predictor_hidden_feats: int = 128,
predictor_dropout: float = 0.,
mode: str = 'regression',
number_atom_features: int = 30,
n_classes: int = 2,
self_loop: bool = True,
**kwargs):
"""
Parameters
----------
n_tasks: int
Number of tasks.
graph_attention_layers: list of int
Width of channels per attention head for GAT layers. graph_attention_layers[i]
gives the width of channel for each attention head for the i-th GAT layer. If
both ``graph_attention_layers`` and ``agg_modes`` are specified, they should have
equal length. If not specified, the default value will be [8, 8].
n_attention_heads: int
Number of attention heads in each GAT layer.
agg_modes: list of str
The way to aggregate multi-head attention results for each GAT layer, which can be
either 'flatten' for concatenating all-head results or 'mean' for averaging all-head
results. ``agg_modes[i]`` gives the way to aggregate multi-head attention results for
the i-th GAT layer. If both ``graph_attention_layers`` and ``agg_modes`` are
specified, they should have equal length. If not specified, the model will flatten
multi-head results for intermediate GAT layers and compute mean of multi-head results
for the last GAT layer.
activation: activation function or None
The activation function to apply to the aggregated multi-head results for each GAT
layer. If not specified, the default value will be ELU.
residual: bool
Whether to add a residual connection within each GAT layer. Default to True.
dropout: float
The dropout probability within each GAT layer. Default to 0.
alpha: float
A hyperparameter in LeakyReLU, which is the slope for negative values. Default to 0.2.
predictor_hidden_feats: int
The size for hidden representations in the output MLP predictor. Default to 128.
predictor_dropout: float
The dropout probability in the output MLP predictor. Default to 0.
mode: str
The model type, 'classification' or 'regression'. Default to 'regression'.
number_atom_features: int
The length of the initial atom feature vectors. Default to 30.
n_classes: int
The number of classes to predict per task
(only used when ``mode`` is 'classification'). Default to 2.
self_loop: bool
Whether to add self loops for the nodes, i.e. edges from nodes to themselves.
When input graphs have isolated nodes, self loops allow preserving the original feature
of them in message passing. Default to True.
kwargs
This can include any keyword argument of TorchModel.
"""
model = GAT(n_tasks=n_tasks,
graph_attention_layers=graph_attention_layers,
n_attention_heads=n_attention_heads,
agg_modes=agg_modes,
activation=activation,
residual=residual,
dropout=dropout,
alpha=alpha,
predictor_hidden_feats=predictor_hidden_feats,
predictor_dropout=predictor_dropout,
mode=mode,
number_atom_features=number_atom_features,
n_classes=n_classes)
if mode == 'regression':
loss: Loss = L2Loss()
output_types = ['prediction']
else:
loss = SparseSoftmaxCrossEntropy()
output_types = ['prediction', 'loss']
super(GATModel, self).__init__(model,
loss=loss,
output_types=output_types,
**kwargs)
self._self_loop = self_loop
def _prepare_batch(self, batch):
"""Create batch data for GAT.
Parameters
----------
batch: tuple
The tuple is ``(inputs, labels, weights)``.
Returns
-------
inputs: DGLGraph
DGLGraph for a batch of graphs.
labels: list of torch.Tensor or None
The graph labels.
weights: list of torch.Tensor or None
The weights for each sample or sample/task pair converted to torch.Tensor.
"""
try:
import dgl
except:
raise ImportError('This class requires dgl.')
inputs, labels, weights = batch
dgl_graphs = [
graph.to_dgl_graph(self_loop=self._self_loop) for graph in inputs[0]
]
inputs = dgl.batch(dgl_graphs).to(self.device)
_, labels, weights = super(GATModel, self)._prepare_batch(
([], labels, weights))
return inputs, labels, weights
<file_sep>"""Test normalization of input."""
import numpy as np
import deepchem as dc
from deepchem.metrics import to_one_hot
from deepchem.metrics import from_one_hot
from deepchem.metrics import threshold_predictions
from deepchem.metrics import handle_classification_mode
from deepchem.metrics import normalize_prediction_shape
from deepchem.metrics import normalize_weight_shape
def test_one_hot():
"""Test the one hot encoding."""
y = np.array([0, 0, 1, 0, 1, 1, 0])
y_hot = to_one_hot(y)
expected = np.array([[1, 0], [1, 0], [0, 1], [1, 0], [0, 1], [0, 1], [1,
0]])
yp = from_one_hot(y_hot)
assert np.array_equal(expected, y_hot)
assert np.array_equal(y, yp)
def test_handle_classification_mode_direct():
"""Test proper thresholding."""
y = np.random.rand(10, 2)
y = y / np.sum(y, axis=1)[:, np.newaxis]
y = np.expand_dims(y, 1)
y_expected = y
y_out = handle_classification_mode(y, "direct")
assert y_out.shape == (10, 1, 2)
assert np.array_equal(y_out, y_expected)
def test_handle_classification_mode_threshold():
"""Test proper thresholding."""
y = np.random.rand(10, 2)
y = y / np.sum(y, axis=1)[:, np.newaxis]
y = np.expand_dims(y, 1)
y_expected = np.argmax(np.squeeze(y), axis=1)[:, np.newaxis]
y_out = handle_classification_mode(y, "threshold", threshold_value=0.5)
assert y_out.shape == (10, 1)
assert np.array_equal(y_out, y_expected)
def test_handle_classification_mode_threshold_nonstandard():
"""Test proper thresholding."""
y = np.random.rand(10, 2)
y = y / np.sum(y, axis=1)[:, np.newaxis]
y_expected = np.where(y[:, 1] >= 0.3, np.ones(10), np.zeros(10))[:,
np.newaxis]
y = np.expand_dims(y, 1)
y_out = handle_classification_mode(y, "threshold", threshold_value=0.3)
assert y_out.shape == (10, 1)
assert np.array_equal(y_out, y_expected)
def test_handle_classification_mode_threshold_one_hot():
"""Test proper thresholding."""
y = np.random.rand(10, 2)
y = y / np.sum(y, axis=1)[:, np.newaxis]
y = np.expand_dims(y, 1)
y_expected = np.expand_dims(
to_one_hot(np.argmax(np.squeeze(y), axis=1), n_classes=2), 1)
y_out = handle_classification_mode(y,
"threshold-one-hot",
threshold_value=0.5)
assert y_out.shape == (10, 1, 2)
assert np.array_equal(y_out, y_expected)
def test_threshold_predictions_binary():
"""Test thresholding of binary predictions."""
# Get a random prediction matrix
y = np.random.rand(10, 2)
y = y / np.sum(y, axis=1)[:, np.newaxis]
y_thresh = threshold_predictions(y, 0.5)
assert y_thresh.shape == (10,)
assert (y_thresh == np.argmax(y, axis=1)).all()
def test_threshold_predictions_multiclass():
"""Test thresholding of multiclass predictions."""
y = np.random.rand(10, 5)
y = y / np.sum(y, axis=1)[:, np.newaxis]
y_thresh = threshold_predictions(y)
assert y_thresh.shape == (10,)
assert (y_thresh == np.argmax(y, axis=1)).all()
def test_normalize_1d_classification_binary():
"""Tests 1d classification normalization."""
y = np.array([0, 0, 1, 0, 1, 1, 0])
expected = np.array([[[1., 0.]], [[1., 0.]], [[0., 1.]], [[1., 0.]],
[[0., 1.]], [[0., 1.]], [[1., 0.]]])
y_out = normalize_prediction_shape(y,
mode="classification",
n_tasks=1,
n_classes=2)
assert y_out.shape == (7, 1, 2)
assert np.array_equal(expected, y_out)
def test_normalize_1d_classification_multiclass():
"""Tests 1d classification normalization."""
y = np.random.randint(5, size=(200,))
y_expected = np.expand_dims(to_one_hot(y, n_classes=5), 1)
y_out = normalize_prediction_shape(y,
mode="classification",
n_tasks=1,
n_classes=5)
assert y_out.shape == (200, 1, 5)
assert np.array_equal(y_expected, y_out)
def test_normalize_1d_classification_multiclass_explicit_nclasses():
"""Tests 1d classification normalization."""
y = np.random.randint(5, size=(10,))
y_expected = np.expand_dims(to_one_hot(y, n_classes=10), 1)
y_out = normalize_prediction_shape(y,
mode="classification",
n_classes=10,
n_tasks=1)
assert y_out.shape == (10, 1, 10)
assert np.array_equal(y_expected, y_out)
def test_normalize_2d_classification_binary():
"""Tests 2d classification normalization."""
# Of shape (N, n_classes)
y = np.random.randint(2, size=(10, 1))
y_expected = np.expand_dims(dc.metrics.to_one_hot(np.squeeze(y)), 1)
y_out = normalize_prediction_shape(y,
mode="classification",
n_tasks=1,
n_classes=2)
assert y_out.shape == (10, 1, 2)
assert np.array_equal(y_expected, y_out)
def test_normalize_3d_classification_binary():
"""Tests 1d classification normalization."""
# Of shape (N, 1, n_classes)
y = np.random.randint(2, size=(10,))
y = dc.metrics.to_one_hot(y, n_classes=2)
y = np.expand_dims(y, 1)
y_expected = y
y_out = normalize_prediction_shape(y,
mode="classification",
n_tasks=1,
n_classes=2)
assert y_out.shape == (10, 1, 2)
assert np.array_equal(y_expected, y_out)
def test_normalize_1d_regression():
"""Tests 1d regression normalization."""
y = np.random.rand(10)
y_expected = y[:, np.newaxis]
y_out = normalize_prediction_shape(y, mode="regression", n_tasks=1)
assert y_out.shape == (10, 1)
assert np.array_equal(y_expected, y_out)
def test_normalize_2d_regression():
"""Tests 2d regression normalization."""
y = np.random.rand(10, 5)
y_expected = y
y_out = normalize_prediction_shape(y, mode="regression", n_tasks=5)
assert y_out.shape == (10, 5)
assert np.array_equal(y_expected, y_out)
def test_normalize_3d_regression():
"""Tests 3d regression normalization."""
y = np.random.rand(10, 5, 1)
y_expected = np.squeeze(y)
y_out = normalize_prediction_shape(y, mode="regression", n_tasks=5)
assert y_out.shape == (10, 5)
assert np.array_equal(y_expected, y_out)
def test_scalar_weight_normalization():
"""Test normalization of weights."""
w_out = normalize_weight_shape(w=5, n_samples=10, n_tasks=5)
assert w_out.shape == (10, 5)
assert np.all(w_out == 5 * np.ones((10, 5)))
def test_1d_weight_normalization():
"""Test normalization of weights."""
w = np.random.rand(10)
# This has w for each task.
w_expected = np.array([w, w, w, w, w]).T
w_out = normalize_weight_shape(w, n_samples=10, n_tasks=5)
assert w_out.shape == (10, 5)
assert np.all(w_out == w_expected)
def test_2d_weight_normalization():
"""Test normalization of weights."""
w = np.random.rand(10, 5)
w_out = normalize_weight_shape(w, n_samples=10, n_tasks=5)
assert w_out.shape == (10, 5)
assert np.all(w_out == w)
<file_sep>echo "Pulling featurized core pdbbind dataset from deepchem"
wget -c http://deepchem.io.s3-website-us-west-1.amazonaws.com/featurized_datasets/core_grid.tar.gz
echo "Extracting core pdbbind"
tar -zxvf core_grid.tar.gz
echo "Pulling featurized refined pdbbind dataset from deepchem"
wget -c http://deepchem.io.s3-website-us-west-1.amazonaws.com/featurized_datasets/refined_grid.tar.gz
echo "Extracting refined pdbbind"
tar -zxvf refined_grid.tar.gz
echo "Pulling featurized full pdbbind dataset from deepchem"
wget -c http://deepchem.io.s3-website-us-west-1.amazonaws.com/featurized_datasets/full_grid.tar.gz
echo "Extracting full pdbbind"
tar -zxvf full_grid.tar.gz
<file_sep>"""A collection of utilities for dealing with Molecular Fragments"""
import itertools
import numpy as np
from typing import List, Optional, Sequence, Set, Tuple, Union
import logging
from deepchem.utils.typing import RDKitAtom, RDKitMol
from deepchem.utils.geometry_utils import compute_pairwise_distances
logger = logging.getLogger(__name__)
class MoleculeLoadException(Exception):
def __init__(self, *args, **kwargs):
Exception.__init__(self, *args, **kwargs)
class AtomShim(object):
"""This is a shim object wrapping an atom.
We use this class instead of raw RDKit atoms since manipulating a
large number of rdkit Atoms seems to result in segfaults. Wrapping
the basic information in an AtomShim seems to avoid issues.
"""
def __init__(self, atomic_num: int, partial_charge: float,
atom_coords: np.ndarray):
"""Initialize this object
Parameters
----------
atomic_num: int
Atomic number for this atom.
partial_charge: float
The partial Gasteiger charge for this atom
atom_coords: np.ndarray
Of shape (3,) with the coordinates of this atom
"""
self.atomic_num = atomic_num
self.partial_charge = partial_charge
self.coords = atom_coords
def GetAtomicNum(self) -> int:
"""Returns atomic number for this atom.
Returns
-------
int
Atomic number for this atom.
"""
return self.atomic_num
def GetPartialCharge(self) -> float:
"""Returns partial charge for this atom.
Returns
-------
float
A partial Gasteiger charge for this atom.
"""
return self.partial_charge
def GetCoords(self) -> np.ndarray:
"""Returns 3D coordinates for this atom as numpy array.
Returns
-------
np.ndarray
Numpy array of shape `(3,)` with coordinates for this atom.
"""
return self.coords
class MolecularFragment(object):
"""A class that represents a fragment of a molecule.
It's often convenient to represent a fragment of a molecule. For
example, if two molecules form a molecular complex, it may be useful
to create two fragments which represent the subsets of each molecule
that's close to the other molecule (in the contact region).
Ideally, we'd be able to do this in RDKit directly, but manipulating
molecular fragments doesn't seem to be supported functionality.
Examples
--------
>>> import numpy as np
>>> from rdkit import Chem
>>> mol = Chem.MolFromSmiles("C")
>>> coords = np.array([[0.0, 0.0, 0.0]])
>>> atom = mol.GetAtoms()[0]
>>> fragment = MolecularFragment([atom], coords)
"""
def __init__(self, atoms: Sequence[RDKitAtom], coords: np.ndarray):
"""Initialize this object.
Parameters
----------
atoms: Iterable[rdkit.Chem.rdchem.Atom]
Each entry in this list should be a RDKit Atom.
coords: np.ndarray
Array of locations for atoms of shape `(N, 3)` where `N ==
len(atoms)`.
"""
if not isinstance(coords, np.ndarray):
raise ValueError("Coords must be a numpy array of shape (N, 3)")
if coords.shape != (len(atoms), 3):
raise ValueError(
"Coords must be a numpy array of shape `(N, 3)` where `N == len(atoms)`."
)
self.atoms = [
AtomShim(x.GetAtomicNum(), get_partial_charge(x), coords[ind])
for ind, x in enumerate(atoms)
]
self.coords = coords
def GetAtoms(self) -> List[AtomShim]:
"""Returns the list of atoms
Returns
-------
List[AtomShim]
list of atoms in this fragment.
"""
return self.atoms
def GetNumAtoms(self) -> int:
"""Returns the number of atoms
Returns
-------
int
Number of atoms in this fragment.
"""
return len(self.atoms)
def GetCoords(self) -> np.ndarray:
"""Returns 3D coordinates for this fragment as numpy array.
Returns
-------
np.ndarray
A numpy array of shape `(N, 3)` with coordinates for this fragment.
Here, N is the number of atoms.
"""
return self.coords
def get_partial_charge(atom: Union[RDKitAtom, AtomShim]) -> float:
"""Get partial charge of a given atom (rdkit Atom object)
Parameters
----------
atom: rdkit.Chem.rdchem.Atom or AtomShim
Either a rdkit.Atom object or `AtomShim`
Returns
-------
float
A partial Gasteiger charge of a given atom.
Notes
-----
This function requires RDKit to be installed.
Examples
--------
>>> from rdkit import Chem
>>> mol = Chem.MolFromSmiles("CC")
>>> atom = mol.GetAtoms()[0]
>>> get_partial_charge(atom)
0.0
"""
try:
from rdkit import Chem
except ModuleNotFoundError:
raise ImportError("This function requires RDKit to be installed.")
if isinstance(atom, Chem.Atom):
try:
value = atom.GetProp(str("_GasteigerCharge"))
if value == '-nan':
return 0.0
return float(value)
except KeyError:
return 0.0
else:
return atom.GetPartialCharge()
def merge_molecular_fragments(
molecules: List[MolecularFragment]) -> Optional[MolecularFragment]:
"""Helper method to merge two molecular fragments.
Parameters
----------
molecules: List[MolecularFragment]
List of `MolecularFragment` objects.
Returns
-------
Optional[MolecularFragment]
Returns a merged `MolecularFragment`
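
Examples
--------
A small sketch that merges two single-atom fragments built from an ethane
molecule (the coordinates here are arbitrary placeholders):

>>> import numpy as np
>>> from rdkit import Chem
>>> mol = Chem.MolFromSmiles("CC")
>>> atoms = list(mol.GetAtoms())
>>> frag1 = MolecularFragment([atoms[0]], np.zeros((1, 3)))
>>> frag2 = MolecularFragment([atoms[1]], np.ones((1, 3)))
>>> merged = merge_molecular_fragments([frag1, frag2])
>>> merged.GetNumAtoms()
2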
"""
if len(molecules) == 0:
return None
if len(molecules) == 1:
return molecules[0]
else:
all_atoms = []
all_coords = []
for mol_frag in molecules:
all_atoms += mol_frag.GetAtoms()
all_coords.append(mol_frag.GetCoords())
return MolecularFragment(all_atoms, np.concatenate(all_coords))
def get_mol_subset(
coords: np.ndarray, mol: Union[RDKitMol, MolecularFragment],
atom_indices_to_keep: List[int]
) -> Tuple[np.ndarray, MolecularFragment]:
"""Strip a subset of the atoms in this molecule
Parameters
----------
coords: np.ndarray
Must be of shape (N, 3) and correspond to coordinates of mol.
mol: rdkit.Chem.rdchem.Mol or MolecularFragment
The molecule to strip
atom_indices_to_keep: list
List of the indices of the atoms to keep. Each index is a unique
number between `[0, N)`.
Returns
-------
Tuple[np.ndarray, MolecularFragment]
A tuple of `(coords, mol_frag)` where `coords` is a numpy array of shape
`(len(atom_indices_to_keep), 3)` with the coordinates of the kept atoms.
`mol_frag` is a `MolecularFragment` containing those atoms.
Notes
-----
This function requires RDKit to be installed.
"""
try:
from rdkit import Chem
except ModuleNotFoundError:
raise ImportError("This function requires RDKit to be installed.")
indexes_to_keep = []
atoms_to_keep = []
# Compute partial charges on molecule if RDKit Mol
if isinstance(mol, Chem.Mol):
compute_charges(mol)
atoms = list(mol.GetAtoms())
for index in atom_indices_to_keep:
indexes_to_keep.append(index)
atoms_to_keep.append(atoms[index])
coords = coords[indexes_to_keep]
mol_frag = MolecularFragment(atoms_to_keep, coords)
return coords, mol_frag
def strip_hydrogens(
coords: np.ndarray, mol: Union[RDKitMol, MolecularFragment]
) -> Tuple[np.ndarray, MolecularFragment]:
"""Strip the hydrogens from input molecule
Parameters
----------
coords: np.ndarray
The coords must be of shape (N, 3) and correspond to coordinates of mol.
mol: rdkit.Chem.rdchem.Mol or MolecularFragment
The molecule to strip
Returns
-------
Tuple[np.ndarray, MolecularFragment]
A tuple of `(coords, mol_frag)` where `coords` is a numpy array of
coordinates with the hydrogen atoms removed. `mol_frag` is a
`MolecularFragment` containing only the heavy atoms.
Notes
-----
This function requires RDKit to be installed.
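
Examples
--------
A toy example on methane with explicit hydrogens (all-zero coordinates are
used just as placeholders):

>>> import numpy as np
>>> from rdkit import Chem
>>> mol = Chem.AddHs(Chem.MolFromSmiles("C"))
>>> coords = np.zeros((mol.GetNumAtoms(), 3))
>>> new_coords, frag = strip_hydrogens(coords, mol)
>>> frag.GetNumAtoms()
1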
"""
mol_atoms = mol.GetAtoms()
atomic_numbers = [atom.GetAtomicNum() for atom in mol_atoms]
atom_indices_to_keep = [
ind for (ind, atomic_number) in enumerate(atomic_numbers)
if (atomic_number != 1)
]
return get_mol_subset(coords, mol, atom_indices_to_keep)
def get_contact_atom_indices(fragments: List[Tuple[np.ndarray, RDKitMol]],
cutoff: float = 4.5) -> List[List[int]]:
"""Compute that atoms close to contact region.
Molecular complexes can get very large. This can make it unwieldy to
compute functions on them. To improve memory usage, it can be very
useful to trim out atoms that aren't close to contact regions. This
function computes pairwise distances between all pairs of molecules
in the molecular complex. If an atom is within cutoff distance of
any atom on another molecule in the complex, it is regarded as a
contact atom. Otherwise it is trimmed.
Parameters
----------
fragments: List[Tuple[np.ndarray, rdkit.Chem.rdchem.Mol]]
As returned by `rdkit_utils.load_complex`, a list of tuples of
`(coords, mol)` where `coords` is a `(N_atoms, 3)` array and `mol`
is the rdkit molecule object.
cutoff: float, optional (default 4.5)
The cutoff distance in angstroms.
Returns
-------
List[List[int]]
A list of length `len(molecular_complex)`. Each entry in this list
is a list of atom indices from that molecule which should be kept, in
sorted order.
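
Examples
--------
A toy two-fragment "complex" with one atom per fragment, 2 angstroms apart,
so both atoms fall within the default cutoff (the coordinates are made up):

>>> import numpy as np
>>> from rdkit import Chem
>>> frag1 = (np.array([[0.0, 0.0, 0.0]]), Chem.MolFromSmiles("C"))
>>> frag2 = (np.array([[0.0, 0.0, 2.0]]), Chem.MolFromSmiles("O"))
>>> get_contact_atom_indices([frag1, frag2])
[[0], [0]]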
"""
# indices to atoms to keep
keep_inds: List[Set[int]] = [set([]) for _ in fragments]
for (ind1, ind2) in itertools.combinations(range(len(fragments)), 2):
frag1, frag2 = fragments[ind1], fragments[ind2]
pairwise_distances = compute_pairwise_distances(frag1[0], frag2[0])
# contacts is of form (x_coords, y_coords), a tuple of 2 lists
contacts = np.nonzero((pairwise_distances < cutoff))
# contacts[0] is the x_coords, that is the frag1 atoms that have
# nonzero contact.
frag1_atoms = set([int(c) for c in contacts[0].tolist()])
# contacts[1] is the y_coords, the frag2 atoms with nonzero contacts
frag2_atoms = set([int(c) for c in contacts[1].tolist()])
keep_inds[ind1] = keep_inds[ind1].union(frag1_atoms)
keep_inds[ind2] = keep_inds[ind2].union(frag2_atoms)
sorted_keep_inds = [sorted(list(keep)) for keep in keep_inds]
return sorted_keep_inds
def reduce_molecular_complex_to_contacts(
fragments: List[Tuple[np.ndarray, RDKitMol]],
cutoff: float = 4.5) -> List[Tuple[np.ndarray, MolecularFragment]]:
"""Reduce a molecular complex to only those atoms near a contact.
Molecular complexes can get very large. This can make it unwieldy to
compute functions on them. To improve memory usage, it can be very
useful to trim out atoms that aren't close to contact regions. This
function takes in a molecular complex and returns a new molecular
complex representation that contains only contact atoms. The contact
atoms are computed by calling `get_contact_atom_indices` under the
hood.
Parameters
----------
fragments: List[Tuple[np.ndarray, rdkit.Chem.rdchem.Mol]]
As returned by `rdkit_utils.load_complex`, a list of tuples of
`(coords, mol)` where `coords` is a `(N_atoms, 3)` array and `mol`
is the rdkit molecule object.
cutoff: float
The cutoff distance in angstroms.
Returns
-------
List[Tuple[np.ndarray, MolecularFragment]]
A list of length `len(molecular_complex)`. Each entry in this list
is a tuple of `(coords, MolecularFragment)`. The coords is stripped
down to `(N_contact_atoms, 3)` where `N_contact_atoms` is the number
of contact atoms for this complex. `MolecularFragment` is used since
it's tricky to make a RDKit sub-molecule.
"""
atoms_to_keep = get_contact_atom_indices(fragments, cutoff)
reduced_complex = []
for frag, keep in zip(fragments, atoms_to_keep):
contact_frag = get_mol_subset(frag[0], frag[1], keep)
reduced_complex.append(contact_frag)
return reduced_complex
# TODO: This is duplicated! Clean up
def compute_charges(mol):
"""Attempt to compute Gasteiger Charges on Mol
This also has the side effect of calculating charges on mol. The
mol passed into this function has to already have been sanitized
Parameters
----------
mol: rdkit molecule
Returns
-------
No return since updates in place.
Note
----
This function requires RDKit to be installed.
"""
from rdkit.Chem import AllChem
try:
# Updates charges in place
AllChem.ComputeGasteigerCharges(mol)
except Exception as e:
logger.exception("Unable to compute charges for mol")
raise MoleculeLoadException(e)
<file_sep>import numpy as np
from flaky import flaky
import pytest
try:
import tensorflow as tf
from tensorflow.keras import layers
has_tensorflow = True
except:
has_tensorflow = False
import deepchem as dc
from deepchem.data import NumpyDataset
@flaky
@pytest.mark.tensorflow
def test_compute_model_performance_multitask_classifier():
n_data_points = 20
n_features = 1
n_tasks = 2
n_classes = 2
X = np.ones(shape=(n_data_points // 2, n_features)) * -1
X1 = np.ones(shape=(n_data_points // 2, n_features))
X = np.concatenate((X, X1))
class_1 = np.array([[0.0, 1.0] for x in range(int(n_data_points / 2))])
class_0 = np.array([[1.0, 0.0] for x in range(int(n_data_points / 2))])
y1 = np.concatenate((class_0, class_1))
y2 = np.concatenate((class_1, class_0))
y = np.stack([y1, y2], axis=1)
dataset = NumpyDataset(X, y)
features = layers.Input(shape=(n_features,))
dense = layers.Dense(n_tasks * n_classes)(features)
logits = layers.Reshape((n_tasks, n_classes))(dense)
output = layers.Softmax()(logits)
keras_model = tf.keras.Model(inputs=features, outputs=[output, logits])
model = dc.models.KerasModel(keras_model,
dc.models.losses.SoftmaxCrossEntropy(),
output_types=['prediction', 'loss'],
learning_rate=0.01,
batch_size=n_data_points)
model.fit(dataset, nb_epoch=1000)
metric = dc.metrics.Metric(dc.metrics.roc_auc_score,
np.mean,
mode="classification")
scores = model.evaluate_generator(model.default_generator(dataset),
[metric],
per_task_metrics=True)
scores = list(scores[1].values())
# Loosening atol to see if tests stop failing sporadically
assert np.all(np.isclose(scores, [1.0, 1.0], atol=0.50))
@pytest.mark.tensorflow
def test_compute_model_performance_singletask_classifier():
"""Computes model performance on singletask dataset with one-hot label encoding."""
n_data_points = 20
n_features = 10
X = np.ones(shape=(int(n_data_points / 2), n_features)) * -1
X1 = np.ones(shape=(int(n_data_points / 2), n_features))
X = np.concatenate((X, X1))
class_1 = np.array([[0.0, 1.0] for x in range(int(n_data_points / 2))])
class_0 = np.array([[1.0, 0.0] for x in range(int(n_data_points / 2))])
y = np.concatenate((class_0, class_1))
dataset = NumpyDataset(X, y)
features = layers.Input(shape=(n_features,))
dense = layers.Dense(2)(features)
output = layers.Softmax()(dense)
keras_model = tf.keras.Model(inputs=features, outputs=[output])
model = dc.models.KerasModel(keras_model,
dc.models.losses.SoftmaxCrossEntropy(),
learning_rate=0.1)
model.fit(dataset, nb_epoch=1000)
metric = dc.metrics.Metric(dc.metrics.roc_auc_score,
np.mean,
mode="classification",
n_tasks=1)
scores = model.evaluate_generator(model.default_generator(dataset),
[metric],
per_task_metrics=True)
scores = list(scores[1].values())
assert np.isclose(scores, [1.0], atol=0.05)
@pytest.mark.tensorflow
def test_compute_model_performance_multitask_regressor():
random_seed = 42
n_data_points = 20
n_features = 2
n_tasks = 2
np.random.seed(seed=random_seed)
X = np.random.rand(n_data_points, n_features)
y1 = np.array([0.5 for x in range(n_data_points)])
y2 = np.array([-0.5 for x in range(n_data_points)])
y = np.stack([y1, y2], axis=1)
dataset = NumpyDataset(X, y)
features = layers.Input(shape=(n_features,))
dense = layers.Dense(n_tasks)(features)
keras_model = tf.keras.Model(inputs=features, outputs=[dense])
model = dc.models.KerasModel(keras_model,
dc.models.losses.L2Loss(),
learning_rate=0.1)
model.fit(dataset, nb_epoch=1000)
metric = [
dc.metrics.Metric(dc.metrics.mean_absolute_error,
np.mean,
mode="regression"),
]
scores = model.evaluate_generator(model.default_generator(dataset),
metric,
per_task_metrics=True)
scores = list(scores[1].values())
assert np.all(np.isclose(scores, [0.0, 0.0], atol=1.0))
<file_sep>"""
Platinum Adsorbtion structure for N and NO along with their formation energies
"""
import numpy as np
import os
import deepchem as dc
from deepchem.molnet.load_function.molnet_loader import TransformerGenerator, _MolnetLoader
from deepchem.data import Dataset
from typing import List, Optional, Tuple, Union
PLATINUM_URL = "https://deepchemdata.s3-us-west-1.amazonaws.com/datasets/Platinum_adsorption.tar.gz"
PLATINUM_TASKS = ["Formation Energy"]
PRIMITIVE_CELL = {
"lattice": [[2.818528, 0.0, 0.0], [-1.409264, 2.440917, 0.0],
[0.0, 0.0, 25.508255]],
"coords": [[0.66667, 0.33333, 0.090221], [0.33333, 0.66667, 0.18043936],
[0.0, 0.0, 0.27065772], [0.66667, 0.33333, 0.36087608],
[0.33333, 0.66667, 0.45109444], [0.0, 0.0, 0.49656991]],
"species": ['H', 'H', 'H', 'H', 'H', 'He'],
"site_properties": {
'SiteTypes': ['S1', 'S1', 'S1', 'S1', 'S1', 'A1']
}
}
PRIMITIVE_CELL_INF0 = {
"cutoff": np.around(6.00),
"structure": PRIMITIVE_CELL,
"aos": ['1', '0', '2'],
"pbc": [True, True, False],
"ns": 1,
"na": 1
}
class _PtAdsorptionLoader(_MolnetLoader):
def create_dataset(self) -> Dataset:
dataset_file = os.path.join(self.data_dir, 'Platinum_adsorption.json')
if not os.path.exists(dataset_file):
dc.utils.data_utils.download_url(url=PLATINUM_URL,
dest_dir=self.data_dir)
dc.utils.data_utils.untargz_file(
os.path.join(self.data_dir, 'Platinum_adsorption.tar.gz'),
self.data_dir)
loader = dc.data.JsonLoader(tasks=PLATINUM_TASKS,
feature_field="Structures",
label_field="Formation Energy",
featurizer=self.featurizer,
**self.args)
return loader.create_dataset(dataset_file)
def load_Platinum_Adsorption(
featurizer: Union[dc.feat.Featurizer, str] = dc.feat.SineCoulombMatrix(),
splitter: Union[dc.splits.Splitter, str, None] = 'random',
transformers: List[Union[TransformerGenerator, str]] = [],
reload: bool = True,
data_dir: Optional[str] = None,
save_dir: Optional[str] = None,
**kwargs
) -> Tuple[List[str], Tuple[Dataset, ...], List[dc.trans.Transformer]]:
"""
Load Platinum Adsorption Dataset
The dataset consists of different configurations of adsorbates (i.e. N and NO)
on a platinum surface, represented as lattices, together with their formation
energies. There are 648 different adsorbate configurations in this dataset,
each represented as a Pymatgen Structure object.
1. Pymatgen structure object with site_properties containing the following keys:
- "SiteTypes", indicating whether a site is an active site "A1" or a spectator
site "S1".
- "oss", the different occupational sites. For spectator sites this is set to -1.
Parameters
----------
featurizer : Featurizer (default SineCoulombMatrix)
the featurizer to use for processing the data. The LCNNFeaturizer is
recommended for this dataset.
splitter : Splitter (default RandomSplitter)
the splitter to use for splitting the data into training, validation, and
test sets. Alternatively you can pass one of the names from
dc.molnet.splitters as a shortcut. If this is None, all the data will
be included in a single dataset.
transformers : list of TransformerGenerators or strings
the transformers to apply to the data with the appropriate featurizer.
No transformation is required for the LCNN featurizer.
reload : bool
if True, the first call for a particular featurizer and splitter will cache
the datasets to disk, and subsequent calls will reload the cached datasets.
data_dir : str
a directory to save the raw data in
save_dir : str, optional (default None)
a directory to save the dataset in
References
----------
.. [1] <NAME>, <NAME>. "Lattice Convolutional Neural Network Modeling of Adsorbate
Coverage Effects"J. Phys. Chem. C 2019, 123, 18951−18959
Examples
--------
>>>
>> import deepchem as dc
>> tasks, datasets, transformers = load_Platinum_Adsorption(
>> reload=True,
>> data_dir=data_path,
>> save_dir=data_path,
>> featurizer_kwargs=feat_args)
>> train_dataset, val_dataset, test_dataset = datasets
"""
loader = _PtAdsorptionLoader(featurizer, splitter, transformers,
PLATINUM_TASKS, data_dir, save_dir, **kwargs)
return loader.load_dataset('LCNN_feat', reload)
<file_sep>chembl_tasks = ['CHEMBL1075051', 'CHEMBL1075104', 'CHEMBL1075145', 'CHEMBL1075189'
, 'CHEMBL1075228', 'CHEMBL1075284', 'CHEMBL1075319', 'CHEMBL1163101'
, 'CHEMBL1163116', 'CHEMBL1163125', 'CHEMBL1255149', 'CHEMBL1255150'
, 'CHEMBL1293255', 'CHEMBL1293289', 'CHEMBL1293292', 'CHEMBL1741186'
, 'CHEMBL1741195', 'CHEMBL1744525', 'CHEMBL1764940', 'CHEMBL1781'
, 'CHEMBL1781862', 'CHEMBL1782', 'CHEMBL1784', 'CHEMBL1790', 'CHEMBL1792'
, 'CHEMBL1795101', 'CHEMBL1795126', 'CHEMBL1800', 'CHEMBL1801', 'CHEMBL1804'
, 'CHEMBL1806', 'CHEMBL1811', 'CHEMBL1821', 'CHEMBL1822', 'CHEMBL1824'
, 'CHEMBL1825', 'CHEMBL1827', 'CHEMBL1829', 'CHEMBL1833', 'CHEMBL1836'
, 'CHEMBL1844', 'CHEMBL1849', 'CHEMBL1850', 'CHEMBL1853', 'CHEMBL1855'
, 'CHEMBL1856', 'CHEMBL1860', 'CHEMBL1862', 'CHEMBL1865', 'CHEMBL1867'
, 'CHEMBL1868', 'CHEMBL1871', 'CHEMBL1873', 'CHEMBL1875', 'CHEMBL1878'
, 'CHEMBL1881', 'CHEMBL1889', 'CHEMBL1892', 'CHEMBL1898', 'CHEMBL1899'
, 'CHEMBL1900', 'CHEMBL1901', 'CHEMBL1902', 'CHEMBL1906', 'CHEMBL1907'
, 'CHEMBL1908', 'CHEMBL1913', 'CHEMBL1914', 'CHEMBL1916', 'CHEMBL1917'
, 'CHEMBL1919', 'CHEMBL1921', 'CHEMBL1921666', 'CHEMBL1926', 'CHEMBL1936'
, 'CHEMBL1937', 'CHEMBL1941', 'CHEMBL1942', 'CHEMBL1944', 'CHEMBL1945'
, 'CHEMBL1946', 'CHEMBL1947', 'CHEMBL1949', 'CHEMBL1951', 'CHEMBL1952'
, 'CHEMBL1955', 'CHEMBL1957', 'CHEMBL1966', 'CHEMBL1968', 'CHEMBL1974'
, 'CHEMBL1977', 'CHEMBL1978', 'CHEMBL1980', 'CHEMBL1981', 'CHEMBL1983'
, 'CHEMBL1985', 'CHEMBL1991', 'CHEMBL1994', 'CHEMBL1995', 'CHEMBL1997'
, 'CHEMBL2000', 'CHEMBL2001', 'CHEMBL2002', 'CHEMBL2007', 'CHEMBL2014'
, 'CHEMBL2016', 'CHEMBL202', 'CHEMBL2027', 'CHEMBL2028', 'CHEMBL203'
, 'CHEMBL2034', 'CHEMBL2035', 'CHEMBL2039', 'CHEMBL204', 'CHEMBL2041'
, 'CHEMBL2047', 'CHEMBL2049', 'CHEMBL205', 'CHEMBL2056', 'CHEMBL206'
, 'CHEMBL2061', 'CHEMBL2069', 'CHEMBL208', 'CHEMBL2083', 'CHEMBL2085'
, 'CHEMBL209', 'CHEMBL210', 'CHEMBL2107', 'CHEMBL2108', 'CHEMBL211', 'CHEMBL213'
, 'CHEMBL214', 'CHEMBL2146302', 'CHEMBL2147', 'CHEMBL2148', 'CHEMBL215'
, 'CHEMBL216', 'CHEMBL217', 'CHEMBL2179', 'CHEMBL218', 'CHEMBL2185', 'CHEMBL219'
, 'CHEMBL220', 'CHEMBL2207', 'CHEMBL2208', 'CHEMBL221', 'CHEMBL222', 'CHEMBL223'
, 'CHEMBL224', 'CHEMBL2243', 'CHEMBL225', 'CHEMBL226', 'CHEMBL2265', 'CHEMBL227'
, 'CHEMBL2274', 'CHEMBL2276', 'CHEMBL228', 'CHEMBL2285', 'CHEMBL2288'
, 'CHEMBL229', 'CHEMBL2292', 'CHEMBL230', 'CHEMBL2304402', 'CHEMBL2304404'
, 'CHEMBL231', 'CHEMBL2318', 'CHEMBL232', 'CHEMBL2326', 'CHEMBL2327'
, 'CHEMBL2329', 'CHEMBL233', 'CHEMBL2334', 'CHEMBL2335', 'CHEMBL2337'
, 'CHEMBL234', 'CHEMBL2345', 'CHEMBL235', 'CHEMBL236', 'CHEMBL2361', 'CHEMBL2363'
, 'CHEMBL2366456', 'CHEMBL2366505', 'CHEMBL2366512', 'CHEMBL2366516'
, 'CHEMBL2366517', 'CHEMBL237', 'CHEMBL2373', 'CHEMBL238', 'CHEMBL239'
, 'CHEMBL2391', 'CHEMBL2397', 'CHEMBL240', 'CHEMBL2409', 'CHEMBL241'
, 'CHEMBL2413', 'CHEMBL2414', 'CHEMBL242', 'CHEMBL2425', 'CHEMBL243'
, 'CHEMBL2431', 'CHEMBL2434', 'CHEMBL244', 'CHEMBL2447', 'CHEMBL245', 'CHEMBL246'
, 'CHEMBL2461', 'CHEMBL247', 'CHEMBL2470', 'CHEMBL2474', 'CHEMBL248'
, 'CHEMBL2487', 'CHEMBL2488', 'CHEMBL2489', 'CHEMBL249', 'CHEMBL2492'
, 'CHEMBL2499', 'CHEMBL251', 'CHEMBL252', 'CHEMBL2525', 'CHEMBL2527', 'CHEMBL253'
, 'CHEMBL2534', 'CHEMBL2536', 'CHEMBL254', 'CHEMBL255', 'CHEMBL256', 'CHEMBL2563'
, 'CHEMBL2564', 'CHEMBL2567', 'CHEMBL2568', 'CHEMBL2575', 'CHEMBL258'
, 'CHEMBL2581', 'CHEMBL259', 'CHEMBL2590', 'CHEMBL2599', 'CHEMBL260', 'CHEMBL261'
, 'CHEMBL2611', 'CHEMBL2617', 'CHEMBL262', 'CHEMBL2622', 'CHEMBL2637'
, 'CHEMBL264', 'CHEMBL265', 'CHEMBL2652', 'CHEMBL2664', 'CHEMBL267', 'CHEMBL268'
, 'CHEMBL269', 'CHEMBL2693', 'CHEMBL2695', 'CHEMBL270', 'CHEMBL2716'
, 'CHEMBL2722', 'CHEMBL273', 'CHEMBL2730', 'CHEMBL2736', 'CHEMBL274'
, 'CHEMBL2742', 'CHEMBL2749', 'CHEMBL275', 'CHEMBL2756', 'CHEMBL276'
, 'CHEMBL2778', 'CHEMBL278', 'CHEMBL2781', 'CHEMBL2782', 'CHEMBL2789'
, 'CHEMBL279', 'CHEMBL280', 'CHEMBL2803', 'CHEMBL2808', 'CHEMBL2815'
, 'CHEMBL2820', 'CHEMBL2828', 'CHEMBL283', 'CHEMBL2830', 'CHEMBL2835'
, 'CHEMBL284', 'CHEMBL2842', 'CHEMBL285', 'CHEMBL2851', 'CHEMBL2858', 'CHEMBL286'
, 'CHEMBL2868', 'CHEMBL287', 'CHEMBL2871', 'CHEMBL288', 'CHEMBL2882'
, 'CHEMBL2885', 'CHEMBL2902', 'CHEMBL2903', 'CHEMBL2916', 'CHEMBL2949'
, 'CHEMBL2954', 'CHEMBL2959', 'CHEMBL2971', 'CHEMBL2973', 'CHEMBL2978'
, 'CHEMBL298', 'CHEMBL299', 'CHEMBL2993', 'CHEMBL2996', 'CHEMBL2998', 'CHEMBL301'
, 'CHEMBL3012', 'CHEMBL3018', 'CHEMBL302', 'CHEMBL3024', 'CHEMBL3025'
, 'CHEMBL3037', 'CHEMBL304', 'CHEMBL3045', 'CHEMBL3048', 'CHEMBL3060'
, 'CHEMBL3066', 'CHEMBL3067', 'CHEMBL3072', 'CHEMBL308', 'CHEMBL3081'
, 'CHEMBL3085613', 'CHEMBL309', 'CHEMBL3100', 'CHEMBL3105', 'CHEMBL3106'
, 'CHEMBL311', 'CHEMBL3114', 'CHEMBL3116', 'CHEMBL312', 'CHEMBL313', 'CHEMBL3130'
, 'CHEMBL3138', 'CHEMBL3142', 'CHEMBL3145', 'CHEMBL3155', 'CHEMBL3157'
, 'CHEMBL3166', 'CHEMBL318', 'CHEMBL3180', 'CHEMBL3181', 'CHEMBL319'
, 'CHEMBL3192', 'CHEMBL3199', 'CHEMBL3202', 'CHEMBL321', 'CHEMBL322'
, 'CHEMBL3222', 'CHEMBL3223', 'CHEMBL3227', 'CHEMBL3229', 'CHEMBL3230'
, 'CHEMBL3231', 'CHEMBL324', 'CHEMBL3242', 'CHEMBL3247', 'CHEMBL325'
, 'CHEMBL3254', 'CHEMBL326', 'CHEMBL3267', 'CHEMBL3286', 'CHEMBL330'
, 'CHEMBL3305', 'CHEMBL331', 'CHEMBL3310', 'CHEMBL3314', 'CHEMBL3318'
, 'CHEMBL332', 'CHEMBL333', 'CHEMBL3332', 'CHEMBL335', 'CHEMBL3351', 'CHEMBL3358'
, 'CHEMBL3360', 'CHEMBL3361', 'CHEMBL3371', 'CHEMBL3374', 'CHEMBL338'
, 'CHEMBL339', 'CHEMBL3399910', 'CHEMBL340', 'CHEMBL3403', 'CHEMBL3419'
, 'CHEMBL3426', 'CHEMBL3437', 'CHEMBL3438', 'CHEMBL344', 'CHEMBL3464'
, 'CHEMBL3468', 'CHEMBL3471', 'CHEMBL3473', 'CHEMBL3474', 'CHEMBL3476'
, 'CHEMBL3486', 'CHEMBL3501', 'CHEMBL3510', 'CHEMBL3513', 'CHEMBL3522'
, 'CHEMBL3524', 'CHEMBL3535', 'CHEMBL3553', 'CHEMBL3559', 'CHEMBL3563'
, 'CHEMBL3568', 'CHEMBL3571', 'CHEMBL3572', 'CHEMBL3582', 'CHEMBL3587'
, 'CHEMBL3589', 'CHEMBL3590', 'CHEMBL3594', 'CHEMBL3602', 'CHEMBL3614'
, 'CHEMBL3623', 'CHEMBL3629', 'CHEMBL3638338', 'CHEMBL3649', 'CHEMBL3650'
, 'CHEMBL3687', 'CHEMBL3691', 'CHEMBL3699', 'CHEMBL3706', 'CHEMBL3710'
, 'CHEMBL3717', 'CHEMBL3729', 'CHEMBL3746', 'CHEMBL3759', 'CHEMBL3764'
, 'CHEMBL3766', 'CHEMBL3768', 'CHEMBL3769', 'CHEMBL3772', 'CHEMBL3775'
, 'CHEMBL3776', 'CHEMBL3778', 'CHEMBL3785', 'CHEMBL3788', 'CHEMBL3795'
, 'CHEMBL3798', 'CHEMBL3802', 'CHEMBL3807', 'CHEMBL3815', 'CHEMBL3816'
, 'CHEMBL3820', 'CHEMBL3833', 'CHEMBL3836', 'CHEMBL3837', 'CHEMBL3864'
, 'CHEMBL3868', 'CHEMBL3869', 'CHEMBL3880', 'CHEMBL3884', 'CHEMBL3891'
, 'CHEMBL3892', 'CHEMBL3910', 'CHEMBL3912', 'CHEMBL3920', 'CHEMBL3922'
, 'CHEMBL3942', 'CHEMBL3943', 'CHEMBL3948', 'CHEMBL3952', 'CHEMBL3959'
, 'CHEMBL3969', 'CHEMBL3974', 'CHEMBL3975', 'CHEMBL3976', 'CHEMBL3979'
, 'CHEMBL3983', 'CHEMBL3991', 'CHEMBL3996', 'CHEMBL4005', 'CHEMBL4015'
, 'CHEMBL4016', 'CHEMBL4018', 'CHEMBL4026', 'CHEMBL4029', 'CHEMBL4040'
, 'CHEMBL4051', 'CHEMBL4068', 'CHEMBL4072', 'CHEMBL4073', 'CHEMBL4074'
, 'CHEMBL4077', 'CHEMBL4078', 'CHEMBL4080', 'CHEMBL4093', 'CHEMBL4102'
, 'CHEMBL4111', 'CHEMBL4123', 'CHEMBL4124', 'CHEMBL4128', 'CHEMBL4132'
, 'CHEMBL4140', 'CHEMBL4142', 'CHEMBL4145', 'CHEMBL4150', 'CHEMBL4153'
, 'CHEMBL4161', 'CHEMBL4179', 'CHEMBL4188', 'CHEMBL4191', 'CHEMBL4198'
, 'CHEMBL4203', 'CHEMBL4204', 'CHEMBL4224', 'CHEMBL4234', 'CHEMBL4235'
, 'CHEMBL4247', 'CHEMBL4261', 'CHEMBL4282', 'CHEMBL4296', 'CHEMBL4302'
, 'CHEMBL4303', 'CHEMBL4306', 'CHEMBL4308', 'CHEMBL4315', 'CHEMBL4321'
, 'CHEMBL4333', 'CHEMBL4336', 'CHEMBL4338', 'CHEMBL4354', 'CHEMBL4358'
, 'CHEMBL4361', 'CHEMBL4372', 'CHEMBL4393', 'CHEMBL4394', 'CHEMBL4409'
, 'CHEMBL4414', 'CHEMBL4422', 'CHEMBL4427', 'CHEMBL4429', 'CHEMBL4430'
, 'CHEMBL4439', 'CHEMBL4441', 'CHEMBL4462', 'CHEMBL4465', 'CHEMBL4471'
, 'CHEMBL4477', 'CHEMBL4478', 'CHEMBL4481', 'CHEMBL4482', 'CHEMBL4501'
, 'CHEMBL4506', 'CHEMBL4508', 'CHEMBL4523', 'CHEMBL4550', 'CHEMBL4552'
, 'CHEMBL4561', 'CHEMBL4581', 'CHEMBL4586', 'CHEMBL4588', 'CHEMBL4599'
, 'CHEMBL4600', 'CHEMBL4608', 'CHEMBL4616', 'CHEMBL4617', 'CHEMBL4618'
, 'CHEMBL4625', 'CHEMBL4630', 'CHEMBL4633', 'CHEMBL4641', 'CHEMBL4644'
, 'CHEMBL4649', 'CHEMBL4652', 'CHEMBL4653', 'CHEMBL4657', 'CHEMBL4660'
, 'CHEMBL4662', 'CHEMBL4681', 'CHEMBL4683', 'CHEMBL4685', 'CHEMBL4687'
, 'CHEMBL4696', 'CHEMBL4698', 'CHEMBL4699', 'CHEMBL4722', 'CHEMBL4761'
, 'CHEMBL4768', 'CHEMBL4777', 'CHEMBL4779', 'CHEMBL4780', 'CHEMBL4789'
, 'CHEMBL4792', 'CHEMBL4793', 'CHEMBL4794', 'CHEMBL4801', 'CHEMBL4802'
, 'CHEMBL4803', 'CHEMBL4804', 'CHEMBL4805', 'CHEMBL4816', 'CHEMBL4822'
, 'CHEMBL4828', 'CHEMBL4829', 'CHEMBL4835', 'CHEMBL4860', 'CHEMBL4893'
, 'CHEMBL4895', 'CHEMBL4899', 'CHEMBL4908', 'CHEMBL4919', 'CHEMBL4975'
, 'CHEMBL4979', 'CHEMBL4980', 'CHEMBL5011', 'CHEMBL5017', 'CHEMBL5023'
, 'CHEMBL5024', 'CHEMBL5036', 'CHEMBL5067', 'CHEMBL5071', 'CHEMBL5076'
, 'CHEMBL5077', 'CHEMBL5080', 'CHEMBL5102', 'CHEMBL5103', 'CHEMBL5112'
, 'CHEMBL5113', 'CHEMBL5122', 'CHEMBL5131', 'CHEMBL5136', 'CHEMBL5137'
, 'CHEMBL5141', 'CHEMBL5145', 'CHEMBL5147', 'CHEMBL5160', 'CHEMBL5192'
, 'CHEMBL5203', 'CHEMBL5205', 'CHEMBL5247', 'CHEMBL5251', 'CHEMBL5282'
, 'CHEMBL5314', 'CHEMBL5328', 'CHEMBL5331', 'CHEMBL5353', 'CHEMBL5373'
, 'CHEMBL5375', 'CHEMBL5387', 'CHEMBL5393', 'CHEMBL5407', 'CHEMBL5409'
, 'CHEMBL5413', 'CHEMBL5414', 'CHEMBL5424', 'CHEMBL5441', 'CHEMBL5443'
, 'CHEMBL5445', 'CHEMBL5451', 'CHEMBL5457', 'CHEMBL5462', 'CHEMBL5471'
, 'CHEMBL5485', 'CHEMBL5491', 'CHEMBL5508', 'CHEMBL5522', 'CHEMBL5543'
, 'CHEMBL5555', 'CHEMBL5570', 'CHEMBL5582', 'CHEMBL5631', 'CHEMBL5645'
, 'CHEMBL5652', 'CHEMBL5658', 'CHEMBL5669', 'CHEMBL5697', 'CHEMBL5704'
, 'CHEMBL5736', 'CHEMBL5747', 'CHEMBL5763', 'CHEMBL5769', 'CHEMBL5800'
, 'CHEMBL5847', 'CHEMBL5879', 'CHEMBL5932', 'CHEMBL5966', 'CHEMBL5971'
, 'CHEMBL6007', 'CHEMBL6009', 'CHEMBL6080', 'CHEMBL6084', 'CHEMBL6136'
, 'CHEMBL6137', 'CHEMBL6140', 'CHEMBL6141', 'CHEMBL6145', 'CHEMBL6154'
, 'CHEMBL6164', 'CHEMBL6166', 'CHEMBL6184']
<file_sep># Dataset overview
The PCBA group contains data from experiments in the PubChem BioAssay database (Wang et al., 2012).
Ref: <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>
arXiv preprint arXiv:1502.02072<file_sep># This script creates the new deepchem environment
# This script works only on Bash and Zsh
CMDNAME=`basename ${BASH_SOURCE:-$0}`
if [ $# -ne 3 ]; then
echo "Please set two arguments."
echo "Usage) source $CMDNAME python_version cpu_or_gpu" 1>&2
echo "Example) source $CMDNAME 3.8 gpu tensorflow" 1>&2
return 1
fi
# This command is nearly equal to `conda init` command
# Need to use `conda activate` command
eval "$(conda shell.bash hook)"
# Create deepchem environment
conda config --set always_yes yes
conda create --name deepchem python=$1
conda install -c conda-forge conda-merge
if [ "$2" = "gpu" ];
then
# We expect the CUDA version to be 10.1.
if [ "$3" = "tensorflow" ];
then
conda-merge $PWD/requirements/tensorflow/env_tensorflow.yml $PWD/requirements/env_test.yml > $PWD/env.yml
echo "Installing Tensorflow environment with GPU"
elif [ "$3" = "torch" ];
then
conda-merge $PWD/requirements/pytorch/env_pytorch.yml $PWD/requirements/pytorch/env_pytorch.gpu.yml $PWD/requirements/env_test.yml > $PWD/env.yml
echo "Installing pytorch environment with GPU"
elif [ "$3" = "jax" ];
then
conda-merge $PWD/requirements/jax/env_jax.yml $PWD/requirements/jax/env_jax.gpu.yml $PWD/requirements/env_test.yml > $PWD/env.yml
echo "Installing jax environment with GPU"
else
conda-merge $PWD/requirements/env_common.yml $PWD/requirements/env_test.yml > $PWD/env.yml
echo "Installing common environment with GPU"
fi
else
# CPU-only installation; CUDA is not required.
if [ "$3" = "tensorflow" ];
then
conda-merge $PWD/requirements/tensorflow/env_tensorflow.yml $PWD/requirements/env_test.yml > $PWD/env.yml
echo "Installing Tensorflow environment with CPU"
elif [ "$3" = "torch" ];
then
conda-merge $PWD/requirements/pytorch/env_pytorch.yml $PWD/requirements/pytorch/env_pytorch.cpu.yml $PWD/requirements/env_test.yml > $PWD/env.yml
echo "Installing pytorch environment with CPU"
elif [ "$3" = "jax" ];
then
conda-merge $PWD/requirements/jax/env_jax.yml $PWD/requirements/jax/env_jax.cpu.yml $PWD/requirements/env_test.yml > $PWD/env.yml
echo "Installing jax environment with CPU"
else
conda-merge $PWD/requirements/env_common.yml $PWD/requirements/env_test.yml > $PWD/env.yml
echo "Installing common environment with CPU"
fi
fi
# Install all dependencies
conda env update --file $PWD/env.yml
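# Example invocation (illustrative only; the script file name below is hypothetical,
# use whatever name this file is saved under):
#   source install_deepchem_env.sh 3.8 cpu torch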
<file_sep>import json
import tempfile
from deepchem.feat.vocabulary_builders.hf_vocab import HuggingFaceVocabularyBuilder
def testHuggingFaceVocabularyBuilder():
from tokenizers import models, trainers
from tokenizers.pre_tokenizers import Whitespace
corpus = """hello world"""
corpus_file = tempfile.NamedTemporaryFile()
with open(corpus_file.name, 'w') as fp:
fp.write(corpus)
model = models.BPE(unk_token="[UNK]")
special_tokens = ["[UNK]"]
trainer = trainers.BpeTrainer(vocab_size=25000,
special_tokens=special_tokens)
# Build vocabulary by wrapping in huggingface vocabulary builder
vb = HuggingFaceVocabularyBuilder(model=model, trainer=trainer)
vb.tokenizer.pre_tokenizer = Whitespace()
vb.build([corpus_file.name])
vocab_file = tempfile.NamedTemporaryFile()
vb.save(vocab_file.name)
# Load vocabulary and do a basic sanity check on the vocabulary
with open(vocab_file.name, 'r') as f:
data = json.loads(f.read())
assert len(data['added_tokens']) == 1 # [UNK]
assert list(data['model']['vocab'].keys()) == [
'[UNK]', 'd', 'e', 'h', 'l', 'o', 'r', 'w', 'el', 'hel', 'ld', 'lo',
'or', 'wor', 'hello', 'world'
]
assert data['model']['merges'] == [
'e l', 'h el', 'l d', 'l o', 'o r', 'w or', 'hel lo', 'wor ld'
]
<file_sep>import torch
import numpy as np
from deepchem.models.torch_models.torch_model import TorchModel
from deepchem.models.torch_models.layers import CNNModule
from deepchem.models.losses import L2Loss
from deepchem.metrics import to_one_hot
from typing import List, Union, Callable, Optional
from deepchem.utils.typing import OneOrMany, ActivationFn, LossFn
class CNN(TorchModel):
"""A 1, 2, or 3 dimensional convolutional network for either regression or classification.
The network consists of the following sequence of layers:
- A configurable number of convolutional layers
- A global pooling layer (either max pool or average pool)
- A final fully connected layer to compute the output
It optionally can compose the model from pre-activation residual blocks, as
described in https://arxiv.org/abs/1603.05027, rather than a simple stack of
convolution layers. This often leads to easier training, especially when using a
large number of layers. Note that residual blocks can only be used when
successive layers have the same output shape. Wherever the output shape changes, a
simple convolution layer will be used even if residual=True.
Examples
--------
>>> import numpy as np
>>> import deepchem as dc
>>> n_samples = 10
>>> n_features = 3
>>> n_tasks = 1
>>> np.random.seed(123)
>>> X = np.random.rand(n_samples, 10, n_features)
>>> y = np.random.randint(2, size=(n_samples, n_tasks)).astype(np.float32)
>>> dataset: dc.data.Dataset = dc.data.NumpyDataset(X, y)
>>> regression_metric = dc.metrics.Metric(dc.metrics.mean_squared_error)
>>> model = CNN(n_tasks, n_features, dims=1, kernel_size=3, mode='regression')
>>> avg_loss = model.fit(dataset, nb_epoch=10)
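
A classification variant can be constructed analogously (a hedged sketch, not
run as a doctest; it assumes the same featurized inputs as above):
model = CNN(n_tasks, n_features, dims=1, kernel_size=3, mode='classification', n_classes=2)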
"""
def __init__(self,
n_tasks: int,
n_features: int,
dims: int,
layer_filters: List[int] = [100],
kernel_size: OneOrMany[int] = 5,
strides: OneOrMany[int] = 1,
weight_init_stddevs: OneOrMany[float] = 0.02,
bias_init_consts: OneOrMany[float] = 1.0,
weight_decay_penalty: float = 0.0,
weight_decay_penalty_type: str = 'l2',
dropouts: OneOrMany[float] = 0.5,
activation_fns: OneOrMany[ActivationFn] = 'relu',
pool_type: str = 'max',
mode: str = 'classification',
n_classes: int = 2,
uncertainty: bool = False,
residual: bool = False,
padding: Union[int, str] = 'valid',
**kwargs) -> None:
"""TorchModel wrapper for CNN
Parameters
----------
n_tasks: int
number of tasks
n_features: int
number of features
dims: int
the number of dimensions to apply convolutions over (1, 2, or 3)
layer_filters: list
the number of output filters for each convolutional layer in the network.
The length of this list determines the number of layers.
kernel_size: int, tuple, or list
a list giving the shape of the convolutional kernel for each layer. Each
element may be either an int (use the same kernel width for every dimension)
or a tuple (the kernel width along each dimension). Alternatively this may
be a single int or tuple instead of a list, in which case the same kernel
shape is used for every layer.
strides: int, tuple, or list
a list giving the stride between applications of the kernel for each layer.
Each element may be either an int (use the same stride for every dimension)
or a tuple (the stride along each dimension). Alternatively this may be a
single int or tuple instead of a list, in which case the same stride is
used for every layer.
weight_init_stddevs: list or float
the standard deviation of the distribution to use for weight initialization
of each layer. The length of this list should equal len(layer_filters)+1,
where the final element corresponds to the dense layer. Alternatively this
may be a single value instead of a list, in which case the same value is used
for every layer.
bias_init_consts: list or float
the value to initialize the biases in each layer to. The length of this
list should equal len(layer_filters)+1, where the final element corresponds
to the dense layer. Alternatively this may be a single value instead of a
list, in which case the same value is used for every layer.
weight_decay_penalty: float
the magnitude of the weight decay penalty to use
weight_decay_penalty_type: str
the type of penalty to use for weight decay, either 'l1' or 'l2'
dropouts: list or float
the dropout probability to use for each layer. The length of this list should equal len(layer_filters).
Alternatively this may be a single value instead of a list, in which case the same value is used for every layer
activation_fns: str or list
the torch activation function to apply to each layer. The length of this list should equal
len(layer_filters). Alternatively this may be a single value instead of a list, in which case the
same value is used for every layer, 'relu' by default
pool_type: str
the type of pooling layer to use, either 'max' or 'average'
mode: str
Either 'classification' or 'regression'
n_classes: int
the number of classes to predict (only used in classification mode)
uncertainty: bool
if True, include extra outputs and loss terms to enable the uncertainty
in outputs to be predicted
residual: bool
if True, the model will be composed of pre-activation residual blocks instead
of a simple stack of convolutional layers.
padding: str, int or tuple
the padding to use for convolutional layers, either 'valid' or 'same'
"""
self.mode = mode
self.n_classes = n_classes
self.n_tasks = n_tasks
self.model = CNNModule(n_tasks=n_tasks,
n_features=n_features,
dims=dims,
layer_filters=layer_filters,
kernel_size=kernel_size,
strides=strides,
weight_init_stddevs=weight_init_stddevs,
bias_init_consts=bias_init_consts,
dropouts=dropouts,
activation_fns=activation_fns,
pool_type=pool_type,
mode=mode,
n_classes=n_classes,
uncertainty=uncertainty,
residual=residual,
padding=padding)
regularization_loss: Optional[Callable]
if weight_decay_penalty != 0:
weights = [layer.weight for layer in self.model.layers]
if weight_decay_penalty_type == 'l1':
regularization_loss = lambda: weight_decay_penalty * torch.sum( # noqa: E731
torch.stack([torch.abs(w).sum() for w in weights]))
else:
regularization_loss = lambda: weight_decay_penalty * torch.sum( # noqa: E731
torch.stack([torch.square(w).sum() for w in weights]))
else:
regularization_loss = None
loss: Union[L2Loss, LossFn]
if uncertainty:
def loss(outputs, labels, weights):
diff = labels[0] - outputs[0]
return torch.mean(diff**2 / torch.exp(outputs[1]) + outputs[1])
else:
loss = L2Loss()
if self.mode == 'classification':
output_types = ['prediction', 'loss']
else:
if uncertainty:
output_types = ['prediction', 'variance', 'loss', 'loss']
else:
output_types = ["prediction"]
super(CNN, self).__init__(self.model,
loss=loss,
output_types=output_types,
regularization_loss=regularization_loss,
**kwargs)
def default_generator(self,
dataset,
epochs=1,
mode='fit',
deterministic=True,
pad_batches=True):
for epoch in range(epochs):
for (X_b, y_b, w_b,
ids_b) in dataset.iterbatches(batch_size=self.batch_size,
deterministic=deterministic,
pad_batches=pad_batches):
if self.mode == 'classification':
if y_b is not None:
y_b = to_one_hot(y_b.flatten(), self.n_classes)\
.reshape(-1, self.n_tasks, self.n_classes)
dropout = np.array(0.) if mode == 'predict' else np.array(1.)
yield ([X_b, dropout], [y_b], [w_b])
<file_sep>import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import GRU, Linear, ReLU, Sequential
from typing import Iterable, List, Tuple, Optional, Dict, Literal
from deepchem.metrics import to_one_hot
import deepchem as dc
from deepchem.models.losses import SoftmaxCrossEntropy
from deepchem.feat.graph_data import BatchGraphData
from deepchem.models.losses import (
GlobalMutualInformationLoss,
LocalMutualInformationLoss,
)
from deepchem.models.torch_models.layers import MultilayerPerceptron
from deepchem.models.torch_models.modular import ModularTorchModel
try:
from torch_geometric.nn import GINConv, NNConv, global_add_pool
from torch_geometric.nn.aggr import Set2Set
except ImportError:
pass
class GINEncoder(torch.nn.Module):
"""
Graph Information Network (GIN) encoder. This is a graph convolutional network that produces encoded representations for molecular graph inputs.
Parameters
----------
num_features: int
The number of node features
embedding_dim: int
The dimension of the output embedding
num_gc_layers: int, optional (default 5)
The number of graph convolutional layers to use
Example
-------
>>> import numpy as np
>>> from deepchem.models.torch_models.infograph import GINEncoder
>>> from deepchem.feat.graph_data import GraphData
>>> encoder = GINEncoder(num_features=25, embedding_dim=32)
>>> node_features = np.random.randn(10, 25)
>>> edge_index = np.array([[0, 1, 2], [1, 2, 3]])
>>> edge_features = np.random.randn(3, 10)
>>> graph_index = np.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
>>> data = GraphData(node_features=node_features, edge_index=edge_index, edge_features=edge_features, graph_index=graph_index).numpy_to_torch()
>>> embedding, intermediate_embeddings = encoder(data)
>>> print(embedding.shape)
torch.Size([1, 30])
References
----------
.. [1] <NAME>., <NAME>., <NAME>. & <NAME>. How Powerful are Graph Neural Networks? arXiv:1810.00826 [cs, stat] (2019).
"""
def __init__(self,
num_features: int,
embedding_dim: int,
num_gc_layers: int = 5):
dim = int(
embedding_dim / num_gc_layers
) # the output dimension of this encoder is modified by the number of GC layers, so this is necessary to ensure that the output dimension is consistent with the InfoGraphEncoder
super().__init__()
self.num_gc_layers = num_gc_layers
self.convs = torch.nn.ModuleList()
self.bns = torch.nn.ModuleList()
for i in range(num_gc_layers):
if i == 0:
nn = Sequential(Linear(num_features, dim), ReLU(),
Linear(dim, dim))
else:
nn = Sequential(Linear(dim, dim), ReLU(), Linear(dim, dim))
conv = GINConv(nn)
bn = torch.nn.BatchNorm1d(dim)
self.convs.append(conv)
self.bns.append(bn)
def forward(self, data):
"""
Encodes the input graph data.
Parameters
----------
data : BatchGraphData
The batched input graph data.
Returns
-------
Tuple[torch.Tensor, torch.Tensor]
A tuple containing the encoded representation and intermediate embeddings.
"""
xs = []
x = data.node_features
for i in range(self.num_gc_layers):
x = F.relu(self.convs[i](x, data.edge_index))
x = self.bns[i](x)
xs.append(x)
xpool = [global_add_pool(x, data.graph_index) for x in xs]
x = torch.cat(xpool, 1)
xs = torch.cat(xs, 1)
return x, xs
class InfoGraphEncoder(torch.nn.Module):
"""
The encoder for the InfoGraph model. It is a message passing graph convolutional
network that produces encoded representations for molecular graph inputs.
Parameters
----------
num_features: int
Number of node features for each input
edge_features: int
Number of edge features for each input
embedding_dim: int
Dimension of the embedding
Example
-------
>>> import numpy as np
>>> from deepchem.models.torch_models.infograph import InfoGraphEncoder
>>> from deepchem.feat.graph_data import GraphData
>>> encoder = InfoGraphEncoder(num_features=25, edge_features=10, embedding_dim=32)
>>> node_features = np.random.randn(10, 25)
>>> edge_index = np.array([[0, 1, 2], [1, 2, 3]])
>>> edge_features = np.random.randn(3, 10)
>>> graph_index = np.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
>>> data = GraphData(node_features=node_features, edge_index=edge_index, edge_features=edge_features, graph_index=graph_index).numpy_to_torch()
>>> embedding, feature_map = encoder(data)
>>> print(embedding.shape)
torch.Size([1, 64])
"""
def __init__(self, num_features, edge_features, embedding_dim):
super().__init__()
self.lin0 = torch.nn.Linear(num_features, embedding_dim)
nn = Sequential(Linear(edge_features, 128), ReLU(),
Linear(128, embedding_dim * embedding_dim))
self.conv = NNConv(embedding_dim,
embedding_dim,
nn,
aggr='mean',
root_weight=False)
self.gru = GRU(embedding_dim, embedding_dim)
self.set2set = Set2Set(embedding_dim, processing_steps=3)
def forward(self, data):
"""
Encode input graphs into an embedding and feature map.
Parameters
----------
data: Union[BatchGraphData, GraphData]
Contains information about graphs.
Returns
-------
torch.Tensor
Encoded tensor of input data.
torch.Tensor
Feature map tensor of input data.
"""
out = F.relu(self.lin0(data.node_features))
h = out.unsqueeze(0)
for i in range(3):
m = F.relu(self.conv(out, data.edge_index, data.edge_features))
out, h = self.gru(m.unsqueeze(0), h)
out = out.squeeze(0)
feat_map = out
# set2set doubles the dimensionality of the embedding
out = self.set2set(out, data.graph_index)
return out, feat_map
class InfoGraph(nn.Module):
"""
The nn.Module for InfoGraph. This class defines the forward pass of InfoGraph.
References
----------
1. <NAME>., <NAME>., <NAME>. & <NAME>. InfoGraph: Unsupervised and Semi-supervised Graph-Level Representation Learning via Mutual Information Maximization. Preprint at http://arxiv.org/abs/1908.01000 (2020).
Example
-------
>>> from deepchem.models.torch_models.infograph import InfoGraphModel
>>> from deepchem.feat.molecule_featurizers import MolGraphConvFeaturizer
>>> from deepchem.feat.graph_data import BatchGraphData
>>> num_feat = 30
>>> num_edge = 11
>>> infographmodular = InfoGraphModel(num_feat, num_edge, 64)
>>> smiles = ['C1=CC=CC=C1', 'C1=CC=CC=C1C2=CC=CC=C2']
>>> featurizer = MolGraphConvFeaturizer(use_edges=True)
>>> graphs = BatchGraphData(featurizer.featurize(smiles))
>>> graphs = graphs.numpy_to_torch(infographmodular.device)
>>> model = infographmodular.model
>>> global_enc, local_enc = model(graphs)
"""
def __init__(self, encoder, local_d, global_d, prior_d, init_emb=False):
super().__init__()
self.encoder = encoder
self.local_d = local_d
self.global_d = global_d
self.prior_d = prior_d
if init_emb:
self.init_emb()
def init_emb(self):
for m in self.modules():
if isinstance(m, nn.Linear):
torch.nn.init.xavier_uniform_(m.weight.data)
if m.bias is not None:
m.bias.data.fill_(0.0)
def forward(self, data):
y, M = self.encoder(data)
g_enc = self.global_d(y)
l_enc = self.local_d(M)
return g_enc, l_enc
class InfoGraphFinetune(nn.Module):
"""The finetuning module for InfoGraph model
Parameters
----------
encoder: nn.Module
An encoder to encode input graph data
fc1: nn.Module
A fully connected layer
fc2: nn.Module
A fully connected layer
Example
-------
>>> from deepchem.models.torch_models.infograph import InfoGraphModel
>>> from deepchem.feat.molecule_featurizers import MolGraphConvFeaturizer
>>> from deepchem.feat.graph_data import BatchGraphData
>>> num_feat = 30
>>> num_edge = 11
>>> infographmodular = InfoGraphModel(num_feat, num_edge, num_gc_layers=1, task='regression', n_tasks=1)
>>> smiles = ['C1=CC=CC=C1', 'C1=CC=CC=C1C2=CC=CC=C2']
>>> featurizer = MolGraphConvFeaturizer(use_edges=True)
>>> graphs = BatchGraphData(featurizer.featurize(smiles))
>>> graphs = graphs.numpy_to_torch(infographmodular.device)
>>> model = infographmodular.model
>>> predictions = model(graphs)
Reference
---------
.. <NAME>, et al., "InfoGraph: Unsupervised and Semi-supervised Graph-Level Representation Learning via Mutual Information Maximization".
"""
def __init__(self, encoder, fc1, fc2, init_emb=False):
super().__init__()
self.encoder = encoder
self.fc1 = fc1
self.fc2 = fc2
if init_emb:
self.init_emb()
def init_emb(self):
for m in self.modules():
if isinstance(m, nn.Linear):
torch.nn.init.xavier_uniform_(m.weight.data)
if m.bias is not None:
m.bias.data.fill_(0.0)
def forward(self, data):
y, _ = self.encoder(data)
return self.fc2(F.relu(self.fc1(y)))
class InfoGraphModel(ModularTorchModel):
"""InfoGraphMode
InfoGraphModel is a model which learn graph-level representation via unsupervised learning. To this end,
the model aims to maximize the mutual information between the representations of entire graphs and the representations of substructures of different granularity (eg. nodes, edges, triangles)
The unsupervised training of InfoGraph involves two encoders: one that encodes the entire graph and another that encodes substructures of different sizes. The mutual information between the two encoder outputs is maximized using a contrastive loss function.
The model randomly samples pairs of graphs and substructures, and then maximizes their mutual information by minimizing their distance in a learned embedding space.
This can be used for downstream tasks such as graph classification and molecular property prediction.It is implemented as a ModularTorchModel in order to facilitate transfer learning.
References
----------
1. <NAME>., <NAME>., <NAME>. & <NAME>. InfoGraph: Unsupervised and Semi-supervised Graph-Level Representation Learning via Mutual Information Maximization. Preprint at http://arxiv.org/abs/1908.01000 (2020).
Parameters
----------
num_features: int
Number of node features for each input
embedding_dim: int
Dimension of the embedding
num_gc_layers: int
Number of graph convolutional layers
prior: bool
Whether to use a prior expectation in the loss term
gamma: float
Weight of the prior expectation in the loss term
measure: str
The divergence measure to use for the unsupervised loss. Options are 'GAN', 'JSD',
'KL', 'RKL', 'X2', 'DV', 'H2', or 'W1'.
average_loss: bool
Whether to average the loss over the batch
task: str
The training task, either 'pretraining' or 'regression'
n_tasks: int, optional
Number of prediction tasks (required when task is 'regression')
Example
-------
>>> from deepchem.models.torch_models.infograph import InfoGraphModel
>>> from deepchem.feat import MolGraphConvFeaturizer
>>> from deepchem.data import NumpyDataset
>>> import torch
>>> import tempfile
>>> tempdir = tempfile.TemporaryDirectory()
>>> smiles = ["C1CCC1", "C1=CC=CN=C1"]
>>> featurizer = MolGraphConvFeaturizer(use_edges=True)
>>> X = featurizer.featurize(smiles)
>>> y = torch.randint(0, 2, size=(2, 1)).float()
>>> w = torch.ones(size=(2, 1)).float()
>>> dataset = NumpyDataset(X, y, w)
>>> num_feat, edge_dim = 30, 11 # num feat and edge dim by molgraph conv featurizer
>>> pretrain_model = InfoGraphModel(num_feat, edge_dim, num_gc_layers=1, task='pretraining', model_dir=tempdir.name)
>>> pretraining_loss = pretrain_model.fit(dataset, nb_epoch=1)
>>> pretrain_model.save_checkpoint()
>>> finetune_model = InfoGraphModel(num_feat, edge_dim, num_gc_layers=1, task='regression', n_tasks=1, model_dir=tempdir.name)
>>> finetune_model.restore(components=['encoder'])
>>> finetuning_loss = finetune_model.fit(dataset)
"""
def __init__(self,
num_features,
embedding_dim,
num_gc_layers=5,
prior=True,
gamma=.1,
measure='JSD',
average_loss=True,
task='pretraining',
n_tasks: Optional[int] = None,
**kwargs):
if task == 'regression':
assert n_tasks, 'Number of prediction tasks required for building regression model'
self.num_features = num_features
self.embedding_dim = embedding_dim * num_gc_layers
self.num_gc_layers = num_gc_layers
self.gamma = gamma
self.prior = prior
self.measure = measure
self.average_loss = average_loss
self.localloss = LocalMutualInformationLoss()._create_pytorch_loss(
measure, average_loss)
self.task = task
self.n_tasks = n_tasks
self.components = self.build_components()
self.model = self.build_model()
super().__init__(self.model, self.components, **kwargs)
def build_components(self) -> dict:
"""
Build the components of the model. InfoGraph is an unsupervised molecular graph representation learning model. It consists of an encoder, a local discriminator, a global discriminator, and a prior discriminator.
The unsupervised loss is calculated by the mutual information in embedding representations at all layers.
Components list, type and description:
--------------------------------------
encoder: GINEncoder, graph convolutional encoder
local_d: MultilayerPerceptron, local discriminator
global_d: MultilayerPerceptron, global discriminator
prior_d: MultilayerPerceptron, prior discriminator
fc1: MultilayerPerceptron, dense layer used during finetuning
fc2: MultilayerPerceptron, dense layer used during finetuning
"""
components: Dict[str, nn.Module] = {}
if self.task == 'pretraining':
components['encoder'] = GINEncoder(self.num_features,
self.embedding_dim,
self.num_gc_layers)
components['local_d'] = MultilayerPerceptron(self.embedding_dim,
self.embedding_dim,
(self.embedding_dim,),
skip_connection=True)
components['global_d'] = MultilayerPerceptron(self.embedding_dim,
self.embedding_dim,
(self.embedding_dim,),
skip_connection=True)
components['prior_d'] = MultilayerPerceptron(
self.embedding_dim,
1, (self.embedding_dim,),
activation_fn='sigmoid')
elif self.task == 'regression':
components['encoder'] = GINEncoder(self.num_features,
self.embedding_dim,
self.num_gc_layers)
components['fc1'] = torch.nn.Linear(self.embedding_dim,
self.embedding_dim)
# n_tasks is Optional[int] while argument 2 of nn.Linear has to be of type int
components['fc2'] = torch.nn.Linear(self.embedding_dim,
self.n_tasks) # type: ignore
return components
def build_model(self) -> nn.Module:
if self.task == 'pretraining':
model = InfoGraph(**self.components)
elif self.task == 'regression':
model = InfoGraphFinetune(**self.components) # type: ignore
return model
def loss_func(self, inputs, labels, weights):
if self.task == 'pretraining':
y, M = self.components['encoder'](inputs)
g_enc = self.components['global_d'](y)
l_enc = self.components['local_d'](M)
local_global_loss = self.localloss(l_enc, g_enc, inputs.graph_index)
if self.prior:
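# Prior matching term: the prior discriminator should assign high scores to
# samples drawn from a uniform prior (torch.rand_like) and low scores to the
# encoder embeddings y; the term is weighted by gamma.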
prior = torch.rand_like(y)
term_a = torch.log(self.components['prior_d'](prior)).mean()
term_b = torch.log(1.0 - self.components['prior_d'](y)).mean()
prior = -(term_a + term_b) * self.gamma
else:
prior = 0
return local_global_loss + prior
elif self.task == 'regression':
loss_fn = nn.MSELoss()
y = self.model(inputs)
return loss_fn(y, labels)
def _prepare_batch(self, batch):
"""
Prepares the batch for the model by converting the GraphData numpy arrays to torch tensors and moving them to the device.
"""
inputs, labels, weights = batch
inputs = BatchGraphData(inputs[0]).numpy_to_torch(self.device)
_, labels, weights = super()._prepare_batch(([], labels, weights))
if (len(labels) != 0) and (len(weights) != 0):
labels = labels[0]
weights = weights[0]
return inputs, labels, weights
def restore( # type: ignore
self,
components: Optional[List[str]] = None,
checkpoint: Optional[str] = None,
model_dir: Optional[str] = None,
map_location: Optional[torch.device] = None) -> None:
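# Load the saved state dicts for any components also present in this model
# (skipping the composed 'model' entry), then rebuild the composed model from
# the restored components.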
if checkpoint is None:
checkpoints = sorted(self.get_checkpoints(model_dir))
if len(checkpoints) == 0:
raise ValueError('No checkpoint found')
checkpoint = checkpoints[0]
data = torch.load(checkpoint, map_location=map_location)
for name, state_dict in data.items():
if name != 'model' and name in self.components.keys():
self.components[name].load_state_dict(state_dict)
self.build_model()
class InfoGraphStar(torch.nn.Module):
"""
The nn.Module for InfoGraphStar. This class defines the forward pass of InfoGraphStar.
Parameters
----------
encoder: torch.nn.Module
The encoder for InfoGraphStar.
unsup_encoder: torch.nn.Module
The unsupervised encoder for InfoGraph, of identical architecture to encoder.
ff1: torch.nn.Module
The first feedforward layer for InfoGraphStar.
ff2: torch.nn.Module
The second feedforward layer for InfoGraphStar.
fc1: torch.nn.Module
The first fully connected layer for InfoGraphStar.
fc2: torch.nn.Module
The second fully connected layer for InfoGraphStar.
References
----------
.. [1] <NAME>, <NAME>, <NAME>, and <NAME>, “InfoGraph: Unsupervised and Semi-supervised Graph-Level Representation Learning via Mutual Information Maximization.” arXiv, Jan. 17, 2020. http://arxiv.org/abs/1908.01000
Example
-------
>>> import torch
>>> import numpy as np
>>> from deepchem.models.torch_models.infograph import InfoGraphStarModel
>>> from deepchem.feat.molecule_featurizers import MolGraphConvFeaturizer
>>> from deepchem.feat.graph_data import BatchGraphData
>>> num_feat = 30
>>> num_edge = 11
>>> infographmodular = InfoGraphStarModel(num_feat,num_edge,64)
>>> smiles = ['C1=CC=CC=C1', 'C1=CC=CC=C1C2=CC=CC=C2']
>>> featurizer = MolGraphConvFeaturizer(use_edges=True)
>>> graphs = BatchGraphData(featurizer.featurize(smiles)).numpy_to_torch(infographmodular.device)
>>> model = infographmodular.model
>>> output = model(graphs).cpu().detach().numpy()
"""
def __init__(self,
encoder,
unsup_encoder,
ff1,
ff2,
fc1,
fc2,
local_d,
global_d,
mode='regression',
num_tasks=1,
num_classes=2,
init_emb=False):
super().__init__()
self.encoder = encoder
self.unsup_encoder = unsup_encoder
self.ff1 = ff1
self.ff2 = ff2
self.fc1 = fc1
self.fc2 = fc2
self.local_d = local_d
self.global_d = global_d
self.mode = mode
self.num_tasks = num_tasks
self.num_classes = num_classes
if init_emb:
self.init_emb()
def init_emb(self):
"""
Initializes the parameters of the model by setting their values from a uniform distribution and filling the bias with 0.
"""
for m in self.modules():
if isinstance(m, nn.Linear):
torch.nn.init.xavier_uniform_(m.weight.data)
if m.bias is not None:
m.bias.data.fill_(0.0)
def forward(self, data):
"""
Forward pass for InfoGraphStar.
Parameters
----------
data: Union[GraphData, BatchGraphData]
The input data, either a single graph or a batch of graphs.
"""
out, M = self.encoder(data)
out = F.relu(self.fc1(out))
pred = self.fc2(out)
if self.mode == 'classification':
pred = torch.reshape(pred, (-1, self.num_tasks, self.num_classes))
return pred
class InfoGraphStarModel(ModularTorchModel):
"""
InfoGraphStar is a semi-supervised graph convolutional network for predicting molecular properties.
It aims to maximize the mutual information between the graph-level representation and the
representations of substructures of different scales. It does this by producing graph-level
encodings and substructure encodings, and then using a discriminator to classify if they
are from the same molecule or not.
Supervised training is done by using the graph-level encodings to predict the target property. Semi-supervised training is done by adding a loss term that maximizes the mutual information between the graph-level encodings and the substructure encodings to the supervised loss.
These modes can be chosen by setting the task parameter.
To conduct training in unsupervised mode, use InfoGraphModel.
References
----------
.. [1] <NAME>, <NAME>, <NAME>, and <NAME>, “InfoGraph: Unsupervised and Semi-supervised Graph-Level Representation Learning via Mutual Information Maximization.” arXiv, Jan. 17, 2020. http://arxiv.org/abs/1908.01000
Parameters
----------
num_features: int
Number of node features for each input
edge_features: int
Number of edge features for each input
embedding_dim: int
Dimension of the embedding
task: str
The training mode to use. Options are 'supervised', 'semisupervised'. For unsupervised training, use InfoGraphModel.
measure: str
The divergence measure to use for the unsupervised loss. Options are 'GAN', 'JSD',
'KL', 'RKL', 'X2', 'DV', 'H2', or 'W1'.
average_loss: bool
Whether to average the loss over the batch
Examples
--------
>>> from deepchem.models.torch_models import InfoGraphStarModel
>>> from deepchem.feat import MolGraphConvFeaturizer
>>> from deepchem.data import NumpyDataset
>>> import torch
>>> smiles = ["C1CCC1", "C1=CC=CN=C1"]
>>> featurizer = MolGraphConvFeaturizer(use_edges=True)
>>> X = featurizer.featurize(smiles)
>>> y = torch.randint(0, 2, size=(2, 1)).float()
>>> w = torch.ones(size=(2, 1)).float()
>>> ds = NumpyDataset(X, y, w)
>>> num_feat = max([ds.X[i].num_node_features for i in range(len(ds))])
>>> edge_dim = max([ds.X[i].num_edge_features for i in range(len(ds))])
>>> model = InfoGraphStarModel(num_feat, edge_dim, 15, task='semisupervised')
>>> loss = model.fit(ds, nb_epoch=1)
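
A classification variant is configured analogously (a hedged sketch, not run
as a doctest; the keyword values follow the constructor signature below):
clf_model = InfoGraphStarModel(num_feat, edge_dim, 15, task='supervised', mode='classification', num_tasks=1, num_classes=2)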
"""
def __init__(self,
num_features,
edge_features,
embedding_dim,
task: Literal['supervised', 'semisupervised'] = 'supervised',
mode: Literal['regression', 'classification'] = 'regression',
num_classes=2,
num_tasks=1,
measure='JSD',
average_loss=True,
num_gc_layers=5,
**kwargs):
assert task in ['supervised', 'semisupervised'], 'Invalid model task'
assert mode in ['regression', 'classification'], 'Invalid model mode'
self.edge_features = edge_features
self.local = True
self.prior = False
self.gamma = .1
self.num_features = num_features
self.task = task
self.mode = mode
self.num_classes = num_classes
if self.mode == 'regression':
self.output_dim = num_tasks
elif self.mode == 'classification':
self.num_tasks = num_tasks
self.output_dim = num_classes * num_tasks
self.class_loss = SoftmaxCrossEntropy()._create_pytorch_loss()
if self.task == 'supervised':
self.embedding_dim = embedding_dim
elif self.task == 'semisupervised':
self.embedding_dim = embedding_dim * num_gc_layers
self.num_gc_layers = num_gc_layers
self.localloss = LocalMutualInformationLoss()._create_pytorch_loss(
measure, average_loss)
self.globalloss = GlobalMutualInformationLoss()._create_pytorch_loss(
measure, average_loss)
self.components = self.build_components()
self.model = self.build_model()
super().__init__(self.model, self.components, **kwargs)
def build_components(self):
"""
Builds the components of the InfoGraphStar model. InfoGraphStar works by maximizing the mutual information between the graph-level representation and the representations of substructures of different scales.
It does this by producing graph-level encodings and substructure encodings, and then using a discriminator to classify if they are from the same molecule or not.
The encoder is a graph convolutional network that produces the graph-level encodings and substructure encodings.
In supervised training mode, only one encoder is used and the encodings are not compared, while in semi-supervised training mode the two encoders are different in order to prevent negative transfer from the pretraining stage.
The local discriminator is a multilayer perceptron that classifies if the substructure encodings are from the same molecule or not while the global discriminator classifies if the graph-level encodings are from the same molecule or not.
Components list, type and description:
--------------------------------------
encoder: InfoGraphEncoder
unsup_encoder: InfoGraphEncoder for supervised or GINEncoder for semisupervised training
ff1: MultilayerPerceptron, feedforward network
ff2: MultilayerPerceptron, feedforward network
fc1: torch.nn.Linear, fully connected layer
fc2: torch.nn.Linear, fully connected layer
local_d: MultilayerPerceptron, local discriminator
global_d: MultilayerPerceptron, global discriminator
"""
if self.task == 'supervised':
return {
'encoder':
InfoGraphEncoder(self.num_features, self.edge_features,
self.embedding_dim),
'unsup_encoder':
InfoGraphEncoder(self.num_features, self.edge_features,
self.embedding_dim),
'ff1':
MultilayerPerceptron(2 * self.embedding_dim,
self.embedding_dim,
(self.embedding_dim,)),
'ff2':
MultilayerPerceptron(2 * self.embedding_dim,
self.embedding_dim,
(self.embedding_dim,)),
'fc1':
torch.nn.Linear(2 * self.embedding_dim, self.embedding_dim),
'fc2':
torch.nn.Linear(self.embedding_dim, self.output_dim),
'local_d':
MultilayerPerceptron(self.embedding_dim,
self.embedding_dim,
(self.embedding_dim,),
skip_connection=True),
'global_d':
MultilayerPerceptron(2 * self.embedding_dim,
self.embedding_dim,
(self.embedding_dim,),
skip_connection=True)
}
elif self.task == 'semisupervised':
return {
'encoder':
InfoGraphEncoder(self.num_features, self.edge_features,
self.embedding_dim),
'unsup_encoder':
GINEncoder(self.num_features, self.embedding_dim,
self.num_gc_layers),
'ff1':
MultilayerPerceptron(2 * self.embedding_dim,
self.embedding_dim,
(self.embedding_dim,)),
'ff2':
MultilayerPerceptron(self.embedding_dim, self.embedding_dim,
(self.embedding_dim,)),
'fc1':
torch.nn.Linear(2 * self.embedding_dim, self.embedding_dim),
'fc2':
torch.nn.Linear(self.embedding_dim, self.output_dim),
'local_d':
MultilayerPerceptron(self.embedding_dim,
self.embedding_dim,
(self.embedding_dim,),
skip_connection=True),
'global_d':
MultilayerPerceptron(self.embedding_dim,
self.embedding_dim,
(self.embedding_dim,),
skip_connection=True)
}
def build_model(self):
"""
Builds the InfoGraph model by unpacking the components dictionary and passing them to the InfoGraph nn.module.
"""
if self.mode == 'regression':
return InfoGraphStar(**self.components,)
elif self.mode == 'classification':
return InfoGraphStar(**self.components,
mode=self.mode,
num_tasks=self.num_tasks,
num_classes=self.num_classes)
def loss_func(self, inputs, labels, weights):
sup_loss = self.sup_loss(inputs, labels)
if self.task == 'semisupervised':
local_unsup_loss = self.local_unsup_loss(inputs)
global_unsup_loss = self.global_unsup_loss(inputs, labels, weights)
loss = sup_loss + local_unsup_loss + global_unsup_loss * self.learning_rate
# original implementation also includes an option if not using a separate encoder:
# loss = sup_loss + local_unsup_loss * self.learning_rate
return (loss * weights).mean()
else:
return (sup_loss * weights).mean()
def sup_loss(self, inputs, labels):
if self.mode == 'regression':
out = self.model(inputs)
sup_loss = F.mse_loss(out, labels)
elif self.mode == 'classification':
out = self.model(inputs)
out = F.softmax(out, dim=2)
sup_loss = self.class_loss(out, labels)
return sup_loss
def local_unsup_loss(self, inputs):
if self.task == 'semisupervised':
y, M = self.components['unsup_encoder'](inputs)
else:
y, M = self.components['encoder'](inputs)
g_enc = self.components['global_d'](y)
l_enc = self.components['local_d'](M)
if self.local:
loss = self.localloss(l_enc, g_enc, inputs.graph_index)
return loss
def global_unsup_loss(self, inputs, labels, weights):
y, M = self.components['encoder'](inputs)
y_, M_ = self.components['unsup_encoder'](inputs)
g_enc = self.components['ff1'](y)
g_enc1 = self.components['ff2'](y_)
loss = self.globalloss(g_enc, g_enc1)
return loss
def _prepare_batch(self, batch):
"""
Prepares the batch for the model by converting the GraphData numpy arrays to torch tensors and moving them to the device.
"""
inputs, labels, weights = batch
inputs = BatchGraphData(inputs[0]).numpy_to_torch(self.device)
_, labels, weights = super()._prepare_batch(([], labels, weights))
if (len(labels) != 0) and (len(weights) != 0):
labels = labels[0]
weights = weights[0]
return inputs, labels, weights
def default_generator(
self,
dataset: dc.data.Dataset,
epochs: int = 1,
mode: str = 'fit',
deterministic: bool = True,
pad_batches: bool = True) -> Iterable[Tuple[List, List, List]]:
for epoch in range(epochs):
for (X_b, y_b, w_b,
ids_b) in dataset.iterbatches(batch_size=self.batch_size,
deterministic=deterministic,
pad_batches=pad_batches):
if self.mode == 'classification' and y_b is not None:
y_b = to_one_hot(y_b.flatten(), self.num_classes).reshape(
-1, self.num_tasks, self.num_classes)
yield ([X_b], [y_b], [w_b])
<file_sep>#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Sun Jul 23 16:02:07 2017
@author: zqwu
"""
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
import numpy as np
import deepchem as dc
import tempfile
from sklearn.svm import SVC
# Only for debug!
np.random.seed(123)
# Load Tox21 dataset
n_features = 1024
tox21_tasks, tox21_datasets, transformers = dc.molnet.load_tox21()
train_dataset, valid_dataset, test_dataset = tox21_datasets
# Fit models
metric = dc.metrics.Metric(dc.metrics.roc_auc_score, np.mean)
def model_builder(model_dir):
sklearn_model = SVC(C=1.0, class_weight="balanced", probability=True)
return dc.models.SklearnModel(sklearn_model, model_dir)
model_dir = tempfile.mkdtemp()
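# SingletaskToMultitask fits one singletask model (here the SVC returned by
# model_builder) per Tox21 task and combines their per-task predictions.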
model = dc.models.SingletaskToMultitask(tox21_tasks, model_builder, model_dir)
# Fit trained model
model.fit(train_dataset)
model.save()
print("Evaluating model")
train_scores = model.evaluate(train_dataset, [metric], transformers)
valid_scores = model.evaluate(valid_dataset, [metric], transformers)
print("Train scores")
print(train_scores)
print("Validation scores")
print(valid_scores)
<file_sep>'''
This module contains different variations of the Physics Informed Neural Network (PINN) model using the JaxModel API
'''
import numpy as np
import time
from typing import Any, Callable, Iterable, List, Optional, Tuple, Union
from collections.abc import Sequence as SequenceCollection
from deepchem.data import Dataset
from deepchem.models.losses import Loss
from deepchem.models.optimizers import Optimizer
from deepchem.utils.typing import LossFn, OneOrMany
from deepchem.trans.transformers import Transformer, undo_transforms
# JAX dependencies
import jax
import jax.numpy as jnp
import haiku as hk
import optax
from deepchem.models.jax_models.jax_model import JaxModel
from deepchem.models.jax_models.jax_model import create_default_gradient_fn, create_default_eval_fn
import logging
import warnings
logger = logging.getLogger(__name__)
def create_default_update_fn(optimizer: optax.GradientTransformation,
model_loss: Callable):
"""
Returns an update function that performs one step of backpropagation
"""
@jax.jit
def update(params, opt_state, batch, target, weights,
rng) -> Tuple[hk.Params, optax.OptState, jnp.ndarray]:
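# One optimisation step: evaluate the loss and its gradients w.r.t. the
# parameters, turn the gradients into updates with the optax optimizer,
# and apply the updates to obtain the new parameters.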
batch_loss, grads = jax.value_and_grad(model_loss)(params, target,
weights, rng, *batch)
updates, opt_state = optimizer.update(grads, opt_state)
new_params = optax.apply_updates(params, updates)
return new_params, opt_state, batch_loss
return update
class PINNModel(JaxModel):
"""
This class is derived from the JaxModel class and its methods are very similar to JaxModel's,
but it has the option of passing multiple arguments (done using *args), as is suitable for PINN models.
Ex - Approximating f(x, y, z, t) satisfying a linear differential equation.
This model is recommended for linear partial differential equations, but it will also work for other
cases if you can accurately write the gradient function in Jax for your use case.
This class requires two functions apart from the usual function definition and weights
[1] **grad_fn** : Each PINNs have a different strategy for calculating its final losses. This
function tells the PINNModel how to go about computing the derivatives for backpropagation.
It should follow this format:
>>>
>> def gradient_fn(forward_fn, loss_outputs, initial_data):
>>
>> def model_loss(params, target, weights, rng, ...):
>>
>> # write code using the arguments.
>> # ... indicates the variable number of positional arguments.
>> return
>>
>> return model_loss
"..." can be replaced with various arguments like (x, y, z, y) but should match with eval_fn
[2] **eval_fn**: Function for defining how the model needs to compute during inference.
It should follow this format
>>>
>> def create_eval_fn(forward_fn, params):
>> def eval_model(..., rng=None):
>> # write code here using arguments
>>
>> return
>> return eval_model
"..." can be replaced with various arguments like (x, y, z, y) but should match with grad_fn
[3] boundary_data:
For a detailed example, check out - deepchem/models/jax_models/tests/test_pinn.py where we have
solved f'(x) = -sin(x)
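A minimal sketch of such a gradient_fn for that example (hypothetical code; it
assumes forward_fn is a haiku apply function with signature
forward_fn(params, rng, x) and that x_train holds the collocation points):
def gradient_fn(forward_fn, loss_outputs, initial_data):
    def model_loss(params, target, weights, rng, x_train):
        def u(x):
            # scalar network output at a single collocation point
            return jnp.sum(forward_fn(params, rng, x))
        # residual of f'(x) + sin(x) = 0 at the collocation points
        u_x = jax.vmap(jax.grad(u))(x_train)
        return jnp.mean((u_x + jnp.sin(x_train))**2)
    return model_loss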
References
----------
.. [1] Raissi et. al. "Physics-informed neural networks: A deep learning framework for solving
forward and inverse problems involving nonlinear partial differential equations" Journal of
Computational Physics https://doi.org/10.1016/j.jcp.2018.10.045
.. [2] Raissi et. al. "Physics Informed Deep Learning (Part I): Data-driven
Solutions of Nonlinear Partial Differential Equations" arXiv preprint arXiv:1711.10561
Notes
-----
This class requires Jax, Haiku and Optax to be installed.
"""
def __init__(self,
forward_fn: hk.State,
params: hk.Params,
initial_data: dict = {},
output_types: Optional[List[str]] = None,
batch_size: int = 100,
learning_rate: float = 0.001,
optimizer: Optional[Union[optax.GradientTransformation,
Optimizer]] = None,
grad_fn: Callable = create_default_gradient_fn,
update_fn: Callable = create_default_update_fn,
eval_fn: Callable = create_default_eval_fn,
rng=jax.random.PRNGKey(1),
log_frequency: int = 100,
**kwargs):
"""
Parameters
----------
forward_fn: hk.State or Function
Any Jax based model that has an `apply` method for computing the network. Currently
only haiku models are supported.
params: hk.Params
The parameter of the Jax based networks
initial_data: dict
This acts as a session variable which will be passed as a dictionary in grad_fn
output_types: list of strings, optional (default None)
the type of each output from the model, as described above
batch_size: int, optional (default 100)
default batch size for training and evaluating
learning_rate: float or LearningRateSchedule, optional (default 0.001)
the learning rate to use for fitting. If optimizer is specified, this is
ignored.
optimizer: optax object
For the time being, it is optax object
grad_fn: Callable (default create_default_gradient_fn)
It defines how the loss function and gradients need to be calculated for the PINNs model
update_fn: Callable (default create_default_update_fn)
It defines how the weights need to be updated using backpropagation. We have used the optax library
for optimisation operations. It's recommended to leave this at the default.
eval_fn: Callable (default create_default_eval_fn)
Function for defining how the model needs to compute outputs during inference.
rng: jax.random.PRNGKey, optional (default 1)
A default global PRNG key to use for drawing random numbers.
log_frequency: int, optional (default 100)
The frequency at which to log data. Data is logged using
`logging` by default.
"""
warnings.warn(
'PINNModel is still in active development and we could change the design of the API in the future.'
)
self.boundary_data = initial_data
super(PINNModel,
self).__init__(forward_fn, params, None, output_types, batch_size,
learning_rate, optimizer, grad_fn, update_fn,
eval_fn, rng, log_frequency, **kwargs)
def fit_generator(self,
generator: Iterable[Tuple[Any, Any, Any]],
loss: Optional[Union[Loss, LossFn]] = None,
callbacks: Union[Callable, List[Callable]] = [],
all_losses: Optional[List[float]] = None) -> float:
if not isinstance(callbacks, SequenceCollection):
callbacks = [callbacks]
self._ensure_built()
avg_loss = 0.0
last_avg_loss = 0.0
averaged_batches = 0
if loss is None:
loss = self._loss_fn
model_loss_fn = self._create_gradient_fn(self.forward_fn,
self._loss_outputs,
self.boundary_data)
grad_update = self._create_update_fn(self.optimizer, model_loss_fn)
params, opt_state = self._get_trainable_params()
rng = self.rng
time1 = time.time()
# Main training loop
for batch in generator:
inputs, labels, weights = self._prepare_batch(batch)
if isinstance(inputs, list) and len(inputs) == 1:
inputs = inputs[0]
if isinstance(labels, list) and len(labels) == 1:
labels = labels[0]
if isinstance(weights, list) and len(weights) == 1:
weights = weights[0]
params, opt_state, batch_loss = grad_update(params,
opt_state,
inputs,
labels,
weights,
rng=rng)
rng, _ = jax.random.split(rng)
avg_loss += jax.device_get(batch_loss)
self._global_step += 1
current_step = self._global_step
averaged_batches += 1
should_log = (current_step % self.log_frequency == 0)
if should_log:
avg_loss = float(avg_loss) / averaged_batches
logger.info('Ending global_step %d: Average loss %g' %
(current_step, avg_loss))
if all_losses is not None:
all_losses.append(avg_loss)
# Capture the last avg_loss in case of return since we're resetting to 0 now
last_avg_loss = avg_loss
avg_loss = 0.0
averaged_batches = 0
for c in callbacks:
c(self, current_step)
# Report final results.
if averaged_batches > 0:
avg_loss = float(avg_loss) / averaged_batches
logger.info('Ending global_step %d: Average loss %g' %
(current_step, avg_loss))
if all_losses is not None:
all_losses.append(avg_loss)
last_avg_loss = avg_loss
time2 = time.time()
logger.info("TIMING: model fitting took %0.3f s" % (time2 - time1))
self._set_trainable_params(params, opt_state)
return last_avg_loss
def _prepare_batch(self, batch):
inputs, labels, weights = batch
inputs = [
x.astype(np.float32) if x.dtype == np.float64 else x for x in inputs
]
inputs = [np.split(x, x.shape[1], 1) for x in inputs]
if labels is not None:
labels = [
x.astype(np.float32) if x.dtype == np.float64 else x
for x in labels
]
else:
labels = []
if weights is not None:
weights = [
x.astype(np.float32) if x.dtype == np.float64 else x
for x in weights
]
else:
weights = []
return (inputs, labels, weights)
def default_generator(
self,
dataset: Dataset,
epochs: int = 1,
mode: str = 'fit',
deterministic: bool = True,
pad_batches: bool = True) -> Iterable[Tuple[List, List, List]]:
"""Create a generator that iterates batches for a dataset.
Subclasses may override this method to customize how model inputs are
generated from the data.
Parameters
----------
dataset: Dataset
the data to iterate
epochs: int
the number of times to iterate over the full dataset
mode: str
allowed values are 'fit' (called during training), 'predict' (called
during prediction), and 'uncertainty' (called during uncertainty
prediction)
deterministic: bool
whether to iterate over the dataset in order, or randomly shuffle the
data for each epoch
pad_batches: bool
whether to pad each batch up to this model's preferred batch size
Returns
-------
a generator that iterates batches, each represented as a tuple of lists:
([inputs], [outputs], [weights])
"""
for epoch in range(epochs):
for (X_b, y_b, w_b,
_) in dataset.iterbatches(batch_size=self.batch_size,
deterministic=deterministic,
pad_batches=pad_batches):
yield ([X_b], [y_b], [w_b])
def _predict(self, generator: Iterable[Tuple[Any, Any, Any]],
transformers: List[Transformer], uncertainty: bool,
other_output_types: Optional[OneOrMany[str]]):
"""
Predict outputs for data provided by a generator.
This is the private implementation of prediction. Do not
call it directly. Instead call one of the public prediction
methods.
Parameters
----------
generator: generator
this should generate batches, each represented as a tuple of the form
(inputs, labels, weights).
transformers: List[dc.trans.Transformers]
Transformers that the input data has been transformed by. The output
is passed through these transformers to undo the transformations.
uncertainty: bool
specifies whether this is being called as part of estimating uncertainty.
If True, it sets the training flag so that dropout will be enabled, and
returns the values of the uncertainty outputs.
other_output_types: list, optional
Provides a list of other output_types (strings) to predict from model.
Returns
-------
A NumpyArray if the model produces a single output, or a list of arrays otherwise.
"""
results: Optional[List[List[np.ndarray]]] = None
variances: Optional[List[List[np.ndarray]]] = None
if uncertainty and (other_output_types is not None):
raise ValueError(
'This model cannot compute uncertainties and other output types simultaneously. Please invoke one at a time.'
)
if uncertainty:
if self._variance_outputs is None or len(
self._variance_outputs) == 0:
raise ValueError('This model cannot compute uncertainties')
if len(self._variance_outputs) != len(self._prediction_outputs):
raise ValueError(
'The number of variances must exactly match the number of outputs'
)
if other_output_types:
if self._other_outputs is None or len(self._other_outputs) == 0:
raise ValueError(
'This model cannot compute other outputs since no other output_types were specified.'
)
self._ensure_built()
eval_fn = self._create_eval_fn(self.forward_fn, self.params)
rng = self.rng
for batch in generator:
inputs, _, _ = self._prepare_batch(batch)
if isinstance(inputs, list) and len(inputs) == 1:
inputs = inputs[0]
output_values = eval_fn(*inputs, rng)
if isinstance(output_values, jnp.ndarray):
output_values = [output_values]
output_values = [jax.device_get(t) for t in output_values]
            # Apply transformers and record results.
if uncertainty:
var = [output_values[i] for i in self._variance_outputs]
if variances is None:
variances = [var]
else:
for i, t in enumerate(var):
variances[i].append(t)
access_values = []
if other_output_types:
access_values += self._other_outputs
elif self._prediction_outputs is not None:
access_values += self._prediction_outputs
if len(access_values) > 0:
output_values = [output_values[i] for i in access_values]
if len(transformers) > 0:
if len(output_values) > 1:
raise ValueError(
"predict() does not support Transformers for models with multiple outputs."
)
elif len(output_values) == 1:
output_values = [
undo_transforms(output_values[0], transformers)
]
if results is None:
results = [[] for i in range(len(output_values))]
for i, t in enumerate(output_values):
results[i].append(t)
# Concatenate arrays to create the final results.
final_results = []
final_variances = []
if results is not None:
for r in results:
final_results.append(np.concatenate(r, axis=0))
if uncertainty and variances is not None:
for v in variances:
final_variances.append(np.concatenate(v, axis=0))
return zip(final_results, final_variances)
if len(final_results) == 1:
return final_results[0]
else:
return final_results
<file_sep>import numpy as np
import deepchem as dc
mols = [
'C1=CC2=C(C=C1)C1=CC=CC=C21', 'O=C1C=CC(=O)C2=C1OC=CO2', 'C1=C[N]C=C1',
'C1=CC=CC=C[C+]1', 'C1=[C]NC=C1', 'N[C@@H](C)C(=O)O', 'N[C@H](C)C(=O)O',
'CC', 'O=C=O', 'C#N', 'CCN(CC)CC', 'CC(=O)O', 'C1CCCCC1', 'c1ccccc1'
]
print("Original set of molecules")
print(mols)
splitter = dc.splits.ScaffoldSplitter()
# TODO: This should be swapped for simpler splitter API once that's merged in.
dataset = dc.data.NumpyDataset(X=np.array(mols), ids=mols)
train, valid, test = splitter.train_valid_test_split(dataset)
# The return values are dc.data.Dataset objects so we need to extract
# the ids
print("Training set")
print(train)
print("Valid set")
print(valid)
print("Test set")
print(test)
<file_sep>"""
PDBBind dataset loader.
"""
import os
import numpy as np
import pandas as pd
import deepchem as dc
from deepchem.molnet.load_function.molnet_loader import TransformerGenerator, _MolnetLoader
from deepchem.data import Dataset
from typing import List, Optional, Tuple, Union
DATASETS_URL = "https://deepchemdata.s3-us-west-1.amazonaws.com/datasets/"
PDBBIND_URL = DATASETS_URL + "pdbbindv2019/"
PDBBIND_TASKS = ['-logKd/Ki']
class _PDBBindLoader(_MolnetLoader):
def __init__(self,
*args,
pocket: bool = True,
set_name: str = 'core',
**kwargs):
super(_PDBBindLoader, self).__init__(*args, **kwargs)
self.pocket = pocket
self.set_name = set_name
if set_name == 'general':
self.name = 'pdbbind_v2019_other_PL' # 'general' set folder name
elif set_name == 'refined':
self.name = 'pdbbind_v2019_refined'
elif set_name == 'core':
self.name = 'pdbbind_v2013_core_set'
def create_dataset(self) -> Dataset:
if self.set_name not in ['refined', 'general', 'core']:
raise ValueError(
"Only 'refined', 'general', and 'core' are supported for set_name."
)
filename = self.name + '.tar.gz'
data_folder = os.path.join(self.data_dir, self.name)
dataset_file = os.path.join(self.data_dir, filename)
if not os.path.exists(data_folder):
if self.set_name in ['refined', 'general']:
dc.utils.data_utils.download_url(url=PDBBIND_URL + filename,
dest_dir=self.data_dir)
else:
dc.utils.data_utils.download_url(url=DATASETS_URL + filename,
dest_dir=self.data_dir)
dc.utils.data_utils.untargz_file(dataset_file,
dest_dir=self.data_dir)
# get pdb and sdf filenames, labels and pdbids
protein_files, ligand_files, labels, pdbs = self._process_pdbs()
# load and featurize each complex
features = self.featurizer.featurize(
list(zip(ligand_files, protein_files)))
dataset = dc.data.DiskDataset.from_numpy(features, y=labels, ids=pdbs)
return dataset
def _process_pdbs(
self) -> Tuple[List[str], List[str], np.ndarray, List[str]]:
if self.set_name == 'general':
data_folder = os.path.join(self.data_dir, 'v2019-other-PL')
index_labels_file = os.path.join(
data_folder, 'index/INDEX_general_PL_data.2019')
elif self.set_name == 'refined':
data_folder = os.path.join(self.data_dir, 'refined-set')
index_labels_file = os.path.join(data_folder,
'index/INDEX_refined_data.2019')
elif self.set_name == 'core':
data_folder = os.path.join(self.data_dir, 'v2013-core')
index_labels_file = os.path.join(data_folder,
'pdbbind_v2013_core.csv')
if self.set_name in ['general', 'refined']:
# Extract locations of data
with open(index_labels_file, "r") as g:
pdbs = [line[:4] for line in g.readlines() if line[0] != "#"]
# Extract labels
with open(index_labels_file, "r") as g:
labels = np.array([
# Lines have format
# PDB code, resolution, release year, -logKd/Ki, Kd/Ki, reference, ligand name
                    # The label is -logKd/Ki, the negative base-10 log of Kd/Ki
float(line.split()[3])
for line in g.readlines()
if line[0] != "#"
])
else:
df = pd.read_csv(index_labels_file)
pdbs = df.pdb_id.tolist()
labels = np.array(df.label.tolist())
if self.pocket: # only load binding pocket
protein_files = [
os.path.join(data_folder, pdb, "%s_pocket.pdb" % pdb)
for pdb in pdbs
]
else:
protein_files = [
os.path.join(data_folder, pdb, "%s_protein.pdb" % pdb)
for pdb in pdbs
]
ligand_files = [
os.path.join(data_folder, pdb, "%s_ligand.sdf" % pdb)
for pdb in pdbs
]
return (protein_files, ligand_files, labels, pdbs)
def load_pdbbind(
featurizer: dc.feat.ComplexFeaturizer,
splitter: Union[dc.splits.Splitter, str, None] = 'random',
transformers: List[Union[TransformerGenerator, str]] = ['normalization'],
reload: bool = True,
data_dir: Optional[str] = None,
save_dir: Optional[str] = None,
pocket: bool = True,
set_name: str = 'core',
**kwargs
) -> Tuple[List[str], Tuple[Dataset, ...], List[dc.trans.Transformer]]:
"""Load PDBBind dataset.
The PDBBind dataset includes experimental binding affinity data
and structures for 4852 protein-ligand complexes from the "refined set"
and 12800 complexes from the "general set" in PDBBind v2019 and 193
complexes from the "core set" in PDBBind v2013.
The refined set removes data with obvious problems
in 3D structure, binding data, or other aspects and should therefore
be a better starting point for docking/scoring studies. Details on
the criteria used to construct the refined set can be found in [4]_.
The general set does not include the refined set. The core set is
a subset of the refined set that is not updated annually.
Random splitting is recommended for this dataset.
The raw dataset contains the columns below:
- "ligand" - SDF of the molecular structure
- "protein" - PDB of the protein structure
    - "-logKd/Ki" - binding affinity of the complex (negative log of Kd/Ki)
Parameters
----------
featurizer: ComplexFeaturizer or str
the complex featurizer to use for processing the data.
Alternatively you can pass one of the names from
dc.molnet.featurizers as a shortcut.
splitter: Splitter or str
the splitter to use for splitting the data into training, validation, and
test sets. Alternatively you can pass one of the names from
dc.molnet.splitters as a shortcut. If this is None, all the data
will be included in a single dataset.
transformers: list of TransformerGenerators or strings
the Transformers to apply to the data. Each one is specified by a
TransformerGenerator or, as a shortcut, one of the names from
dc.molnet.transformers.
reload: bool
if True, the first call for a particular featurizer and splitter will cache
the datasets to disk, and subsequent calls will reload the cached datasets.
data_dir: str
a directory to save the raw data in
save_dir: str
a directory to save the dataset in
pocket: bool (default True)
If true, use only the binding pocket for featurization.
set_name: str (default 'core')
Name of dataset to download. 'refined', 'general', and 'core' are supported.
Returns
-------
tasks, datasets, transformers: tuple
tasks: list
Column names corresponding to machine learning target variables.
datasets: tuple
train, validation, test splits of data as
``deepchem.data.datasets.Dataset`` instances.
transformers: list
``deepchem.trans.transformers.Transformer`` instances applied
to dataset.
References
----------
.. [1] <NAME>. et al. Acc. Chem. Res. 2017, 50, 302-309. (PDBbind v.2016)
.. [2] <NAME>.H. et al. Bioinformatics, 2015, 31, 405-412. (PDBbind v.2014)
.. [3] <NAME>. et al. J. Chem. Inf. Model., 2014, 54, 1700-1716.(PDBbind v.2013)
.. [4] <NAME>. et al. J. Chem. Inf. Model., 2009, 49, 1079-1093. (PDBbind v.2009)
.. [5] <NAME>. et al. J. Med. Chem., 2005, 48, 4111-4119. (Original release)
.. [6] Wang, R.X. et al. J. Med. Chem., 2004, 47, 2977-2980. (Original release)
"""
loader = _PDBBindLoader(featurizer,
splitter,
transformers,
PDBBIND_TASKS,
data_dir,
save_dir,
pocket=pocket,
set_name=set_name,
**kwargs)
return loader.load_dataset(loader.name, reload)
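# Illustrative usage sketch (commented out so that importing this module has no
# side effects). The featurizer below is just one possible choice; any
# dc.feat.ComplexFeaturizer may be passed.
#
#   import deepchem as dc
#   tasks, (train, valid, test), transformers = dc.molnet.load_pdbbind(
#       featurizer=dc.feat.RdkitGridFeaturizer(),
#       splitter='random',
#       set_name='core',
#       pocket=True)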
<file_sep>import os
import numpy as np
import deepchem as dc
from rdkit import Chem
seed = 123
np.random.seed(seed)
base_dir = os.getcwd()
data_dir = os.path.join(base_dir, "refined_atomconv")
train_dir = os.path.join(base_dir, "train")
test_dir = os.path.join(base_dir, "test")
pdbbind_dir = os.path.join(base_dir, "v2015")
print("Loading ids from %s" % data_dir)
d = dc.data.DiskDataset(data_dir)
ids = d.ids
def compute_ligand_mol(pdb_subdir, pdb_code):
ligand_file = os.path.join(pdb_subdir, "%s_ligand.pdb" % pdb_code)
mol = Chem.MolFromPDBFile(str(ligand_file))
return mol
def create_scaffold_indices(pdbbind_dir, base_dir):
frac_train = 0.8
frac_valid = 0.0
scaffolds = {}
y = np.array([0 for val in ids])
w = np.ones_like(y)
for ind, pdb_code in enumerate(ids):
print("Processing %s" % str(pdb_code))
pdb_subdir = os.path.join(pdbbind_dir, pdb_code)
mol = compute_ligand_mol(pdb_subdir, pdb_code)
if mol is not None:
engine = dc.utils.ScaffoldGenerator(include_chirality=False)
scaffold = engine.get_scaffold(mol)
if scaffold not in scaffolds:
scaffolds[scaffold] = [ind]
else:
scaffolds[scaffold].append(ind)
else:
print(pdb_code)
# Sort from largest to smallest scaffold sets
scaffolds = {key: sorted(value) for key, value in scaffolds.items()}
scaffold_sets = [
scaffold_set
for (scaffold, scaffold_set) in sorted(
scaffolds.items(), key=lambda x: (len(x[1]), x[1][0]), reverse=True)
]
train_cutoff = frac_train * len(y)
valid_cutoff = (frac_train + frac_valid) * len(y)
train_inds, valid_inds, test_inds = [], [], []
for scaffold_set in scaffold_sets:
if len(train_inds) + len(scaffold_set) > train_cutoff:
if len(train_inds) + len(valid_inds) + len(scaffold_set) > valid_cutoff:
test_inds += scaffold_set
else:
valid_inds += scaffold_set
else:
train_inds += scaffold_set
return train_inds, valid_inds, test_inds
print("Generating scaffold indices")
train_inds, _, test_inds = create_scaffold_indices(pdbbind_dir, base_dir)
print("Using indices from scaffold splitter on pdbbind dataset")
splitter = dc.splits.IndiceSplitter(test_indices=test_inds)
train_dataset, test_dataset = splitter.train_test_split(d, train_dir, test_dir)
<file_sep>import deepchem as dc
import numpy as np
import os
def test_numpy_dataset_get_shape():
"""Test that get_shape works for numpy datasets."""
num_datapoints = 100
num_features = 10
num_tasks = 10
# Generate data
X = np.random.rand(num_datapoints, num_features)
y = np.random.randint(2, size=(num_datapoints, num_tasks))
w = np.random.randint(2, size=(num_datapoints, num_tasks))
ids = np.array(["id"] * num_datapoints)
dataset = dc.data.NumpyDataset(X, y, w, ids)
X_shape, y_shape, w_shape, ids_shape = dataset.get_shape()
assert X_shape == X.shape
assert y_shape == y.shape
assert w_shape == w.shape
assert ids_shape == ids.shape
def test_disk_dataset_get_shape_single_shard():
"""Test that get_shape works for disk dataset."""
num_datapoints = 100
num_features = 10
num_tasks = 10
# Generate data
X = np.random.rand(num_datapoints, num_features)
y = np.random.randint(2, size=(num_datapoints, num_tasks))
w = np.random.randint(2, size=(num_datapoints, num_tasks))
ids = np.array(["id"] * num_datapoints)
dataset = dc.data.DiskDataset.from_numpy(X, y, w, ids)
X_shape, y_shape, w_shape, ids_shape = dataset.get_shape()
assert X_shape == X.shape
assert y_shape == y.shape
assert w_shape == w.shape
assert ids_shape == ids.shape
def test_disk_dataset_get_shape_multishard():
"""Test that get_shape works for multisharded disk dataset."""
num_datapoints = 100
num_features = 10
num_tasks = 10
# Generate data
X = np.random.rand(num_datapoints, num_features)
y = np.random.randint(2, size=(num_datapoints, num_tasks))
w = np.random.randint(2, size=(num_datapoints, num_tasks))
ids = np.array(["id"] * num_datapoints)
dataset = dc.data.DiskDataset.from_numpy(X, y, w, ids)
# Should now have 10 shards
dataset.reshard(shard_size=10)
X_shape, y_shape, w_shape, ids_shape = dataset.get_shape()
assert X_shape == X.shape
assert y_shape == y.shape
assert w_shape == w.shape
assert ids_shape == ids.shape
def test_disk_dataset_get_legacy_shape_single_shard():
"""Test that get_shape works for legacy disk dataset."""
# This is the shape of legacy_data
num_datapoints = 100
num_features = 10
num_tasks = 10
current_dir = os.path.dirname(os.path.abspath(__file__))
# legacy_dataset is a dataset in the legacy format kept around for testing
# purposes.
data_dir = os.path.join(current_dir, "legacy_dataset")
dataset = dc.data.DiskDataset(data_dir)
X_shape, y_shape, w_shape, ids_shape = dataset.get_shape()
assert X_shape == (num_datapoints, num_features)
assert y_shape == (num_datapoints, num_tasks)
assert w_shape == (num_datapoints, num_tasks)
assert ids_shape == (num_datapoints,)
def test_disk_dataset_get_legacy_shape_multishard():
"""Test that get_shape works for multisharded legacy disk dataset."""
# This is the shape of legacy_data_reshard
num_datapoints = 100
num_features = 10
num_tasks = 10
# legacy_dataset_reshard is a sharded dataset in the legacy format kept
# around for testing
current_dir = os.path.dirname(os.path.abspath(__file__))
data_dir = os.path.join(current_dir, "legacy_dataset_reshard")
dataset = dc.data.DiskDataset(data_dir)
# Should now have 10 shards
assert dataset.get_number_shards() == 10
X_shape, y_shape, w_shape, ids_shape = dataset.get_shape()
assert X_shape == (num_datapoints, num_features)
assert y_shape == (num_datapoints, num_tasks)
assert w_shape == (num_datapoints, num_tasks)
assert ids_shape == (num_datapoints,)
def test_get_shard_size():
"""
Test that using ids for getting the shard size does not break the method.
The issue arises when attempting to load a dataset that does not have a labels
column. The create_dataset method of the DataLoader class sets the y to None
in this case, which causes the existing implementation of the get_shard_size()
method to fail, as it relies on the dataset having a not None y column. This
consequently breaks all methods depending on this, like the splitters for
example.
Note
----
DiskDatasets without labels cannot be resharded!
"""
current_dir = os.path.dirname(os.path.abspath(__file__))
file_path = os.path.join(current_dir, "reaction_smiles.csv")
featurizer = dc.feat.DummyFeaturizer()
loader = dc.data.CSVLoader(tasks=[],
feature_field="reactions",
featurizer=featurizer)
dataset = loader.create_dataset(file_path)
assert dataset.get_shard_size() == 4
<file_sep>import deepchem as dc
import numpy as np
import sklearn
from sklearn.ensemble import RandomForestClassifier
N = 100
n_feat = 5
n_classes = 3
X = np.random.rand(N, n_feat)
y = np.random.randint(n_classes, size=(N,))
dataset = dc.data.NumpyDataset(X, y)
sklearn_model = RandomForestClassifier(class_weight="balanced", n_estimators=50)
model = dc.models.SklearnModel(sklearn_model)
# Fit trained model
print("About to fit model")
model.fit(dataset)
model.save()
print("About to evaluate model")
train_scores = model.evaluate(dataset, sklearn.metrics.roc_auc_score, [], n_classes=n_classes)
print("Train scores")
print(train_scores)
<file_sep>import unittest
import pytest
import deepchem as dc
import numpy as np
from deepchem.feat.mol_graphs import ConvMol
try:
import tensorflow as tf
from tensorflow.keras.layers import Input, Dense
class MLP(dc.models.KerasModel):
def __init__(self,
n_tasks=1,
feature_dim=100,
hidden_layer_size=64,
**kwargs):
self.feature_dim = feature_dim
self.hidden_layer_size = hidden_layer_size
self.n_tasks = n_tasks
model, loss, output_types = self._build_graph()
super(MLP, self).__init__(model=model,
loss=loss,
output_types=output_types,
**kwargs)
def _build_graph(self):
inputs = Input(dtype=tf.float32,
shape=(self.feature_dim,),
name="Input")
out1 = Dense(units=self.hidden_layer_size,
activation='relu')(inputs)
final = Dense(units=self.n_tasks, activation='sigmoid')(out1)
outputs = [final]
output_types = ['prediction']
loss = dc.models.losses.BinaryCrossEntropy()
model = tf.keras.Model(inputs=[inputs], outputs=outputs)
return model, loss, output_types
has_tensorflow = True
except:
has_tensorflow = False
class TestPretrained(unittest.TestCase):
@pytest.mark.tensorflow
def setUp(self):
self.feature_dim = 2
self.hidden_layer_size = 10
data_points = 10
X = np.random.randn(data_points, self.feature_dim)
y = (X[:, 0] > X[:, 1]).astype(np.float32)
self.dataset = dc.data.NumpyDataset(X, y)
@pytest.mark.tensorflow
def test_load_from_pretrained(self):
"""Tests loading pretrained model."""
source_model = MLP(hidden_layer_size=self.hidden_layer_size,
feature_dim=self.feature_dim,
batch_size=10)
source_model.fit(self.dataset, nb_epoch=1000, checkpoint_interval=0)
dest_model = MLP(feature_dim=self.feature_dim,
hidden_layer_size=self.hidden_layer_size,
n_tasks=10)
assignment_map = dict()
value_map = dict()
dest_vars = dest_model.model.trainable_variables[:-2]
for idx, dest_var in enumerate(dest_vars):
source_var = source_model.model.trainable_variables[idx]
assignment_map[source_var.experimental_ref()] = dest_var
value_map[source_var.experimental_ref()] = source_var.numpy()
dest_model.load_from_pretrained(source_model=source_model,
assignment_map=assignment_map,
value_map=value_map)
for source_var, dest_var in assignment_map.items():
source_val = source_var.deref().numpy()
dest_val = dest_var.numpy()
np.testing.assert_array_almost_equal(source_val, dest_val)
@pytest.mark.tensorflow
def test_load_pretrained_subclassed_model(self):
from rdkit import Chem
bi_tasks = ['a', 'b']
y = np.ones((3, 2))
smiles = ['C', 'CC', 'CCC']
mols = [Chem.MolFromSmiles(smile) for smile in smiles]
featurizer = dc.feat.ConvMolFeaturizer()
X = featurizer.featurize(mols)
dataset = dc.data.NumpyDataset(X, y, ids=smiles)
source_model = dc.models.GraphConvModel(n_tasks=len(bi_tasks),
graph_conv_layers=[128, 128],
dense_layer_size=512,
dropout=0,
mode='regression',
learning_rate=0.001,
batch_size=8,
model_dir="model")
source_model.fit(dataset)
dest_model = dc.models.GraphConvModel(n_tasks=len(bi_tasks),
graph_conv_layers=[128, 128],
dense_layer_size=512,
dropout=0,
mode='regression',
learning_rate=0.001,
batch_size=8)
X_b, y_b, w_b, ids_b = next(
dataset.iterbatches(batch_size=8,
deterministic=True,
pad_batches=True))
multiConvMol = ConvMol.agglomerate_mols(X_b)
n_samples = np.array(X_b.shape[0])
inputs = [
multiConvMol.get_atom_features(), multiConvMol.deg_slice,
np.array(multiConvMol.membership), n_samples
]
for i in range(1, len(multiConvMol.get_deg_adjacency_lists())):
inputs.append(multiConvMol.get_deg_adjacency_lists()[i])
dest_model.load_from_pretrained(source_model=source_model,
assignment_map=None,
value_map=None,
include_top=False,
inputs=inputs)
source_vars = source_model.model.trainable_variables[:-2]
dest_vars = dest_model.model.trainable_variables[:-2]
assert len(source_vars) == len(dest_vars)
for source_var, dest_var in zip(*(source_vars, dest_vars)):
source_val = source_var.numpy()
dest_val = dest_var.numpy()
np.testing.assert_array_almost_equal(source_val, dest_val)
@pytest.mark.tensorflow
def test_restore_equivalency(self):
"""Test for restore based pretrained model loading."""
source_model = MLP(feature_dim=self.feature_dim,
hidden_layer_size=self.hidden_layer_size)
source_model.fit(self.dataset, nb_epoch=1000)
dest_model = MLP(feature_dim=self.feature_dim,
hidden_layer_size=self.hidden_layer_size)
dest_model.load_from_pretrained(source_model=source_model,
assignment_map=None,
value_map=None,
model_dir=None,
include_top=True)
predictions = np.squeeze(dest_model.predict_on_batch(self.dataset.X))
np.testing.assert_array_almost_equal(self.dataset.y,
np.round(predictions))
<file_sep># Release
This note explains how to release deepchem packages.
## How to release
1. Create and merge a release PR
- Modify the version in `deepchem/__init__.py` (Remove `.dev`, e.g. `2.4.0.dev` -> `2.4.0`)
- Update the documents for installing a new package in `README.md` and `docs`
- Update the dockerfile at `deepchem/docker/tag/Dockerfile`
2. Push a new tag to the merge commit -> release new PyPI package and docker image
3. Create and merge a release PR in the [feedstock repository](https://github.com/conda-forge/deepchem-feedstock) -> release new Conda Forge package
4. Publish the documents for a new tag in [ReadTheDocs](https://readthedocs.org/projects/deepchem/versions/).
5. Create and merge a PR for bumping the version
- Modify the version in `deepchem/__init__.py` again (Set the next dev version, e.g. `2.4.0` -> `2.5.0.dev`)
## PyPI
### Nightly build version
We publish nightly build packages only when merging PRs to the master.
The publish process is automated by GitHub Actions and it is in `pypi-build` section of `.github/workflows/main.yml`.
### Major version
We publish a major version package only when pushing a new tag.
The publish process is automated by GitHub Actions and it is in `pypi` section of `.github/workflows/release.yml`.
## Conda Forge
We have [the feedstock repository](https://github.com/conda-forge/deepchem-feedstock) for managing the build recipe for conda-forge.
After pushing a new tag, we create a PR for publishing a new package.
Basically, we need to modify the version of deepchem and dependencies like TensorFlow in `recipe/meta.yml`.
After merging the PR, we can publish a new package.
## Docker
### Nightly build version
The latest tag (deepchemio/deepchem:latest) is a nightly build and the image is built by `docker/nightly/Dockerfile`.
We publish nightly build images only when merging PRs to the master.
The publish process is automated by GitHub Actions and it is in `docker-build` section of `.github/workflows/main.yml`.
### Major version
We publish a major version image only when pushing a new tag.
The publish process is automated by GitHub Actions and it is in `docker` section of `.github/workflows/release.yml`.
## Docs
We should manually modify documents for installing a new package before pushing a new tag.
Basically, we modify `README.md` and `docs/get_started/installation.rst`. (include `docs/index.rst` in some cases)
If the release fixes or changes a known issue that was listed in `docs/src/issues.rst`, please update that page also.
After pushing a new tag, we go to [the project page](https://readthedocs.org/projects/deepchem/versions) in ReadTheDocs and publish the documents for a new tag.
## Website
We should manually modify the DeepChem website's installation instructions after each new stable release.
This can be done by modifying the text strings in the jQuery code at the bottom of `index.html` in github.com/deepchem/deepchem.github.io. When the changes are pushed to that repository, the website will automatically update.
<file_sep>"""
qm7 dataset loader.
"""
import os
import deepchem as dc
from deepchem.molnet.load_function.molnet_loader import TransformerGenerator, _MolnetLoader
from deepchem.data import Dataset
from typing import List, Optional, Tuple, Union
QM7_MAT_URL = "https://deepchemdata.s3-us-west-1.amazonaws.com/datasets/qm7.mat"
QM7_CSV_URL = "https://deepchemdata.s3-us-west-1.amazonaws.com/datasets/qm7.csv"
QM7B_MAT_URL = "https://deepchemdata.s3-us-west-1.amazonaws.com/datasets/qm7b.mat"
GDB7_URL = "https://deepchemdata.s3-us-west-1.amazonaws.com/datasets/gdb7.tar.gz"
QM7_TASKS = ["u0_atom"]
class _QM7Loader(_MolnetLoader):
def create_dataset(self) -> Dataset:
dataset_file = os.path.join(self.data_dir, "gdb7.sdf")
if not os.path.exists(dataset_file):
dc.utils.data_utils.download_url(url=GDB7_URL,
dest_dir=self.data_dir)
dc.utils.data_utils.untargz_file(
os.path.join(self.data_dir, "gdb7.tar.gz"), self.data_dir)
loader = dc.data.SDFLoader(tasks=self.tasks,
featurizer=self.featurizer,
sanitize=True)
return loader.create_dataset(dataset_file, shard_size=8192)
def load_qm7(
featurizer: Union[dc.feat.Featurizer, str] = dc.feat.CoulombMatrix(23),
splitter: Union[dc.splits.Splitter, str, None] = 'random',
transformers: List[Union[TransformerGenerator, str]] = ['normalization'],
reload: bool = True,
data_dir: Optional[str] = None,
save_dir: Optional[str] = None,
**kwargs
) -> Tuple[List[str], Tuple[Dataset, ...], List[dc.trans.Transformer]]:
"""Load QM7 dataset
QM7 is a subset of GDB-13 (a database of nearly 1 billion
stable and synthetically accessible organic molecules)
containing up to 7 heavy atoms C, N, O, and S. The 3D
Cartesian coordinates of the most stable conformations and
their atomization energies were determined using ab-initio
density functional theory (PBE0/tier2 basis set). This dataset
also provided Coulomb matrices as calculated in [Rupp et al.
PRL, 2012]:
Stratified splitting is recommended for this dataset.
    The data file (.mat format; Python users can load the original data with
    `scipy.io.loadmat`) contains five arrays:
- "X" - (7165 x 23 x 23), Coulomb matrices
- "T" - (7165), atomization energies (unit: kcal/mol)
- "P" - (5 x 1433), cross-validation splits as used in [Montavon et al.
NIPS, 2012]
- "Z" - (7165 x 23), atomic charges
- "R" - (7165 x 23 x 3), cartesian coordinate (unit: Bohr) of each atom in
the molecules
Parameters
----------
featurizer: Featurizer or str
the featurizer to use for processing the data. Alternatively you can pass
one of the names from dc.molnet.featurizers as a shortcut.
splitter: Splitter or str
the splitter to use for splitting the data into training, validation, and
test sets. Alternatively you can pass one of the names from
dc.molnet.splitters as a shortcut. If this is None, all the data
will be included in a single dataset.
transformers: list of TransformerGenerators or strings
the Transformers to apply to the data. Each one is specified by a
TransformerGenerator or, as a shortcut, one of the names from
dc.molnet.transformers.
reload: bool
if True, the first call for a particular featurizer and splitter will cache
the datasets to disk, and subsequent calls will reload the cached datasets.
data_dir: str
a directory to save the raw data in
save_dir: str
a directory to save the dataset in
Note
----
DeepChem 2.4.0 has turned on sanitization for this dataset by
default. For the QM7 dataset, this means that calling this
function will return 6838 compounds instead of 7160 in the source
dataset file. This appears to be due to valence specification
mismatches in the dataset that weren't caught in earlier more lax
versions of RDKit. Note that this may subtly affect benchmarking
results on this
dataset.
References
----------
.. [1] <NAME>, et al. "Fast and accurate modeling of molecular
atomization energies with machine learning." Physical review letters
108.5 (2012): 058301.
.. [2] <NAME>, et al. "Learning invariant representations of
molecules for atomization energy prediction." Advances in Neural
Information Proccessing Systems. 2012.
"""
loader = _QM7Loader(featurizer, splitter, transformers, QM7_TASKS, data_dir,
save_dir, **kwargs)
return loader.load_dataset('qm7', reload)
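# Illustrative usage sketch (commented out so that importing this module has no
# side effects). The featurizer shown is the default CoulombMatrix(23); swap in
# another featurizer as needed.
#
#   import deepchem as dc
#   tasks, (train, valid, test), transformers = dc.molnet.load_qm7(
#       featurizer=dc.feat.CoulombMatrix(23), splitter='random')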
<file_sep># Pull Request Template for contributing a new dataset to MoleculeNet
## Dataset information
- Dataset : <!-- Please fill in the short name for identifying your dataset -->
- Data Type :
- [ ] Molecules (SMILES, 3D coordinate)
- [ ] Inorganic crystals (Composition, 3D coordinate)
- [ ] Other
- Task Type :
- [ ] Regression
- [ ] Classification
- [ ] Other
- The number of Data : <!-- Please fill in the number of your data -->
- The number of Tasks : <!-- Please fill in the number of your tasks -->
- Recommended split :
- [ ] Random
- [ ] Stratified
- [ ] Scaffold (for molecules)
- Recommended metrics :
- [ ] MAE
- [ ] RMSE
- [ ] R^2
  - [ ] ROC-AUC
  - [ ] PRC-AUC
<!-- For details on recommended split types and metrics,
refer to the MolNet paper:https://arxiv.org/abs/1703.00564 -->
## Reference
<!-- Please fill in the MLA style reference. -->
<file_sep>echo "Pulling featurized and split ACNN datasets from deepchem"
wget http://deepchem.io.s3-website-us-west-1.amazonaws.com/featurized_datasets/acnn_core.tar.gz
echo "Extracting ACNN datasets"
tar -zxvf acnn_core.tar.gz
<file_sep>Hyperparameter Tuning
=====================
One of the most important aspects of machine learning is
hyperparameter tuning. Many machine learning models have a number of
hyperparameters that control aspects of the model. These
hyperparameters typically cannot be learned directly by the same
learning algorithm used for the rest of learning and have to be set in
an alternate fashion. The :code:`dc.hyper` module contains utilities
for hyperparameter tuning.
DeepChem's hyperparameter optimization algorithms are simple and run in a
single-threaded fashion. They are not intended to be production-grade
hyperparameter utilities, but rather useful first tools as you start
exploring your parameter space. As the needs of your application grow,
we recommend swapping to a more heavy-duty hyperparameter
optimization library.
Hyperparameter Optimization API
-------------------------------
.. autoclass:: deepchem.hyper.HyperparamOpt
:members:
Grid Hyperparameter Optimization
--------------------------------
This is the simplest form of hyperparameter optimization: it iterates over
a fixed grid of possible values for each hyperparameter.
.. autoclass:: deepchem.hyper.GridHyperparamOpt
:members:
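A minimal usage sketch is shown below. The model builder, parameter grid,
datasets, and metric are placeholders for your own problem, and the exact
keyword arguments accepted by :code:`hyperparam_search` should be checked
against the API reference above.

.. code-block:: python

    import deepchem as dc

    def model_builder(**model_params):
        # Build a fresh model for each hyperparameter assignment.
        return dc.models.MultitaskRegressor(n_tasks=len(tasks),
                                            n_features=1024,
                                            **model_params)

    params_dict = {
        'layer_sizes': [[500], [1000]],
        'learning_rate': [0.001, 0.0001],
    }
    metric = dc.metrics.Metric(dc.metrics.pearson_r2_score)
    optimizer = dc.hyper.GridHyperparamOpt(model_builder)
    best_model, best_hyperparams, all_results = optimizer.hyperparam_search(
        params_dict, train_dataset, valid_dataset, metric)
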
Gaussian Process Hyperparameter Optimization
--------------------------------------------
.. autoclass:: deepchem.hyper.GaussianProcessHyperparamOpt
:members:
<file_sep>"""
Test that genomic metrics work.
"""
import unittest
import numpy as np
import deepchem as dc
import pytest
try:
import tensorflow as tf
has_tensorflow = True
except:
has_tensorflow = False
from deepchem.metrics.genomic_metrics import get_motif_scores
from deepchem.metrics.genomic_metrics import get_pssm_scores
from deepchem.metrics.genomic_metrics import in_silico_mutagenesis
LETTERS = "ACGT"
class TestGenomicMetrics(unittest.TestCase):
"""
Tests that genomic metrics work as expected.
"""
def test_get_motif_scores(self):
"""Check that motif_scores have correct shape."""
# Encode motif
motif_name = "TAL1_known4"
sequences = np.array(["ACGTA", "GATAG", "CGCGC"])
sequences = dc.utils.genomics_utils.seq_one_hot_encode(sequences,
letters=LETTERS)
# sequences now has shape (3, 4, 5, 1)
self.assertEqual(sequences.shape, (3, 4, 5, 1))
motif_scores = get_motif_scores(sequences, [motif_name])
self.assertEqual(motif_scores.shape, (3, 1, 5))
def test_get_pssm_scores(self):
"""Test get_pssm_scores returns correct shape."""
sequences = np.array(["ACGTA", "GATAG", "CGCGC"])
sequences = dc.utils.genomics_utils.seq_one_hot_encode(sequences,
letters=LETTERS)
# sequences now has shape (3, 4, 5, 1)
self.assertEqual(sequences.shape, (3, 4, 5, 1))
pssm = np.array([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0,
1]])
pssm_scores = get_pssm_scores(sequences, pssm)
self.assertEqual(pssm_scores.shape, (3, 5))
def create_model_for_mutagenesis(self):
keras_model = tf.keras.Sequential([
tf.keras.layers.Conv2D(1, 15, activation='relu', padding='same'),
tf.keras.layers.Conv2D(1, 15, activation='relu', padding='same'),
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(1, activation='relu')
])
return dc.models.KerasModel(keras_model,
dc.models.losses.BinaryCrossEntropy())
@pytest.mark.tensorflow
def test_in_silico_mutagenesis_shape(self):
"""Test in-silico mutagenesis returns correct shape."""
# Construct and train SequenceDNN model
sequences = np.array(["ACGTA", "GATAG", "CGCGC"])
sequences = dc.utils.genomics_utils.seq_one_hot_encode(sequences,
letters=LETTERS)
labels = np.array([1, 0, 0])
labels = np.reshape(labels, (3, 1))
self.assertEqual(sequences.shape, (3, 4, 5, 1))
dataset = dc.data.NumpyDataset(sequences, labels)
model = self.create_model_for_mutagenesis()
model.fit(dataset, nb_epoch=1)
# Call in-silico mutagenesis
mutagenesis_scores = in_silico_mutagenesis(model, sequences)
self.assertEqual(mutagenesis_scores.shape, (1, 3, 4, 5, 1))
@pytest.mark.tensorflow
def test_in_silico_mutagenesis_nonzero(self):
"""Test in-silico mutagenesis returns nonzero output."""
# Construct and train SequenceDNN model
sequences = np.array(["ACGTA", "GATAG", "CGCGC"])
sequences = dc.utils.genomics_utils.seq_one_hot_encode(sequences,
letters=LETTERS)
labels = np.array([1, 0, 0])
labels = np.reshape(labels, (3, 1))
self.assertEqual(sequences.shape, (3, 4, 5, 1))
dataset = dc.data.NumpyDataset(sequences, labels)
model = self.create_model_for_mutagenesis()
model.fit(dataset, nb_epoch=1)
# Call in-silico mutagenesis
mutagenesis_scores = in_silico_mutagenesis(model, sequences)
self.assertEqual(mutagenesis_scores.shape, (1, 3, 4, 5, 1))
# Check nonzero elements exist
assert np.count_nonzero(mutagenesis_scores) > 0
<file_sep>"""
Miscellaneous utility functions.
"""
# flake8: noqa
import logging
logger = logging.getLogger(__name__)
from deepchem.utils.batch_utils import batch_coulomb_matrix_features
from deepchem.utils.conformers import ConformerGenerator
from deepchem.utils.evaluate import relative_difference
from deepchem.utils.evaluate import Evaluator
from deepchem.utils.evaluate import GeneratorEvaluator
from deepchem.utils.coordinate_box_utils import CoordinateBox
from deepchem.utils.coordinate_box_utils import intersect_interval
from deepchem.utils.coordinate_box_utils import intersection
from deepchem.utils.coordinate_box_utils import union
from deepchem.utils.coordinate_box_utils import merge_overlapping_boxes
from deepchem.utils.coordinate_box_utils import get_face_boxes
from deepchem.utils.data_utils import pad_array
from deepchem.utils.data_utils import get_data_dir
from deepchem.utils.data_utils import download_url
from deepchem.utils.data_utils import untargz_file
from deepchem.utils.data_utils import unzip_file
from deepchem.utils.data_utils import UniversalNamedTemporaryFile
from deepchem.utils.data_utils import load_image_files
from deepchem.utils.data_utils import load_sdf_files
from deepchem.utils.data_utils import load_csv_files
from deepchem.utils.data_utils import load_json_files
from deepchem.utils.data_utils import load_pickle_files
from deepchem.utils.data_utils import load_data
from deepchem.utils.data_utils import save_to_disk
from deepchem.utils.data_utils import load_from_disk
from deepchem.utils.data_utils import save_dataset_to_disk
from deepchem.utils.data_utils import load_dataset_from_disk
from deepchem.utils.data_utils import remove_missing_entries
from deepchem.utils.debug_utils import get_print_threshold
from deepchem.utils.debug_utils import set_print_threshold
from deepchem.utils.debug_utils import get_max_print_size
from deepchem.utils.debug_utils import set_max_print_size
from deepchem.utils.fragment_utils import AtomShim
from deepchem.utils.fragment_utils import MolecularFragment
from deepchem.utils.fragment_utils import get_partial_charge
from deepchem.utils.fragment_utils import merge_molecular_fragments
from deepchem.utils.fragment_utils import get_mol_subset
from deepchem.utils.fragment_utils import strip_hydrogens
from deepchem.utils.fragment_utils import get_contact_atom_indices
from deepchem.utils.fragment_utils import reduce_molecular_complex_to_contacts
from deepchem.utils.genomics_utils import seq_one_hot_encode
from deepchem.utils.genomics_utils import encode_bio_sequence
from deepchem.utils.geometry_utils import unit_vector
from deepchem.utils.geometry_utils import angle_between
from deepchem.utils.geometry_utils import generate_random_unit_vector
from deepchem.utils.geometry_utils import generate_random_rotation_matrix
from deepchem.utils.geometry_utils import is_angle_within_cutoff
from deepchem.utils.geometry_utils import compute_centroid
from deepchem.utils.geometry_utils import subtract_centroid
from deepchem.utils.geometry_utils import compute_protein_range
from deepchem.utils.geometry_utils import compute_pairwise_distances
from deepchem.utils.graph_utils import fourier_encode_dist
from deepchem.utils.graph_utils import aggregate_mean
from deepchem.utils.graph_utils import aggregate_max
from deepchem.utils.graph_utils import aggregate_min
from deepchem.utils.graph_utils import aggregate_std
from deepchem.utils.graph_utils import aggregate_var
from deepchem.utils.graph_utils import aggregate_moment
from deepchem.utils.graph_utils import aggregate_sum
from deepchem.utils.graph_utils import scale_identity
from deepchem.utils.graph_utils import scale_amplification
from deepchem.utils.graph_utils import scale_attenuation
from deepchem.utils.hash_utils import hash_ecfp
from deepchem.utils.hash_utils import hash_ecfp_pair
from deepchem.utils.hash_utils import vectorize
from deepchem.utils.molecule_feature_utils import one_hot_encode
from deepchem.utils.molecule_feature_utils import get_atom_type_one_hot
from deepchem.utils.molecule_feature_utils import construct_hydrogen_bonding_info
from deepchem.utils.molecule_feature_utils import get_atom_hydrogen_bonding_one_hot
from deepchem.utils.molecule_feature_utils import get_atom_is_in_aromatic_one_hot
from deepchem.utils.molecule_feature_utils import get_atom_hybridization_one_hot
from deepchem.utils.molecule_feature_utils import get_atom_total_num_Hs_one_hot
from deepchem.utils.molecule_feature_utils import get_atom_chirality_one_hot
from deepchem.utils.molecule_feature_utils import get_atom_formal_charge
from deepchem.utils.molecule_feature_utils import get_atom_formal_charge_one_hot
from deepchem.utils.molecule_feature_utils import get_atom_partial_charge
from deepchem.utils.molecule_feature_utils import get_atom_total_degree_one_hot
from deepchem.utils.molecule_feature_utils import get_bond_type_one_hot
from deepchem.utils.molecule_feature_utils import get_bond_is_in_same_ring_one_hot
from deepchem.utils.molecule_feature_utils import get_bond_is_conjugated_one_hot
from deepchem.utils.molecule_feature_utils import get_bond_stereo_one_hot
from deepchem.utils.molecule_feature_utils import get_bond_graph_distance_one_hot
from deepchem.utils.pdbqt_utils import pdbqt_to_pdb
from deepchem.utils.pdbqt_utils import convert_protein_to_pdbqt
from deepchem.utils.pdbqt_utils import convert_mol_to_pdbqt
from deepchem.utils.docking_utils import write_vina_conf
from deepchem.utils.docking_utils import write_gnina_conf
from deepchem.utils.docking_utils import read_gnina_log
from deepchem.utils.docking_utils import load_docked_ligands
from deepchem.utils.docking_utils import prepare_inputs
from deepchem.utils.voxel_utils import convert_atom_to_voxel
from deepchem.utils.voxel_utils import convert_atom_pair_to_voxel
from deepchem.utils.voxel_utils import voxelize
from deepchem.utils.sequence_utils import hhblits
from deepchem.utils.sequence_utils import hhsearch
try:
from deepchem.utils.pytorch_utils import unsorted_segment_sum
from deepchem.utils.pytorch_utils import segment_sum
except ModuleNotFoundError as e:
logger.warning(
f'Skipped loading some Pytorch utilities, missing a dependency. {e}')
<file_sep>"""
Script that trains GraphConv models on qm7 dataset.
"""
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
import numpy as np
np.random.seed(123)
import tensorflow as tf
tf.random.set_seed(123)
import deepchem as dc
# Load QM7 dataset
tasks, datasets, transformers = dc.molnet.load_qm7(featurizer='GraphConv',
move_mean=True)
train_dataset, valid_dataset, test_dataset = datasets
# Fit models
metric = [
dc.metrics.Metric(dc.metrics.mean_absolute_error, mode="regression"),
dc.metrics.Metric(dc.metrics.pearson_r2_score, mode="regression")
]
# Batch size of models
batch_size = 64
model = dc.models.GraphConvModel(len(tasks),
batch_size=batch_size,
learning_rate=0.001,
mode="regression")
# Fit trained model
model.fit(train_dataset, nb_epoch=50)
print("Evaluating model")
train_scores = model.evaluate(train_dataset, metric, transformers)
valid_scores = model.evaluate(valid_dataset, metric, transformers)
print("Train scores: %s" % str(train_scores))
print("Validation scores: %s" % str(valid_scores))
<file_sep>import os
import numpy as np
import deepchem as dc
import pandas as pd
from rdkit import Chem
def load_pdbbind_labels(labels_file):
"""Loads pdbbind labels as dataframe
Parameters
----------
labels_file: str
Location of PDBbind datafile.
Returns
-------
contents_df: pd.DataFrame
Dataframe containing contents of PDBbind datafile.
"""
contents = []
with open(labels_file) as f:
for line in f:
if line.startswith("#"):
continue
else:
splitline = line.split()
if len(splitline) == 8:
contents.append(splitline)
else:
print("Incorrect data format")
print(splitline)
contents_df = pd.DataFrame(
contents,
columns=("PDB code", "resolution", "release year", "-logKd/Ki", "Kd/Ki",
"ignore-this-field", "reference", "ligand name"))
return contents_df
seed = 123
np.random.seed(seed)
base_dir = os.getcwd()
data_dir = os.path.join(base_dir, "refined_atomconv")
train_dir = os.path.join(base_dir, "train")
test_dir = os.path.join(base_dir, "test")
pdbbind_dir = os.path.join(base_dir, "v2015")
print("Loading ids from %s" % data_dir)
d = dc.data.DiskDataset(data_dir)
ids = d.ids
pdbbind_data_file = os.path.join(pdbbind_dir, "INDEX_general_PL_data.2015")
contents_df = load_pdbbind_labels(pdbbind_data_file)
df_ids = contents_df["PDB code"].values.tolist()
df_years = contents_df["release year"].values
def shard_generator():
for ind, pdb_code in enumerate(ids):
i = df_ids.index(pdb_code)
y = df_years[i]
X = np.zeros((1, 5))
w = np.ones((1, 1))
yield X, y, w, pdb_code
print("Generating year dataset")
temp_d = dc.data.DiskDataset.create_dataset(shard_generator())
print("Performing Stratified split on year dataset")
s = dc.splits.SingletaskStratifiedSplitter()
train_ind, test_ind = s.train_test_indices(temp_d)
print("Using indices from Stratified splitter on pdbbind dataset")
splitter = dc.splits.IndiceSplitter(test_indices=test_ind)
train_dataset, test_dataset = splitter.train_test_split(d, train_dir, test_dir)
<file_sep>import pytest
import numpy as np
import functools
try:
import jax
import jax.numpy as jnp
import haiku as hk
import optax # noqa: F401
from deepchem.models import PINNModel
from deepchem.data import NumpyDataset
from deepchem.models.optimizers import Adam
from jax import jacrev
has_haiku_and_optax = True
except:
has_haiku_and_optax = False
@pytest.mark.jax
def test_sine_x():
"""
    Here we are solving the differential equation f'(x) = -sin(x) with f(0) = 1.
    We give initial conditions for the neural network at x_init --> np.linspace(-1 * np.pi, 1 * np.pi, 5)
    and we try to approximate the function on the domain (-np.pi, np.pi).
"""
# The PINNModel requires you to create two functions
    # `create_eval_fn` for telling the model how to evaluate itself at inference time, and
    # `gradient_fn` for telling the model how to compute the gradients and the
    # regulariser (equation) loss that depends on the differential equation
def create_eval_fn(forward_fn, params):
"""
Calls the function to evaluate the model
"""
@jax.jit
def eval_model(x, rng=None):
bu = forward_fn(params, rng, x)
return jnp.squeeze(bu)
return eval_model
def gradient_fn(forward_fn, loss_outputs, initial_data):
"""
This function calls the gradient function, to implement the backpropagation
"""
boundary_data = initial_data['X0']
boundary_target = initial_data['u0']
@jax.jit
def model_loss(params, target, weights, rng, x_train):
@functools.partial(jax.vmap, in_axes=(None, 0))
def periodic_loss(params, x):
"""
                differential equation => grad(f(x)) = -sin(x)
                minimize the residual r(x) := grad(f(x)) + sin(x)
"""
x = jnp.expand_dims(x, 0)
u_x = jacrev(forward_fn, argnums=(2))(params, rng, x)
return u_x + jnp.sin(x)
u_pred = forward_fn(params, rng, boundary_data)
loss_u = jnp.mean((u_pred - boundary_target)**2)
f_pred = periodic_loss(params, x_train)
loss_f = jnp.mean((f_pred**2))
return loss_u + loss_f
return model_loss
# defining the Haiku model
def f(x):
net = hk.nets.MLP(output_sizes=[256, 128, 1],
activation=jax.nn.softplus)
val = net(x)
return val
init_params, forward_fn = hk.transform(f)
rng = jax.random.PRNGKey(500)
params = init_params(rng, np.random.rand(1000, 1))
opt = Adam(learning_rate=1e-2)
# giving an initial boundary condition at 5 points between [-pi, pi] which will be used in l2 loss
in_array = np.linspace(-1 * np.pi, 1 * np.pi, 5)
out_array = np.cos(in_array)
initial_data = {
'X0': jnp.expand_dims(in_array, 1),
'u0': jnp.expand_dims(out_array, 1)
}
j_m = PINNModel(forward_fn=forward_fn,
params=params,
initial_data=initial_data,
batch_size=1000,
optimizer=opt,
grad_fn=gradient_fn,
eval_fn=create_eval_fn,
deterministic=True,
log_frequency=1000)
# defining our training data. We feed 100 points between [-pi, pi] without the labels,
# which will be used as the differential loss(regulariser)
X_f = np.expand_dims(np.linspace(-1 * np.pi, 1 * np.pi, 100), 1)
dataset = NumpyDataset(X_f)
_ = j_m.fit(dataset, nb_epochs=1000)
# The expected solution must be as close to cos(x)
test = np.expand_dims(np.linspace(-1 * np.pi, 1 * np.pi, 1000), 1)
dataset_test = NumpyDataset(test)
ans = j_m.predict(dataset_test)
out_array = np.cos(test).squeeze()
assert np.allclose(out_array, ans, atol=1e-01)
<file_sep>import deepchem as dc
import numpy as np
def test_IRV_transformer():
n_features = 128
n_samples = 20
test_samples = 5
n_tasks = 2
X = np.random.randint(2, size=(n_samples, n_features))
y = np.zeros((n_samples, n_tasks))
w = np.ones((n_samples, n_tasks))
dataset = dc.data.NumpyDataset(X, y, w, ids=None)
X_test = np.random.randint(2, size=(test_samples, n_features))
y_test = np.zeros((test_samples, n_tasks))
w_test = np.ones((test_samples, n_tasks))
test_dataset = dc.data.NumpyDataset(X_test, y_test, w_test, ids=None)
sims = np.sum(X_test[0, :] * X, axis=1, dtype=float) / np.sum(
np.sign(X_test[0, :] + X), axis=1, dtype=float)
sims = sorted(sims, reverse=True)
IRV_transformer = dc.trans.IRVTransformer(10, n_tasks, dataset)
test_dataset_trans = IRV_transformer.transform(test_dataset)
dataset_trans = IRV_transformer.transform(dataset)
assert test_dataset_trans.X.shape == (test_samples, 20 * n_tasks)
assert np.allclose(test_dataset_trans.X[0, :10], sims[:10])
assert np.allclose(test_dataset_trans.X[0, 10:20], [0] * 10)
assert not np.isclose(dataset_trans.X[0, 0], 1.)
<file_sep>"""
Gradient Boosting Decision Tree wrapper interface
"""
import os
import logging
import tempfile
import warnings
from typing import Callable, Optional, Union
import numpy as np
from sklearn.base import BaseEstimator
from sklearn.model_selection import train_test_split
from deepchem.data import Dataset
from deepchem.models.sklearn_models import SklearnModel
logger = logging.getLogger(__name__)
class GBDTModel(SklearnModel):
"""Wrapper class that wraps GBDT models as DeepChem models.
This class supports LightGBM/XGBoost models.
"""
def __init__(self,
model: BaseEstimator,
model_dir: Optional[str] = None,
early_stopping_rounds: int = 50,
eval_metric: Optional[Union[str, Callable]] = None,
**kwargs):
"""
Parameters
----------
model: BaseEstimator
The model instance of scikit-learn wrapper LightGBM/XGBoost models.
model_dir: str, optional (default None)
Path to directory where model will be stored.
early_stopping_rounds: int, optional (default 50)
Activates early stopping. Validation metric needs to improve at least once
in every early_stopping_rounds round(s) to continue training.
        eval_metric: Union[str, Callable]
            If string, it should be a built-in evaluation metric to use.
            If callable, it should be a custom evaluation metric; see the
            LightGBM/XGBoost documentation for details.
"""
if model_dir is not None:
if not os.path.exists(model_dir):
os.makedirs(model_dir)
else:
model_dir = tempfile.mkdtemp()
self.model_dir = model_dir
self.model = model
self.model_class = model.__class__
self.early_stopping_rounds = early_stopping_rounds
self.model_type = self._check_model_type()
if eval_metric is None:
if self.model_type == 'classification':
self.eval_metric: Optional[Union[str, Callable]] = 'auc'
elif self.model_type == 'regression':
self.eval_metric = 'mae'
else:
self.eval_metric = eval_metric
else:
self.eval_metric = eval_metric
def _check_model_type(self) -> str:
class_name = self.model.__class__.__name__
if class_name.endswith('Classifier'):
return 'classification'
elif class_name.endswith('Regressor'):
return 'regression'
elif class_name == 'NoneType':
return 'none'
else:
raise ValueError(
'{} is not a supported model instance.'.format(class_name))
    def fit(self, dataset: Dataset):
        """Fits GBDT model on all the data.
        First, this function splits all the data into train and valid sets (8:2)
        and finds the best n_estimators. Then, it retrains on all the data using
        best n_estimators * 1.25.
Parameters
----------
dataset: Dataset
The `Dataset` to train this model on.
"""
X = dataset.X
y = np.squeeze(dataset.y)
        # GBDT doesn't support multi-output (multi-task)
        if len(y.shape) != 1:
            raise ValueError("GBDT model doesn't support multi-output (multi-task)")
seed = self.model.random_state
stratify = None
if self.model_type == 'classification':
stratify = y
# Find optimal n_estimators based on original learning_rate and early_stopping_rounds
X_train, X_test, y_train, y_test = train_test_split(X,
y,
test_size=0.2,
random_state=seed,
stratify=stratify)
self.model.fit(X_train,
y_train,
early_stopping_rounds=self.early_stopping_rounds,
eval_metric=self.eval_metric,
eval_set=[(X_test, y_test)])
# retrain model to whole data using best n_estimators * 1.25
if self.model.__class__.__name__.startswith('XGB'):
estimated_best_round = np.round(self.model.best_ntree_limit * 1.25)
else:
estimated_best_round = np.round(self.model.best_iteration_ * 1.25)
self.model.n_estimators = np.int64(estimated_best_round)
self.model.fit(X, y, eval_metric=self.eval_metric)
    def fit_with_eval(self, train_dataset: Dataset, valid_dataset: Dataset):
        """Fits GBDT model with an explicit validation set.
Parameters
----------
train_dataset: Dataset
The `Dataset` to train this model on.
valid_dataset: Dataset
The `Dataset` to validate this model on.
"""
X_train, X_valid = train_dataset.X, valid_dataset.X
y_train, y_valid = np.squeeze(train_dataset.y), np.squeeze(
valid_dataset.y)
        # GBDT doesn't support multi-output (multi-task)
        if len(y_train.shape) != 1 or len(y_valid.shape) != 1:
            raise ValueError("GBDT model doesn't support multi-output (multi-task)")
self.model.fit(X_train,
y_train,
early_stopping_rounds=self.early_stopping_rounds,
eval_metric=self.eval_metric,
eval_set=[(X_valid, y_valid)])
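# Illustrative usage sketch (commented out; assumes LightGBM is installed and
# that `train_dataset`/`valid_dataset` are DeepChem Dataset objects):
#
#   import lightgbm as lgb
#   sklearn_model = lgb.LGBMRegressor(n_estimators=1000, random_state=123)
#   model = GBDTModel(sklearn_model, early_stopping_rounds=20)
#   model.fit(train_dataset)          # internal 80/20 split, then refit on all data
#   # or, with an explicit validation set:
#   model.fit_with_eval(train_dataset, valid_dataset)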
#########################################
# Deprecation warnings for XGBoostModel
#########################################
class XGBoostModel(GBDTModel):
def __init__(self, *args, **kwargs):
warnings.warn(
"XGBoostModel is deprecated and has been renamed to GBDTModel.",
FutureWarning)
super(XGBoostModel, self).__init__(*args, **kwargs)
<file_sep>import deepchem as dc
import unittest
class TestDNASim(unittest.TestCase):
    def test_motif_density_localization_simulation(self):
        """Test motif density localization simulation."""
params = {
"motif_name": "TAL1_known4",
"seq_length": 1000,
"center_size": 150,
"min_motif_counts": 2,
"max_motif_counts": 4,
"num_pos": 30,
"num_neg": 30,
"GC_fraction": 0.4
}
sequences, y, embed = dc.molnet.simulate_motif_density_localization(
**params)
assert sequences.shape == (60,)
assert y.shape == (60, 1)
def test_motif_counting_simulation(self):
"Test motif counting"
params = {
"motif_name": "TAL1_known4",
"seq_length": 1000,
"pos_counts": [5, 10],
"neg_counts": [1, 2],
"num_pos": 30,
"num_neg": 30,
"GC_fraction": 0.4
}
sequences, y, embed = dc.molnet.simulate_motif_counting(**params)
assert sequences.shape == (60,)
assert y.shape == (60, 1)
def test_simple_motif_embedding(self):
"Test simple motif embedding"
params = {
"motif_name": "TAL1_known4",
"seq_length": 1000,
"num_seqs": 30,
"GC_fraction": 0.4
}
sequences, embed = dc.molnet.simple_motif_embedding(**params)
assert sequences.shape == (30,)
def test_motif_density(self):
"Test motif density"
params = {
"motif_name": "TAL1_known4",
"seq_length": 1000,
"num_seqs": 30,
"min_counts": 2,
"max_counts": 4,
"GC_fraction": 0.4
}
sequences, embed = dc.molnet.motif_density(**params)
assert sequences.shape == (30,)
def test_single_motif_detection(self):
"Test single motif detection"
params = {
"motif_name": "TAL1_known4",
"seq_length": 1000,
"num_pos": 30,
"num_neg": 30,
"GC_fraction": 0.4
}
sequences, y, embed = dc.molnet.simulate_single_motif_detection(
**params)
assert sequences.shape == (60,)
assert y.shape == (60, 1)
<file_sep>from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
import warnings
import numpy as np
import tensorflow as tf
try:
from collections.abc import Sequence as SequenceCollection
except:
from collections import Sequence as SequenceCollection
from deepchem.nn import model_ops
class RobustMultitaskClassifier(MultiTaskClassifier):
"""Implements a neural network for robust multitasking.
Key idea is to have bypass layers that feed directly from features
    to task output. Hopefully this will allow tasks to route around bad
multitasking.
"""
def __init__(self,
n_tasks,
n_features,
layer_sizes=[1000],
weight_init_stddevs=0.02,
bypass_layer_sizes=[100],
bypass_weight_init_stddevs=[.02],
bypass_bias_init_consts=[1.],
bypass_dropouts=[.5],
**kwargs):
"""Create a MultiTaskClassifier.
In addition to the following arguments, this class also accepts
all the keyword arguments from MultiTaskClassifier.
Parameters
----------
n_tasks: int
number of tasks
n_features: int
number of features
layer_sizes: list
the size of each dense layer in the network. The length of
this list determines the number of layers.
weight_init_stddevs: list or float
the standard deviation of the distribution to use for weight
initialization of each layer. The length of this list should
equal len(layer_sizes). Alternatively this may be a single
value instead of a list, in which case the same value is used
for every layer.
bypass_layer_sizes: list
the size of each dense bypass layer in the network. The length
of this list determines the number of layers.
bypass_weight_init_stddevs: list or float
the standard deviation of the distribution to use for weight
initialization of each layer. The length of this list should
equal len(bypass_layer_sizes). Alternatively this may be a
single value instead of a list, in which case the same value is
used for every layer.
        bypass_bias_init_consts: list or float
the value to initialize the biases in each layer to. The
length of this list should equal len(bypass_layer_sizes).
Alternatively this may be a single value instead of a list, in
which case the same value is used for every layer.
bypass_dropouts: list or float
            the dropout probability to use for each layer. The length of
this list should equal len(bypass_layer_sizes). Alternatively
this may be a single value instead of a list, in which case the
same value is used for every layer.
"""
self.bypass_layer_sizes = bypass_layer_sizes
self.bypass_weight_init_stddevs = bypass_weight_init_stddevs
self.bypass_bias_init_consts = bypass_bias_init_consts
self.bypass_dropouts = bypass_dropouts
n_layers = len(layer_sizes)
assert n_layers == len(bypass_layer_sizes)
if not isinstance(weight_init_stddevs, SequenceCollection):
weight_init_stddevs = [weight_init_stddevs] * n_layers
if not isinstance(bypass_weight_init_stddevs, SequenceCollection):
bypass_weight_init_stddevs = [bypass_weight_init_stddevs] * n_layers
if not isinstance(bias_init_consts, SequenceCollection):
bias_init_consts = [bias_init_consts] * n_layers
if not isinstance(dropouts, SequenceCollection):
dropouts = [dropouts] * n_layers
if not isinstance(activation_fns, SequenceCollection):
activation_fns = [activation_fns] * n_layers
# Add the input features.
mol_features = Feature(shape=(None, n_features))
prev_layer = mol_features
layer_sizes = self.layer_sizes
weight_init_stddevs = self.weight_init_stddevs
bias_init_consts = self.bias_init_consts
dropouts = self.dropouts
bypass_layer_sizes = self.bypass_layer_sizes
bypass_weight_init_stddevs = self.bypass_weight_init_stddevs
bypass_bias_init_consts = self.bypass_bias_init_consts
bypass_dropouts = self.bypass_dropouts
lengths_set = {
len(layer_sizes),
len(weight_init_stddevs),
len(bias_init_consts),
len(dropouts),
}
assert len(lengths_set) == 1, "All layer params must have same length."
num_layers = lengths_set.pop()
assert num_layers > 0, "Must have some layers defined."
bypass_lengths_set = {
len(bypass_layer_sizes),
len(bypass_weight_init_stddevs),
len(bypass_bias_init_consts),
len(bypass_dropouts),
}
assert len(bypass_lengths_set) == 1, (
"All bypass_layer params" + " must have same length.")
num_bypass_layers = bypass_lengths_set.pop()
label_placeholders = self.add_label_placeholders(graph, name_scopes)
weight_placeholders = self.add_example_weight_placeholders(
graph, name_scopes)
if training:
graph.queue = tf.FIFOQueue(
capacity=5,
dtypes=[tf.float32] *
(len(label_placeholders) + len(weight_placeholders) + 1))
graph.enqueue = graph.queue.enqueue(
[mol_features] + label_placeholders + weight_placeholders)
queue_outputs = graph.queue.dequeue()
labels = queue_outputs[1:len(label_placeholders) + 1]
weights = queue_outputs[len(label_placeholders) + 1:]
prev_layer = queue_outputs[0]
else:
labels = label_placeholders
weights = weight_placeholders
prev_layer = mol_features
top_layer = prev_layer
prev_layer_size = num_features
for i in range(num_layers):
# layer has shape [None, layer_sizes[i]]
print("Adding weights of shape %s" % str(
[prev_layer_size, layer_sizes[i]]))
layer = tf.nn.relu(
model_ops.fully_connected_layer(
tensor=prev_layer,
size=layer_sizes[i],
weight_init=tf.truncated_normal(
shape=[prev_layer_size, layer_sizes[i]],
stddev=weight_init_stddevs[i]),
bias_init=tf.constant(
value=bias_init_consts[i], shape=[layer_sizes[i]])))
layer = model_ops.dropout(layer, dropouts[i], training)
prev_layer = layer
prev_layer_size = layer_sizes[i]
output = []
# top_multitask_layer has shape [None, layer_sizes[-1]]
top_multitask_layer = prev_layer
for task in range(self.n_tasks):
# TODO(rbharath): Might want to make it feasible to have multiple
# bypass layers.
# Construct task bypass layer
prev_bypass_layer = top_layer
prev_bypass_layer_size = num_features
for i in range(num_bypass_layers):
# bypass_layer has shape [None, bypass_layer_sizes[i]]
print("Adding bypass weights of shape %s" % str(
[prev_bypass_layer_size, bypass_layer_sizes[i]]))
bypass_layer = tf.nn.relu(
model_ops.fully_connected_layer(
tensor=prev_bypass_layer,
size=bypass_layer_sizes[i],
weight_init=tf.truncated_normal(
shape=[prev_bypass_layer_size, bypass_layer_sizes[i]],
stddev=bypass_weight_init_stddevs[i]),
bias_init=tf.constant(
value=bypass_bias_init_consts[i],
shape=[bypass_layer_sizes[i]])))
bypass_layer = model_ops.dropout(bypass_layer, bypass_dropouts[i],
training)
prev_bypass_layer = bypass_layer
prev_bypass_layer_size = bypass_layer_sizes[i]
top_bypass_layer = prev_bypass_layer
if num_bypass_layers > 0:
# task_layer has shape [None, layer_sizes[-1] + bypass_layer_sizes[-1]]
task_layer = tf.concat(
axis=1, values=[top_multitask_layer, top_bypass_layer])
task_layer_size = layer_sizes[-1] + bypass_layer_sizes[-1]
else:
task_layer = top_multitask_layer
task_layer_size = layer_sizes[-1]
print("Adding output weights of shape %s" % str([task_layer_size, 1]))
output.append(
model_ops.logits(
task_layer,
num_classes=2,
weight_init=tf.truncated_normal(
shape=[task_layer_size, 2], stddev=weight_init_stddevs[-1]),
bias_init=tf.constant(value=bias_init_consts[-1], shape=[2])))
return (output, labels, weights)
class RobustMultitaskRegressor(TensorflowMultiTaskRegressor):
"""Implements a neural network for robust multitasking.
Key idea is to have bypass layers that feed directly from features to task
output. Hopefully will allow tasks to route around bad multitasking.
"""
def __init__(self,
n_tasks,
n_features,
logdir=None,
bypass_layer_sizes=[100],
bypass_weight_init_stddevs=[.02],
bypass_bias_init_consts=[1.],
bypass_dropouts=[.5],
**kwargs):
warnings.warn("RobustMultiTaskRegressor is deprecated. "
"Will be removed in DeepChem 1.4.", DeprecationWarning)
self.bypass_layer_sizes = bypass_layer_sizes
self.bypass_weight_init_stddevs = bypass_weight_init_stddevs
self.bypass_bias_init_consts = bypass_bias_init_consts
self.bypass_dropouts = bypass_dropouts
super(RobustMultitaskRegressor, self).__init__(n_tasks, n_features, logdir,
**kwargs)
def build(self, graph, name_scopes, training):
"""Constructs the graph architecture as specified in its config.
This method creates the following Placeholders:
mol_features: Molecule descriptor (e.g. fingerprint) tensor with shape
batch_size x num_features.
"""
num_features = self.n_features
placeholder_scope = TensorflowGraph.get_placeholder_scope(
graph, name_scopes)
with graph.as_default():
with placeholder_scope:
mol_features = tf.placeholder(
tf.float32, shape=[None, num_features], name='mol_features')
layer_sizes = self.layer_sizes
weight_init_stddevs = self.weight_init_stddevs
bias_init_consts = self.bias_init_consts
dropouts = self.dropouts
bypass_layer_sizes = self.bypass_layer_sizes
bypass_weight_init_stddevs = self.bypass_weight_init_stddevs
bypass_bias_init_consts = self.bypass_bias_init_consts
bypass_dropouts = self.bypass_dropouts
lengths_set = {
len(layer_sizes),
len(weight_init_stddevs),
len(bias_init_consts),
len(dropouts),
}
assert len(lengths_set) == 1, "All layer params must have same length."
num_layers = lengths_set.pop()
assert num_layers > 0, "Must have some layers defined."
bypass_lengths_set = {
len(bypass_layer_sizes),
len(bypass_weight_init_stddevs),
len(bypass_bias_init_consts),
len(bypass_dropouts),
}
assert len(bypass_lengths_set) == 1, (
"All bypass_layer params" + " must have same length.")
num_bypass_layers = bypass_lengths_set.pop()
label_placeholders = self.add_label_placeholders(graph, name_scopes)
weight_placeholders = self.add_example_weight_placeholders(
graph, name_scopes)
if training:
graph.queue = tf.FIFOQueue(
capacity=5,
dtypes=[tf.float32] *
(len(label_placeholders) + len(weight_placeholders) + 1))
graph.enqueue = graph.queue.enqueue(
[mol_features] + label_placeholders + weight_placeholders)
queue_outputs = graph.queue.dequeue()
labels = queue_outputs[1:len(label_placeholders) + 1]
weights = queue_outputs[len(label_placeholders) + 1:]
prev_layer = queue_outputs[0]
else:
labels = label_placeholders
weights = weight_placeholders
prev_layer = mol_features
top_layer = prev_layer
prev_layer_size = num_features
for i in range(num_layers):
# layer has shape [None, layer_sizes[i]]
print("Adding weights of shape %s" % str(
[prev_layer_size, layer_sizes[i]]))
layer = tf.nn.relu(
model_ops.fully_connected_layer(
tensor=prev_layer,
size=layer_sizes[i],
weight_init=tf.truncated_normal(
shape=[prev_layer_size, layer_sizes[i]],
stddev=weight_init_stddevs[i]),
bias_init=tf.constant(
value=bias_init_consts[i], shape=[layer_sizes[i]])))
layer = model_ops.dropout(layer, dropouts[i], training)
prev_layer = layer
prev_layer_size = layer_sizes[i]
output = []
# top_multitask_layer has shape [None, layer_sizes[-1]]
top_multitask_layer = prev_layer
for task in range(self.n_tasks):
# TODO(rbharath): Might want to make it feasible to have multiple
# bypass layers.
# Construct task bypass layer
prev_bypass_layer = top_layer
prev_bypass_layer_size = num_features
for i in range(num_bypass_layers):
# bypass_layer has shape [None, bypass_layer_sizes[i]]
print("Adding bypass weights of shape %s" % str(
[prev_bypass_layer_size, bypass_layer_sizes[i]]))
bypass_layer = tf.nn.relu(
model_ops.fully_connected_layer(
tensor=prev_bypass_layer,
size=bypass_layer_sizes[i],
weight_init=tf.truncated_normal(
shape=[prev_bypass_layer_size, bypass_layer_sizes[i]],
stddev=bypass_weight_init_stddevs[i]),
bias_init=tf.constant(
value=bypass_bias_init_consts[i],
shape=[bypass_layer_sizes[i]])))
bypass_layer = model_ops.dropout(bypass_layer, bypass_dropouts[i],
training)
prev_bypass_layer = bypass_layer
prev_bypass_layer_size = bypass_layer_sizes[i]
top_bypass_layer = prev_bypass_layer
if num_bypass_layers > 0:
# task_layer has shape [None, layer_sizes[-1] + bypass_layer_sizes[-1]]
task_layer = tf.concat(
axis=1, values=[top_multitask_layer, top_bypass_layer])
task_layer_size = layer_sizes[-1] + bypass_layer_sizes[-1]
else:
task_layer = top_multitask_layer
task_layer_size = layer_sizes[-1]
print("Adding output weights of shape %s" % str([task_layer_size, 1]))
output.append(
tf.squeeze(
model_ops.fully_connected_layer(
tensor=task_layer,
size=1,
weight_init=tf.truncated_normal(
shape=[task_layer_size, 1],
stddev=weight_init_stddevs[-1]),
bias_init=tf.constant(
value=bias_init_consts[-1], shape=[1])),
axis=1))
return (output, labels, weights)
<file_sep>from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
__author__ = "<NAME>"
__copyright__ = "Copyright 2016, Stanford University"
__license__ = "MIT"
import os
import sys
from subprocess import call
from atomicnet_pdbbind_datasets import load_core_pdbbind_fragment_coordinates
call([
"wget",
"http://deepchem.io.s3-website-us-west-1.amazonaws.com/datasets/pdbbind_v2015.tar.gz"
])
call(["tar", "-xvzf", "pdbbind_v2015.tar.gz"])
# This could be done with openbabel in python
call(["convert_ligand_sdf_to_pdb.sh"])
base_dir = os.getcwd()
pdbbind_dir = os.path.join(base_dir, "v2015")
datafile = "INDEX_core_data.2013"
frag1_num_atoms = 140
frag2_num_atoms = 821
complex_num_atoms = 908
max_num_neighbors = 8
neighbor_cutoff = 12.0
pdbbind_tasks, dataset, transformers = load_core_pdbbind_fragment_coordinates(
frag1_num_atoms, frag2_num_atoms, complex_num_atoms, max_num_neighbors,
neighbor_cutoff, pdbbind_dir, base_dir, datafile)
<file_sep>import pytest
import deepchem as dc
from deepchem.feat.graph_data import BatchGraphData
@pytest.mark.torch
def test_grover_batch_mol_graph():
import torch
from deepchem.utils.grover import extract_grover_attributes
grover_featurizer = dc.feat.GroverFeaturizer(
features_generator=dc.feat.CircularFingerprint())
smiles = ['CC', 'CCC']
mol_graphs = grover_featurizer.featurize(smiles)
mol_graph = mol_graphs[0]
batched_mol_graph = BatchGraphData(mol_graphs)
f_atoms, f_bonds, a2b, b2a, b2revb, a2a, a_scope, b_scope, fg_labels, additional_features = extract_grover_attributes(
batched_mol_graph)
# 5 atoms: CC -> 2, CCC -> 3
assert f_atoms.shape == (5, mol_graph.node_features.shape[1])
# 6 bond entries: CC -> 2, CCC -> 4 (each chemical bond is stored as
# two directed edges, so a single bond contributes 2 entries)
assert f_bonds.shape == (6, mol_graph.edge_features.shape[1])
assert fg_labels.shape == (2, mol_graph.fg_labels.shape[0])
assert additional_features.shape == (2,
mol_graph.additional_features.shape[0])
assert (a_scope == torch.Tensor([[0, 2], [2, 3]])).all()
assert (b_scope == torch.Tensor([[0, 2], [2, 4]])).all()
assert (a2b == torch.Tensor([[1, 0], [0, 0], [3, 0], [5, 0], [2, 4]])).all()
assert (b2a == torch.Tensor([0, 1, 2, 4, 3, 4])).all()
assert (b2revb == torch.Tensor([1, 0, 3, 2, 5, 4])).all()
assert (a2a == torch.Tensor([[1, 0], [0, 0], [4, 0], [4, 0], [2, 3]])).all()
<file_sep># flake8:noqa
from deepchem.models.jax_models.jax_model import JaxModel
from deepchem.models.jax_models.pinns_model import PINNModel
<file_sep># Dataset overview
The Tox21 datasets were used in the recent [Tox21 Data Challenge](https://tripod.nih.gov/tox21/challenge/); they contain experimental data for targets relevant to drug toxicity prediction.
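A minimal loading sketch via MoleculeNet (the `featurizer` choice below is illustrative; see `dc.molnet.load_tox21` for the full set of options):

```python
import deepchem as dc

# Load the 12 Tox21 tasks as featurized train/valid/test splits
tasks, (train, valid, test), transformers = dc.molnet.load_tox21(featurizer="ECFP")
print(len(tasks))
```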
Ref: <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>
arXiv preprint arXiv:1502.02072<file_sep>"""
Atomic coordinate featurizer.
"""
import logging
import warnings
import numpy as np
from deepchem.feat.base_classes import Featurizer, ComplexFeaturizer
from deepchem.feat.molecule_featurizers import AtomicCoordinates
from deepchem.utils.data_utils import pad_array
from deepchem.utils.rdkit_utils import MoleculeLoadException, get_xyz_from_mol, \
load_molecule, merge_molecules_xyz, merge_molecules
def compute_neighbor_list(coords, neighbor_cutoff, max_num_neighbors,
periodic_box_size):
"""Computes a neighbor list from atom coordinates."""
N = coords.shape[0]
try:
import mdtraj
except ModuleNotFoundError:
raise ImportError("This function requires mdtraj to be installed.")
traj = mdtraj.Trajectory(coords.reshape((1, N, 3)), None)
box_size = None
if periodic_box_size is not None:
box_size = np.array(periodic_box_size)
traj.unitcell_vectors = np.array(
[[[box_size[0], 0, 0], [0, box_size[1], 0], [0, 0, box_size[2]]]],
dtype=np.float32)
neighbors = mdtraj.geometry.compute_neighborlist(traj, neighbor_cutoff)
neighbor_list = {}
for i in range(N):
if max_num_neighbors is not None and len(
neighbors[i]) > max_num_neighbors:
delta = coords[i] - coords.take(neighbors[i], axis=0)
if box_size is not None:
delta -= np.round(delta / box_size) * box_size
dist = np.linalg.norm(delta, axis=1)
sorted_neighbors = list(zip(dist, neighbors[i]))
sorted_neighbors.sort()
neighbor_list[i] = [
sorted_neighbors[j][1] for j in range(max_num_neighbors)
]
else:
neighbor_list[i] = list(neighbors[i])
return neighbor_list
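# Illustrative use (requires mdtraj): for an (N, 3) coordinate array,
# compute_neighbor_list(coords, neighbor_cutoff=4.0, max_num_neighbors=8, periodic_box_size=None)
# returns a dict mapping each atom index to the indices of at most 8 neighbors within 4 Angstroms.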
class NeighborListAtomicCoordinates(Featurizer):
"""
Adjacency List of neighbors in 3-space
Neighbors determined by user-defined distance cutoff [in Angstrom].
https://en.wikipedia.org/wiki/Cell_list
Ref: http://www.cs.cornell.edu/ron/references/1989/Calculations%20of%20a%20List%20of%20Neighbors%20in%20Molecular%20Dynamics%20Si.pdf
Parameters
----------
max_num_neighbors: int, optional
Maximum number of neighbors to keep for each atom (the closest ones are kept); None means no limit.
neighbor_cutoff: float
Threshold distance [Angstroms] for counting neighbors.
periodic_box_size: 3 element array
Dimensions of the periodic box in Angstroms, or None to not use periodic boundary conditions.
"""
def __init__(self,
max_num_neighbors=None,
neighbor_cutoff=4,
periodic_box_size=None):
if neighbor_cutoff <= 0:
raise ValueError("neighbor_cutoff must be positive value.")
if max_num_neighbors is not None:
if not isinstance(max_num_neighbors, int) or max_num_neighbors <= 0:
raise ValueError("max_num_neighbors must be positive integer.")
self.max_num_neighbors = max_num_neighbors
self.neighbor_cutoff = neighbor_cutoff
self.periodic_box_size = periodic_box_size
# Type of data created by this featurizer
self.dtype = object
self.bohr_coords_featurizer = AtomicCoordinates(use_bohr=True)
self.coords_featurizer = AtomicCoordinates(use_bohr=False)
def _featurize(self, mol):
"""
Compute neighbor list.
Parameters
----------
mol: rdkit Mol
To be featurized.
"""
# TODO(rbharath): Should this return a list?
bohr_coords = self.bohr_coords_featurizer._featurize(mol)
coords = self.coords_featurizer._featurize(mol)
neighbor_list = compute_neighbor_list(coords, self.neighbor_cutoff,
self.max_num_neighbors,
self.periodic_box_size)
return (bohr_coords, neighbor_list)
class NeighborListComplexAtomicCoordinates(ComplexFeaturizer):
"""
Adjacency list of neighbors for protein-ligand complexes in 3-space.
Neighbors determined by user-defined distance cutoff.
"""
def __init__(self, max_num_neighbors=None, neighbor_cutoff=4):
if neighbor_cutoff <= 0:
raise ValueError("neighbor_cutoff must be positive value.")
if max_num_neighbors is not None:
if not isinstance(max_num_neighbors, int) or max_num_neighbors <= 0:
raise ValueError("max_num_neighbors must be positive integer.")
self.max_num_neighbors = max_num_neighbors
self.neighbor_cutoff = neighbor_cutoff
# Type of data created by this featurizer
self.dtype = object
def _featurize(self, datapoint, **kwargs):
"""
Compute neighbor list for complex.
Parameters
----------
datapoint: Tuple[str, str]
Filenames for molecule and protein.
"""
if 'complex' in kwargs:
datapoint = kwargs.get("complex")
raise DeprecationWarning(
'Complex is being phased out as a parameter, please pass "datapoint" instead.'
)
mol_pdb_file, protein_pdb_file = datapoint
mol_coords, ob_mol = load_molecule(mol_pdb_file)
protein_coords, protein_mol = load_molecule(protein_pdb_file)
system_coords = merge_molecules_xyz([mol_coords, protein_coords])
system_neighbor_list = compute_neighbor_list(system_coords,
self.neighbor_cutoff,
self.max_num_neighbors,
None)
return (system_coords, system_neighbor_list)
class AtomicConvFeaturizer(ComplexFeaturizer):
"""This class computes the featurization that corresponds to AtomicConvModel.
This class computes featurizations needed for AtomicConvModel.
Given two molecular structures, it computes a number of useful
geometric features. In particular, for each molecule and the global
complex, it computes a coordinates matrix of size (N_atoms, 3)
where N_atoms is the number of atoms. It also computes a
neighbor-list, a dictionary with N_atoms elements where
neighbor-list[i] is a list of the atoms the i-th atom has as
neighbors. In addition, it computes a z-matrix for the molecule
which is an array of shape (N_atoms,) that contains the atomic
number of that atom.
Since the featurization computes these three quantities for each of
the two molecules and the complex, a total of 9 quantities are
returned for each complex. Note that for efficiency, fragments of
the molecules can be provided rather than the full molecules
themselves.
"""
def __init__(self,
frag1_num_atoms,
frag2_num_atoms,
complex_num_atoms,
max_num_neighbors,
neighbor_cutoff,
strip_hydrogens=True):
"""
Parameters
----------
frag1_num_atoms: int
Maximum number of atoms in fragment 1.
frag2_num_atoms: int
Maximum number of atoms in fragment 2.
complex_num_atoms: int
Maximum number of atoms in complex of frag1/frag2 together.
max_num_neighbors: int
Maximum number of atoms considered as neighbors.
neighbor_cutoff: float
Maximum distance (angstroms) for two atoms to be considered as
neighbors. If more than `max_num_neighbors` atoms fall within
this cutoff, the closest `max_num_neighbors` will be used.
strip_hydrogens: bool (default True)
Remove hydrogens before computing featurization.
"""
self.frag1_num_atoms = frag1_num_atoms
self.frag2_num_atoms = frag2_num_atoms
self.complex_num_atoms = complex_num_atoms
self.max_num_neighbors = max_num_neighbors
self.neighbor_cutoff = neighbor_cutoff
self.strip_hydrogens = strip_hydrogens
self.neighborlist_featurizer = NeighborListComplexAtomicCoordinates(
self.max_num_neighbors, self.neighbor_cutoff)
def _featurize(self, complex):
mol_pdb_file, protein_pdb_file = complex
try:
frag1_coords, frag1_mol = load_molecule(mol_pdb_file,
is_protein=False,
sanitize=True,
add_hydrogens=False)
frag2_coords, frag2_mol = load_molecule(protein_pdb_file,
is_protein=True,
sanitize=True,
add_hydrogens=False)
except MoleculeLoadException:
# Currently handles loading failures by returning None
# TODO: Is there a better handling procedure?
logging.warning(
"Some molecules cannot be loaded by Rdkit. Skipping")
return None
except ImportError as e:
logging.warning("%s" % e)
raise ImportError(e)
system_mol = merge_molecules([frag1_mol, frag2_mol])
system_coords = get_xyz_from_mol(system_mol)
frag1_coords, frag1_mol = self._strip_hydrogens(frag1_coords, frag1_mol)
frag2_coords, frag2_mol = self._strip_hydrogens(frag2_coords, frag2_mol)
system_coords, system_mol = self._strip_hydrogens(
system_coords, system_mol)
try:
frag1_coords, frag1_neighbor_list, frag1_z = self.featurize_mol(
frag1_coords, frag1_mol, self.frag1_num_atoms)
frag2_coords, frag2_neighbor_list, frag2_z = self.featurize_mol(
frag2_coords, frag2_mol, self.frag2_num_atoms)
system_coords, system_neighbor_list, system_z = self.featurize_mol(
system_coords, system_mol, self.complex_num_atoms)
except ValueError:
logging.warning(
"max_atoms was set too low. Some complexes too large and skipped"
)
return None
except ImportError as e:
logging.warning("%s" % e)
raise ImportError(e)
return frag1_coords, frag1_neighbor_list, frag1_z, frag2_coords, frag2_neighbor_list, frag2_z, \
system_coords, system_neighbor_list, system_z
def get_Z_matrix(self, mol, max_atoms):
if len(mol.GetAtoms()) > max_atoms:
raise ValueError(
"A molecule is larger than permitted by max_atoms. "
"Increase max_atoms and try again.")
return pad_array(
np.array([atom.GetAtomicNum() for atom in mol.GetAtoms()]),
max_atoms)
def featurize_mol(self, coords, mol, max_num_atoms):
logging.info("Featurizing molecule of size: %d", len(mol.GetAtoms()))
neighbor_list = compute_neighbor_list(coords, self.neighbor_cutoff,
self.max_num_neighbors, None)
# pad outputs
z = self.get_Z_matrix(mol, max_num_atoms)
z = pad_array(z, max_num_atoms)
coords = pad_array(coords, (max_num_atoms, 3))
return coords, neighbor_list, z
def _strip_hydrogens(self, coords, mol):
class MoleculeShim(object):
"""
Shim of a Molecule which supports #GetAtoms()
"""
def __init__(self, atoms):
self.atoms = [AtomShim(x) for x in atoms]
def GetAtoms(self):
return self.atoms
class AtomShim(object):
def __init__(self, atomic_num):
self.atomic_num = atomic_num
def GetAtomicNum(self):
return self.atomic_num
if not self.strip_hydrogens:
return coords, mol
indexes_to_keep = []
atomic_numbers = []
for index, atom in enumerate(mol.GetAtoms()):
if atom.GetAtomicNum() != 1:
indexes_to_keep.append(index)
atomic_numbers.append(atom.GetAtomicNum())
mol = MoleculeShim(atomic_numbers)
coords = coords[indexes_to_keep]
return coords, mol
# Deprecation warnings for old atomic conv featurizer name #
ATOMICCONV_DEPRECATION = "{} is deprecated and has been renamed to {} and will be removed in DeepChem 3.0."
class ComplexNeighborListFragmentAtomicCoordinates(AtomicConvFeaturizer):
def __init__(self, *args, **kwargs):
warnings.warn(
ATOMICCONV_DEPRECATION.format(
"ComplexNeighorListFragmentAtomicCoordinates",
"AtomicConvFeaturizer"), FutureWarning)
super(ComplexNeighborListFragmentAtomicCoordinates,
self).__init__(*args, **kwargs)
<file_sep># 2017 DeepCrystal Technologies - <NAME>
#
# Data loading a splitting file
#
# MIT License - have fun!!
# ===========================================================
import os
import random
from collections import OrderedDict
import deepchem as dc
from deepchem.utils import ScaffoldGenerator
from deepchem.utils.save import log
import torch
import torch.nn as nn
from torch.autograd import Variable
import numpy as np
from sklearn import preprocessing
from sklearn.decomposition import TruncatedSVD
from rdkit import Chem
from rdkit import DataStructs
from rdkit.Chem import AllChem
random.seed(2)
np.random.seed(2)
torch.manual_seed(2)
def generate_scaffold(smiles, include_chirality=False):
"""Compute the Bemis-Murcko scaffold for a SMILES string."""
mol = Chem.MolFromSmiles(smiles)
engine = ScaffoldGenerator(include_chirality=include_chirality)
scaffold = engine.get_scaffold(mol)
return scaffold
def split(dataset,
frac_train=.80,
frac_valid=.10,
frac_test=.10,
log_every_n=1000):
"""
Splits internal compounds into train/validation/test by scaffold.
"""
np.testing.assert_almost_equal(frac_train + frac_valid + frac_test, 1.)
scaffolds = {}
log("About to generate scaffolds", True)
data_len = len(dataset)
for ind, smiles in enumerate(dataset):
if ind % log_every_n == 0:
log("Generating scaffold %d/%d" % (ind, data_len), True)
scaffold = generate_scaffold(smiles)
if scaffold not in scaffolds:
scaffolds[scaffold] = [ind]
else:
scaffolds[scaffold].append(ind)
scaffolds = {key: sorted(value) for key, value in scaffolds.items()}
scaffold_sets = [
scaffold_set
for (scaffold, scaffold_set) in sorted(
scaffolds.items(), key=lambda x: (len(x[1]), x[1][0]), reverse=True)
]
train_cutoff = frac_train * len(dataset)
valid_cutoff = (frac_train + frac_valid) * len(dataset)
train_inds, valid_inds, test_inds = [], [], []
log("About to sort in scaffold sets", True)
for scaffold_set in scaffold_sets:
if len(train_inds) + len(scaffold_set) > train_cutoff:
if len(train_inds) + len(valid_inds) + len(scaffold_set) > valid_cutoff:
test_inds += scaffold_set
else:
valid_inds += scaffold_set
else:
train_inds += scaffold_set
return train_inds, valid_inds, test_inds
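# Illustrative use: train_inds, valid_inds, test_inds = split(list_of_smiles) partitions dataset
# indices greedily by Bemis-Murcko scaffold, so compounds that share a scaffold never straddle the
# train/validation/test boundary.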
def load_dataset(filename, whiten=False):
f = open(filename, 'r')
features = []
labels = []
tracer = 0
for line in f:
if tracer == 0:
tracer += 1
continue
splits = line[:-1].split(',')
features.append(splits[-1])
labels.append(float(splits[-2]))
features = np.array(features)
labels = np.array(labels, dtype='float32').reshape(-1, 1)
train_ind, val_ind, test_inds = split(features)
train_features = np.take(features, train_ind)
train_labels = np.take(labels, train_ind)
val_features = np.take(features, val_ind)
val_labels = np.take(labels, val_ind)
return train_features, train_labels, val_features, val_labels
<file_sep>"""
Tests for Pose Scoring
"""
import logging
import unittest
import numpy as np
from deepchem.dock.pose_scoring import vina_nonlinearity
from deepchem.dock.pose_scoring import vina_hydrophobic
from deepchem.dock.pose_scoring import vina_gaussian_first
from deepchem.dock.pose_scoring import vina_gaussian_second
from deepchem.dock.pose_scoring import vina_hbond
from deepchem.dock.pose_scoring import vina_repulsion
from deepchem.dock.pose_scoring import cutoff_filter
from deepchem.dock.pose_scoring import vina_energy_term
logger = logging.getLogger(__name__)
class TestPoseScoring(unittest.TestCase):
"""Does sanity checks on pose generation."""
def test_cutoff_filter(self):
N = 10
M = 5
d = np.ones((N, M))
x = np.random.rand(N, M)
cutoff_dist = 0.5
x_thres = cutoff_filter(d, x, cutoff=cutoff_dist)
assert (x_thres == np.zeros((N, M))).all()
def test_vina_nonlinearity(self):
N = 10
M = 5
c = np.random.rand(N, M)
Nrot = 5
w = 0.5
out_tensor = vina_nonlinearity(c, w, Nrot)
assert out_tensor.shape == (N, M)
assert (out_tensor == c / (1 + w * Nrot)).all()
def test_vina_repulsion(self):
N = 10
M = 5
d = np.ones((N, M))
out_tensor = vina_repulsion(d)
assert out_tensor.shape == (N, M)
# Where d is greater than zero, the repulsion is just zeros
assert (out_tensor == np.zeros_like(d)).all()
def test_vina_hydrophobic(self):
N = 10
M = 5
d = np.zeros((N, M))
out_tensor = vina_hydrophobic(d)
assert out_tensor.shape == (N, M)
# When d is 0, this should just be 1
assert (out_tensor == np.ones_like(d)).all()
def test_vina_hbond(self):
N = 10
M = 5
d = np.zeros((N, M))
out_tensor = vina_hbond(d)
assert out_tensor.shape == (N, M)
# When d == 0, the hbond interaction is 0
assert (out_tensor == np.zeros_like(d)).all()
def test_vina_gaussian(self):
N = 10
M = 5
d = np.zeros((N, M))
out_tensor = vina_gaussian_first(d)
assert out_tensor.shape == (N, M)
# The exponential returns 1 when input 0.
assert (out_tensor == np.ones_like(d)).all()
d = 3 * np.ones((N, M))
out_tensor = vina_gaussian_second(d)
assert out_tensor.shape == (N, M)
# This exponential returns 1 when input 3
assert (out_tensor == np.ones_like(d)).all()
def test_energy_term(self):
N = 10
M = 5
coords1 = np.random.rand(N, 3)
coords2 = np.random.rand(M, 3)
weights = np.ones((5,))
wrot = 1.0
Nrot = 3
energy = vina_energy_term(coords1, coords2, weights, wrot, Nrot)
assert energy > 0
<file_sep>Utilities
=========
DeepChem has a broad collection of utility functions. Many of these
maybe be of independent interest to users since they deal with some
tricky aspects of processing scientific datatypes.
Data Utilities
--------------
Array Utilities
^^^^^^^^^^^^^^^
.. autofunction:: deepchem.utils.data_utils.pad_array
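A minimal usage sketch (mirroring how :code:`pad_array` is called elsewhere in DeepChem, with the default zero fill):

.. code-block:: python

   import numpy as np
   from deepchem.utils.data_utils import pad_array

   z = np.array([6, 6, 8])   # e.g. atomic numbers of a small molecule
   padded = pad_array(z, 5)  # padded to length 5 with zeros
   print(padded.shape)       # (5,)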
Data Directory
^^^^^^^^^^^^^^^
The DeepChem data directory is where downloaded MoleculeNet datasets are stored.
.. autofunction:: deepchem.utils.data_utils.get_data_dir
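For example, the following prints the current data directory (typically a temporary directory unless the :code:`DEEPCHEM_DATA_DIR` environment variable is set):

.. code-block:: python

   import deepchem as dc

   print(dc.utils.get_data_dir())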
URL Handling
^^^^^^^^^^^^
.. autofunction:: deepchem.utils.data_utils.download_url
File Handling
^^^^^^^^^^^^^
.. autofunction:: deepchem.utils.data_utils.untargz_file
.. autofunction:: deepchem.utils.data_utils.unzip_file
.. autofunction:: deepchem.utils.data_utils.load_data
.. autofunction:: deepchem.utils.data_utils.load_sdf_files
.. autofunction:: deepchem.utils.data_utils.load_csv_files
.. autofunction:: deepchem.utils.data_utils.load_json_files
.. autofunction:: deepchem.utils.data_utils.load_pickle_files
.. autofunction:: deepchem.utils.data_utils.load_from_disk
.. autofunction:: deepchem.utils.data_utils.save_to_disk
.. autofunction:: deepchem.utils.data_utils.load_dataset_from_disk
.. autofunction:: deepchem.utils.data_utils.save_dataset_to_disk
Molecular Utilities
-------------------
.. autoclass:: deepchem.utils.conformers.ConformerGenerator
:members:
.. autoclass:: deepchem.utils.rdkit_utils.MoleculeLoadException
:members:
.. autofunction:: deepchem.utils.rdkit_utils.get_xyz_from_mol
.. autofunction:: deepchem.utils.rdkit_utils.add_hydrogens_to_mol
.. autofunction:: deepchem.utils.rdkit_utils.compute_charges
.. autofunction:: deepchem.utils.rdkit_utils.load_molecule
.. autofunction:: deepchem.utils.rdkit_utils.write_molecule
Molecular Fragment Utilities
----------------------------
It's often convenient to manipulate subsets of a molecule. The :code:`MolecularFragment` class aids in such manipulations.
.. autoclass:: deepchem.utils.fragment_utils.MolecularFragment
:members:
.. autoclass:: deepchem.utils.fragment_utils.AtomShim
:members:
.. autofunction:: deepchem.utils.fragment_utils.strip_hydrogens
.. autofunction:: deepchem.utils.fragment_utils.merge_molecular_fragments
.. autofunction:: deepchem.utils.fragment_utils.get_contact_atom_indices
.. autofunction:: deepchem.utils.fragment_utils.reduce_molecular_complex_to_contacts
Coordinate Box Utilities
------------------------
.. autoclass:: deepchem.utils.coordinate_box_utils.CoordinateBox
:members:
.. autofunction:: deepchem.utils.coordinate_box_utils.intersect_interval
.. autofunction:: deepchem.utils.coordinate_box_utils.union
.. autofunction:: deepchem.utils.coordinate_box_utils.merge_overlapping_boxes
.. autofunction:: deepchem.utils.coordinate_box_utils.get_face_boxes
Evaluation Utils
----------------
.. autoclass:: deepchem.utils.evaluate.Evaluator
:members:
.. autoclass:: deepchem.utils.evaluate.GeneratorEvaluator
:members:
.. autofunction:: deepchem.utils.evaluate.relative_difference
Genomic Utilities
-----------------
.. autofunction:: deepchem.utils.genomics_utils.seq_one_hot_encode
.. autofunction:: deepchem.utils.genomics_utils.encode_bio_sequence
.. autofunction:: deepchem.utils.sequence_utils.hhblits
.. autofunction:: deepchem.utils.sequence_utils.hhsearch
.. autofunction:: deepchem.utils.sequence_utils.MSA_to_dataset
Geometry Utilities
------------------
.. autofunction:: deepchem.utils.geometry_utils.unit_vector
.. autofunction:: deepchem.utils.geometry_utils.angle_between
.. autofunction:: deepchem.utils.geometry_utils.generate_random_unit_vector
.. autofunction:: deepchem.utils.geometry_utils.generate_random_rotation_matrix
.. autofunction:: deepchem.utils.geometry_utils.is_angle_within_cutoff
Graph Utilities
---------------
.. autofunction:: deepchem.utils.graph_utils.fourier_encode_dist
.. autofunction:: deepchem.utils.graph_utils.aggregate_mean
.. autofunction:: deepchem.utils.graph_utils.aggregate_max
.. autofunction:: deepchem.utils.graph_utils.aggregate_min
.. autofunction:: deepchem.utils.graph_utils.aggregate_std
.. autofunction:: deepchem.utils.graph_utils.aggregate_var
.. autofunction:: deepchem.utils.graph_utils.aggregate_moment
.. autofunction:: deepchem.utils.graph_utils.aggregate_sum
.. autofunction:: deepchem.utils.graph_utils.scale_identity
.. autofunction:: deepchem.utils.graph_utils.scale_amplification
.. autofunction:: deepchem.utils.graph_utils.scale_attenuation
Hash Function Utilities
-----------------------
.. autofunction:: deepchem.utils.hash_utils.hash_ecfp
.. autofunction:: deepchem.utils.hash_utils.hash_ecfp_pair
.. autofunction:: deepchem.utils.hash_utils.vectorize
Voxel Utils
-----------
.. autofunction:: deepchem.utils.voxel_utils.convert_atom_to_voxel
.. autofunction:: deepchem.utils.voxel_utils.convert_atom_pair_to_voxel
.. autofunction:: deepchem.utils.voxel_utils.voxelize
Graph Convolution Utilities
---------------------------
.. autofunction:: deepchem.utils.molecule_feature_utils.one_hot_encode
.. autofunction:: deepchem.utils.molecule_feature_utils.get_atom_type_one_hot
.. autofunction:: deepchem.utils.molecule_feature_utils.construct_hydrogen_bonding_info
.. autofunction:: deepchem.utils.molecule_feature_utils.get_atom_hydrogen_bonding_one_hot
.. autofunction:: deepchem.utils.molecule_feature_utils.get_atom_is_in_aromatic_one_hot
.. autofunction:: deepchem.utils.molecule_feature_utils.get_atom_hybridization_one_hot
.. autofunction:: deepchem.utils.molecule_feature_utils.get_atom_total_num_Hs_one_hot
.. autofunction:: deepchem.utils.molecule_feature_utils.get_atom_chirality_one_hot
.. autofunction:: deepchem.utils.molecule_feature_utils.get_atom_formal_charge
.. autofunction:: deepchem.utils.molecule_feature_utils.get_atom_partial_charge
.. autofunction:: deepchem.utils.molecule_feature_utils.get_atom_total_degree_one_hot
.. autofunction:: deepchem.utils.molecule_feature_utils.get_bond_type_one_hot
.. autofunction:: deepchem.utils.molecule_feature_utils.get_bond_is_in_same_ring_one_hot
.. autofunction:: deepchem.utils.molecule_feature_utils.get_bond_is_conjugated_one_hot
.. autofunction:: deepchem.utils.molecule_feature_utils.get_bond_stereo_one_hot
.. autofunction:: deepchem.utils.molecule_feature_utils.get_bond_graph_distance_one_hot
Grover Utilities
----------------
.. autofunction:: deepchem.utils.grover.extract_grover_attributes
Debug Utilities
---------------
Docking Utilities
-----------------
These utilities assist in file preparation and processing for molecular
docking.
.. autofunction:: deepchem.utils.docking_utils.write_vina_conf
.. autofunction:: deepchem.utils.docking_utils.write_gnina_conf
.. autofunction:: deepchem.utils.docking_utils.load_docked_ligands
.. autofunction:: deepchem.utils.docking_utils.prepare_inputs
.. autofunction:: deepchem.utils.docking_utils.read_gnina_log
Print Threshold
^^^^^^^^^^^^^^^
The printing threshold controls how many dataset elements are printed
when :code:`dc.data.Dataset` objects are converted to strings or
represented in the IPython repl.
.. autofunction:: deepchem.utils.debug_utils.get_print_threshold
.. autofunction:: deepchem.utils.debug_utils.set_print_threshold
.. autofunction:: deepchem.utils.debug_utils.get_max_print_size
.. autofunction:: deepchem.utils.debug_utils.set_max_print_size
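A short sketch of adjusting the threshold:

.. code-block:: python

   from deepchem.utils.debug_utils import get_print_threshold, set_print_threshold

   set_print_threshold(20)       # show up to 20 elements when printing a Dataset
   print(get_print_threshold())  # 20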
Fake Data Generator
-------------------
The utilities here are used to generate random sample data which can be
used for testing model architectures or other purposes.
.. autoclass:: deepchem.utils.fake_data_generator.FakeGraphGenerator
:members:
Electron Sampler
-------------------
The utilities here are used to sample electrons in a given molecule
and update it using monte carlo methods, which can be used for methods
like Variational Monte Carlo, etc.
.. autoclass:: deepchem.utils.electron_sampler.ElectronSampler
:members:
Density Functional Theory Utilities
-----------------------------------
The utilities here are used to create an object that contains information about a system's self-consistent iteration steps and other processes.
.. autoclass:: deepchem.utils.dftutils.KSCalc
:members:
.. autofunction:: deepchem.utils.dftutils.hashstr
.. autoclass:: deepchem.utils.dftutils.BaseGrid
:members:
Pytorch Utilities
-----------------
.. autofunction:: deepchem.utils.pytorch_utils.unsorted_segment_sum
.. autofunction:: deepchem.utils.pytorch_utils.segment_sum
Batch Utilities
---------------
The utilities here are used for computing features on batches of data.
They can be used inside the default_generator function.
.. autofunction:: deepchem.utils.batch_utils.batch_coulomb_matrix_features
<file_sep>import numpy as np
from rdkit import Chem
from rdkit.Chem import AllChem, rdMolAlign
from deepchem.feat.graph_data import GraphData
from deepchem.feat import MolecularFeaturizer
# Similar to the SNAP featurizer; both are taken from the Open Graph Benchmark (OGB): github.com/snap-stanford/ogb
# The difference between this and the SNAP features is the lack of masking tokens, possible_implicit_valence_list, possible_bond_dirs,
# and the presence of possible_bond_stereo_list, possible_is_conjugated_list, possible_is_in_ring_list.
allowable_features = {
'possible_atomic_num_list': list(range(1, 119)) + ['misc'], # type: ignore
'possible_chirality_list': [
'CHI_UNSPECIFIED', 'CHI_TETRAHEDRAL_CW', 'CHI_TETRAHEDRAL_CCW',
'CHI_OTHER', 'misc'
],
'possible_degree_list': [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 'misc'],
'possible_formal_charge_list': [
-5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 'misc'
],
'possible_numH_list': [0, 1, 2, 3, 4, 5, 6, 7, 8, 'misc'],
'possible_number_radical_e_list': [0, 1, 2, 3, 4, 'misc'],
'possible_hybridization_list': [
'SP', 'SP2', 'SP3', 'SP3D', 'SP3D2', 'misc'
],
'possible_is_aromatic_list': [False, True],
'possible_is_in_ring_list': [False, True],
'possible_bond_type_list': [
'SINGLE', 'DOUBLE', 'TRIPLE', 'AROMATIC', 'misc'
],
'possible_bond_stereo_list': [
'STEREONONE',
'STEREOZ',
'STEREOE',
'STEREOCIS',
'STEREOTRANS',
'STEREOANY',
],
'possible_is_conjugated_list': [False, True],
}
full_atom_feature_dims = list(
map(
len, # type: ignore
[
allowable_features['possible_atomic_num_list'],
allowable_features['possible_chirality_list'],
allowable_features['possible_degree_list'],
allowable_features['possible_formal_charge_list'],
allowable_features['possible_numH_list'],
allowable_features['possible_number_radical_e_list'],
allowable_features['possible_hybridization_list'],
allowable_features['possible_is_aromatic_list'],
allowable_features['possible_is_in_ring_list']
]))
full_bond_feature_dims = list(
map(
len, # type: ignore
[
allowable_features['possible_bond_type_list'],
allowable_features['possible_bond_stereo_list'],
allowable_features['possible_is_conjugated_list']
]))
def safe_index(feature_list, e):
"""
Return the index of element e in feature_list. If e is not present, return the last index.
Parameters
----------
feature_list : list
Feature vector
e : int
Element index to find in feature vector
"""
try:
return feature_list.index(e)
except ValueError:
return len(feature_list) - 1
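# Illustrative example: safe_index([0, 1, 2, 'misc'], 5) returns 3 (the index of the trailing
# 'misc' entry), so values outside the allowable set map to the catch-all category.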
class RDKitConformerFeaturizer(MolecularFeaturizer):
"""
A featurizer that featurizes an RDKit mol object as a GraphData object with 3D coordinates. The 3D coordinates are represented in the node_pos_features attribute of the GraphData object of shape [num_atoms * num_conformers, 3].
The ETKDGv2 algorithm is used to generate 3D coordinates for the molecule.
The RDKit source for this algorithm can be found in RDkit/Code/GraphMol/DistGeomHelpers/Embedder.cpp
The documentation can be found here:
https://rdkit.org/docs/source/rdkit.Chem.rdDistGeom.html#rdkit.Chem.rdDistGeom.ETKDGv2
This featurization requires RDKit.
Examples
--------
>>> from deepchem.feat.molecule_featurizers.conformer_featurizer import RDKitConformerFeaturizer
>>> from deepchem.feat.graph_data import BatchGraphData
>>> import numpy as np
>>> featurizer = RDKitConformerFeaturizer(num_conformers=2)
>>> molecule = "CCO"
>>> features_list = featurizer.featurize([molecule])
>>> batched_feats = BatchGraphData(np.concatenate(features_list).ravel())
>>> print(batched_feats.node_pos_features.shape)
(18, 3)
"""
def __init__(self, num_conformers: int = 1, rmsd_cutoff: float = 2):
"""
Initialize the RDKitConformerFeaturizer with the given parameters.
Parameters
----------
num_conformers : int, optional, default=1
The number of conformers to generate for each molecule.
rmsd_cutoff : float, optional, default=2
The root-mean-square deviation (RMSD) cutoff value. Conformers with an RMSD
greater than this value will be discarded.
"""
self.num_conformers = num_conformers
self.rmsd_cutoff = rmsd_cutoff
def atom_to_feature_vector(self, atom):
"""
Converts an RDKit atom object to a feature list of indices.
Parameters
----------
atom : Chem.rdchem.Atom
RDKit atom object.
Returns
-------
List[int]
List of feature indices for the given atom.
"""
atom_feature = [
safe_index(allowable_features['possible_atomic_num_list'],
atom.GetAtomicNum()),
safe_index(allowable_features['possible_chirality_list'],
str(atom.GetChiralTag())),
safe_index(allowable_features['possible_degree_list'],
atom.GetTotalDegree()),
safe_index(allowable_features['possible_formal_charge_list'],
atom.GetFormalCharge()),
safe_index(allowable_features['possible_numH_list'],
atom.GetTotalNumHs()),
safe_index(allowable_features['possible_number_radical_e_list'],
atom.GetNumRadicalElectrons()),
safe_index(allowable_features['possible_hybridization_list'],
str(atom.GetHybridization())),
allowable_features['possible_is_aromatic_list'].index(
atom.GetIsAromatic()),
allowable_features['possible_is_in_ring_list'].index(
atom.IsInRing()),
]
return atom_feature
def bond_to_feature_vector(self, bond):
"""
Converts an RDKit bond object to a feature list of indices.
Parameters
----------
bond : Chem.rdchem.Bond
RDKit bond object.
Returns
-------
List[int]
List of feature indices for the given bond.
"""
bond_feature = [
safe_index(allowable_features['possible_bond_type_list'],
str(bond.GetBondType())),
allowable_features['possible_bond_stereo_list'].index(
str(bond.GetStereo())),
allowable_features['possible_is_conjugated_list'].index(
bond.GetIsConjugated()),
]
return bond_feature
def _featurize(self, datapoint):
"""
Featurizes a molecule into a graph representation with 3D coordinates.
Parameters
----------
datapoint : RdkitMol
RDKit molecule object
Returns
-------
graph: List[GraphData]
list of GraphData objects of the molecule conformers with 3D coordinates.
"""
# add hydrogen bonds to molecule because they are not in the smiles representation
mol = Chem.AddHs(datapoint)
ps = AllChem.ETKDGv2()
ps.useRandomCoords = True
AllChem.EmbedMolecule(mol, ps)
AllChem.EmbedMultipleConfs(mol, self.num_conformers)
AllChem.MMFFOptimizeMolecule(mol)
rmsd_list = []
rdMolAlign.AlignMolConformers(mol, RMSlist=rmsd_list)
# insert 0 RMSD for first conformer
rmsd_list.insert(0, 0)
conformers = [
mol.GetConformer(i)
for i in range(self.num_conformers)
if rmsd_list[i] < self.rmsd_cutoff
]
# if conformer list is less than num_conformers, pad by repeating conformers
conf_idx = 0
while len(conformers) < self.num_conformers:
conformers.append(conformers[conf_idx])
conf_idx += 1
coordinates = [conf.GetPositions() for conf in conformers]
atom_features_list = []
for atom in mol.GetAtoms():
atom_features_list.append(self.atom_to_feature_vector(atom))
edges_list = []
edge_features_list = []
for bond in mol.GetBonds():
i = bond.GetBeginAtomIdx()
j = bond.GetEndAtomIdx()
edge_feature = self.bond_to_feature_vector(bond)
# add edges in both directions
edges_list.append((i, j))
edge_features_list.append(edge_feature)
edges_list.append((j, i))
edge_features_list.append(edge_feature)
# Graph connectivity in COO format with shape [2, num_edges]
graph_list = []
for i in range(self.num_conformers):
graph_list.append(
GraphData(node_pos_features=np.array(coordinates[i]),
node_features=np.array(atom_features_list),
edge_features=np.array(edge_features_list),
edge_index=np.array(edges_list).T))
return graph_list
<file_sep>"""BD2013 dataset loader to be used with DeepMHC."""
from __future__ import division
from __future__ import print_function
__author__ = "<NAME>"
__license__ = "MIT"
import numpy as np
import os
import logging
import deepchem as dc
DATASET_URL = "http://tools.iedb.org/static/main/binding_data_2013.zip"
FILE_NAME = "bdata.20130222.mhci.txt"
TEST_FILES = [
"2016-12-09", "2016-05-03", "2016-02-19", "2015-08-07", "2015-07-31",
"2015-07-17", "2015-06-26", "2015-06-19", "2015-05-15", "2015-02-06",
"2015-01-16", "2014-10-31", "2014-06-20", "2014-05-23", "2014-03-28",
"2014-03-21"
]
TEST_URLS = [
"http://tools.iedb.org/auto_bench/mhci/weekly/accumulated/" + str(date) +
"/predictions" for date in TEST_FILES
]
logger = logging.getLogger(__name__)
aa_charset = [
"A", "R", "N", "D", "C", "Q", "E", "G", "H", "I", "L", "K", "M", "F", "P",
"S", "T", "W", "Y", "V"
]
def to_one_hot_array(sequence):
"""Converts the sequence to one-hot-array."""
one_hot_array = list()
for letter in sequence:
one_hot_array.append([letter == i for i in aa_charset])
return np.asarray(one_hot_array, dtype=np.int32)
def featurize(sequences, pad_length=13):
"""One-hot encoding for sequences with padding."""
features = list()
for sequence in sequences:
one_hot_seq = to_one_hot_array(sequence)
num_to_pad = pad_length - len(sequence)
if num_to_pad % 2 == 0:
one_hot_seq = np.pad(
one_hot_seq, [(int(num_to_pad / 2), int(num_to_pad / 2)), (0, 0)],
mode='constant')
else:
one_hot_seq = np.pad(
one_hot_seq, [(int((num_to_pad + 1) / 2), int((num_to_pad - 1) / 2)),
(0, 0)],
mode='constant')
features.append(one_hot_seq)
features = np.asarray(features)
return features
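# Illustrative example: a 9-mer peptide padded to pad_length=13 receives two all-zero rows on each
# side, giving a (13, 20) one-hot matrix; for odd padding the extra zero row goes before the sequence.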
def load_bd2013_human(mhc_allele="HLA-A*02:01",
seq_len=9,
pad_len=13,
test_measure_type="ic50",
reload=True):
"""Loads the human specific data from the bd2013 dataset."""
bd13_tasks = ["-log(IC50)"]
data_dir = dc.utils.get_data_dir()
save_dir = os.path.join(data_dir, "bd13", mhc_allele, str(seq_len))
train_dir = os.path.join(save_dir, "train_dir")
test_dir = os.path.join(save_dir, "test_dir")
# TODO (VIGS25): Account for the reload option
# Downloading train files
train_file = os.path.join(data_dir, "binding_data_2013.zip")
if not os.path.exists(train_file):
logger.info("Downloading Binding data...")
dc.utils.download_url(url=DATASET_URL, dest_dir=data_dir)
if os.path.exists(train_dir):
logger.info("Directory for training data already exists")
else:
logger.info("Unzipping full dataset...")
dc.utils.unzip_file(file=train_file, dest_dir=data_dir)
# Parsing training data
train_labels = list()
train_sequences = list()
with open(os.path.join(data_dir, FILE_NAME), "r") as f:
for line in f.readlines():
elements = line.strip().split("\t")
# Keep only human sequences that belong to the specified MHC allele and have the given seq_len
if elements[0] == "human" and elements[1] == mhc_allele and int(
elements[2]) == seq_len:
train_sequences.append(elements[3])
train_labels.append(float(elements[-1]))
# Test Files loading
test_labels = list()
test_sequences = list()
test_check_file = os.path.join(data_dir, TEST_FILES[0] + '_predictions.tsv')
if not os.path.exists(test_check_file):
for index, filename in enumerate(TEST_FILES):
test_url = TEST_URLS[index]
test_filename = filename + '_predictions.tsv'
dc.utils.download_url(url=test_url, dest_dir=data_dir, name=test_filename)
for filename in TEST_FILES:
test_filename = os.path.join(data_dir, filename + '_predictions.tsv')
with open(test_filename, 'r') as f:
for line in f.readlines():
elements = line.strip().split("\t")
if len(elements) == 1:
continue
if elements[2] == mhc_allele and int(
elements[3]) == seq_len and elements[4] == test_measure_type:
test_sequences.append(elements[5])
test_labels.append(float(elements[6]))
# One Hot Featurization
logger.info("Featurizing training data...")
train_features = featurize(train_sequences, pad_length=pad_len)
train_labels = np.array(train_labels).astype(np.float32)
train_labels = np.expand_dims(train_labels, axis=1)
logger.info("Featurizing test data...")
test_features = featurize(test_sequences, pad_length=pad_len)
test_labels = np.array(test_labels).astype(np.float32)
test_labels = np.expand_dims(test_labels, axis=1)
train_dataset = dc.data.DiskDataset.from_numpy(train_features, train_labels)
test_dataset = dc.data.DiskDataset.from_numpy(test_features, test_labels)
train_dataset.move(new_data_dir=train_dir)
test_dataset.move(new_data_dir=test_dir)
logger.info("Featurization complete.")
transformers = []
for transformer in transformers:
train_dataset = transformer.transform(train_dataset)
test_dataset = transformer.transform(test_dataset)
return bd13_tasks, (train_dataset, None, test_dataset), transformers
<file_sep>"""
Code for processing datasets using scikit-learn.
"""
import inspect
import logging
from typing import List, Optional
import numpy as np
from sklearn.base import BaseEstimator
from deepchem.models import Model
from deepchem.data import Dataset
from deepchem.trans import Transformer
from deepchem.utils.data_utils import load_from_disk, save_to_disk
from deepchem.utils.typing import OneOrMany
logger = logging.getLogger(__name__)
class SklearnModel(Model):
"""Wrapper class that wraps scikit-learn models as DeepChem models.
When you're working with scikit-learn and DeepChem, at times it can
be useful to wrap a scikit-learn model as a DeepChem model. The
reason for this might be that you want to do an apples-to-apples
comparison of a scikit-learn model to another DeepChem model, or
perhaps you want to use the hyperparameter tuning capabilities in
`dc.hyper`. The `SklearnModel` class provides a wrapper around scikit-learn
models that allows scikit-learn models to be trained on `Dataset` objects
and evaluated with the same metrics as other DeepChem models.
Example
------
>>> import deepchem as dc
>>> import numpy as np
>>> from sklearn.linear_model import LinearRegression
>>> # Generating a random data and creating a dataset
>>> X, y = np.random.randn(5, 1), np.random.randn(5)
>>> dataset = dc.data.NumpyDataset(X, y)
>>> # Wrapping a Sklearn Linear Regression model using DeepChem models API
>>> sklearn_model = LinearRegression()
>>> dc_model = dc.models.SklearnModel(sklearn_model)
>>> dc_model.fit(dataset) # fitting dataset
Notes
-----
All `SklearnModels` perform learning solely in memory. This means that it
may not be possible to train `SklearnModel` on large `Dataset`s.
"""
def __init__(self,
model: BaseEstimator,
model_dir: Optional[str] = None,
**kwargs):
"""
Parameters
----------
model: BaseEstimator
The model instance which inherits a scikit-learn `BaseEstimator` Class.
model_dir: str, optional (default None)
If specified the model will be stored in this directory. Else, a
temporary directory will be used.
model_instance: BaseEstimator (DEPRECATED)
The model instance which inherits a scikit-learn `BaseEstimator` Class.
kwargs: dict
kwargs['use_weights'] is a bool which determines if we pass weights into
self.model.fit().
"""
if 'model_instance' in kwargs:
model_instance = kwargs['model_instance']
if model is not None:
raise ValueError(
"Can not use both model and model_instance argument at the same time."
)
logger.warning(
"model_instance argument is deprecated and will be removed in a future version of DeepChem."
"Use model argument instead.")
model = model_instance
super(SklearnModel, self).__init__(model, model_dir, **kwargs)
if 'use_weights' in kwargs:
self.use_weights = kwargs['use_weights']
else:
self.use_weights = True
if self.use_weights and self.model is not None:
# model is None when reloading a model
if 'sample_weight' not in inspect.getfullargspec(
self.model.fit).args:
self.use_weights = False
logger.info("The model does not support training with weights."
"Hence, not using weight of datapoint for training")
def fit(self, dataset: Dataset) -> None:
"""Fits scikit-learn model to data.
Parameters
----------
dataset: Dataset
The `Dataset` to train this model on.
"""
X = dataset.X
y = np.squeeze(dataset.y)
w = np.squeeze(dataset.w)
# Some scikit-learn models don't use weights.
if self.use_weights:
self.model.fit(X, y, w)
return
self.model.fit(X, y)
def predict_on_batch(self, X: np.typing.ArrayLike) -> np.ndarray:
"""Makes predictions on batch of data.
Parameters
----------
X: np.ndarray
A numpy array of features.
Returns
-------
np.ndarray
The value is a return value of `predict_proba` or `predict` method
of the scikit-learn model. If the scikit-learn model has both methods,
the value is always a return value of `predict_proba`.
"""
try:
return self.model.predict_proba(X)
except AttributeError:
return self.model.predict(X)
def predict(self,
X: Dataset,
transformers: List[Transformer] = []) -> OneOrMany[np.ndarray]:
"""Makes predictions on dataset.
Parameters
----------
dataset: Dataset
Dataset to make prediction on.
transformers: List[Transformer]
Transformers that the input data has been transformed by. The output
is passed through these transformers to undo the transformations.
"""
return super(SklearnModel, self).predict(X, transformers)
def save(self):
"""Saves scikit-learn model to disk using joblib."""
save_to_disk(self.model, self.get_model_filename(self.model_dir))
def reload(self):
"""Loads scikit-learn model from joblib file on disk."""
self.model = load_from_disk(self.get_model_filename(self.model_dir))
<file_sep>import logging
from typing import List
import numpy as np
import scipy
from deepchem.feat.base_classes import Featurizer
from typing import Any, Iterable
logger = logging.getLogger(__name__)
CHARSET = [
'A', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'K', 'L', 'M', 'N', 'P', 'Q', 'R',
'S', 'T', 'V', 'W', 'Y', 'X', 'Z', 'B', 'U', 'O'
]
class SparseMatrixOneHotFeaturizer(Featurizer):
"""Encodes any arbitrary string as a one-hot array.
This featurizer uses the sklearn OneHotEncoder to create
sparse matrix representation of a one-hot array of any string.
It is expected to be used in large datasets that produces memory overload
using standard featurizer such as OneHotFeaturizer. For example: SwissprotDataset
Examples
--------
>>> import deepchem as dc
>>> featurizer = dc.feat.SparseMatrixOneHotFeaturizer()
>>> sequence = "MMMQLA"
>>> encodings = featurizer.featurize([sequence])
>>> encodings[0].shape
(6, 25)
"""
def __init__(self, charset: List[str] = CHARSET):
"""Initialize featurizer.
Parameters
----------
charset: List[str] (default code)
A list of strings, where each string is length 1 and unique.
"""
if len(charset) != len(set(charset)):
raise ValueError("All values in charset must be unique.")
self.charset = charset
from sklearn.preprocessing import OneHotEncoder
cat = np.array(self.charset).reshape(1, len(self.charset))
self.ohe = OneHotEncoder(categories=list(cat), handle_unknown='ignore')
def featurize(self,
datapoints: Iterable[Any],
log_every_n: int = 1000,
**kwargs) -> np.ndarray:
"""Featurize strings.
Parameters
----------
datapoints: list
A list of either strings (str or numpy.str_)
log_every_n: int, optional (default 1000)
How many elements are featurized every time a featurization is logged.
"""
# Featurize data using featurize() in parent class
return Featurizer.featurize(self, datapoints, log_every_n)
def _featurize(self, datapoint: Any, **kwargs):
""" Use parent method of base clase Featurizer.
Parameters
----------
datapoint : list of string
string to be converted to a sparse one hot matrix.
Returns
-------
scipy sparse matrix
A scipy sparse matrix of the one hot representation of the given string.
"""
# Featurize str data
if isinstance(datapoint, (str, np.str_)):
sequence = np.array(list(datapoint)).reshape(-1, 1)
sparse_mat = self.ohe.fit_transform(sequence)
return sparse_mat
else:
raise ValueError("Datapoint is not a string")
def untransform(self, one_hot_vectors: scipy.sparse.base.spmatrix) -> str:
"""Convert from one hot representation back to original string
Parameters
----------
one_hot_vectors: np.ndarray
An array of one hot encoded features.
Returns
-------
str
Original string for an one hot encoded array.
"""
string = ""
invers_trans = self.ohe.inverse_transform(one_hot_vectors)
for one_hot in invers_trans:
string += one_hot[0]
return string
<file_sep># UV Examples
The UV dataset is an in-house dataset from Merck that was first introduced in the following paper:
Ramsundar, Bharath, et al. "Is multitask deep learning practical for pharma?." Journal of chemical information and modeling 57.8 (2017): 2068-2076.
The UV dataset tests 10,000 of Merck's internal compounds on
190 absorption wavelengths between 210 and 400 nm. Unlike
most of the other datasets featured in MoleculeNet, the UV
collection does not have structures for the compounds tested
since they were proprietary Merck compounds. However, the
collection does feature pre-computed descriptors for these
compounds.
Note that the original train/valid/test split from the source
data was preserved here, so this function doesn't allow for
alternate modes of splitting. Similarly, since the source data
came pre-featurized, it is not possible to apply alternative
featurizations.
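A minimal loading sketch (assuming the MoleculeNet loader `dc.molnet.load_uv`, which returns the pre-featurized collection with its original split):

```python
import deepchem as dc

# 190 absorption-wavelength tasks with the fixed train/valid/test split described above
tasks, (train, valid, test), transformers = dc.molnet.load_uv()
print(len(tasks))
```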
<file_sep>"""
Contains class for gaussian process hyperparameter optimizations.
"""
import os
import logging
import tempfile
from typing import Dict, List, Optional, Tuple, Union, Any, Callable
from deepchem.data import Dataset
from deepchem.trans import Transformer
from deepchem.models import Model
from deepchem.metrics import Metric
from deepchem.hyper.base_classes import HyperparamOpt
from deepchem.hyper.base_classes import _convert_hyperparam_dict_to_filename
logger = logging.getLogger(__name__)
def compute_parameter_range(
params_dict: Dict,
search_range: Union[int, float,
Dict]) -> Dict[str, Tuple[str, List[float]]]:
"""Convenience Function to compute parameter search space.
Parameters
----------
params_dict: Dict
Dictionary mapping strings to Ints/Floats. An explicit list of
parameters is computed with `search_range`. The optimization range
computed is specified in the documentation for `search_range`
below.
search_range: int/float/Dict (default 4)
The `search_range` specifies the range of parameter values to
search for. If `search_range` is an int/float, it is used as the
global search range for parameters. This creates a search
problem on the following space:
optimization on [initial value / search_range,
initial value * search_range]
If `search_range` is a dict, it must contain the same keys as
for `params_dict`. In this case, `search_range` specifies a
per-parameter search range. This is useful in case some
parameters have a larger natural range than others. For a given
hyperparameter `hp` this would create the following search
range:
optimization on hp on [initial value[hp] / search_range[hp],
initial value[hp] * search_range[hp]]
Returns
-------
param_range: Dict
Dictionary mapping hyperparameter names to tuples. Each tuple is
of form `(value_type, value_range)` where `value_type` is a string
that is either "int" or "cont" and `value_range` is a list of two
elements of the form `[low, hi]`. This format is expected by
pyGPGO which `GaussianProcessHyperparamOpt` uses to perform
optimization.
"""
# Range of optimization
param_range = {}
if isinstance(search_range, dict):
if sorted(params_dict.keys()) != sorted(search_range.keys()):
raise ValueError(
"If search_range is provided as a dictionary, it must have the same keys as params_dict."
)
elif (not isinstance(search_range, int)) and (not isinstance(
search_range, float)):
raise ValueError("search_range must be a dict or int or float.")
for hp, value in params_dict.items():
if isinstance(search_range, dict):
hp_search_range = search_range[hp]
else:
# We know from guard above that this is an int/float
hp_search_range = search_range
if isinstance(value, int):
value_range = [value // hp_search_range, value * hp_search_range]
param_range[hp] = ("int", value_range)
elif isinstance(value, float):
value_range = [value / hp_search_range, value * hp_search_range]
param_range[hp] = ("cont", value_range)
return param_range
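# Illustrative sketch (comments only, not executed): for
#   params_dict = {"n_estimators": 100, "learning_rate": 0.001}
# and search_range=4, compute_parameter_range returns
#   {"n_estimators": ("int", [25, 400]),
#    "learning_rate": ("cont", [0.00025, 0.004])}
# i.e. the (value_type, [low, hi]) pairs expected by pyGPGO.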
class GaussianProcessHyperparamOpt(HyperparamOpt):
"""
Gaussian Process Global Optimization(GPGO)
This class uses Gaussian Process optimization to select
hyperparameters. Underneath the hood it uses pyGPGO to optimize
models. If you don't have pyGPGO installed, you won't be able to use
this class.
Note that `params_dict` has a different semantics than for
`GridHyperparamOpt`. `param_dict[hp]` must be an int/float and is
used as the center of a search range.
Examples
--------
This example shows the type of constructor function expected.
>>> import deepchem as dc
>>> optimizer = dc.hyper.GaussianProcessHyperparamOpt(lambda **p: dc.models.GraphConvModel(n_tasks=1, **p))
Here's a more sophisticated example that shows how to optimize only
some parameters of a model. In this case, we have some parameters we
want to optimize, and others which we don't. To handle this type of
search, we create a `model_builder` which hard codes some arguments
(in this case, `n_tasks` and `n_features` which are properties of a
dataset and not hyperparameters to search over.)
>>> import numpy as np
>>> from sklearn.ensemble import RandomForestRegressor as RF
>>> def model_builder(**model_params):
... n_estimators = model_params['n_estimators']
... min_samples_split = model_params['min_samples_split']
... rf_model = RF(n_estimators=n_estimators, min_samples_split=min_samples_split)
... return dc.models.SklearnModel(rf_model)
>>> optimizer = dc.hyper.GaussianProcessHyperparamOpt(model_builder)
>>> params_dict = {"n_estimators":100, "min_samples_split":2}
>>> train_dataset = dc.data.NumpyDataset(X=np.random.rand(50, 5),
... y=np.random.rand(50, 1))
>>> valid_dataset = dc.data.NumpyDataset(X=np.random.rand(20, 5),
... y=np.random.rand(20, 1))
>>> metric = dc.metrics.Metric(dc.metrics.pearson_r2_score)
>> best_model, best_hyperparams, all_results =\
optimizer.hyperparam_search(params_dict, train_dataset, valid_dataset, metric, max_iter=2)
>> type(best_hyperparams)
<class 'dict'>
Parameters
----------
model_builder: constructor function.
This parameter must be a constructor function which returns an
object which is an instance of `dc.models.Model`. This function
must accept two arguments, `model_params` of type `dict` and
`model_dir`, a string specifying a path to a model directory.
max_iter: int, default 20
number of optimization trials
search_range: int/float/Dict (default 4)
The `search_range` specifies the range of parameter values to
search for. If `search_range` is an int/float, it is used as the
global search range for parameters. This creates a search
problem on the following space:
optimization on [initial value / search_range,
initial value * search_range]
If `search_range` is a dict, it must contain the same keys as
for `params_dict`. In this case, `search_range` specifies a
per-parameter search range. This is useful in case some
parameters have a larger natural range than others. For a given
hyperparameter `hp` this would create the following search
range:
optimization on hp on [initial value[hp] / search_range[hp],
initial value[hp] * search_range[hp]]
Notes
-----
This class requires pyGPGO to be installed.
"""
def __init__(self,
model_builder: Callable[..., Model],
max_iter: int = 20,
search_range: Union[int, float, Dict] = 4):
super(GaussianProcessHyperparamOpt,
self).__init__(model_builder=model_builder)
self.max_iter = max_iter
self.search_range = search_range
def hyperparam_search(
self,
params_dict: Dict,
train_dataset: Dataset,
valid_dataset: Dataset,
metric: Metric,
output_transformers: List[Transformer] = [],
nb_epoch: int = 10,
use_max: bool = True,
logfile: str = 'results.txt',
logdir: Optional[str] = None,
**kwargs) -> Tuple[Model, Dict[str, Any], Dict[str, Any]]:
"""Perform hyperparameter search using a gaussian process.
Parameters
----------
params_dict: Dict
Maps hyperparameter names (strings) to possible parameter
values. The semantics of this list are different than for
`GridHyperparamOpt`. `params_dict[hp]` must map to an int/float,
which is used as the center of a search with radius
`search_range` since pyGPGO can only optimize numerical
hyperparameters.
train_dataset: Dataset
dataset used for training
valid_dataset: Dataset
dataset used for validation(optimization on valid scores)
metric: Metric
metric used for evaluation
output_transformers: list[Transformer]
Transformers for evaluation. This argument is needed since
`train_dataset` and `valid_dataset` may have been transformed
for learning and need the transform to be inverted before
the metric can be evaluated on a model.
nb_epoch: int, (default 10)
Specifies the number of training epochs during each iteration of optimization.
Not used by all model types.
use_max: bool, (default True)
Specifies whether to maximize or minimize `metric`.
maximization(True) or minimization(False)
logdir: str, optional, (default None)
The directory in which to store created models. If not set, will
use a temporary directory.
logfile: str, optional (default `results.txt`)
Name of logfile to write results to. If specified, this must
be a valid file name. If not specified, results of hyperparameter
search will be written to `logdir/results.txt`.
Returns
-------
Tuple[`best_model`, `best_hyperparams`, `all_scores`]
`(best_model, best_hyperparams, all_scores)` where `best_model` is
an instance of `dc.model.Model`, `best_hyperparams` is a
dictionary of parameters, and `all_scores` is a dictionary mapping
string representations of hyperparameter sets to validation
scores.
"""
try:
from pyGPGO.covfunc import matern32
from pyGPGO.acquisition import Acquisition
from pyGPGO.surrogates.GaussianProcess import GaussianProcess
from pyGPGO.GPGO import GPGO
except ModuleNotFoundError:
raise ImportError("This class requires pyGPGO to be installed.")
# Specify logfile
log_file = None
if logfile:
log_file = logfile
elif logdir is not None:
# Make logdir if it doesn't exist.
if not os.path.exists(logdir):
os.makedirs(logdir, exist_ok=True)
log_file = os.path.join(logdir, "results.txt")
# setup range
param_range = compute_parameter_range(params_dict, self.search_range)
param_keys = list(param_range.keys())
# Stores all results
all_results: Dict[Any, Any] = {}
# Store all model references so we don't have to reload
all_models = {}
# Stores all model locations
model_locations = {}
def _optimize(nb_epoch, **placeholders):
"""Private Optimizing function
Take in hyper parameter values and number of training epochs.
Return valid set performances.
Parameters
----------
nb_epoch: int
Number of epochs to train model being optimized during each iteration.
Not used by all model types.
placeholders: keyword arguments
Should be various hyperparameters as specified in `param_keys` above.
Returns
-------
valid_scores: float
valid set performances
"""
hyper_parameters = {}
for hp in param_keys:
if param_range[hp][0] == "int":
# param values are always float in BO, so this line converts float to int
# see : https://github.com/josejimenezluna/pyGPGO/issues/10
hyper_parameters[hp] = int(placeholders[hp])
else:
hyper_parameters[hp] = float(placeholders[hp])
logger.info("Running hyperparameter set: %s" %
str(hyper_parameters))
if log_file:
with open(log_file, 'w+') as f:
# Record hyperparameters
f.write("Parameters: %s" % str(hyper_parameters))
f.write('\n')
hp_str = _convert_hyperparam_dict_to_filename(hyper_parameters)
if hp_str in all_results:
# We have already evaluated the model for these hyperparameters.
if use_max:
return all_results[hp_str]
else:
return -all_results[hp_str]
if logdir is not None:
filename = "model%s" % hp_str
model_dir = os.path.join(logdir, filename)
logger.info("model_dir is %s" % model_dir)
try:
os.makedirs(model_dir)
except OSError:
if not os.path.isdir(model_dir):
logger.info(
"Error creating model_dir, using tempfile directory"
)
model_dir = tempfile.mkdtemp()
else:
model_dir = tempfile.mkdtemp()
# Add it on to the information needed for the constructor
hyper_parameters["model_dir"] = model_dir
model = self.model_builder(**hyper_parameters)
try:
model.fit(train_dataset, nb_epoch=nb_epoch)
# Not all models have nb_epoch
except TypeError:
model.fit(train_dataset)
try:
model.save()
# Some models autosave
except NotImplementedError:
pass
multitask_scores = model.evaluate(valid_dataset, [metric],
output_transformers)
score = multitask_scores[metric.name]
if log_file:
with open(log_file, 'a') as f:
# Record performances
f.write("Score: %s" % str(score))
f.write('\n')
# Store all results
all_results[hp_str] = score
# Store reference to model
all_models[hp_str] = model
model_locations[hp_str] = model_dir
# GPGO maximize performance by default
# set performance to its negative value for minimization
if use_max:
return score
else:
return -score
# Demarcating internal function for readability
def optimizing_function(**placeholders):
"""Wrapper function
Take in hyper parameter values.
Calls a private optimize function (_optimize) with number of epochs.
Returns valid set performances.
Parameters
----------
placeholders: keyword arguments
Should be various hyperparameters as specified in `param_keys` above.
Returns
-------
valid_scores: float
valid set performances
"""
return _optimize(nb_epoch=nb_epoch, **placeholders)
# execute GPGO
cov = matern32()
gp = GaussianProcess(cov)
acq = Acquisition(mode='ExpectedImprovement')
gpgo = GPGO(gp, acq, optimizing_function, param_range)
logger.info("Max number of iteration: %i" % self.max_iter)
gpgo.run(max_iter=self.max_iter)
hp_opt, valid_performance_opt = gpgo.getResult()
hyper_parameters = {}
for hp in param_keys:
if param_range[hp][0] == "int":
hyper_parameters[hp] = int(hp_opt[hp])
else:
# FIXME: Incompatible types in assignment
hyper_parameters[hp] = float(hp_opt[hp]) # type: ignore
hp_str = _convert_hyperparam_dict_to_filename(hyper_parameters)
# Let's fetch the model with the best parameters
best_model = all_models[hp_str]
# Compare best model to default hyperparameters
if log_file:
with open(log_file, 'a') as f:
# Record hyperparameters
f.write("params_dict:")
f.write(str(params_dict))
f.write('\n')
# Return the best model, its hyperparameters, and all recorded results
return best_model, hyper_parameters, all_results
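# Illustrative sketch (comments only): `search_range` can also be a per-parameter
# dict with the same keys as `params_dict`, e.g.
#   optimizer = GaussianProcessHyperparamOpt(model_builder, max_iter=10,
#                                            search_range={"n_estimators": 2,
#                                                          "min_samples_split": 4})
#   best_model, best_hyperparams, all_results = optimizer.hyperparam_search(
#       {"n_estimators": 100, "min_samples_split": 2},
#       train_dataset, valid_dataset, metric)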
<file_sep>import pytest
from deepchem.models.tests.test_graph_models import get_dataset
import deepchem as dc
import numpy as np
try:
import jax
import jax.numpy as jnp
from jax import random
import haiku as hk
import optax
from deepchem.models import JaxModel
has_haiku_and_optax = True
except:
has_haiku_and_optax = False
@pytest.mark.jax
def test_pure_jax_model():
"""
Here we train a fully NN model made purely in Jax.
The model is taken from Jax Tutorial https://jax.readthedocs.io/en/latest/notebooks/neural_network_with_tfds_data.html
"""
n_data_points = 50
n_features = 1
np.random.seed(1234)
X = np.random.rand(n_data_points, n_features)
y = X * X + X + 1
dataset = dc.data.NumpyDataset(X, y)
# Initialize the weights with random values
def random_layer_params(m, n, key, scale=1e-2):
w_key, b_key = random.split(key)
return scale * random.normal(w_key, (m, n)), scale * random.normal(
b_key, (n,))
def init_network_params(sizes, key):
keys = random.split(key, len(sizes))
return [
random_layer_params(m, n, k)
for m, n, k in zip(sizes[:-1], sizes[1:], keys)
]
layer_sizes = [1, 256, 128, 1]
params = init_network_params(layer_sizes, random.PRNGKey(0))
# Forward function which takes the params
def forward_fn(params, rng, x):
for i, weights in enumerate(params[:-1]):
w, b = weights
x = jnp.dot(x, w) + b
x = jax.nn.relu(x)
final_w, final_b = params[-1]
output = jnp.dot(x, final_w) + final_b
return output
def rms_loss(pred, tar, w):
return jnp.mean(optax.l2_loss(pred, tar))
# Loss Function
criterion = rms_loss
# JaxModel Working
j_m = JaxModel(forward_fn,
params,
criterion,
batch_size=100,
learning_rate=0.001,
log_frequency=2)
j_m.fit(dataset, nb_epochs=1000)
metric = dc.metrics.Metric(dc.metrics.mean_absolute_error,
mode="regression")
scores = j_m.evaluate(dataset, [metric])
assert scores[metric.name] < 0.5
@pytest.mark.jax
def test_jax_model_for_regression():
tasks, dataset, transformers, metric = get_dataset('regression',
featurizer='ECFP')
# sample network
def forward_model(x):
net = hk.nets.MLP([512, 256, 128, 2])
return net(x)
def rms_loss(pred, tar, w):
return jnp.mean(optax.l2_loss(pred, tar))
# Model Initialization
params_init, forward_fn = hk.transform(forward_model)
rng = jax.random.PRNGKey(500)
inputs, _, _, _ = next(iter(dataset.iterbatches(batch_size=256)))
modified_inputs = jnp.array(
[x.astype(np.float32) if x.dtype == np.float64 else x for x in inputs])
params = params_init(rng, modified_inputs)
# Loss Function
criterion = rms_loss
# JaxModel Working
j_m = JaxModel(forward_fn,
params,
criterion,
batch_size=256,
learning_rate=0.001,
log_frequency=2)
_ = j_m.fit(dataset, nb_epochs=25, deterministic=True)
scores = j_m.evaluate(dataset, [metric])
assert scores[metric.name] < 0.5
@pytest.mark.jax
def test_jax_model_for_classification():
tasks, dataset, transformers, metric = get_dataset('classification',
featurizer='ECFP')
# sample network
class Encoder(hk.Module):
def __init__(self, output_size: int = 2):
super().__init__()
self._network = hk.nets.MLP([512, 256, 128, output_size])
def __call__(self, x: jnp.ndarray):
x = self._network(x)
return x, jax.nn.softmax(x)
def bce_loss(pred, tar, w):
tar = jnp.array(
[x.astype(np.float32) if x.dtype != np.float32 else x for x in tar])
return jnp.mean(optax.softmax_cross_entropy(pred[0], tar))
# Model Initialization
params_init, forward_fn = hk.transform(lambda x: Encoder()(x)) # noqa
rng = jax.random.PRNGKey(500)
inputs, _, _, _ = next(iter(dataset.iterbatches(batch_size=256)))
modified_inputs = jnp.array(
[x.astype(np.float32) if x.dtype == np.float64 else x for x in inputs])
params = params_init(rng, modified_inputs)
# Loss Function
criterion = bce_loss
# JaxModel Working
j_m = JaxModel(forward_fn,
params,
criterion,
output_types=['loss', 'prediction'],
batch_size=256,
learning_rate=0.001,
log_frequency=2)
_ = j_m.fit(dataset, nb_epochs=25, deterministic=True)
scores = j_m.evaluate(dataset, [metric])
assert scores[metric.name] > 0.8
@pytest.mark.jax
def test_overfit_subclass_model():
"""Test fitting a JaxModel defined by subclassing Module."""
n_data_points = 10
n_features = 2
np.random.seed(1234)
X = np.random.rand(n_data_points, n_features)
y = (X[:, 0] > X[:, 1]).astype(np.float32)
dataset = dc.data.NumpyDataset(X, np.expand_dims(y, axis=1))
class Encoder(hk.Module):
def __init__(self, output_size: int = 1):
super().__init__()
self._network = hk.nets.MLP([512, 256, 128, output_size])
def __call__(self, x: jnp.ndarray):
x = self._network(x)
return x, jax.nn.sigmoid(x)
# Model Initialization
params_init, forward_fn = hk.transform(lambda x: Encoder()(x)) # noqa
rng = jax.random.PRNGKey(500)
inputs, _, _, _ = next(iter(dataset.iterbatches(batch_size=100)))
modified_inputs = jnp.array(
[x.astype(np.float32) if x.dtype == np.float64 else x for x in inputs])
params = params_init(rng, modified_inputs)
# Loss Function
criterion = lambda pred, tar, w: jnp.mean( # noqa: E731
optax.sigmoid_binary_cross_entropy(pred[0], tar)) # noqa
# JaxModel Working
j_m = JaxModel(forward_fn,
params,
criterion,
output_types=['loss', 'prediction'],
batch_size=100,
learning_rate=0.001,
log_frequency=2)
j_m.fit(dataset, nb_epochs=1000)
prediction = np.squeeze(j_m.predict_on_batch(X))
assert np.array_equal(y, np.round(prediction))
metric = dc.metrics.Metric(dc.metrics.roc_auc_score)
scores = j_m.evaluate(dataset, [metric])
assert scores[metric.name] > 0.9
@pytest.mark.jax
def test_overfit_sequential_model():
"""Test fitting a JaxModel defined by subclassing Module."""
n_data_points = 10
n_features = 1
np.random.seed(1234)
X = np.random.rand(n_data_points, n_features)
y = X * X + X + 1
dataset = dc.data.NumpyDataset(X, y)
def forward_fn(x):
mlp = hk.Sequential([
hk.Linear(300),
jax.nn.relu,
hk.Linear(100),
jax.nn.relu,
hk.Linear(1),
])
return mlp(x)
def rms_loss(pred, tar, w):
return jnp.mean(optax.l2_loss(pred, tar))
# Model Initialization
params_init, forward_fn = hk.transform(forward_fn) # noqa
rng = jax.random.PRNGKey(500)
inputs, _, _, _ = next(iter(dataset.iterbatches(batch_size=100)))
modified_inputs = jnp.array(
[x.astype(np.float32) if x.dtype == np.float64 else x for x in inputs])
params = params_init(rng, modified_inputs)
# Loss Function
criterion = rms_loss
# JaxModel Working
j_m = JaxModel(forward_fn,
params,
criterion,
batch_size=100,
learning_rate=0.001,
log_frequency=2)
j_m.fit(dataset, nb_epochs=1000)
metric = dc.metrics.Metric(dc.metrics.mean_absolute_error,
mode="regression")
scores = j_m.evaluate(dataset, [metric])
assert scores[metric.name] < 0.5
@pytest.mark.jax
def test_fit_use_all_losses():
"""Test fitting a TorchModel defined by subclassing Module."""
n_data_points = 10
n_features = 2
np.random.seed(1234)
X = np.random.rand(n_data_points, n_features)
y = (X[:, 0] > X[:, 1]).astype(np.float32)
dataset = dc.data.NumpyDataset(X, np.expand_dims(y, axis=1))
class Encoder(hk.Module):
def __init__(self, output_size: int = 1):
super().__init__()
self._network = hk.nets.MLP([512, 256, 128, output_size])
def __call__(self, x: jnp.ndarray):
x = self._network(x)
return x, jax.nn.sigmoid(x)
def f(x):
net = Encoder(1)
return net(x)
# Model Initialization
model = hk.transform(f)
rng = jax.random.PRNGKey(500)
inputs, _, _, _ = next(iter(dataset.iterbatches(batch_size=100)))
modified_inputs = jnp.array(
[x.astype(np.float32) if x.dtype == np.float64 else x for x in inputs])
params = model.init(rng, modified_inputs)
# Loss Function
criterion = lambda pred, tar, w: jnp.mean( # noqa: E731
optax.sigmoid_binary_cross_entropy(pred[0], tar)) # noqa
# JaxModel Working
j_m = JaxModel(model.apply,
params,
criterion,
output_types=['loss', 'prediction'],
learning_rate=0.005,
log_frequency=10)
losses = []
j_m.fit(dataset, nb_epochs=1000, all_losses=losses)
# Each epoch is a single step for this model
assert len(losses) == 100
assert np.count_nonzero(np.array(losses)) == 100
# @pytest.mark.jax
# @pytest.mark.slow
# def test_uncertainty():
# """Test estimating uncertainty a TorchModel."""
# n_samples = 30
# n_features = 1
# noise = 0.1
# X = np.random.rand(n_samples, n_features)
# y = (10 * X + np.random.normal(scale=noise, size=(n_samples, n_features)))
# dataset = dc.data.NumpyDataset(X, y)
# class Net(hk.Module):
# def __init__(self, output_size: int = 1):
# super().__init__()
# self._network1 = hk.Sequential([hk.Linear(200), jax.nn.relu])
# self._network2 = hk.Sequential([hk.Linear(200), jax.nn.relu])
# self.output = hk.Linear(output_size)
# self.log_var = hk.Linear(output_size)
# def __call__(self, x):
# x = self._network1(x)
# x = hk.dropout(hk.next_rng_key(), 0.1, x)
# x = self._network2(x)
# x = hk.dropout(hk.next_rng_key(), 0.1, x)
# output = self.output(x)
# log_var = self.log_var(x)
# var = jnp.exp(log_var)
# return output, var, output, log_var
# def f(x):
# net = Net(1)
# return net(x)
# def loss(outputs, labels, weights):
# diff = labels[0] - outputs[0]
# log_var = outputs[1]
# var = jnp.exp(log_var)
# return jnp.mean(diff * diff / var + log_var)
# class UncertaintyModel(JaxModel):
# def default_generator(self,
# dataset,
# epochs=1,
# mode='fit',
# deterministic=True,
# pad_batches=True):
# for epoch in range(epochs):
# for (X_b, y_b, w_b, ids_b) in dataset.iterbatches(
# batch_size=self.batch_size,
# deterministic=deterministic,
# pad_batches=pad_batches):
# yield ([X_b], [y_b], [w_b])
# jm_model = hk.transform(f)
# rng = jax.random.PRNGKey(500)
# inputs, _, _, _ = next(iter(dataset.iterbatches(batch_size=100)))
# modified_inputs = jnp.array(
# [x.astype(np.float32) if x.dtype == np.float64 else x for x in inputs])
# params = jm_model.init(rng, modified_inputs)
# model = UncertaintyModel(
# jm_model.apply,
# params,
# loss,
# output_types=['prediction', 'variance', 'loss', 'loss'],
# learning_rate=0.003)
# model.fit(dataset, nb_epochs=2500)
# pred, std = model.predict_uncertainty(dataset)
# assert np.mean(np.abs(y - pred)) < 2.0
# assert noise < np.mean(std) < 1.0
<file_sep>"""
Script that trains MPNN models on qm8 dataset.
"""
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
import numpy as np
np.random.seed(123)
import tensorflow as tf
tf.random.set_seed(123)
import deepchem as dc
# Load QM8 dataset
tasks, datasets, transformers = dc.molnet.load_qm8(featurizer='MP')
train_dataset, valid_dataset, test_dataset = datasets
# Fit models
metric = [dc.metrics.Metric(dc.metrics.pearson_r2_score, mode="regression")]
# Batch size of models
batch_size = 32
n_atom_feat = 70
n_pair_feat = 8
model = dc.models.MPNNModel(
len(tasks),
n_atom_feat=n_atom_feat,
n_pair_feat=n_pair_feat,
T=5,
M=10,
batch_size=batch_size,
learning_rate=0.0001,
use_queue=False,
mode="regression")
# Fit trained model
model.fit(train_dataset, nb_epoch=100)
print("Evaluating model")
train_scores = model.evaluate(train_dataset, metric, transformers)
valid_scores = model.evaluate(valid_dataset, metric, transformers)
print("Train scores")
print(train_scores)
print("Validation scores")
print(valid_scores)
<file_sep>import logging
import time
from deepchem.utils.rdkit_utils import MoleculeLoadException, load_molecule, compute_ecfp_features
from deepchem.utils.geometry_utils import rotate_molecules, compute_pairwise_distances, compute_centroid, subtract_centroid
from deepchem.utils.hash_utils import hash_ecfp, hash_ecfp_pair, hash_sybyl, vectorize
from deepchem.utils.noncovalent_utils import compute_hydrogen_bonds, compute_salt_bridges, compute_binding_pocket_cation_pi
from deepchem.utils.voxel_utils import convert_atom_to_voxel, convert_atom_pair_to_voxel, voxelize, voxelize_pi_stack
from deepchem.feat.complex_featurizers.contact_fingerprints import featurize_contacts_ecfp, featurize_binding_pocket_sybyl
from deepchem.feat.complex_featurizers.splif_fingerprints import featurize_splif
from deepchem.feat.complex_featurizers.grid_featurizers import compute_charge_dictionary
import numpy as np
from deepchem.feat import ComplexFeaturizer
logger = logging.getLogger(__name__)
class RdkitGridFeaturizer(ComplexFeaturizer):
"""Featurizes protein-ligand complex using flat features or a 3D grid (in which
each voxel is described with a vector of features).
"""
def __init__(self,
nb_rotations=0,
feature_types=None,
ecfp_degree=2,
ecfp_power=3,
splif_power=3,
box_width=16.0,
voxel_width=1.0,
flatten=False,
verbose=True,
sanitize=False,
**kwargs):
"""
Parameters
----------
nb_rotations: int, optional (default 0)
Number of additional random rotations of a complex to generate.
feature_types: list, optional (default ['ecfp'])
Types of features to calculate. Available types are
flat features -> 'ecfp_ligand', 'ecfp_hashed', 'splif_hashed', 'hbond_count'
voxel features -> 'ecfp', 'splif', 'sybyl', 'salt_bridge', 'charge', 'hbond', 'pi_stack', 'cation_pi'
There are also 3 predefined sets of features
'flat_combined', 'voxel_combined', and 'all_combined'.
Calculated features are concatenated and their order is preserved
(features in predefined sets are in alphabetical order).
ecfp_degree: int, optional (default 2)
ECFP radius.
ecfp_power: int, optional (default 3)
Number of bits to store ECFP features (resulting vector will be
2^ecfp_power long)
splif_power: int, optional (default 3)
Number of bits to store SPLIF features (resulting vector will be
2^splif_power long)
box_width: float, optional (default 16.0)
Size of a box in which voxel features are calculated. Box is centered on a
ligand centroid.
voxel_width: float, optional (default 1.0)
Size of a 3D voxel in a grid.
flatten: bool, optional (default False)
Indicate whether calculated features should be flattened. Output is always
flattened if flat features are specified in feature_types.
verbose: bool, optional (default True)
Verbosity for logging.
sanitize: bool, optional (default False)
If set to True molecules will be sanitized. Note that calculating some
features (e.g. aromatic interactions) requires sanitized molecules.
**kwargs: dict, optional
Keyword arguments can be used to specify custom cutoffs and bins (see
default values below).
Default cutoffs and bins
------------------------
hbond_dist_bins: [(2.2, 2.5), (2.5, 3.2), (3.2, 4.0)]
hbond_angle_cutoffs: [5, 50, 90]
splif_contact_bins: [(0, 2.0), (2.0, 3.0), (3.0, 4.5)]
ecfp_cutoff: 4.5
sybyl_cutoff: 7.0
salt_bridges_cutoff: 5.0
pi_stack_dist_cutoff: 4.4
pi_stack_angle_cutoff: 30.0
cation_pi_dist_cutoff: 6.5
cation_pi_angle_cutoff: 30.0
"""
# check if user tries to set removed arguments
deprecated_args = [
'box_x', 'box_y', 'box_z', 'save_intermediates',
'voxelize_features', 'parallel', 'voxel_feature_types'
]
# list of features that require sanitized molecules
require_sanitized = ['pi_stack', 'cation_pi', 'ecfp_ligand']
# not implemented featurization types
not_implemented = ['sybyl']
for arg in deprecated_args:
if arg in kwargs and verbose:
logger.warning(
'%s argument was removed and is ignored;'
' using it will result in an error in version 1.4' % arg,
DeprecationWarning)
self.verbose = verbose
self.sanitize = sanitize
self.flatten = flatten
self.ecfp_degree = ecfp_degree
self.ecfp_power = ecfp_power
self.splif_power = splif_power
self.nb_rotations = nb_rotations
# default values
self.cutoffs = {
'hbond_dist_bins': [(2.2, 2.5), (2.5, 3.2), (3.2, 4.0)],
'hbond_angle_cutoffs': [5, 50, 90],
'splif_contact_bins': [(0, 2.0), (2.0, 3.0), (3.0, 4.5)],
'ecfp_cutoff': 4.5,
'sybyl_cutoff': 7.0,
'salt_bridges_cutoff': 5.0,
'pi_stack_dist_cutoff': 4.4,
'pi_stack_angle_cutoff': 30.0,
'cation_pi_dist_cutoff': 6.5,
'cation_pi_angle_cutoff': 30.0,
}
# update with cutoffs specified by the user
for arg, value in kwargs.items():
if arg in self.cutoffs:
self.cutoffs[arg] = value
self.box_width = float(box_width)
self.voxel_width = float(voxel_width)
self.voxels_per_edge = int(self.box_width / self.voxel_width)
self.sybyl_types = [
"C3", "C2", "C1", "Cac", "Car", "N3", "N3+", "Npl", "N2", "N1",
"Ng+", "Nox", "Nar", "Ntr", "Nam", "Npl3", "N4", "O3", "O-", "O2",
"O.co2", "O.spc", "O.t3p", "S3", "S3+", "S2", "So2", "Sox"
"Sac"
"SO", "P3", "P", "P3+", "F", "Cl", "Br", "I"
]
self.FLAT_FEATURES = [
'ecfp_ligand', 'ecfp_hashed', 'splif_hashed', 'hbond_count'
]
self.VOXEL_FEATURES = [
'ecfp', 'splif', 'sybyl', 'salt_bridge', 'charge', 'hbond',
'pi_stack', 'cation_pi'
]
if feature_types is None:
feature_types = ['ecfp']
# each entry is a tuple (is_flat, feature_name)
self.feature_types = []
# list of features that cannot be calculated with specified parameters
# this list is used to define <flat/voxel/all>_combined subset
ignored_features = []
if self.sanitize is False:
ignored_features += require_sanitized
ignored_features += not_implemented
# parse provided feature types
for feature_type in feature_types:
if self.sanitize is False and feature_type in require_sanitized:
if self.verbose:
logger.warning(
'sanitize is set to False, %s feature will be ignored' %
feature_type)
continue
if feature_type in not_implemented:
if self.verbose:
logger.warning(
'%s feature is not implemented yet and will be ignored'
% feature_type)
continue
if feature_type in self.FLAT_FEATURES:
self.feature_types.append((True, feature_type))
if self.flatten is False:
if self.verbose:
logger.warning(
'%s feature is used, output will be flattened' %
feature_type)
self.flatten = True
elif feature_type in self.VOXEL_FEATURES:
self.feature_types.append((False, feature_type))
elif feature_type == 'flat_combined':
self.feature_types += [(True, ftype)
for ftype in sorted(self.FLAT_FEATURES)
if ftype not in ignored_features]
if self.flatten is False:
if self.verbose:
logger.warning(
'Flat features are used, output will be flattened')
self.flatten = True
elif feature_type == 'voxel_combined':
self.feature_types += [(False, ftype)
for ftype in sorted(self.VOXEL_FEATURES)
if ftype not in ignored_features]
elif feature_type == 'all_combined':
self.feature_types += [(True, ftype)
for ftype in sorted(self.FLAT_FEATURES)
if ftype not in ignored_features]
self.feature_types += [(False, ftype)
for ftype in sorted(self.VOXEL_FEATURES)
if ftype not in ignored_features]
if self.flatten is False:
if self.verbose:
logger.warning(
'Flat feature are used, output will be flattened')
self.flatten = True
elif self.verbose:
logger.warning('Ignoring unknown feature %s' % feature_type)
def _compute_feature(self, feature_name, prot_xyz, prot_rdk, lig_xyz,
lig_rdk, distances):
if feature_name == 'ecfp_ligand':
return [
compute_ecfp_features(lig_rdk, self.ecfp_degree,
self.ecfp_power)
]
if feature_name == 'ecfp_hashed':
return [
vectorize(hash_ecfp,
feature_dict=ecfp_dict,
size=2**self.ecfp_power)
for ecfp_dict in featurize_contacts_ecfp(
(prot_xyz, prot_rdk), (lig_xyz, lig_rdk),
distances,
cutoff=self.cutoffs['ecfp_cutoff'],
ecfp_degree=self.ecfp_degree)
]
if feature_name == 'splif_hashed':
return [
vectorize(hash_ecfp_pair,
feature_dict=splif_dict,
size=2**self.splif_power)
for splif_dict in featurize_splif((prot_xyz, prot_rdk), (
lig_xyz, lig_rdk), self.cutoffs['splif_contact_bins'],
distances, self.ecfp_degree)
]
if feature_name == 'hbond_count':
return [
vectorize(hash_ecfp_pair, feature_list=hbond_list, size=2**0)
for hbond_list in
compute_hydrogen_bonds((prot_xyz, prot_rdk), (
lig_xyz,
lig_rdk), distances, self.cutoffs['hbond_dist_bins'],
self.cutoffs['hbond_angle_cutoffs'])
]
if feature_name == 'ecfp':
return [
sum([
voxelize(
convert_atom_to_voxel,
xyz,
box_width=self.box_width,
voxel_width=self.voxel_width,
hash_function=hash_ecfp,
feature_dict=ecfp_dict,
nb_channel=2**self.ecfp_power,
) for xyz, ecfp_dict in zip(
(prot_xyz, lig_xyz),
featurize_contacts_ecfp(
(prot_xyz, prot_rdk), (lig_xyz, lig_rdk),
distances,
cutoff=self.cutoffs['ecfp_cutoff'],
ecfp_degree=self.ecfp_degree))
])
]
if feature_name == 'splif':
return [
voxelize(
convert_atom_pair_to_voxel,
(prot_xyz, lig_xyz),
box_width=self.box_width,
voxel_width=self.voxel_width,
hash_function=hash_ecfp_pair,
feature_dict=splif_dict,
nb_channel=2**self.splif_power,
) for splif_dict in featurize_splif((prot_xyz, prot_rdk), (
lig_xyz, lig_rdk), self.cutoffs['splif_contact_bins'],
distances, self.ecfp_degree)
]
if feature_name == 'sybyl':
def hash_sybyl_func(x):
hash_sybyl(x, sybyl_types=self.sybyl_types)
return [
voxelize(
convert_atom_to_voxel,
xyz,
box_width=self.box_width,
voxel_width=self.voxel_width,
hash_function=hash_sybyl_func,
feature_dict=sybyl_dict,
nb_channel=len(self.sybyl_types),
) for xyz, sybyl_dict in zip(
(prot_xyz, lig_xyz),
featurize_binding_pocket_sybyl(
prot_xyz,
prot_rdk,
lig_xyz,
lig_rdk,
distances,
cutoff=self.cutoffs['sybyl_cutoff']))
]
if feature_name == 'salt_bridge':
return [
voxelize(
convert_atom_pair_to_voxel,
(prot_xyz, lig_xyz),
box_width=self.box_width,
voxel_width=self.voxel_width,
feature_list=compute_salt_bridges(
prot_rdk,
lig_rdk,
distances,
cutoff=self.cutoffs['salt_bridges_cutoff']),
nb_channel=1,
)
]
if feature_name == 'charge':
return [
sum([
voxelize(convert_atom_to_voxel,
xyz,
box_width=self.box_width,
voxel_width=self.voxel_width,
feature_dict=compute_charge_dictionary(mol),
nb_channel=1,
dtype="np.float16")
for xyz, mol in ((prot_xyz, prot_rdk), (lig_xyz, lig_rdk))
])
]
if feature_name == 'hbond':
return [
voxelize(
convert_atom_pair_to_voxel,
(prot_xyz, lig_xyz),
box_width=self.box_width,
voxel_width=self.voxel_width,
feature_list=hbond_list,
nb_channel=2**0,
) for hbond_list in
compute_hydrogen_bonds((prot_xyz, prot_rdk), (
lig_xyz,
lig_rdk), distances, self.cutoffs['hbond_dist_bins'],
self.cutoffs['hbond_angle_cutoffs'])
]
if feature_name == 'pi_stack':
return voxelize_pi_stack(prot_xyz, prot_rdk, lig_xyz, lig_rdk,
distances,
self.cutoffs['pi_stack_dist_cutoff'],
self.cutoffs['pi_stack_angle_cutoff'],
self.box_width, self.voxel_width)
if feature_name == 'cation_pi':
return [
sum([
voxelize(
convert_atom_to_voxel,
xyz,
box_width=self.box_width,
voxel_width=self.voxel_width,
feature_dict=cation_pi_dict,
nb_channel=1,
) for xyz, cation_pi_dict in zip(
(prot_xyz, lig_xyz),
compute_binding_pocket_cation_pi(
prot_rdk,
lig_rdk,
dist_cutoff=self.cutoffs['cation_pi_dist_cutoff'],
angle_cutoff=self.cutoffs['cation_pi_angle_cutoff'],
))
])
]
raise ValueError('Unknown feature type "%s"' % feature_name)
def _featurize(self, complex):
"""Computes grid featurization of protein/ligand complex.
Takes as input filenames pdb of the protein, pdb of the ligand.
This function then computes the centroid of the ligand; decrements this
centroid from the atomic coordinates of protein and ligand atoms, and then
merges the translated protein and ligand. This combined system/complex is
then saved.
This function then computes a featurization with scheme specified by the user.
Parameters
----------
complex: Tuple[str, str]
Filenames for molecule and protein.
"""
try:
mol_pdb_file, protein_pdb_file = complex
time1 = time.time()
protein_xyz, protein_rdk = load_molecule(protein_pdb_file,
calc_charges=True,
sanitize=self.sanitize)
time2 = time.time()
logger.info("TIMING: Loading protein coordinates took %0.3f s" %
(time2 - time1))
time1 = time.time()
ligand_xyz, ligand_rdk = load_molecule(mol_pdb_file,
calc_charges=True,
sanitize=self.sanitize)
time2 = time.time()
logger.info("TIMING: Loading ligand coordinates took %0.3f s" %
(time2 - time1))
except MoleculeLoadException:
logger.warning("Some molecules cannot be loaded by Rdkit. Skipping")
return None
time1 = time.time()
centroid = compute_centroid(ligand_xyz)
ligand_xyz = subtract_centroid(ligand_xyz, centroid)
protein_xyz = subtract_centroid(protein_xyz, centroid)
time2 = time.time()
logger.info("TIMING: Centroid processing took %0.3f s" %
(time2 - time1))
pairwise_distances = compute_pairwise_distances(protein_xyz, ligand_xyz)
transformed_systems = {}
transformed_systems[(0, 0)] = [protein_xyz, ligand_xyz]
for i in range(self.nb_rotations):
rotated_system = rotate_molecules([protein_xyz, ligand_xyz])
transformed_systems[(i + 1, 0)] = rotated_system
features_dict = {}
for system_id, (protein_xyz, ligand_xyz) in transformed_systems.items():
feature_arrays = []
for is_flat, function_name in self.feature_types:
result = self._compute_feature(
function_name,
protein_xyz,
protein_rdk,
ligand_xyz,
ligand_rdk,
pairwise_distances,
)
feature_arrays += result
if self.flatten:
features_dict[system_id] = np.concatenate([
feature_array.flatten()
for feature_array in feature_arrays
])
else:
features_dict[system_id] = np.concatenate(feature_arrays,
axis=-1)
features = np.concatenate(list(features_dict.values()))
return features
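# Illustrative usage sketch (comments only; 'ligand.pdb' and 'protein.pdb' are
# hypothetical local files):
#   featurizer = RdkitGridFeaturizer(feature_types=['ecfp', 'splif'],
#                                    box_width=16.0, voxel_width=1.0,
#                                    flatten=True)
#   features = featurizer.featurize([('ligand.pdb', 'protein.pdb')])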
<file_sep>import pytest
from deepchem.utils.fake_data_generator import FakeGraphGenerator
try:
import torch
from deepchem.models.torch_models.layers import GraphNetwork
has_torch = True
except ModuleNotFoundError:
has_torch = False
@pytest.mark.torch
def test_graphnet_layer():
# Testing graphnet for a single graph
node_features = torch.randn(5, 10)
edge_features = torch.randn(5, 3)
global_features = torch.randn(1, 4)
edge_index = torch.tensor([[0, 1, 2, 3, 4], [1, 2, 3, 4, 0]]).long()
graphnet = GraphNetwork(n_node_features=node_features.size(1),
n_edge_features=edge_features.size(1),
n_global_features=global_features.size(1))
assert repr(
graphnet
) == 'GraphNetwork(n_node_features=10, n_edge_features=3, n_global_features=4, is_undirected=True, residual_connection=True)'
new_node_features, new_edge_features, new_global_features = graphnet(
node_features, edge_index, edge_features, global_features)
assert node_features.size() == new_node_features.size()
assert edge_features.size() == new_edge_features.size()
assert global_features.size() == new_global_features.size()
# Testing for consistency
node_features = torch.tensor([[0.7, 0.7], [0.7, 0.7]]).float()
edge_features = torch.tensor([[0.7, 0.7]]).float()
global_features = torch.tensor([[1]]).float()
edge_index = torch.tensor([[0], [1]]).long()
torch.manual_seed(12345)
graphnet1 = GraphNetwork(n_node_features=2,
n_edge_features=2,
n_global_features=1)
out_node1, out_edge1, out_global1 = graphnet1(node_features, edge_index,
edge_features,
global_features)
torch.manual_seed(12345)
graphnet2 = GraphNetwork(n_node_features=2,
n_edge_features=2,
n_global_features=1)
out_node2, out_edge2, out_global2 = graphnet2(node_features, edge_index,
edge_features,
global_features)
rtol = 1e-5
atol = 1e-6
assert torch.allclose(out_node1, out_node2, rtol=rtol, atol=atol)
assert torch.allclose(out_edge1, out_edge2, rtol=rtol, atol=atol)
assert torch.allclose(out_global1, out_global2, rtol=rtol, atol=atol)
@pytest.mark.torch
def test_graphnet_for_graphs_in_batch():
# Testing with a batch of Graphs
try:
from torch_geometric.data import Batch
except ModuleNotFoundError:
raise ImportError("Tests require pytorch geometric to be installed")
n_node_features, n_edge_features, n_global_features = 3, 4, 5
fgg = FakeGraphGenerator(min_nodes=8,
max_nodes=12,
n_node_features=n_node_features,
avg_degree=10,
n_edge_features=n_edge_features,
n_classes=2,
task='graph',
z=n_global_features)
graphs = fgg.sample(n_graphs=10)
graphnet = GraphNetwork(n_node_features, n_edge_features, n_global_features)
graph_batch = Batch()
graph_batch = graph_batch.from_data_list(
[graph.to_pyg_graph() for graph in graphs.X])
new_node_features, new_edge_features, new_global_features = graphnet(
graph_batch.x, graph_batch.edge_index, graph_batch.edge_attr,
graph_batch.z, graph_batch.batch)
assert graph_batch.x.size() == new_node_features.size()
assert graph_batch.edge_attr.size() == new_edge_features.size()
assert graph_batch.z.size() == new_global_features.size()
<file_sep>"""Ops for objectives
Code borrowed from Keras.
"""
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
from __future__ import absolute_import
import warnings
import tensorflow as tf
from deepchem.nn import model_ops
def mean_squared_error(y_true, y_pred):
return model_ops.mean(tf.square(y_pred - y_true), axis=-1)
def mean_absolute_error(y_true, y_pred):
return model_ops.mean(tf.abs(y_pred - y_true), axis=-1)
def mean_absolute_percentage_error(y_true, y_pred):
diff = tf.abs((y_true - y_pred) / model_ops.clip(
tf.abs(y_true), model_ops.epsilon(), None))
return 100. * model_ops.mean(diff, axis=-1)
def mean_squared_logarithmic_error(y_true, y_pred):
first_log = tf.log(model_ops.clip(y_pred, model_ops.epsilon(), None) + 1.)
second_log = tf.log(model_ops.clip(y_true, model_ops.epsilon(), None) + 1.)
return model_ops.mean(tf.square(first_log - second_log), axis=-1)
def squared_hinge(y_true, y_pred):
return model_ops.mean(
tf.square(tf.maximum(1. - y_true * y_pred, 0.)), axis=-1)
def hinge(y_true, y_pred):
return model_ops.mean(tf.maximum(1. - y_true * y_pred, 0.), axis=-1)
def categorical_crossentropy(y_true, y_pred):
return model_ops.categorical_crossentropy(y_pred, y_true)
def sparse_categorical_crossentropy(y_true, y_pred):
return model_ops.sparse_categorical_crossentropy(y_pred, y_true)
def binary_crossentropy(y_true, y_pred):
return model_ops.mean(model_ops.binary_crossentropy(y_pred, y_true), axis=-1)
def kullback_leibler_divergence(y_true, y_pred):
y_true = model_ops.clip(y_true, model_ops.epsilon(), 1)
y_pred = model_ops.clip(y_pred, model_ops.epsilon(), 1)
return model_ops.sum(y_true * tf.log(y_true / y_pred), axis=-1)
def poisson(y_true, y_pred):
return model_ops.mean(
y_pred - y_true * tf.log(y_pred + model_ops.epsilon()), axis=-1)
def cosine_proximity(y_true, y_pred):
y_true = model_ops.l2_normalize(y_true, axis=-1)
y_pred = model_ops.l2_normalize(y_pred, axis=-1)
return -model_ops.mean(y_true * y_pred, axis=-1)
# Aliases.
mse = MSE = mean_squared_error
mae = MAE = mean_absolute_error
mape = MAPE = mean_absolute_percentage_error
msle = MSLE = mean_squared_logarithmic_error
kld = KLD = kullback_leibler_divergence
cosine = cosine_proximity
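# Illustrative sketch (comments only; assumes y_true and y_pred are TF tensors of
# the same shape):
#   y_true = tf.constant([[0.0, 1.0]])
#   y_pred = tf.constant([[0.1, 0.9]])
#   loss = mean_squared_error(y_true, y_pred)  # mean over the last axis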
<file_sep>"""
Test for Ferminet Model.
"""
import pytest
import numpy as np
try:
from deepchem.models.torch_models.ferminet import FerminetModel
# When pytest runs without pytorch in the environment (ex: as in tensorflow workflow),
# the above import raises a ModuleNotFoundError. It is safe to ignore it
# since the below tests only run in an environment with pytorch installed.
except ModuleNotFoundError:
pass
@pytest.mark.torch
def test_FerminetModel():
# Test for the init function of FerminetModel class
FH_molecule = [['F', [0, 0, 0]], ['H', [0, 0.5, 0.5]]]
# Testing ionic initialization
mol = FerminetModel(FH_molecule, spin=1, ion_charge=-1)
assert (mol.electron_no == np.array([[10], [1]])).all()
# Testing whether error throws up when spin is wrong
with pytest.raises(ValueError):
FerminetModel(FH_molecule, spin=0, ion_charge=-1)
# Testing the spin values
Li_atom = [['Li', [0, 0, 0]]]
mol = FerminetModel(Li_atom, spin=1, ion_charge=0)
assert mol.up_spin == 2 and mol.down_spin == 1
@pytest.mark.dqc
def test_prepare_hf_solution():
# Test for the prepare_hf_solution function of FerminetModel class
H2_molecule = [['H', [0, 0, 0]], ['H', [0, 0, 0.748]]]
mol = FerminetModel(H2_molecule, spin=0, ion_charge=0)
electron_coordinates = np.random.rand(2, 3)
hf_solution = mol.prepare_hf_solution(electron_coordinates)
# The solution should be of the shape (number of electrons, number of electrons)
assert np.shape(hf_solution) == (2, 2)
<file_sep>"""
Script that trains graph-conv models on HOPV dataset.
"""
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
import numpy as np
from deepchem.models import GraphConvModel
np.random.seed(123)
import tensorflow as tf
tf.random.set_seed(123)
import deepchem as dc
from deepchem.molnet import load_hopv
# Load HOPV dataset
hopv_tasks, hopv_datasets, transformers = load_hopv(featurizer='GraphConv')
train_dataset, valid_dataset, test_dataset = hopv_datasets
# Fit models
metric = [
dc.metrics.Metric(dc.metrics.pearson_r2_score, np.mean, mode="regression"),
dc.metrics.Metric(
dc.metrics.mean_absolute_error, np.mean, mode="regression")
]
# Number of features on conv-mols
n_feat = 75
# Batch size of models
batch_size = 50
model = GraphConvModel(
len(hopv_tasks), batch_size=batch_size, mode='regression')
# Fit trained model
model.fit(train_dataset, nb_epoch=25)
print("Evaluating model")
train_scores = model.evaluate(train_dataset, metric, transformers)
valid_scores = model.evaluate(valid_dataset, metric, transformers)
print("Train scores")
print(train_scores)
print("Validation scores")
print(valid_scores)
<file_sep># This example shows how to use Pandas to load data directly
# without using a CSVLoader object. This may be useful if you
# want the flexibility of processing your data with Pandas
# directly.
import pandas as pd
import deepchem as dc
from rdkit import Chem
df = pd.read_csv("example.csv")
print("Original data loaded as DataFrame:")
print(df)
featurizer = dc.feat.CircularFingerprint(size=16)
mols = [Chem.MolFromSmiles(smiles) for smiles in df["smiles"]]
features = featurizer.featurize(mols)
dataset = dc.data.NumpyDataset(
X=features, y=df["log-solubility"], ids=df["Compound ID"])
print("Data converted into DeepChem Dataset")
print(dataset)
# Now let's convert from a dataset back to a pandas dataframe
converted_df = dataset.to_dataframe()
print("Data converted back into DataFrame:")
print(converted_df)
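# The reverse conversion is also possible (illustrative sketch; assumes the
# Dataset.from_dataframe helper and to_dataframe()'s X*/y*/w*/ids column naming):
# roundtrip = dc.data.NumpyDataset.from_dataframe(converted_df)
# print(roundtrip)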
<file_sep>"""
Tests for ConvMolFeaturizer.
"""
import unittest
import numpy as np
from deepchem.feat.graph_features import ConvMolFeaturizer
class TestConvMolFeaturizer(unittest.TestCase):
"""
Test ConvMolFeaturizer featurizes properly.
"""
def test_carbon_nitrogen(self):
"""Test on carbon nitrogen molecule"""
# Note there is a central nitrogen of degree 4, with 4 carbons
# of degree 1 (connected only to central nitrogen).
raw_smiles = ['C[N+](C)(C)C']
import rdkit.Chem
mols = [rdkit.Chem.MolFromSmiles(s) for s in raw_smiles]
featurizer = ConvMolFeaturizer()
mols = featurizer.featurize(mols)
mol = mols[0]
# 5 atoms in compound
assert mol.get_num_atoms() == 5
# Get the adjacency lists grouped by degree
deg_adj_lists = mol.get_deg_adjacency_lists()
assert np.array_equal(deg_adj_lists[0], np.zeros([0, 0],
dtype=np.int32))
# The 4 outer atoms connected to central nitrogen
assert np.array_equal(deg_adj_lists[1],
np.array([[4], [4], [4], [4]], dtype=np.int32))
assert np.array_equal(deg_adj_lists[2], np.zeros([0, 2],
dtype=np.int32))
assert np.array_equal(deg_adj_lists[3], np.zeros([0, 3],
dtype=np.int32))
# Central nitrogen connected to everything else.
assert np.array_equal(deg_adj_lists[4],
np.array([[0, 1, 2, 3]], dtype=np.int32))
assert np.array_equal(deg_adj_lists[5], np.zeros([0, 5],
dtype=np.int32))
assert np.array_equal(deg_adj_lists[6], np.zeros([0, 6],
dtype=np.int32))
def test_single_carbon(self):
"""Test that single carbon atom is featurized properly."""
raw_smiles = ['C']
import rdkit
mols = [rdkit.Chem.MolFromSmiles(s) for s in raw_smiles]
featurizer = ConvMolFeaturizer()
mol_list = featurizer.featurize(mols)
mol = mol_list[0]
# Only one carbon
assert mol.get_num_atoms() == 1
# No bonds, so degree adjacency lists are empty
deg_adj_lists = mol.get_deg_adjacency_lists()
assert np.array_equal(deg_adj_lists[0], np.zeros([1, 0],
dtype=np.int32))
assert np.array_equal(deg_adj_lists[1], np.zeros([0, 1],
dtype=np.int32))
assert np.array_equal(deg_adj_lists[2], np.zeros([0, 2],
dtype=np.int32))
assert np.array_equal(deg_adj_lists[3], np.zeros([0, 3],
dtype=np.int32))
assert np.array_equal(deg_adj_lists[4], np.zeros([0, 4],
dtype=np.int32))
assert np.array_equal(deg_adj_lists[5], np.zeros([0, 5],
dtype=np.int32))
assert np.array_equal(deg_adj_lists[6], np.zeros([0, 6],
dtype=np.int32))
def test_alkane(self):
"""Test on simple alkane"""
raw_smiles = ['CCC']
import rdkit.Chem
mols = [rdkit.Chem.MolFromSmiles(s) for s in raw_smiles]
featurizer = ConvMolFeaturizer()
mol_list = featurizer.featurize(mols)
mol = mol_list[0]
# 3 carbons in alkane
assert mol.get_num_atoms() == 3
deg_adj_lists = mol.get_deg_adjacency_lists()
assert np.array_equal(deg_adj_lists[0], np.zeros([0, 0],
dtype=np.int32))
# Outer two carbons are connected to central carbon
assert np.array_equal(deg_adj_lists[1],
np.array([[2], [2]], dtype=np.int32))
# Central carbon connected to outer two
assert np.array_equal(deg_adj_lists[2],
np.array([[0, 1]], dtype=np.int32))
assert np.array_equal(deg_adj_lists[3], np.zeros([0, 3],
dtype=np.int32))
assert np.array_equal(deg_adj_lists[4], np.zeros([0, 4],
dtype=np.int32))
assert np.array_equal(deg_adj_lists[5], np.zeros([0, 5],
dtype=np.int32))
assert np.array_equal(deg_adj_lists[6], np.zeros([0, 6],
dtype=np.int32))
def test_per_atom_fragmentation(self):
"""checks if instantiating featurizer with per_atom_fragmentation=True
leads to as many fragments' features, as many atoms mol has for any mol"""
import rdkit.Chem
raw_smiles = ['CC(CO)Cc1ccccc1', 'CC']
mols = [rdkit.Chem.MolFromSmiles(m) for m in raw_smiles]
featurizer = ConvMolFeaturizer(per_atom_fragmentation=True)
feat = featurizer.featurize(mols)
for i, j in zip(feat, mols):
assert len(i) == j.GetNumHeavyAtoms()
<file_sep>import logging
import numpy as np
from deepchem.utils.typing import RDKitBond, RDKitMol, List
from deepchem.feat.base_classes import MolecularFeaturizer
from deepchem.utils.typing import OneOrMany
from typing import Optional
logger = logging.getLogger(__name__)
class GraphMatrix:
"""
This class is used to store data for MolGAN neural networks.
Parameters
----------
adjacency_matrix: np.ndarray
Adjacency matrix encoding bond types, with shape [num_nodes, num_nodes]
node_features: np.ndarray
Node feature matrix with shape [num_nodes, num_node_features]
Returns
-------
graph: GraphMatrix
A molecule graph with some features.
"""
def __init__(self, adjacency_matrix: np.ndarray, node_features: np.ndarray):
self.adjacency_matrix = adjacency_matrix
self.node_features = node_features
class MolGanFeaturizer(MolecularFeaturizer):
"""
Featurizer for MolGAN de-novo molecular generation [1]_.
The default representation is in the form of a GraphMatrix object,
which is a wrapper for two matrices containing atom and bond type information.
The class also provides reverse (defeaturization) capabilities.
Examples
--------
>>> import deepchem as dc
>>> from rdkit import Chem
>>> rdkit_mol, smiles_mol = Chem.MolFromSmiles('CCC'), 'C1=CC=CC=C1'
>>> molecules = [rdkit_mol, smiles_mol]
>>> featurizer = dc.feat.MolGanFeaturizer()
>>> features = featurizer.featurize(molecules)
>>> len(features) # 2 molecules
2
>>> type(features[0])
<class 'deepchem.feat.molecule_featurizers.molgan_featurizer.GraphMatrix'>
>>> molecules = featurizer.defeaturize(features) # defeaturization
>>> type(molecules[0])
<class 'rdkit.Chem.rdchem.Mol'>
"""
def __init__(
self,
max_atom_count: int = 9,
kekulize: bool = True,
bond_labels: Optional[List[RDKitBond]] = None,
atom_labels: Optional[List[int]] = None,
):
"""
Parameters
----------
max_atom_count: int, default 9
Maximum number of atoms used for creation of adjacency matrix.
Molecules cannot have more atoms than this number;
implicit hydrogens do not count.
kekulize: bool, default True
Whether molecules should be kekulized.
Solves a number of issues with defeaturization when used.
bond_labels: List[RDKitBond]
List of types of bond used for generation of adjacency matrix
atom_labels: List[int]
List of atomic numbers used for generation of node features
References
---------
.. [1] <NAME> et al. "MolGAN: An implicit generative model for
small molecular graphs" (2018), https://arxiv.org/abs/1805.11973
"""
self.max_atom_count = max_atom_count
self.kekulize = kekulize
try:
from rdkit import Chem
except ModuleNotFoundError:
raise ImportError("This class requires RDKit to be installed.")
# bond labels
if bond_labels is None:
self.bond_labels = [
Chem.rdchem.BondType.ZERO,
Chem.rdchem.BondType.SINGLE,
Chem.rdchem.BondType.DOUBLE,
Chem.rdchem.BondType.TRIPLE,
Chem.rdchem.BondType.AROMATIC,
]
else:
self.bond_labels = bond_labels
# atom labels
if atom_labels is None:
self.atom_labels = [0, 6, 7, 8, 9]  # 0 (no atom), C, N, O, F
else:
self.atom_labels = atom_labels
# create bond encoders and decoders
self.bond_encoder = {l: i for i, l in enumerate(self.bond_labels)}
self.bond_decoder = {i: l for i, l in enumerate(self.bond_labels)}
# create atom encoders and decoders
self.atom_encoder = {l: i for i, l in enumerate(self.atom_labels)}
self.atom_decoder = {i: l for i, l in enumerate(self.atom_labels)}
def _featurize(self, datapoint: RDKitMol,
**kwargs) -> Optional[GraphMatrix]:
"""
Calculate adjacency matrix and nodes features for RDKitMol.
It strips any chirality and charges
Parameters
----------
datapoint: rdkit.Chem.rdchem.Mol
RDKit mol object.
Returns
-------
graph: GraphMatrix
A molecule graph with some features.
"""
try:
from rdkit import Chem
except ModuleNotFoundError:
raise ImportError("This method requires RDKit to be installed.")
if 'mol' in kwargs:
datapoint = kwargs.get("mol")
raise DeprecationWarning(
'Mol is being phased out as a parameter, please pass "datapoint" instead.'
)
if self.kekulize:
Chem.Kekulize(datapoint)
A = np.zeros(shape=(self.max_atom_count, self.max_atom_count),
dtype=np.float32)
bonds = datapoint.GetBonds()
begin, end = [b.GetBeginAtomIdx() for b in bonds
], [b.GetEndAtomIdx() for b in bonds]
bond_type = [self.bond_encoder[b.GetBondType()] for b in bonds]
A[begin, end] = bond_type
A[end, begin] = bond_type
degree = np.sum(A[:datapoint.GetNumAtoms(), :datapoint.GetNumAtoms()],
axis=-1)
X = np.array(
[
self.atom_encoder[atom.GetAtomicNum()]
for atom in datapoint.GetAtoms()
] + [0] * (self.max_atom_count - datapoint.GetNumAtoms()),
dtype=np.int32,
)
graph = GraphMatrix(A, X)
return graph if (degree > 0).all() else None
def _defeaturize(self,
graph_matrix: GraphMatrix,
sanitize: bool = True,
cleanup: bool = True) -> RDKitMol:
"""
Recreate RDKitMol from GraphMatrix object.
Same featurizer need to be used for featurization and defeaturization.
It only recreates bond and atom types, any kind of additional features
like chirality or charge are not included.
Therefore, any checks of type: original_smiles == defeaturized_smiles
will fail on chiral or charged compounds.
Parameters
----------
graph_matrix: GraphMatrix
GraphMatrix object.
sanitize: bool, default True
Should RDKit sanitization be included in the process.
cleanup: bool, default True
Splits salts and removes compounds with "*" atom types
Returns
-------
mol: RDKitMol object
RDKitMol object representing molecule.
"""
try:
from rdkit import Chem
except ModuleNotFoundError:
raise ImportError("This method requires RDKit to be installed.")
if not isinstance(graph_matrix, GraphMatrix):
return None
node_labels = graph_matrix.node_features
edge_labels = graph_matrix.adjacency_matrix
mol = Chem.RWMol()
for node_label in node_labels:
mol.AddAtom(Chem.Atom(self.atom_decoder[node_label]))
for start, end in zip(*np.nonzero(edge_labels)):
if start > end:
mol.AddBond(int(start), int(end),
self.bond_decoder[edge_labels[start, end]])
if sanitize:
try:
Chem.SanitizeMol(mol)
except Exception:
mol = None
if cleanup:
try:
smiles = Chem.MolToSmiles(mol)
smiles = max(smiles.split("."), key=len)
if "*" not in smiles:
mol = Chem.MolFromSmiles(smiles)
else:
mol = None
except Exception:
mol = None
return mol
def defeaturize(self,
graphs: OneOrMany[GraphMatrix],
log_every_n: int = 1000) -> np.ndarray:
"""
Calculates molecules from corresponding GraphMatrix objects.
Parameters
----------
graphs: GraphMatrix / iterable
GraphMatrix object or corresponding iterable
log_every_n: int, default 1000
Logging messages reported every `log_every_n` samples.
Returns
-------
features: np.ndarray
A numpy array containing RDKitMol objects.
"""
# Special case handling of single molecule
if isinstance(graphs, GraphMatrix):
graphs = [graphs]
else:
# Convert iterables to list
graphs = list(graphs)
molecules = []
for i, gr in enumerate(graphs):
if i % log_every_n == 0:
logger.info("Featurizing datapoint %i" % i)
try:
molecules.append(self._defeaturize(gr))
except Exception as e:
logger.warning(
"Failed to defeaturize datapoint %d, %s. Appending empty array",
i,
gr,
)
logger.warning("Exception message: {}".format(e))
molecules.append(np.array([]))
return np.asarray(molecules)
<file_sep>import json
import logging
import numpy as np
from typing import Dict, Optional
from collections import Counter
from rdkit import Chem
from deepchem.data import Dataset
from deepchem.feat.base_classes import Featurizer
from deepchem.utils.typing import RDKitMol, RDKitAtom, RDKitBond
from deepchem.feat.vocabulary_builders.vocabulary_builder import VocabularyBuilder
logger = logging.getLogger(__name__)
class GroverAtomVocabularyBuilder(VocabularyBuilder):
"""Atom Vocabulary Builder for Grover
This module can be used to generate atom vocabulary from SMILES strings for
the GROVER pretraining task. For each atom in a molecule, the vocabulary context is the
node-edge-count of the atom where node is the neighboring atom, edge is the type of bond (single
bond or double bound) and count is the number of such node-edge pairs for the atom in its
neighborhood. For example, for the molecule 'CC(=O)C', the context of the first carbon atom is
C-SINGLE1 because it's neighbor is C atom, the type of bond is SINGLE bond and the count of such
bonds is 1. The context of the second carbon atom is C-SINGLE2 and O-DOUBLE1 because
it is connected to two carbon atoms by a single bond and 1 O atom by a double bond.
The vocabulary of an atom is then computed as the `atom-symbol_contexts` where the contexts
are sorted in alphabetical order when there are multiple contexts. For example, the
vocabulary of second C is `C_C-SINGLE2_O-DOUBLE1`. The algorithm enumerates vocabulary of all atoms
in the dataset and makes a vocabulary to index mapping by sorting the vocabulary
by frequency and then alphabetically.
The algorithm enumerates vocabulary of all atoms in the dataset and makes a vocabulary to
index mapping by sorting the vocabulary by frequency and then alphabetically. The `max_size`
parameter can be used for setting the size of the vocabulary. When this parameter is set,
the algorithm stops adding new words to the index when the vocabulary size reaches `max_size`.
Parameters
----------
max_size: int (optional)
Maximum size of vocabulary
Example
-------
>>> import tempfile
>>> import deepchem as dc
>>> from rdkit import Chem
>>> file = tempfile.NamedTemporaryFile()
>>> dataset = dc.data.NumpyDataset(X=[['CCC'], ['CC(=O)C']])
>>> vocab = GroverAtomVocabularyBuilder()
>>> vocab.build(dataset)
>>> vocab.stoi
{'<pad>': 0, '<other>': 1, 'C_C-SINGLE1': 2, 'C_C-SINGLE2': 3, 'C_C-SINGLE2_O-DOUBLE1': 4, 'O_C-DOUBLE1': 5}
>>> vocab.save(file.name)
>>> loaded_vocab = GroverAtomVocabularyBuilder.load(file.name)
>>> mol = Chem.MolFromSmiles('CC(=O)C')
>>> loaded_vocab.encode(mol, mol.GetAtomWithIdx(1))
4
Reference
---------
.. <NAME>, et al. "Self-supervised graph transformer on large-scale molecular data." Advances in Neural Information Processing Systems 33 (2020): 12559-12571.
"""
def __init__(self, max_size: Optional[int] = None):
self.specials = ('<pad>', '<other>')
self.min_freq = 1
self.size = max_size
self.itos = list(self.specials)
self.stoi = self._make_reverse_mapping(self.itos)
self.pad_index = 0
self.other_index = 1
def build(self, dataset: Dataset, log_every_n: int = 1000) -> None:
"""Builds vocabulary
Parameters
----------
dataset: dc.data.Dataset
          A dataset object with SMILES strings in the X attribute.
log_every_n: int, default 1000
Logs vocabulary building progress every `log_every_n` steps.
"""
counter: Dict[str, int] = Counter()
logger.info('Starting to build atom vocabulary')
for i, (x, _, _, _) in enumerate(dataset.itersamples()):
if i % log_every_n == 0:
logger.info(
'Computing contextual property of atoms in molecule %i' % i)
if isinstance(x, str):
smiles = x
elif isinstance(x, np.ndarray):
x = x.squeeze()
assert x.ndim == 0, 'expected x attribute of dataset to be a 1-D array of SMILES strings'
smiles = x.item()
mol = Chem.MolFromSmiles(smiles)
for atom in mol.GetAtoms():
v = self.atom_to_vocab(mol, atom)
counter[v] += 1
logger.info('Completed enumeration of atom contextual properties.')
# sort first by frequency, then alphabetically
words_and_frequencies = sorted(counter.items(), key=lambda tup: tup[0])
words_and_frequencies.sort(key=lambda tup: tup[1], reverse=True)
for word, freq in words_and_frequencies:
if len(self.itos) == self.size:
break
self.itos.append(word)
if self.size is None:
self.size = len(self.itos)
self.stoi = self._make_reverse_mapping(self.itos)
logger.info('Completed building of atom vocabulary')
def save(self, fname: str) -> None:
"""Saves a vocabulary in json format
Parameter
---------
fname: str
Filename to save vocabulary
"""
vocab = {'stoi': self.stoi, 'itos': self.itos, 'size': self.size}
with open(fname, 'w') as f:
json.dump(vocab, f)
@classmethod
def load(cls, fname: str) -> 'GroverAtomVocabularyBuilder':
"""Loads vocabulary from the specified json file
Parameters
----------
fname: str
JSON file containing vocabulary
Returns
-------
vocab: GroverAtomVocabularyBuilder
A grover atom vocabulary builder which can be used for encoding
"""
with open(fname, 'r') as f:
data = json.load(f)
vocab = cls()
vocab.stoi, vocab.itos, vocab.size = data['stoi'], data['itos'], data[
'size']
return vocab
@staticmethod
def atom_to_vocab(mol: RDKitMol, atom: RDKitAtom) -> str:
"""Convert atom to vocabulary.
Parameters
----------
mol: RDKitMol
          a molecule object
atom: RDKitAtom
the target atom.
Returns
-------
vocab: str
The generated atom vocabulary with its contexts.
Example
-------
>>> from rdkit import Chem
>>> mol = Chem.MolFromSmiles('[C@@H](C)C(=O)O')
>>> GroverAtomVocabularyBuilder.atom_to_vocab(mol, mol.GetAtomWithIdx(0))
'C_C-SINGLE2'
>>> GroverAtomVocabularyBuilder.atom_to_vocab(mol, mol.GetAtomWithIdx(3))
'O_C-DOUBLE1'
"""
atom_neighbors: Dict[str, int] = Counter()
for a in atom.GetNeighbors():
bond = mol.GetBondBetweenAtoms(atom.GetIdx(), a.GetIdx())
atom_neighbors[str(a.GetSymbol()) + "-" +
str(bond.GetBondType())] += 1
keys = list(atom_neighbors.keys())
# sorting the atoms neighbors
keys.sort()
output = atom.GetSymbol()
# concatenating the sorted neighbors
for key in keys:
output = "%s_%s%d" % (output, key, atom_neighbors[key])
return output
def _make_reverse_mapping(self, itos):
return {tok: i for i, tok in enumerate(itos)}
    def encode(self, mol: RDKitMol, atom: RDKitAtom) -> int:
        """Encodes an atom in a molecule
        Parameters
---------
mol: RDKitMol
An RDKitMol object
atom: RDKitAtom
An atom in the molecule
Returns
-------
        index: int
            The index of the atom's vocabulary word in the `stoi` mapping.
"""
return self.stoi.get(self.atom_to_vocab(mol, atom))
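# ----------------------------------------------------------------------------
# Usage sketch (not part of the library API). A small demonstration of the
# context-string algorithm described in the class docstring: for 'CC(=O)C' the
# middle carbon has two C-SINGLE neighbors and one O-DOUBLE neighbor, so its
# vocabulary word is 'C_C-SINGLE2_O-DOUBLE1'. Runs only when executed directly.
if __name__ == "__main__":
    demo_mol = Chem.MolFromSmiles('CC(=O)C')
    for demo_atom in demo_mol.GetAtoms():
        print(demo_atom.GetIdx(),
              GroverAtomVocabularyBuilder.atom_to_vocab(demo_mol, demo_atom))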
class GroverBondVocabularyBuilder(VocabularyBuilder):
"""Bond Vocabulary Builder for Grover
This module can be used to generate bond vocabulary from SMILES strings
for the GROVER pretraining task.
For assigning the vocabulary of a bond, we consider the features of the bond
and the context of the bond. The context of bond is the feature of the bond under
consideration and the feature of the bonds of atom in which the bond begins and ends.
It is formed by the concatenation of atomSymbol-bondFeature-Count where atomSymbol
is the symbol of neighboring atom, bondFeature is the type of bond and count is the
number of such atomSymbol-bondFeature pairs in the surrounding context.
The feature of a bond is determined by three sub-features: the type of bond (single or double bond),
the RDKit StereoConfiguration of the bond and RDKit BondDir. For the C-C bond
in CCC, the type of bond is SINGLE, its stereo is NONE and the bond does not have
direction. Hence, the feature of the bond is SINGLE-STEREONONE-NONE.
For assigning the vocabulary, we should also have to look at the neighboring bonds.
Consider the molecule 'CC(=O)C'. It has three bonds. The C-C bond has two neighbors.
The first C atom has no other bonds, so it contributes no context. The second C atom
has one bond with an O atom and one bond with a C atom. Consider the C=O double bond.
The bond feature is DOUBLE-STEREONONE-NONE. The corresponding context is
atomSymbol-bondFeature-Count. This gives us C-(DOUBLE-STEREONONE-NONE)1.
    Similarly, it also has another bond with a C atom, which gives the
context C-(SINGLE-STEREONONE-NONE)1. Hence, the vocabulary of
the bond is '(SINGLE-STEREONONE-NONE)_C-(DOUBLE-STEREONONE-NONE)1_C-(SINGLE-STEREONONE-NONE)1'
The algorithm enumerates vocabulary of all bonds in the dataset and makes a vocabulary to
index mapping by sorting the vocabulary by frequency and then alphabetically. The `max_size`
parameter can be used for setting the size of the vocabulary. When this parameter is set,
the algorithm stops adding new words to the index when the vocabulary size reaches `max_size`.
Parameters
----------
max_size: int (optional)
Maximum size of vocabulary
Example
-------
>>> import tempfile
>>> import deepchem as dc
>>> from rdkit import Chem
>>> file = tempfile.NamedTemporaryFile()
>>> dataset = dc.data.NumpyDataset(X=[['CCC']])
>>> vocab = GroverBondVocabularyBuilder()
>>> vocab.build(dataset)
>>> vocab.stoi
{'<pad>': 0, '<other>': 1, '(SINGLE-STEREONONE-NONE)_C-(SINGLE-STEREONONE-NONE)1': 2}
>>> vocab.save(file.name)
>>> loaded_vocab = GroverBondVocabularyBuilder.load(file.name)
>>> mol = Chem.MolFromSmiles('CCC')
>>> loaded_vocab.encode(mol, mol.GetBondWithIdx(0))
2
Reference
---------
.. <NAME>, et al. "Self-supervised graph transformer on large-scale molecular data." Advances in Neural Information Processing Systems 33 (2020): 12559-12571.
"""
BOND_FEATURES = ['BondType', 'Stereo', 'BondDir']
def __init__(self, max_size: Optional[int] = None):
self.specials = ('<pad>', '<other>')
self.min_freq = 1
self.size = max_size
self.itos = list(self.specials)
self.stoi = self._make_reverse_mapping(self.itos)
self.pad_index = 0
self.other_index = 1
def build(self, dataset: Dataset, log_every_n: int = 1000) -> None:
"""Builds vocabulary
Parameters
----------
dataset: dc.data.Dataset
            A dataset object with SMILES strings in the X attribute.
log_every_n: int, default 1000
Logs vocabulary building progress every `log_every_n` steps.
"""
counter: Dict[str, int] = Counter()
logger.info('Starting to build bond vocabulary')
for i, (x, _, _, _) in enumerate(dataset.itersamples()):
if i % log_every_n == 0:
logger.info(
'Computing contextual property of bonds in molecule %i' % i)
if isinstance(x, str):
smiles = x
elif isinstance(x, np.ndarray):
smiles = x.squeeze().item()
mol = Chem.MolFromSmiles(smiles)
for bond in mol.GetBonds():
v = self.bond_to_vocab(mol, bond)
counter[v] += 1
logger.info('Completed enumeration of bond contextual properties.')
# sort first by frequency, then alphabetically
words_and_frequencies = sorted(counter.items(), key=lambda tup: tup[0])
words_and_frequencies.sort(key=lambda tup: tup[1], reverse=True)
for word, freq in words_and_frequencies:
if len(self.itos) == self.size:
break
self.itos.append(word)
if self.size is None:
self.size = len(self.itos)
self.stoi = self._make_reverse_mapping(self.itos)
logger.info('Completed building of bond vocabulary')
def save(self, fname: str) -> None:
"""Saves a vocabulary in json format
Parameter
---------
fname: str
Filename to save vocabulary
"""
vocab = {'stoi': self.stoi, 'itos': self.itos, 'size': self.size}
with open(fname, 'w') as f:
json.dump(vocab, f)
@classmethod
def load(cls, fname: str) -> 'GroverBondVocabularyBuilder':
"""Loads vocabulary from the specified json file
Parameters
----------
fname: str
JSON file containing vocabulary
Returns
-------
        vocab: GroverBondVocabularyBuilder
            A grover bond vocabulary builder which can be used for encoding
"""
with open(fname, 'r') as f:
data = json.load(f)
vocab = cls()
vocab.stoi, vocab.itos, vocab.size = data['stoi'], data['itos'], data[
'size']
return vocab
@staticmethod
def bond_to_vocab(mol: RDKitMol, bond: RDKitBond):
"""Convert bond to vocabulary.
The algorithm considers only one-hop neighbor atoms.
Parameters
----------
        mol: RDKitMol
            the molecule object
bond: RDKitBond
the target bond
Returns
-------
vocab: str
the generated bond vocabulary with its contexts.
Example
-------
>>> from rdkit import Chem
>>> mol = Chem.MolFromSmiles('[C@@H](C)C(=O)O')
>>> GroverBondVocabularyBuilder.bond_to_vocab(mol, mol.GetBondWithIdx(0))
'(SINGLE-STEREONONE-NONE)_C-(SINGLE-STEREONONE-NONE)1'
>>> GroverBondVocabularyBuilder.bond_to_vocab(mol, mol.GetBondWithIdx(2))
'(DOUBLE-STEREONONE-NONE)_C-(SINGLE-STEREONONE-NONE)2'
"""
bond_neighbors: Dict[str, int] = Counter()
two_neighbors = (bond.GetBeginAtom(), bond.GetEndAtom())
two_indices = [a.GetIdx() for a in two_neighbors]
for nei_atom in two_neighbors:
for a in nei_atom.GetNeighbors():
a_idx = a.GetIdx()
if a_idx in two_indices:
continue
tmp_bond = mol.GetBondBetweenAtoms(nei_atom.GetIdx(), a_idx)
bond_neighbors[str(nei_atom.GetSymbol()) + '-' +
GroverBondVocabularyBuilder.
_get_bond_feature_name(tmp_bond)] += 1
keys = list(bond_neighbors.keys())
keys.sort()
output = GroverBondVocabularyBuilder._get_bond_feature_name(bond)
for k in keys:
output = "%s_%s%d" % (output, k, bond_neighbors[k])
return output
@staticmethod
def _get_bond_feature_name(bond: RDKitBond):
"""Return the string format of bond features."""
ret = []
for bond_feature in GroverBondVocabularyBuilder.BOND_FEATURES:
            fea = getattr(bond, f"Get{bond_feature}")()
ret.append(str(fea))
return '(' + '-'.join(ret) + ')'
def _make_reverse_mapping(self, itos):
return {tok: i for i, tok in enumerate(itos)}
    def encode(self, mol: RDKitMol, bond: RDKitBond) -> int:
        """Encodes a bond in a molecule
        Parameters
---------
mol: RDKitMol
An RDKitMol object
bond: RDKitBond
A bond in the molecule
Returns
-------
        index: int
            The index of the bond's vocabulary word in the `stoi` mapping.
"""
return self.stoi.get(self.bond_to_vocab(mol, bond))
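# ----------------------------------------------------------------------------
# Usage sketch (not part of the library API). A quick check of the bond-context
# walkthrough in the class docstring: the first C-C bond of 'CC(=O)C' should
# map to '(SINGLE-STEREONONE-NONE)_C-(DOUBLE-STEREONONE-NONE)1_C-(SINGLE-STEREONONE-NONE)1'.
# Runs only when executed directly.
if __name__ == "__main__":
    demo_bond_mol = Chem.MolFromSmiles('CC(=O)C')
    print(
        GroverBondVocabularyBuilder.bond_to_vocab(
            demo_bond_mol, demo_bond_mol.GetBondWithIdx(0)))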
class GroverAtomVocabTokenizer(Featurizer):
"""Grover Atom Vocabulary Tokenizer
The Grover Atom vocab tokenizer is used for tokenizing an atom using a
vocabulary generated by GroverAtomVocabularyBuilder.
Example
-------
>>> import tempfile
>>> import deepchem as dc
>>> from deepchem.feat.vocabulary_builders.grover_vocab import GroverAtomVocabularyBuilder
>>> file = tempfile.NamedTemporaryFile()
>>> dataset = dc.data.NumpyDataset(X=[['CC(=O)C', 'CCC']])
>>> vocab = GroverAtomVocabularyBuilder()
>>> vocab.build(dataset)
>>> vocab.save(file.name) # build and save the vocabulary
>>> atom_tokenizer = GroverAtomVocabTokenizer(file.name)
    >>> from rdkit import Chem
    >>> mol = Chem.MolFromSmiles('CC(=O)C')
>>> atom_tokenizer.featurize([(mol, mol.GetAtomWithIdx(0))])[0]
2
Parameters
----------
fname: str
Filename of vocabulary generated by GroverAtomVocabularyBuilder
"""
def __init__(self, fname: str):
self.vocabulary = GroverAtomVocabularyBuilder.load(fname)
def _featurize(self, datapoint):
return self.vocabulary.encode(datapoint[0], datapoint[1])
class GroverBondVocabTokenizer(Featurizer):
"""Grover Bond Vocabulary Tokenizer
The Grover Bond vocab tokenizer is used for tokenizing a bond using a
vocabulary generated by GroverBondVocabularyBuilder.
Example
-------
>>> import tempfile
>>> import deepchem as dc
>>> from deepchem.feat.vocabulary_builders.grover_vocab import GroverBondVocabularyBuilder
>>> file = tempfile.NamedTemporaryFile()
>>> dataset = dc.data.NumpyDataset(X=[['CC(=O)C', 'CCC']])
>>> vocab = GroverBondVocabularyBuilder()
>>> vocab.build(dataset)
>>> vocab.save(file.name) # build and save the vocabulary
>>> bond_tokenizer = GroverBondVocabTokenizer(file.name)
    >>> from rdkit import Chem
    >>> mol = Chem.MolFromSmiles('CC(=O)C')
>>> bond_tokenizer.featurize([(mol, mol.GetBondWithIdx(0))])[0]
2
Parameters
----------
fname: str
        Filename of vocabulary generated by GroverBondVocabularyBuilder
"""
def __init__(self, fname: str):
self.vocabulary = GroverBondVocabularyBuilder.load(fname)
def _featurize(self, datapoint):
return self.vocabulary.encode(datapoint[0], datapoint[1])
<file_sep>"""The functions in these utilities check that noncovalent interactions happen"""
import numpy as np
from collections import Counter
from deepchem.utils.fragment_utils import get_partial_charge
from deepchem.utils.rdkit_utils import compute_ring_center
from deepchem.utils.rdkit_utils import compute_ring_normal
from deepchem.utils.geometry_utils import angle_between
from deepchem.utils.geometry_utils import is_angle_within_cutoff
def is_salt_bridge(atom_i, atom_j):
"""Check if two atoms have correct charges to form a salt bridge"""
if np.abs(2.0 -
np.abs(get_partial_charge(atom_i) -
get_partial_charge(atom_j))) < 0.01:
return True
return False
def compute_salt_bridges(first, second, pairwise_distances, cutoff=5.0):
"""Find salt bridge contacts between two molecules.
Parameters:
-----------
    first: rdkit.rdchem.Mol
        First of the two interacting molecules
    second: rdkit.rdchem.Mol
        Second of the two interacting molecules
pairwise_distances: np.ndarray
Array of pairwise interatomic distances between molecule atoms (Angstroms)
cutoff: float
Cutoff distance for contact consideration
Returns:
--------
salt_bridge_contacts: list of tuples
List of contacts. Tuple (i, j) indicates that atom i from
first molecule interacts with atom j from second.
"""
salt_bridge_contacts = []
contacts = np.nonzero(pairwise_distances < cutoff)
contacts = zip(contacts[0], contacts[1])
for contact in contacts:
first_atom = first.GetAtoms()[int(contact[0])]
second_atom = second.GetAtoms()[int(contact[1])]
if is_salt_bridge(first_atom, second_atom):
salt_bridge_contacts.append(contact)
return salt_bridge_contacts
def is_hydrogen_bond(frag1,
frag2,
contact,
hbond_distance_cutoff=4.0,
hbond_angle_cutoff=40.0):
"""
Determine if a pair of atoms (contact = frag1_atom_index,
frag2_atom_index) between two molecules represents a hydrogen
bond. Returns a boolean result.
Parameters
----------
frag1: tuple
Tuple of (coords, rdkit mol / MolecularFragment)
frag2: tuple
Tuple of (coords, rdkit mol / MolecularFragment)
contact: Tuple
Tuple of indices for (atom_i, atom_j) contact.
hbond_distance_cutoff: float, optional
Distance cutoff for hbond.
hbond_angle_cutoff: float, optional
Angle deviance cutoff for hbond
"""
frag1_xyz, frag2_xyz = frag1[0], frag2[0]
frag1_mol, frag2_mol = frag1[1], frag2[1]
frag1_atom_xyz = frag1_xyz[int(contact[0])]
frag2_atom_xyz = frag2_xyz[int(contact[1])]
frag1_atom = frag1_mol.GetAtoms()[int(contact[0])]
frag2_atom = frag2_mol.GetAtoms()[int(contact[1])]
# Nitrogen has atomic number 7, and oxygen 8.
if ((frag2_atom.GetAtomicNum() == 7 or frag2_atom.GetAtomicNum() == 8) and
(frag1_atom.GetAtomicNum() == 7 or frag1_atom.GetAtomicNum() == 8)):
hydrogens = []
for i, atom in enumerate(frag2_mol.GetAtoms()):
# If atom is a hydrogen
if atom.GetAtomicNum() == 1:
atom_xyz = frag2_xyz[i]
dist = np.linalg.norm(atom_xyz - frag2_atom_xyz)
# O-H distance is 0.96 A, N-H is 1.01 A. See http://www.science.uwaterloo.ca/~cchieh/cact/c120/bondel.html
if dist < 1.3:
hydrogens.append(atom_xyz)
for j, atom in enumerate(frag1_mol.GetAtoms()):
# If atom is a hydrogen
if atom.GetAtomicNum() == 1:
                atom_xyz = frag1_xyz[j]
dist = np.linalg.norm(atom_xyz - frag1_atom_xyz)
# O-H distance is 0.96 A, N-H is 1.01 A. See http://www.science.uwaterloo.ca/~cchieh/cact/c120/bondel.html
if dist < 1.3:
hydrogens.append(atom_xyz)
for hydrogen_xyz in hydrogens:
hydrogen_to_frag2 = frag2_atom_xyz - hydrogen_xyz
hydrogen_to_frag1 = frag1_atom_xyz - hydrogen_xyz
return is_angle_within_cutoff(hydrogen_to_frag2, hydrogen_to_frag1,
hbond_angle_cutoff)
return False
def compute_hbonds_in_range(frag1, frag2, pairwise_distances, hbond_dist_bin,
hbond_angle_cutoff):
"""
Find all pairs of (frag1_index_i, frag2_index_j) that hydrogen bond
given a distance bin and an angle cutoff.
Parameters
----------
    frag1: tuple
        Tuple of (coords, rdkit mol / MolecularFragment)
    frag2: tuple
        Tuple of (coords, rdkit mol / MolecularFragment)
    pairwise_distances: np.ndarray
        Matrix of shape `(N, M)` with pairwise distances between frag1/frag2.
    hbond_dist_bin: tuple
        Tuple of floats `(min_dist, max_dist)` in angstroms.
    hbond_angle_cutoff: float
        Maximum angular deviation (in degrees) allowed for an hbond
"""
contacts = np.nonzero((pairwise_distances > hbond_dist_bin[0]) &
(pairwise_distances < hbond_dist_bin[1]))
contacts = zip(contacts[0], contacts[1])
hydrogen_bond_contacts = []
for contact in contacts:
        if is_hydrogen_bond(frag1,
                            frag2,
                            contact,
                            hbond_angle_cutoff=hbond_angle_cutoff):
hydrogen_bond_contacts.append(contact)
return hydrogen_bond_contacts
def compute_hydrogen_bonds(frag1, frag2, pairwise_distances, hbond_dist_bins,
hbond_angle_cutoffs):
"""Computes hydrogen bonds between proteins and ligands.
Returns a list of sublists. Each sublist is a series of tuples
of (protein_index_i, ligand_index_j) that represent a hydrogen
bond. Each sublist represents a different type of hydrogen
bond.
Parameters
----------
    frag1: tuple
        Tuple of (coords, rdkit mol / MolecularFragment)
    frag2: tuple
        Tuple of (coords, rdkit mol / MolecularFragment)
pairwise_distances:
Matrix of shape `(N, M)` with pairwise distances between frag1/frag2.
hbond_dist_bins: list[tuple]
List of tuples of hbond distance ranges.
hbond_angle_cutoffs: list[float]
List of angles of deviances allowed for hbonds
Returns
-------
List
A list of hydrogen bond contacts.
"""
hbond_contacts = []
for i, hbond_dist_bin in enumerate(hbond_dist_bins):
hbond_angle_cutoff = hbond_angle_cutoffs[i]
hbond_contacts.append(
compute_hbonds_in_range(frag1, frag2, pairwise_distances,
hbond_dist_bin, hbond_angle_cutoff))
return (hbond_contacts)
def compute_cation_pi(mol1, mol2, charge_tolerance=0.01, **kwargs):
"""Finds aromatic rings in mo1 and cations in mol2 that interact with each other.
Parameters:
-----------
mol1: rdkit.rdchem.Mol
Molecule to look for interacting rings
mol2: rdkit.rdchem.Mol
Molecule to look for interacting cations
charge_tolerance: float
Atom is considered a cation if its formal charge is greater
than 1 - charge_tolerance
**kwargs:
Arguments that are passed to is_cation_pi function
Returns:
--------
mol1_pi: dict
Dictionary that maps atom indices (from mol1) to the number of cations
(in mol2) they interact with
mol2_cation: dict
Dictionary that maps atom indices (from mol2) to the number of aromatic
atoms (in mol1) they interact with
"""
mol1_pi = Counter()
mol2_cation = Counter()
conformer = mol2.GetConformer()
aromatic_atoms = set(atom.GetIdx() for atom in mol1.GetAromaticAtoms())
from rdkit import Chem
rings = [list(r) for r in Chem.GetSymmSSSR(mol1)]
for ring in rings:
# if ring from mol1 is aromatic
if set(ring).issubset(aromatic_atoms):
ring_center = compute_ring_center(mol1, ring)
ring_normal = compute_ring_normal(mol1, ring)
for atom in mol2.GetAtoms():
# ...and atom from mol2 is a cation
if atom.GetFormalCharge() > 1.0 - charge_tolerance:
cation_position = np.array(
conformer.GetAtomPosition(atom.GetIdx()))
# if angle and distance are correct
if is_cation_pi(cation_position, ring_center, ring_normal,
**kwargs):
# count atoms forming a contact
mol1_pi.update(ring)
                    mol2_cation.update([atom.GetIdx()])
return mol1_pi, mol2_cation
def is_cation_pi(cation_position,
ring_center,
ring_normal,
dist_cutoff=6.5,
angle_cutoff=30.0):
"""Check if a cation and an aromatic ring form contact.
Parameters:
-----------
ring_center: np.ndarray
Positions of ring center. Can be computed with the compute_ring_center
function.
ring_normal: np.ndarray
Normal of ring. Can be computed with the compute_ring_normal function.
dist_cutoff: float
Distance cutoff. Max allowed distance between ring center
and cation (in Angstroms).
angle_cutoff: float
Angle cutoff. Max allowed deviation from the ideal (0deg)
angle between ring normal and vector pointing from ring
center to cation (in degrees).
"""
cation_to_ring_vec = cation_position - ring_center
dist = np.linalg.norm(cation_to_ring_vec)
angle = angle_between(cation_to_ring_vec, ring_normal) * 180. / np.pi
if ((angle < angle_cutoff or angle > 180.0 - angle_cutoff) and
(dist < dist_cutoff)):
return True
return False
def compute_pi_stack(mol1,
mol2,
pairwise_distances=None,
dist_cutoff=4.4,
angle_cutoff=30.):
"""Find aromatic rings in both molecules that form pi-pi contacts.
For each atom in the contact, count number of atoms in the other molecule
that form this contact.
Pseudocode:
for each aromatic ring in mol1:
for each aromatic ring in mol2:
compute distance between centers
compute angle between normals
if it counts as parallel pi-pi:
count interacting atoms
if it counts as pi-T:
count interacting atoms
Parameters:
-----------
mol1: rdkit.rdchem.Mol
First molecule.
mol2: rdkit.rdchem.Mol
Second molecule.
pairwise_distances: np.ndarray (optional)
Array of pairwise interatomic distances (Angstroms)
dist_cutoff: float
Distance cutoff. Max allowed distance between the ring center (Angstroms).
angle_cutoff: float
Angle cutoff. Max allowed deviation from the ideal angle between rings.
Returns:
--------
mol1_pi_t, mol1_pi_parallel, mol2_pi_t, mol2_pi_parallel: dict
Dictionaries mapping atom indices to number of atoms they interact with.
Separate dictionary is created for each type of pi stacking (parallel and
T-shaped) and each molecule (mol1 and mol2).
"""
mol1_pi_parallel = Counter()
mol1_pi_t = Counter()
mol2_pi_parallel = Counter()
mol2_pi_t = Counter()
mol1_aromatic_rings = []
mol2_aromatic_rings = []
from rdkit import Chem
for mol, ring_list in ((mol1, mol1_aromatic_rings), (mol2,
mol2_aromatic_rings)):
aromatic_atoms = {atom.GetIdx() for atom in mol.GetAromaticAtoms()}
for ring in Chem.GetSymmSSSR(mol):
# if ring is aromatic
if set(ring).issubset(aromatic_atoms):
# save its indices, center, and normal
ring_center = compute_ring_center(mol, ring)
ring_normal = compute_ring_normal(mol, ring)
ring_list.append((ring, ring_center, ring_normal))
# remember mol1-mol2 pairs we already counted
counted_pairs_parallel = set()
counted_pairs_t = set()
for prot_ring, prot_ring_center, prot_ring_normal in mol1_aromatic_rings:
for lig_ring, lig_ring_center, lig_ring_normal in mol2_aromatic_rings:
if is_pi_parallel(prot_ring_center,
prot_ring_normal,
lig_ring_center,
lig_ring_normal,
angle_cutoff=angle_cutoff,
dist_cutoff=dist_cutoff):
prot_to_update = set()
lig_to_update = set()
for prot_atom_idx in prot_ring:
for lig_atom_idx in lig_ring:
if (prot_atom_idx,
lig_atom_idx) not in counted_pairs_parallel:
# if this pair is new, count atoms forming a contact
prot_to_update.add(prot_atom_idx)
lig_to_update.add(lig_atom_idx)
counted_pairs_parallel.add(
(prot_atom_idx, lig_atom_idx))
mol1_pi_parallel.update(prot_to_update)
mol2_pi_parallel.update(lig_to_update)
if is_pi_t(prot_ring_center,
prot_ring_normal,
lig_ring_center,
lig_ring_normal,
angle_cutoff=angle_cutoff,
dist_cutoff=dist_cutoff):
prot_to_update = set()
lig_to_update = set()
for prot_atom_idx in prot_ring:
for lig_atom_idx in lig_ring:
if (prot_atom_idx, lig_atom_idx) not in counted_pairs_t:
# if this pair is new, count atoms forming a contact
prot_to_update.add(prot_atom_idx)
lig_to_update.add(lig_atom_idx)
counted_pairs_t.add((prot_atom_idx, lig_atom_idx))
mol1_pi_t.update(prot_to_update)
mol2_pi_t.update(lig_to_update)
return (mol1_pi_t, mol1_pi_parallel, mol2_pi_t, mol2_pi_parallel)
def is_pi_t(ring1_center,
ring1_normal,
ring2_center,
ring2_normal,
dist_cutoff=5.5,
angle_cutoff=30.0):
"""Check if two aromatic rings form a T-shaped pi-pi contact.
Parameters:
-----------
ring1_center, ring2_center: np.ndarray
Positions of centers of the two rings. Can be computed with the
compute_ring_center function.
ring1_normal, ring2_normal: np.ndarray
Normals of the two rings. Can be computed with the compute_ring_normal
function.
dist_cutoff: float
Distance cutoff. Max allowed distance between the ring center (Angstroms).
angle_cutoff: float
Angle cutoff. Max allowed deviation from the ideal (90deg) angle between
the rings (in degrees).
"""
dist = np.linalg.norm(ring1_center - ring2_center)
angle = angle_between(ring1_normal, ring2_normal) * 180 / np.pi
if ((90.0 - angle_cutoff < angle < 90.0 + angle_cutoff) and
dist < dist_cutoff):
return True
return False
def is_pi_parallel(ring1_center: np.ndarray,
ring1_normal: np.ndarray,
ring2_center: np.ndarray,
ring2_normal: np.ndarray,
dist_cutoff: float = 8.0,
angle_cutoff: float = 30.0) -> bool:
"""Check if two aromatic rings form a parallel pi-pi contact.
Parameters
----------
ring1_center, ring2_center: np.ndarray
Positions of centers of the two rings. Can be computed with the
compute_ring_center function.
ring1_normal, ring2_normal: np.ndarray
Normals of the two rings. Can be computed with the compute_ring_normal
function.
dist_cutoff: float
Distance cutoff. Max allowed distance between the ring center (Angstroms).
angle_cutoff: float
Angle cutoff. Max allowed deviation from the ideal (0deg) angle between
the rings (in degrees).
Returns
-------
bool
True if two aromatic rings form a parallel pi-pi.
"""
dist = np.linalg.norm(ring1_center - ring2_center)
angle = angle_between(ring1_normal, ring2_normal) * 180 / np.pi
if ((angle < angle_cutoff or angle > 180.0 - angle_cutoff) and
dist < dist_cutoff):
return True
return False
def compute_binding_pocket_cation_pi(mol1, mol2, **kwargs):
"""Finds cation-pi interactions between mol1 and mol2.
Parameters:
-----------
mol1: rdkit.rdchem.Mol
Interacting molecules
mol2: rdkit.rdchem.Mol
Interacting molecules
**kwargs:
Arguments that are passed to compute_cation_pi function
Returns:
--------
mol1_cation_pi, mol2_cation_pi: dict
Dictionaries that maps atom indices to the number of cations/aromatic
atoms they interact with
"""
# find interacting rings from mol1 and cations from mol2
mol1_pi, mol2_cation = compute_cation_pi(mol1, mol2, **kwargs)
# find interacting cations from mol1 and rings from mol2
mol2_pi, mol1_cation = compute_cation_pi(mol2, mol1, **kwargs)
# merge counters
mol1_cation_pi = Counter()
mol1_cation_pi.update(mol1_pi)
mol1_cation_pi.update(mol1_cation)
mol2_cation_pi = Counter()
mol2_cation_pi.update(mol2_pi)
mol2_cation_pi.update(mol2_cation)
return mol1_cation_pi, mol2_cation_pi
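# ----------------------------------------------------------------------------
# Usage sketch (not part of the library API). A geometry-only demonstration of
# the pi-stacking predicates above, using synthetic ring centers and normals so
# that no RDKit molecule is required. Runs only when executed directly.
if __name__ == "__main__":
    ring1_center = np.array([0.0, 0.0, 0.0])
    ring1_normal = np.array([0.0, 0.0, 1.0])
    # A second ring 3.5 A above the first with a parallel normal: parallel stack.
    ring2_center = np.array([0.0, 0.0, 3.5])
    ring2_normal = np.array([0.0, 0.0, 1.0])
    print(is_pi_parallel(ring1_center, ring1_normal, ring2_center,
                         ring2_normal))  # expected: True
    # Rotating the second normal by 90 degrees gives a T-shaped geometry instead.
    ring2_normal = np.array([1.0, 0.0, 0.0])
    print(is_pi_t(ring1_center, ring1_normal, ring2_center,
                  ring2_normal))  # expected: True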
<file_sep>"""
Contains an abstract base class that supports different ML models.
"""
import os
import shutil
import tempfile
import logging
from typing import List, Optional, Sequence
import numpy as np
from deepchem.data import Dataset
from deepchem.metrics import Metric
from deepchem.trans import Transformer, undo_transforms
from deepchem.utils.evaluate import Evaluator
from deepchem.utils.typing import OneOrMany
logger = logging.getLogger(__name__)
class Model(object):
"""
Abstract base class for DeepChem models.
"""
def __init__(self,
model=None,
model_dir: Optional[str] = None,
**kwargs) -> None:
"""Abstract class for all models.
This is intended only for convenience of subclass implementations
and should not be invoked directly.
Parameters
----------
model: object
Wrapper around ScikitLearn/Keras/Tensorflow model object.
model_dir: str, optional (default None)
Path to directory where model will be stored. If not specified,
model will be stored in a temporary directory.
"""
if self.__class__.__name__ == "Model":
raise ValueError(
"This constructor is for an abstract class and should never be called directly."
"Can only call from subclass constructors.")
self.model_dir_is_temp = False
if model_dir is not None:
if not os.path.exists(model_dir):
os.makedirs(model_dir)
else:
model_dir = tempfile.mkdtemp()
self.model_dir_is_temp = True
self.model_dir = model_dir
self.model = model
self.model_class = model.__class__
def __del__(self):
if 'model_dir_is_temp' in dir(self) and self.model_dir_is_temp:
shutil.rmtree(self.model_dir)
def fit_on_batch(self, X: Sequence, y: Sequence, w: Sequence):
"""Perform a single step of training.
Parameters
----------
X: np.ndarray
the inputs for the batch
y: np.ndarray
the labels for the batch
w: np.ndarray
the weights for the batch
"""
raise NotImplementedError(
"Each model is responsible for its own fit_on_batch method.")
def predict_on_batch(self, X: np.typing.ArrayLike):
"""
Makes predictions on given batch of new data.
Parameters
----------
X: np.ndarray
Features
"""
raise NotImplementedError(
"Each model is responsible for its own predict_on_batch method.")
def reload(self) -> None:
"""
Reload trained model from disk.
"""
raise NotImplementedError(
"Each model is responsible for its own reload method.")
@staticmethod
def get_model_filename(model_dir: str) -> str:
"""
Given model directory, obtain filename for the model itself.
"""
return os.path.join(model_dir, "model.joblib")
@staticmethod
def get_params_filename(model_dir: str) -> str:
"""
        Given model directory, obtain filename for the model parameters.
"""
return os.path.join(model_dir, "model_params.joblib")
def save(self) -> None:
"""Dispatcher function for saving.
Each subclass is responsible for overriding this method.
"""
raise NotImplementedError
def fit(self, dataset: Dataset):
"""
Fits a model on data in a Dataset object.
Parameters
----------
dataset: Dataset
the Dataset to train on
"""
raise NotImplementedError(
"Each model is responsible for its own fit method.")
def predict(self,
dataset: Dataset,
transformers: List[Transformer] = []) -> OneOrMany[np.ndarray]:
"""
Uses self to make predictions on provided Dataset object.
Parameters
----------
dataset: Dataset
Dataset to make prediction on
transformers: List[Transformer]
Transformers that the input data has been transformed by. The output
is passed through these transformers to undo the transformations.
Returns
-------
np.ndarray
A numpy array of predictions the model produces.
"""
y_preds = []
for (X_batch, _, _,
ids_batch) in dataset.iterbatches(deterministic=True):
n_samples = len(X_batch)
y_pred_batch = self.predict_on_batch(X_batch)
# Discard any padded predictions
y_pred_batch = y_pred_batch[:n_samples]
y_pred_batch = undo_transforms(y_pred_batch, transformers)
y_preds.append(y_pred_batch)
y_pred = np.concatenate(y_preds)
return y_pred
def evaluate(self,
dataset: Dataset,
metrics: List[Metric],
transformers: List[Transformer] = [],
per_task_metrics: bool = False,
use_sample_weights: bool = False,
n_classes: int = 2):
"""
Evaluates the performance of this model on specified dataset.
This function uses `Evaluator` under the hood to perform model
evaluation. As a result, it inherits the same limitations of
`Evaluator`. Namely, that only regression and classification
models can be evaluated in this fashion. For generator models, you
will need to overwrite this method to perform a custom evaluation.
Keyword arguments specified here will be passed to
`Evaluator.compute_model_performance`.
Parameters
----------
dataset: Dataset
Dataset object.
metrics: Metric / List[Metric] / function
The set of metrics provided. This class attempts to do some
intelligent handling of input. If a single `dc.metrics.Metric`
object is provided or a list is provided, it will evaluate
`self.model` on these metrics. If a function is provided, it is
assumed to be a metric function that this method will attempt to
wrap in a `dc.metrics.Metric` object. A metric function must
accept two arguments, `y_true, y_pred` both of which are
`np.ndarray` objects and return a floating point score. The
metric function may also accept a keyword argument
`sample_weight` to account for per-sample weights.
transformers: List[Transformer]
List of `dc.trans.Transformer` objects. These transformations
must have been applied to `dataset` previously. The dataset will
be untransformed for metric evaluation.
per_task_metrics: bool, optional (default False)
If true, return computed metric for each task on multitask dataset.
use_sample_weights: bool, optional (default False)
If set, use per-sample weights `w`.
        n_classes: int, optional (default 2)
If specified, will use `n_classes` as the number of unique classes
in `self.dataset`. Note that this argument will be ignored for
regression metrics.
Returns
-------
multitask_scores: dict
Dictionary mapping names of metrics to metric scores.
all_task_scores: dict, optional
If `per_task_metrics == True` is passed as a keyword argument,
then returns a second dictionary of scores for each task
separately.
"""
evaluator = Evaluator(self, dataset, transformers)
return evaluator.compute_model_performance(
metrics,
per_task_metrics=per_task_metrics,
use_sample_weights=use_sample_weights,
n_classes=n_classes)
def get_task_type(self) -> str:
"""
Currently models can only be classifiers or regressors.
"""
raise NotImplementedError
def get_num_tasks(self) -> int:
"""
Get number of tasks.
"""
raise NotImplementedError
<file_sep># Dataset Description
This example is based on the DUD-E group, which contains 102 datasets designed for the evaluation of methods that predict interactions between proteins and small molecules (Mysinger et al., 2012).
<NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>
arXiv preprint arXiv:1502.02072<file_sep>"""
Test for electron_sampler.py
"""
import numpy as np
from deepchem.utils.electron_sampler import ElectronSampler
def f(x):
# dummy function which can be passed as the parameter f to simultaneous_move and single_move
return 2 * np.log(np.random.uniform(low=0, high=1.0, size=np.shape(x)[0]))
def test_mean():
distribution = ElectronSampler(np.array([[1, 1, 3], [3, 2, 3]]), f)
x1 = np.array([[[[1, 2, 3]]], [[[4, 5, 6]]]])
mean = distribution.harmonic_mean(x1)
assert (mean == np.array([[[[1.3333333333333333]]],
[[[4.988597077109626]]]])).all()
def test_log_prob():
x1 = np.array([[[[1, 2, 3]]], [[[4, 5, 6]]]])
x2 = np.array([[[[10, 6, 4]]], [[[2, 1, 7]]]])
sigma = np.full(np.shape(x1), 1)
distribution = ElectronSampler(np.array([[1, 1, 3], [3, 2, 3]]), f)
move_probability = distribution.log_prob_gaussian(x1, x2, sigma)
assert (move_probability == np.array([-49, -10.5])).all()
def test_steps():
# test for gauss_initialize_position
distribution = ElectronSampler(np.array([[1, 1, 3], [3, 2, 3]]),
f,
batch_no=2,
steps=1000)
distribution.gauss_initialize_position(np.array([[1], [2]]))
assert ((distribution.x -
np.array([[[[1, 1, 3]], [[3, 2, 3]], [[3, 2, 3]]],
[[[1, 1, 3]], [[3, 2, 3]], [[3, 2, 3]]]])) != 0).any()
# testing symmetric simultaneous_move
x1 = distribution.x
distribution.move()
assert ((distribution.x - x1) != 0).all()
# testing asymmetric simultaneous_move
distribution = ElectronSampler(np.array([[1, 1, 3], [3, 2, 3]]),
f,
batch_no=2,
steps=1000,
symmetric=False)
distribution.gauss_initialize_position(np.array([[1], [2]]))
x1 = distribution.x
distribution.move(asymmetric_func=distribution.harmonic_mean)
assert ((distribution.x - x1) != 0).all()
assert np.shape(distribution.sampled_electrons) == (2000, 3, 1, 3)
# testing symmetric single_move
distribution = ElectronSampler(np.array([[1, 1, 3], [3, 2, 3]]),
f,
batch_no=2,
steps=1000,
simultaneous=False)
distribution.gauss_initialize_position(np.array([[1], [2]]))
x1 = distribution.x
distribution.move(index=1)
assert ((distribution.x[:, 1, :, :] - x1[:, 1, :, :]) != 0).all()
assert ((distribution.x[:, 2, :, :] - x1[:, 2, :, :]) == 0).all()
assert np.shape(distribution.sampled_electrons) == (2000, 3, 1, 3)
# testing asymmetric single_move
distribution = ElectronSampler(np.array([[1, 1, 3], [3, 2, 3]]),
f,
batch_no=2,
steps=1000,
simultaneous=False,
symmetric=False)
distribution.gauss_initialize_position(np.array([[1], [2]]))
x1 = distribution.x
distribution.move(asymmetric_func=distribution.harmonic_mean, index=1)
assert ((distribution.x[:, 1, :, :] - x1[:, 1, :, :]) != 0).all()
assert ((distribution.x[:, 2, :, :] - x1[:, 2, :, :]) == 0).all()
assert np.shape(distribution.sampled_electrons) == (2000, 3, 1, 3)
<file_sep># Data Loading Examples
The examples in this directory highlight a number of ways to
load datasets into DeepChem for downstream analysis:
- `pandas_csv.py` shows how to directly load a dataset from a CSV file without using a `DataLoader`.
- `sdf_load.py` shows how to load a dataset from an SDF file using `SDFLoader`.
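
As a rough sketch of the first approach (reading a CSV with pandas and building
an in-memory dataset directly), something along these lines works; the file and
column names below are placeholders, not necessarily those used in
`pandas_csv.py`:

```python
import pandas as pd
import deepchem as dc

df = pd.read_csv("my_data.csv")  # placeholder filename
featurizer = dc.feat.CircularFingerprint(size=1024)
X = featurizer.featurize(df["smiles"].tolist())  # assumes a "smiles" column
dataset = dc.data.NumpyDataset(X=X, y=df["label"].values)
```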
<file_sep>import deepchem as dc
from deepchem.molnet import load_delaney
from deepchem.trans.transformers import FeaturizationTransformer
def test_featurization_transformer():
fp_size = 2048
tasks, all_dataset, transformers = load_delaney('Raw')
train = all_dataset[0]
transformer = FeaturizationTransformer(
dataset=train, featurizer=dc.feat.CircularFingerprint(size=fp_size))
new_train = transformer.transform(train)
assert new_train.y.shape == train.y.shape
assert new_train.X.shape[-1] == fp_size
<file_sep>Docking
=======
Thanks to advances in biophysics, we are often able to find the
structure of proteins from experimental techniques like Cryo-EM or
X-ray crystallography. These structures can be powerful aids in
designing small molecules. The technique of molecular docking performs
geometric calculations to find a "binding pose" with the small
molecule interacting with the protein in question in a suitable
binding pocket (that is, a region on the protein which has a groove in
which the small molecule can rest). For more information about
docking, check out the Autodock Vina paper:
Trott, Oleg, and <NAME>. "AutoDock Vina: improving the speed and accuracy of docking with a new scoring function, efficient optimization, and multithreading." Journal of computational chemistry 31.2 (2010): 455-461.
Binding Pocket Discovery
------------------------
DeepChem has some utilities to help find binding pockets on proteins
automatically. For now, these utilities are simple, but we will
improve these in future versions of DeepChem.
.. autoclass:: deepchem.dock.binding_pocket.BindingPocketFinder
:members:
.. autoclass:: deepchem.dock.binding_pocket.ConvexHullPocketFinder
:members:
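
For example, the following is a minimal sketch of pocket finding with the
convex hull finder; the PDB filename is a placeholder for a protein structure
file on disk:

.. code-block:: python

    import deepchem as dc

    finder = dc.dock.ConvexHullPocketFinder()
    # find_pockets returns a list of CoordinateBox objects describing
    # candidate binding pockets on the protein surface.
    pockets = finder.find_pockets("protein.pdb")  # placeholder filename
    print(len(pockets))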
Pose Generation
---------------
Pose generation is the task of finding a "pose", that is a geometric
configuration of a small molecule interacting with a protein. Pose
generation is a complex process, so for now DeepChem relies on
external software to perform pose generation. This software is invoked
and installed under the hood.
.. autoclass:: deepchem.dock.pose_generation.PoseGenerator
:members:
.. autoclass:: deepchem.dock.pose_generation.VinaPoseGenerator
:members:
.. autoclass:: deepchem.dock.pose_generation.GninaPoseGenerator
:members:
Docking
-------
The :code:`dc.dock.docking` module provides a generic docking
implementation that delegates to pluggable pose generation and pose
scoring utilities to perform docking.
.. autoclass:: deepchem.dock.docking.Docker
:members:
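
The snippet below is a rough sketch of how these pieces fit together, not a
tested recipe; the protein and ligand filenames are placeholders, and docking
can take several minutes even at low exhaustiveness:

.. code-block:: python

    import deepchem as dc

    pose_generator = dc.dock.VinaPoseGenerator()
    docker = dc.dock.Docker(pose_generator=pose_generator)
    # dock() returns a generator of (posed_complex, score) pairs when
    # use_pose_generator_scores is set.
    poses = docker.dock(("protein.pdb", "ligand.sdf"),  # placeholder filenames
                        exhaustiveness=1,
                        num_modes=1,
                        use_pose_generator_scores=True)
    for posed_complex, score in poses:
        print(score)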
Pose Scoring
------------
This module contains some utilities for computing docking scoring
functions directly in Python. For now, support for custom pose scoring
is limited.
.. autofunction:: deepchem.dock.pose_scoring.pairwise_distances
.. autofunction:: deepchem.dock.pose_scoring.cutoff_filter
.. autofunction:: deepchem.dock.pose_scoring.vina_nonlinearity
.. autofunction:: deepchem.dock.pose_scoring.vina_repulsion
.. autofunction:: deepchem.dock.pose_scoring.vina_hydrophobic
.. autofunction:: deepchem.dock.pose_scoring.vina_hbond
.. autofunction:: deepchem.dock.pose_scoring.vina_gaussian_first
.. autofunction:: deepchem.dock.pose_scoring.vina_gaussian_second
.. autofunction:: deepchem.dock.pose_scoring.vina_energy_term
<file_sep># flake8: noqa
from deepchem.feat.vocabulary_builders.grover_vocab import (
GroverAtomVocabularyBuilder, GroverBondVocabularyBuilder,
GroverAtomVocabTokenizer, GroverBondVocabTokenizer)
from deepchem.feat.vocabulary_builders.hf_vocab import HuggingFaceVocabularyBuilder
<file_sep># Dataset overview
The MUV group data contains 17 challenging datasets specifically designed to avoid common pitfalls in virtual screening (Rohrer & Baumann, 2009)
Ref: <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>
arXiv preprint arXiv:1502.02072<file_sep>"""
Script that trains sklearn models on Tox21 dataset.
"""
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
import numpy as np
import deepchem as dc
from deepchem.molnet import load_tox21
from sklearn.ensemble import RandomForestClassifier
# Only for debug!
np.random.seed(123)
# Load Tox21 dataset
tox21_tasks, tox21_datasets, transformers = load_tox21()
(train_dataset, valid_dataset, test_dataset) = tox21_datasets
# Fit models
metric = dc.metrics.Metric(dc.metrics.roc_auc_score, np.mean)
def model_builder(model_dir):
sklearn_model = RandomForestClassifier(
class_weight="balanced", n_estimators=500)
return dc.models.SklearnModel(sklearn_model, model_dir)
model = dc.models.SingletaskToMultitask(tox21_tasks, model_builder)
# Fit trained model
print("About to fit model")
model.fit(train_dataset)
model.save()
print("About to evaluate model")
train_scores = model.evaluate(train_dataset, [metric], transformers)
valid_scores = model.evaluate(valid_dataset, [metric], transformers)
print("Train scores")
print(train_scores)
print("Validation scores")
print(valid_scores)
<file_sep>"""
Testing reload.
"""
__author__ = "<NAME>"
__copyright__ = "Copyright 2016, Stanford University"
__license__ = "MIT"
import os
import math
import logging
import unittest
import deepchem as dc
logger = logging.getLogger(__name__)
class TestReload(unittest.TestCase):
"""
Test reload for datasets.
"""
def _run_muv_experiment(self, dataset_file, reload=False):
"""Loads or reloads a small version of MUV dataset."""
# Load MUV dataset
logger.info("About to featurize compounds")
featurizer = dc.feat.CircularFingerprint(size=1024)
raw_dataset = dc.utils.data_utils.load_from_disk(dataset_file)
MUV_tasks = [
'MUV-692', 'MUV-689', 'MUV-846', 'MUV-859', 'MUV-644', 'MUV-548',
'MUV-852', 'MUV-600', 'MUV-810', 'MUV-712', 'MUV-737', 'MUV-858',
'MUV-713', 'MUV-733', 'MUV-652', 'MUV-466', 'MUV-832'
]
loader = dc.data.CSVLoader(tasks=MUV_tasks,
feature_field="smiles",
featurizer=featurizer)
dataset = loader.create_dataset(dataset_file)
assert len(dataset) == len(raw_dataset)
logger.info("About to split compounds into train/valid/test")
splitter = dc.splits.ScaffoldSplitter()
frac_train, frac_valid, frac_test = .8, .1, .1
train_dataset, valid_dataset, test_dataset = \
splitter.train_valid_test_split(
dataset, log_every_n=1000, frac_train=frac_train,
frac_test=frac_test, frac_valid=frac_valid)
# Do an approximate comparison since splits are sometimes slightly off from
# the exact fraction.
assert math.isclose(len(train_dataset),
frac_train * len(dataset),
rel_tol=1e-3)
assert math.isclose(len(valid_dataset),
frac_valid * len(dataset),
rel_tol=1e-3)
assert math.isclose(len(test_dataset),
frac_test * len(dataset),
rel_tol=1e-3)
# TODO(rbharath): Transformers don't play nice with reload! Namely,
# reloading will cause the transform to be reapplied. This is undesirable in
# almost all cases. Need to understand a method to fix this.
transformers = [dc.trans.BalancingTransformer(dataset=train_dataset)]
logger.info("Transforming datasets")
for dataset in [train_dataset, valid_dataset, test_dataset]:
for transformer in transformers:
dataset = transformer.transform(dataset)
return (len(train_dataset), len(valid_dataset), len(test_dataset))
def test_reload_after_gen(self):
"""Check num samples for loaded and reloaded datasets is equal."""
reload = False
current_dir = os.path.dirname(os.path.abspath(__file__))
dataset_file = os.path.join(current_dir,
"../../../datasets/mini_muv.csv.gz")
logger.info("Running experiment for first time without reload.")
(len_train, len_valid,
len_test) = self._run_muv_experiment(dataset_file, reload)
logger.info("Running experiment for second time with reload.")
reload = True
(len_reload_train, len_reload_valid,
len_reload_test) = (self._run_muv_experiment(dataset_file, reload))
assert len_train == len_reload_train
assert len_valid == len_reload_valid
        assert len_test == len_reload_test
def test_reload_twice(self):
"""Check ability to repeatedly run experiments with reload set True."""
reload = True
current_dir = os.path.dirname(os.path.abspath(__file__))
dataset_file = os.path.join(current_dir,
"../../../datasets/mini_muv.csv.gz")
logger.info("Running experiment for first time with reload.")
(len_train, len_valid,
len_test) = self._run_muv_experiment(dataset_file, reload)
logger.info("Running experiment for second time with reload.")
(len_reload_train, len_reload_valid,
len_reload_test) = (self._run_muv_experiment(dataset_file, reload))
assert len_train == len_reload_train
assert len_valid == len_reload_valid
        assert len_test == len_reload_test
<file_sep># Toxcast Examples
ToxCast is an extended data collection from the same
initiative as Tox21, providing toxicology data for a large
library of compounds based on in vitro high-throughput
screening. The processed collection includes qualitative
results of over 600 experiments on 8k compounds.
The source data file contains a CSV table, in which the columns
below are used:
- "smiles": SMILES representation of the molecular structure
- "ACEA_T47D_80hr_Negative" ~ "Tanguay_ZF_120hpf_YSE_up": Bioassays results. Please refer to the section "high-throughput assay information" at https://www.epa.gov/chemical-research/toxicity-forecaster-toxcasttm-data for details.
The source paper is
<NAME>., et al. "ToxCast chemical landscape: paving the road to 21st century toxicology." Chemical research in toxicology 29.8 (2016): 1225-1251.
In this example, we train a Random Forest model on the Toxcast dataset.
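
A rough sketch of that workflow, following the same pattern as the Tox21
scikit-learn example in this repository (the hyperparameters below are
illustrative, not necessarily those used in the example script):

```python
import numpy as np
import deepchem as dc
from sklearn.ensemble import RandomForestClassifier

tasks, (train, valid, test), transformers = dc.molnet.load_toxcast()
metric = dc.metrics.Metric(dc.metrics.roc_auc_score, np.mean)

def model_builder(model_dir):
    sklearn_model = RandomForestClassifier(class_weight="balanced",
                                           n_estimators=100)
    return dc.models.SklearnModel(sklearn_model, model_dir)

model = dc.models.SingletaskToMultitask(tasks, model_builder)
model.fit(train)
print(model.evaluate(valid, [metric], transformers))
```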
<file_sep>"""
ChEMBL dataset loader.
"""
import os
import deepchem as dc
from deepchem.molnet.load_function.molnet_loader import TransformerGenerator, _MolnetLoader
from deepchem.data import Dataset
from deepchem.molnet.load_function.chembl_tasks import chembl_tasks
from typing import List, Optional, Tuple, Union
CHEMBL_URL = "https://deepchemdata.s3-us-west-1.amazonaws.com/datasets/chembl_%s.csv.gz"
class _ChemblLoader(_MolnetLoader):
def __init__(self, *args, set: str, **kwargs):
super(_ChemblLoader, self).__init__(*args, **kwargs)
self.set = set
def create_dataset(self) -> Dataset:
dataset_file = os.path.join(self.data_dir,
"chembl_%s.csv.gz" % self.set)
if not os.path.exists(dataset_file):
dc.utils.data_utils.download_url(url=CHEMBL_URL % self.set,
dest_dir=self.data_dir)
loader = dc.data.CSVLoader(tasks=self.tasks,
feature_field="smiles",
featurizer=self.featurizer)
return loader.create_dataset(dataset_file, shard_size=8192)
def load_chembl(
featurizer: Union[dc.feat.Featurizer, str] = 'ECFP',
splitter: Union[dc.splits.Splitter, str, None] = 'scaffold',
transformers: List[Union[TransformerGenerator, str]] = ['normalization'],
set: str = "5thresh",
reload: bool = True,
data_dir: Optional[str] = None,
save_dir: Optional[str] = None,
**kwargs
) -> Tuple[List[str], Tuple[Dataset, ...], List[dc.trans.Transformer]]:
"""Load the ChEMBL dataset.
This dataset is based on release 22.1 of the data from https://www.ebi.ac.uk/chembl/.
Two subsets of the data are available, depending on the "set" argument. "sparse"
is a large dataset with 244,245 compounds. As the name suggests, the data is
extremely sparse, with most compounds having activity data for only one target.
"5thresh" is a much smaller set (23,871 compounds) that includes only compounds
with activity data for at least five targets.
Parameters
----------
featurizer: Featurizer or str
the featurizer to use for processing the data. Alternatively you can pass
one of the names from dc.molnet.featurizers as a shortcut.
splitter: Splitter or str
the splitter to use for splitting the data into training, validation, and
test sets. Alternatively you can pass one of the names from
dc.molnet.splitters as a shortcut. If this is None, all the data
will be included in a single dataset.
transformers: list of TransformerGenerators or strings
the Transformers to apply to the data. Each one is specified by a
TransformerGenerator or, as a shortcut, one of the names from
dc.molnet.transformers.
set: str
the subset to load, either "sparse" or "5thresh"
reload: bool
if True, the first call for a particular featurizer and splitter will cache
the datasets to disk, and subsequent calls will reload the cached datasets.
data_dir: str
a directory to save the raw data in
save_dir: str
a directory to save the dataset in
"""
if set not in ("5thresh", "sparse"):
raise ValueError("set must be either '5thresh' or 'sparse'")
loader = _ChemblLoader(featurizer,
splitter,
transformers,
chembl_tasks,
data_dir,
save_dir,
set=set,
**kwargs)
return loader.load_dataset('chembl-%s' % set, reload)
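# ----------------------------------------------------------------------------
# Usage sketch (not part of the library API). Loads the smaller "5thresh"
# subset with ECFP features and a scaffold split; the first call downloads and
# featurizes the data, which can take a while. Runs only when executed directly.
if __name__ == "__main__":
    tasks, (train, valid, test), transformers = load_chembl(featurizer="ECFP",
                                                            splitter="scaffold",
                                                            set="5thresh")
    print(len(tasks), len(train), len(valid), len(test))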
<file_sep>"""
Tests for Gaussian Process Hyperparameter Optimization.
These tests fail every so often. I think it's when the Gaussian
process optimizer doesn't find an optimal point. This is still a
valuable test suite so leaving it in despite the flakiness.
"""
import numpy as np
import sklearn
import sklearn.ensemble
import deepchem as dc
import unittest
import pytest
import tempfile
from flaky import flaky
class TestGaussianHyperparamOpt(unittest.TestCase):
"""
Test Gaussian Hyperparameter Optimization.
"""
def setUp(self):
"""Set up common resources."""
def rf_model_builder(**model_params):
rf_params = {
k: v for (k, v) in model_params.items() if k != 'model_dir'
}
model_dir = model_params['model_dir']
sklearn_model = sklearn.ensemble.RandomForestRegressor(**rf_params)
return dc.models.SklearnModel(sklearn_model, model_dir)
self.rf_model_builder = rf_model_builder
self.train_dataset = dc.data.NumpyDataset(X=np.random.rand(50, 5),
y=np.random.rand(50, 1))
self.valid_dataset = dc.data.NumpyDataset(X=np.random.rand(20, 5),
y=np.random.rand(20, 1))
def test_rf_example(self):
"""Test a simple example of optimizing a RF model with a gaussian process."""
optimizer = dc.hyper.GaussianProcessHyperparamOpt(self.rf_model_builder,
max_iter=2)
params_dict = {"n_estimators": 10}
transformers = []
metric = dc.metrics.Metric(dc.metrics.pearson_r2_score)
best_model, best_hyperparams, all_results = optimizer.hyperparam_search(
params_dict, self.train_dataset, self.valid_dataset, metric)
valid_score = best_model.evaluate(self.valid_dataset, [metric],
transformers)
assert valid_score["pearson_r2_score"] == max(all_results.values())
assert valid_score["pearson_r2_score"] > 0
def test_rf_example_min(self):
"""Test a simple example of optimizing a RF model with a gaussian process looking for minimum score."""
optimizer = dc.hyper.GaussianProcessHyperparamOpt(self.rf_model_builder,
max_iter=2)
params_dict = {"n_estimators": 10}
transformers = []
metric = dc.metrics.Metric(dc.metrics.pearson_r2_score)
best_model, best_hyperparams, all_results = optimizer.hyperparam_search(
params_dict,
self.train_dataset,
self.valid_dataset,
metric,
transformers,
use_max=False)
valid_score = best_model.evaluate(self.valid_dataset, [metric],
transformers)
assert valid_score["pearson_r2_score"] == min(all_results.values())
assert valid_score["pearson_r2_score"] > 0
def test_rf_with_logdir(self):
"""Test that using a logdir can work correctly."""
optimizer = dc.hyper.GaussianProcessHyperparamOpt(self.rf_model_builder,
max_iter=2)
params_dict = {"n_estimators": 10}
transformers = []
metric = dc.metrics.Metric(dc.metrics.pearson_r2_score)
with tempfile.TemporaryDirectory() as tmpdirname:
best_model, best_hyperparams, all_results = optimizer.hyperparam_search(
params_dict,
self.train_dataset,
self.valid_dataset,
metric,
transformers,
logdir=tmpdirname)
valid_score = best_model.evaluate(self.valid_dataset, [metric],
transformers)
assert valid_score["pearson_r2_score"] == max(all_results.values())
assert valid_score["pearson_r2_score"] > 0
@flaky
@pytest.mark.torch
def test_multitask_example(self):
"""Test a simple example of optimizing a multitask model with a gaussian process search."""
# Generate dummy dataset
np.random.seed(123)
train_dataset = dc.data.NumpyDataset(np.random.rand(10, 3),
np.zeros((10, 2)), np.ones(
(10, 2)), np.arange(10))
valid_dataset = dc.data.NumpyDataset(np.random.rand(5, 3),
np.zeros((5, 2)), np.ones((5, 2)),
np.arange(5))
transformers = []
optimizer = dc.hyper.GaussianProcessHyperparamOpt(
lambda **params: dc.models.MultitaskRegressor(
n_tasks=2,
n_features=3,
dropouts=[0.],
weight_init_stddevs=[np.sqrt(6) / np.sqrt(1000)],
learning_rate=0.003,
**params),
max_iter=1)
params_dict = {"batch_size": 10}
metric = dc.metrics.Metric(dc.metrics.mean_squared_error,
task_averager=np.mean)
best_model, best_hyperparams, all_results = optimizer.hyperparam_search(
params_dict,
train_dataset,
valid_dataset,
metric,
transformers,
use_max=False)
valid_score = best_model.evaluate(valid_dataset, [metric], transformers)
assert valid_score["mean-mean_squared_error"] == min(
all_results.values())
assert valid_score["mean-mean_squared_error"] > 0
@flaky
@pytest.mark.torch
def test_multitask_example_different_search_range(self):
"""Test a simple example of optimizing a multitask model with a gaussian process search with per-parameter search range."""
# Generate dummy dataset
np.random.seed(123)
train_dataset = dc.data.NumpyDataset(np.random.rand(10, 3),
np.zeros((10, 2)), np.ones(
(10, 2)), np.arange(10))
valid_dataset = dc.data.NumpyDataset(np.random.rand(5, 3),
np.zeros((5, 2)), np.ones((5, 2)),
np.arange(5))
transformers = []
# These are per-example multiplier
search_range = {"learning_rate": 10, "batch_size": 4}
optimizer = dc.hyper.GaussianProcessHyperparamOpt(
lambda **params: dc.models.MultitaskRegressor(
n_tasks=2,
n_features=3,
dropouts=[0.],
weight_init_stddevs=[np.sqrt(6) / np.sqrt(1000)],
**params),
search_range=search_range,
max_iter=2)
params_dict = {"learning_rate": 0.003, "batch_size": 10}
metric = dc.metrics.Metric(dc.metrics.mean_squared_error,
task_averager=np.mean)
with tempfile.TemporaryDirectory() as tmpdirname:
best_model, best_hyperparams, all_results = optimizer.hyperparam_search(
params_dict,
train_dataset,
valid_dataset,
metric,
transformers,
logdir=tmpdirname,
use_max=False)
valid_score = best_model.evaluate(valid_dataset, [metric],
transformers)
# Test that 2 parameters were optimized
for hp_str in all_results.keys():
# Recall that the key is a string of the form _batch_size_39_learning_rate_0.01 for example
assert "batch_size" in hp_str
assert "learning_rate" in hp_str
assert valid_score["mean-mean_squared_error"] == min(
all_results.values())
assert valid_score["mean-mean_squared_error"] > 0
@flaky
@pytest.mark.torch
def test_multitask_example_nb_epoch(self):
"""Test a simple example of optimizing a multitask model with a gaussian process search with a different number of training epochs."""
# Generate dummy dataset
np.random.seed(123)
train_dataset = dc.data.NumpyDataset(np.random.rand(10, 3),
np.zeros((10, 2)), np.ones(
(10, 2)), np.arange(10))
valid_dataset = dc.data.NumpyDataset(np.random.rand(5, 3),
np.zeros((5, 2)), np.ones((5, 2)),
np.arange(5))
transformers = []
optimizer = dc.hyper.GaussianProcessHyperparamOpt(
lambda **params: dc.models.MultitaskRegressor(
n_tasks=2,
n_features=3,
dropouts=[0.],
weight_init_stddevs=[np.sqrt(6) / np.sqrt(1000)],
learning_rate=0.003,
**params),
max_iter=1)
params_dict = {"batch_size": 10}
metric = dc.metrics.Metric(dc.metrics.mean_squared_error,
task_averager=np.mean)
best_model, best_hyperparams, all_results = optimizer.hyperparam_search(
params_dict,
train_dataset,
valid_dataset,
metric,
transformers,
nb_epoch=3,
use_max=False)
valid_score = best_model.evaluate(valid_dataset, [metric], transformers)
assert valid_score["mean-mean_squared_error"] == min(
all_results.values())
assert valid_score["mean-mean_squared_error"] > 0
<file_sep>import logging
from typing import Any, Callable, Optional
import numpy as np
from numpy.typing import ArrayLike
logger = logging.getLogger(__name__)
def threshold_predictions(y: np.ndarray,
threshold: Optional[float] = None) -> np.ndarray:
"""Threshold predictions from classification model.
Parameters
----------
y: np.ndarray
Must have shape `(N, n_classes)` and be class probabilities.
threshold: float, default None
The threshold probability for the positive class. Note that this
threshold will only be applied for binary classifiers (where
`n_classes==2`). If specified for multiclass problems, or if
`threshold` is None, the threshold is ignored and argmax(y) is
returned.
Returns
-------
y_out: np.ndarray
A numpy array of shape `(N,)` with class predictions as integers ranging from 0
to `n_classes-1`.
"""
if not isinstance(y, np.ndarray) or not len(y.shape) == 2:
raise ValueError("y must be a ndarray of shape (N, n_classes)")
N = y.shape[0]
n_classes = y.shape[1]
if n_classes != 2 or threshold is None:
return np.argmax(y, axis=1)
else:
return np.where(y[:, 1] >= threshold, np.ones(N), np.zeros(N))
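# A minimal usage sketch for threshold_predictions (illustrative values only):
# for a binary problem, a threshold of 0.3 marks rows whose positive-class
# probability is at least 0.3 as class 1, while threshold=None falls back to a
# plain argmax over the class probabilities.
#
#   >>> probs = np.array([[0.9, 0.1], [0.6, 0.4], [0.2, 0.8]])
#   >>> threshold_predictions(probs, threshold=0.3)
#   array([0., 1., 1.])
#   >>> threshold_predictions(probs, threshold=None)
#   array([0, 0, 1])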
def normalize_weight_shape(w: Optional[np.ndarray], n_samples: int,
n_tasks: int) -> np.ndarray:
"""A utility function to correct the shape of the weight array.
This utility function is used to normalize the shapes of a given
weight array.
Parameters
----------
w: np.ndarray
`w` can be `None` or a scalar or a `np.ndarray` of shape
`(n_samples,)` or of shape `(n_samples, n_tasks)`. If `w` is a
scalar, it's assumed to be the same weight for all samples/tasks.
n_samples: int
The number of samples in the dataset. If `w` is not None, we should
have `n_samples = w.shape[0]` if `w` is a ndarray
n_tasks: int
The number of tasks. If `w` is 2d ndarray, then we should have
`w.shape[1] == n_tasks`.
Examples
--------
>>> import numpy as np
>>> w_out = normalize_weight_shape(None, n_samples=10, n_tasks=1)
>>> (w_out == np.ones((10, 1))).all()
True
Returns
-------
w_out: np.ndarray
Array of shape `(n_samples, n_tasks)`
"""
if w is None:
w_out = np.ones((n_samples, n_tasks))
elif isinstance(w, np.ndarray):
if len(w.shape) == 0:
# scalar case
w_out = w * np.ones((n_samples, n_tasks))
elif len(w.shape) == 1:
if len(w) != n_samples:
raise ValueError("Length of w isn't n_samples")
# per-example case
# This is a little arcane but it repeats w across tasks.
w_out = np.tile(w, (n_tasks, 1)).T
elif len(w.shape) == 2:
if w.shape == (n_samples, 1):
# If w.shape == (n_samples, 1) handle it as 1D
w = np.squeeze(w, axis=1)
w_out = np.tile(w, (n_tasks, 1)).T
elif w.shape != (n_samples, n_tasks):
raise ValueError(
"Shape for w doens't match (n_samples, n_tasks)")
else:
# w.shape == (n_samples, n_tasks)
w_out = w
else:
raise ValueError("w must be of dimension 1, 2, or 3")
else:
# scalar case
w_out = w * np.ones((n_samples, n_tasks))
return w_out
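# A short sketch of the per-example (1-D) case with made-up weights: a weight
# vector of length n_samples is tiled across tasks into shape
# (n_samples, n_tasks).
#
#   >>> w = np.array([1.0, 2.0, 3.0])
#   >>> normalize_weight_shape(w, n_samples=3, n_tasks=2)
#   array([[1., 1.],
#          [2., 2.],
#          [3., 3.]])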
def normalize_labels_shape(y: np.ndarray,
mode: Optional[str] = None,
n_tasks: Optional[int] = None,
n_classes: Optional[int] = None) -> np.ndarray:
"""A utility function to correct the shape of the labels.
Parameters
----------
y: np.ndarray
`y` is an array of shape `(N,)` or `(N, n_tasks)` or `(N, n_tasks, 1)`.
mode: str, default None
If `mode` is "classification" or "regression", attempts to apply
data transformations.
n_tasks: int, default None
The number of tasks this class is expected to handle.
n_classes: int, default None
The number of classes. This must be specified when
`mode=="classification"` and is ignored for regression.
Returns
-------
y_out: np.ndarray
If `mode=="classification"`, `y_out` is an array of shape `(N,
n_tasks, n_classes)`. If `mode=="regression"`, `y_out` is an array
of shape `(N, n_tasks)`.
"""
if n_tasks is None:
raise ValueError("n_tasks must be specified")
if mode not in ["classification", "regression"]:
raise ValueError("mode must be either classification or regression.")
if mode == "classification" and n_classes is None:
raise ValueError("n_classes must be specified")
if not isinstance(y, np.ndarray):
raise ValueError("y must be a np.ndarray")
# Handle n_classes/n_task shape ambiguity
if mode == "classification" and len(y.shape) == 2:
if n_classes == y.shape[1] and n_tasks != 1 and n_classes != n_tasks:
raise ValueError("Shape of input doesn't match expected n_tasks=1")
elif n_classes == y.shape[1] and n_tasks == 1:
# Add in task dimension
y = np.expand_dims(y, 1)
if len(y.shape) == 1 and n_tasks != 1:
raise ValueError("n_tasks must equal 1 for a 1D set of labels.")
if (len(y.shape) == 2 or len(y.shape) == 3) and n_tasks != y.shape[1]:
raise ValueError("Shape of input doesn't match expected n_tasks=%d" %
n_tasks)
if len(y.shape) >= 4:
raise ValueError(
"Labels y must be a float scalar or a ndarray of shape `(N,)` or "
"`(N, n_tasks)` or `(N, n_tasks, 1)` for regression problems and "
"of shape `(N,)` or `(N, n_tasks)` or `(N, n_tasks, 1)` for classification problems"
)
if len(y.shape) == 1:
# Insert a task dimension (we know n_tasks=1 from above)
y_out = np.expand_dims(y, 1)
elif len(y.shape) == 2:
y_out = y
elif len(y.shape) == 3:
# If 3D and last dimension isn't 1, assume this is one-hot encoded and return as-is.
if y.shape[-1] != 1:
return y
y_out = np.squeeze(y, axis=-1)
# Handle classification. We need to convert labels into one-hot representation.
if mode == "classification":
all_y_task = []
for task in range(n_tasks):
y_task = y_out[:, task]
# check whether n_classes is int or not
assert isinstance(n_classes, int)
y_hot = to_one_hot(y_task, n_classes=n_classes)
y_hot = np.expand_dims(y_hot, 1)
all_y_task.append(y_hot)
y_out = np.concatenate(all_y_task, axis=1)
return y_out
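# A minimal sketch with made-up labels: binary class labels of shape (N,) for a
# single task are expanded into a one-hot array of shape (N, n_tasks, n_classes).
#
#   >>> y = np.array([0, 1, 1])
#   >>> normalize_labels_shape(y, mode="classification", n_tasks=1, n_classes=2).shape
#   (3, 1, 2)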
def normalize_prediction_shape(y: np.ndarray,
mode: Optional[str] = None,
n_tasks: Optional[int] = None,
n_classes: Optional[int] = None):
"""A utility function to correct the shape of provided predictions.
The metric computation classes expect that inputs for classification
have the uniform shape `(N, n_tasks, n_classes)` and inputs for
regression have the uniform shape `(N, n_tasks)`. This function
normalizes the provided input array to have the desired shape.
Examples
--------
>>> import numpy as np
>>> y = np.random.rand(10)
>>> y_out = normalize_prediction_shape(y, "regression", n_tasks=1)
>>> y_out.shape
(10, 1)
Parameters
----------
y: np.ndarray
If `mode=="classification"`, `y` is an array of shape `(N,)` or
`(N, n_tasks)` or `(N, n_tasks, n_classes)`. If
`mode=="regression"`, `y` is an array of shape `(N,)` or `(N,
n_tasks)`or `(N, n_tasks, 1)`.
mode: str, default None
If `mode` is "classification" or "regression", attempts to apply
data transformations.
n_tasks: int, default None
The number of tasks this class is expected to handle.
n_classes: int, default None
The number of classes. This must be specified when
`mode=="classification"` and is ignored for regression.
Returns
-------
y_out: np.ndarray
If `mode=="classification"`, `y_out` is an array of shape `(N,
n_tasks, n_classes)`. If `mode=="regression"`, `y_out` is an array
of shape `(N, n_tasks)`.
"""
if n_tasks is None:
raise ValueError("n_tasks must be specified")
if mode == "classification" and n_classes is None:
raise ValueError("n_classes must be specified")
if not isinstance(y, np.ndarray):
raise ValueError("y must be a np.ndarray")
# Handle n_classes/n_task shape ambiguity
if mode == "classification" and len(y.shape) == 2:
if n_classes == y.shape[1] and n_tasks != 1 and n_classes != n_tasks:
raise ValueError("Shape of input doesn't match expected n_tasks=1")
elif n_classes == y.shape[1] and n_tasks == 1:
# Add in task dimension
y = np.expand_dims(y, 1)
if (len(y.shape) == 2 or len(y.shape) == 3) and n_tasks != y.shape[1]:
raise ValueError("Shape of input doesn't match expected n_tasks=%d" %
n_tasks)
if len(y.shape) >= 4:
raise ValueError(
"Predictions y must be a float scalar or a ndarray of shape `(N,)` or "
"`(N, n_tasks)` or `(N, n_tasks, 1)` for regression problems and "
"of shape `(N,)` or `(N, n_tasks)` or `(N, n_tasks, n_classes)` for classification problems"
)
if mode == "classification":
if n_classes is None:
raise ValueError("n_classes must be specified.")
if len(y.shape) == 1 or len(y.shape) == 2:
# Make everything 2D so easy to handle
if len(y.shape) == 1:
y = y[:, np.newaxis]
# Handle each task separately.
all_y_task = []
for task in range(n_tasks):
y_task = y[:, task]
if len(np.unique(y_task)) > n_classes:
# Handle continuous class probabilities of the positive class for binary
if n_classes > 2:
raise ValueError(
"Cannot handle continuous probabilities for multiclass problems."
"Need a per-class probability")
# Fill in class 0 probabilities
y_task = np.array([1 - y_task, y_task]).T
# Add a task dimension to concatenate on
y_task = np.expand_dims(y_task, 1)
all_y_task.append(y_task)
else:
# Handle binary labels
# make y_hot of shape (N, n_classes)
y_task = to_one_hot(y_task, n_classes=n_classes)
# Add a task dimension to concatenate on
y_task = np.expand_dims(y_task, 1)
all_y_task.append(y_task)
y_out = np.concatenate(all_y_task, axis=1)
elif len(y.shape) == 3:
y_out = y
elif mode == "regression":
if len(y.shape) == 1:
# Insert a task dimension
y_out = np.expand_dims(y, 1)
elif len(y.shape) == 2:
y_out = y
elif len(y.shape) == 3:
if y.shape[-1] != 1:
raise ValueError(
"y must be a float scalar or a ndarray of shape `(N,)` or "
"`(N, n_tasks)` or `(N, n_tasks, 1)` for regression problems."
)
y_out = np.squeeze(y, axis=-1)
else:
raise ValueError("mode must be either classification or regression.")
return y_out
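# A small sketch of the classification branch (illustrative probabilities):
# positive-class probabilities of shape (N,) are filled out into per-class
# probabilities of shape (N, n_tasks, n_classes).
#
#   >>> y = np.array([0.1, 0.8, 0.6])
#   >>> normalize_prediction_shape(y, mode="classification", n_tasks=1, n_classes=2).shape
#   (3, 1, 2)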
def handle_classification_mode(
y: np.ndarray,
classification_handling_mode: Optional[str],
threshold_value: Optional[float] = None) -> np.ndarray:
"""Handle classification mode.
Transform predictions so that they have the correct classification mode.
Parameters
----------
y: np.ndarray
Must be of shape `(N, n_tasks, n_classes)`
classification_handling_mode: str, default None
DeepChem models by default predict class probabilities for
classification problems. This means that for a given singletask
prediction, after shape normalization, the DeepChem prediction will be a
numpy array of shape `(N, n_classes)` with class probabilities.
`classification_handling_mode` is a string that instructs this method
how to handle transforming these probabilities. It can take on the
following values:
- None: default value. Pass in `y_pred` directly into `self.metric`.
- "threshold": Use `threshold_predictions` to threshold `y_pred`. Use
`threshold_value` as the desired threshold.
- "threshold-one-hot": Use `threshold_predictions` to threshold `y_pred`
using `threshold_value`, then apply `to_one_hot` to the output.
threshold_value: float, default None
If set, and `classification_handling_mode` is "threshold" or
"threshold-one-hot" apply a thresholding operation to values with this
threshold. This option is only sensible on binary classification tasks.
If float, this will be applied as a binary classification value.
Returns
-------
y_out: np.ndarray
If `classification_handling_mode` is "direct", then of shape `(N, n_tasks, n_classes)`.
If `classification_handling_mode` is "threshold", then of shape `(N, n_tasks)`.
If `classification_handling_mode` is "threshold-one-hot", then of shape `(N, n_tasks, n_classes)`.
"""
if len(y.shape) != 3:
raise ValueError("y must be of shape (N, n_tasks, n_classes)")
N, n_tasks, n_classes = y.shape
if classification_handling_mode == "direct":
return y
elif classification_handling_mode == "threshold":
thresholded = []
for task in range(n_tasks):
task_array = y[:, task, :]
# Now of shape (N,)
task_array = threshold_predictions(task_array, threshold_value)
# Now of shape (N, 1)
task_array = np.expand_dims(task_array, 1)
thresholded.append(task_array)
# Returns shape (N, n_tasks)
return np.concatenate(thresholded, axis=1)
elif classification_handling_mode == "threshold-one-hot":
thresholded = []
for task in range(n_tasks):
task_array = y[:, task, :]
# Now of shape (N,)
task_array = threshold_predictions(task_array, threshold_value)
# Now of shape (N, n_classes)
task_array = to_one_hot(task_array, n_classes=n_classes)
# Now of shape (N, 1, n_classes)
task_array = np.expand_dims(task_array, 1)
thresholded.append(task_array)
# Returns shape (N, n_tasks, n_classes)
return np.concatenate(thresholded, axis=1)
else:
raise ValueError(
"classification_handling_mode must be one of direct, threshold, threshold-one-hot"
)
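# A brief sketch with hypothetical probabilities: "threshold" reduces per-class
# probabilities of shape (N, n_tasks, n_classes) to class indices of shape
# (N, n_tasks); "threshold-one-hot" re-expands those indices to one-hot form.
#
#   >>> y = np.array([[[0.7, 0.3]], [[0.2, 0.8]]])  # shape (2, 1, 2)
#   >>> handle_classification_mode(y, "threshold").shape
#   (2, 1)
#   >>> handle_classification_mode(y, "threshold-one-hot").shape
#   (2, 1, 2)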
def to_one_hot(y: np.ndarray, n_classes: int = 2) -> np.ndarray:
"""Transforms label vector into one-hot encoding.
Turns y into vector of shape `(N, n_classes)` with a one-hot
encoding. Assumes that `y` takes values from `0` to `n_classes - 1`.
Parameters
----------
y: np.ndarray
A vector of shape `(N,)` or `(N, 1)`
n_classes: int, default 2
The number of classes used for the one-hot encoding. `y` is assumed to
take values from `0` to `n_classes - 1`.
Returns
-------
np.ndarray
A numpy array of shape `(N, n_classes)`.
"""
if len(y.shape) > 2:
raise ValueError("y must be a vector of shape (N,) or (N, 1)")
if len(y.shape) == 2 and y.shape[1] != 1:
raise ValueError("y must be a vector of shape (N,) or (N, 1)")
if len(np.unique(y)) > n_classes:
raise ValueError("y has more than n_class unique elements.")
N = np.shape(y)[0]
y_hot = np.zeros((N, n_classes))
y_hot[np.arange(N), y.astype(np.int64)] = 1
return y_hot
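# A tiny sketch with made-up labels:
#
#   >>> to_one_hot(np.array([0, 1, 1]), n_classes=2)
#   array([[1., 0.],
#          [0., 1.],
#          [0., 1.]])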
def from_one_hot(y: np.ndarray, axis: int = 1) -> np.ndarray:
"""Transforms label vector from one-hot encoding.
Parameters
----------
y: np.ndarray
A vector of shape `(n_samples, num_classes)`
axis: int, optional (default 1)
The axis with one-hot encodings to reduce on.
Returns
-------
np.ndarray
A numpy array of shape `(n_samples,)`
"""
return np.argmax(y, axis=axis)
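# A tiny round-trip sketch with made-up labels: from_one_hot inverts to_one_hot.
#
#   >>> y_hot = to_one_hot(np.array([0, 1, 1]), n_classes=2)
#   >>> from_one_hot(y_hot)
#   array([0, 1, 1])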
class Metric(object):
"""Wrapper class for computing user-defined metrics.
The `Metric` class provides a wrapper for standardizing the API
around different classes of metrics that may be useful for DeepChem
models. The implementation provides a few non-standard conveniences
such as built-in support for multitask and multiclass metrics.
There are a variety of different metrics this class aims to support.
Metrics for classification and regression that assume that values to
compare are scalars are supported.
At present, this class doesn't support metric computation on models
which don't present scalar outputs. For example, if you have a
generative model which predicts images or molecules, you will need
to write a custom evaluation and metric setup.
"""
def __init__(self,
metric: Callable[..., float],
task_averager: Optional[Callable[..., Any]] = None,
name: Optional[str] = None,
threshold: Optional[float] = None,
mode: Optional[str] = None,
n_tasks: Optional[int] = None,
classification_handling_mode: Optional[str] = None,
threshold_value: Optional[float] = None):
"""
Parameters
----------
metric: function
Function that takes args y_true, y_pred (in that order) and
computes desired score. If sample weights are to be considered,
`metric` may take in an additional keyword argument
`sample_weight`.
task_averager: function, default None
If not None, should be a function that averages metrics across
tasks.
name: str, default None
Name of this metric
threshold: float, default None (DEPRECATED)
Used for binary metrics and is the threshold for the positive
class.
mode: str, default None
Should usually be "classification" or "regression."
n_tasks: int, default None
The number of tasks this class is expected to handle.
classification_handling_mode: str, default None
DeepChem models by default predict class probabilities for
classification problems. This means that for a given singletask
prediction, after shape normalization, the DeepChem labels and prediction will be
numpy arrays of shape `(n_samples, n_tasks, n_classes)` with class probabilities.
`classification_handling_mode` is a string that instructs this method
how to handle transforming these probabilities. It can take on the
following values:
- "direct": Pass `y_true` and `y_pred` directy into `self.metric`.
- "threshold": Use `threshold_predictions` to threshold `y_true` and `y_pred`.
Use `threshold_value` as the desired threshold. This converts them into
arrays of shape `(n_samples, n_tasks)`, where each element is a class index.
- "threshold-one-hot": Use `threshold_predictions` to threshold `y_true` and `y_pred`
using `threshold_value`, then apply `to_one_hot` to the output.
- None: Select a mode automatically based on the metric.
threshold_value: float, default None
If set, and `classification_handling_mode` is "threshold" or
"threshold-one-hot", apply a thresholding operation to values with this
threshold. This option is only sensible on binary classification tasks.
For multiclass problems, or if `threshold_value` is None, argmax() is used
to select the highest probability class for each task.
"""
if threshold is not None:
logger.warning(
"threshold is deprecated and will be removed in a future version of DeepChem. "
"Set threshold in compute_metric instead.")
self.metric = metric
if task_averager is None:
self.task_averager = np.mean
else:
self.task_averager = task_averager
if name is None:
if task_averager is None:
if hasattr(self.metric, '__name__'):
self.name = self.metric.__name__
else:
self.name = "unknown metric"
else:
if hasattr(self.metric, '__name__'):
self.name = task_averager.__name__ + "-" + self.metric.__name__
else:
self.name = "unknown metric"
else:
self.name = name
if mode is None:
# These are some smart defaults
if self.metric.__name__ in [
"roc_auc_score", "matthews_corrcoef", "recall_score",
"accuracy_score", "kappa_score", "cohen_kappa_score",
"precision_score", "precision_recall_curve",
"balanced_accuracy_score", "prc_auc_score", "f1_score",
"bedroc_score", "jaccard_score", "jaccard_index",
"pixel_error"
]:
mode = "classification"
elif self.metric.__name__ in [
"pearson_r2_score", "r2_score", "mean_squared_error",
"mean_absolute_error", "rms_score", "mae_score", "pearsonr",
"concordance_index"
]:
mode = "regression"
else:
raise ValueError(
"Please specify the mode of this metric. mode must be 'regression' or 'classification'"
)
if mode == "classification":
if classification_handling_mode is None:
# These are some smart defaults corresponding to sklearn's required
# behavior
if self.metric.__name__ in [
"matthews_corrcoef", "cohen_kappa_score", "kappa_score",
"balanced_accuracy_score", "recall_score",
"jaccard_score", "jaccard_index", "pixel_error",
"f1_score"
]:
classification_handling_mode = "threshold"
elif self.metric.__name__ in [
"accuracy_score", "precision_score", "bedroc_score"
]:
classification_handling_mode = "threshold-one-hot"
elif self.metric.__name__ in [
"roc_auc_score", "prc_auc_score",
"precision_recall_curve"
]:
classification_handling_mode = "direct"
if classification_handling_mode not in [
"direct", "threshold", "threshold-one-hot"
]:
raise ValueError(
"classification_handling_mode must be one of 'direct', 'threshold', 'threshold_one_hot'"
)
self.mode = mode
self.n_tasks = n_tasks
self.classification_handling_mode = classification_handling_mode
self.threshold_value = threshold_value
def compute_metric(self,
y_true: ArrayLike,
y_pred: ArrayLike,
w: Optional[ArrayLike] = None,
n_tasks: Optional[int] = None,
n_classes: int = 2,
per_task_metrics: bool = False,
use_sample_weights: bool = False,
**kwargs) -> Any:
"""Compute a performance metric for each task.
Parameters
----------
y_true: ArrayLike
An ArrayLike containing true values for each task. Must be of shape
`(N,)` or `(N, n_tasks)` or `(N, n_tasks, n_classes)` if a
classification metric. If of shape `(N, n_tasks)` values can either be
class-labels or probabilities of the positive class for binary
classification problems. If a regression problem, must be of shape
`(N,)` or `(N, n_tasks)` or `(N, n_tasks, 1)` if a regression metric.
y_pred: ArrayLike
An ArrayLike containing predicted values for each task. Must be
of shape `(N, n_tasks, n_classes)` if a classification metric,
else must be of shape `(N, n_tasks)` if a regression metric.
w: ArrayLike, default None
An ArrayLike containing weights for each datapoint. If
specified, must be of shape `(N, n_tasks)`.
n_tasks: int, default None
The number of tasks this class is expected to handle.
n_classes: int, default 2
Number of classes in data for classification tasks.
per_task_metrics: bool, default False
If true, return computed metric for each task on multitask dataset.
use_sample_weights: bool, default False
If set, use per-sample weights `w`.
kwargs: dict
Will be passed on to self.metric
Returns
-------
np.ndarray
A numpy array containing metric values for each task.
"""
# Attempt some limited shape imputation to find n_tasks
y_true_arr = np.asarray(y_true)
y_pred_arr = np.asarray(y_pred)
if n_tasks is None:
if self.n_tasks is None and isinstance(y_true_arr, np.ndarray):
if len(y_true_arr.shape) == 1:
n_tasks = 1
elif len(y_true_arr.shape) >= 2:
n_tasks = y_true_arr.shape[1]
else:
n_tasks = self.n_tasks
# check whether n_tasks is int or not
# This is because `normalize_weight_shape` require int value.
assert isinstance(n_tasks, int)
y_true_arr = normalize_labels_shape(y_true_arr,
mode=self.mode,
n_tasks=n_tasks,
n_classes=n_classes)
y_pred_arr = normalize_prediction_shape(y_pred_arr,
mode=self.mode,
n_tasks=n_tasks,
n_classes=n_classes)
if self.mode == "classification":
y_true_arr = handle_classification_mode(
y_true_arr, self.classification_handling_mode,
self.threshold_value)
y_pred_arr = handle_classification_mode(
y_pred_arr, self.classification_handling_mode,
self.threshold_value)
n_samples = y_true_arr.shape[0]
w = normalize_weight_shape(None if w is None else np.asarray(w),
n_samples, n_tasks)
computed_metrics = []
for task in range(n_tasks):
y_task = y_true_arr[:, task]
y_pred_arr_task = y_pred_arr[:, task]
w_task = w[:, task]
metric_value = self.compute_singletask_metric(
y_task,
y_pred_arr_task,
w_task,
use_sample_weights=use_sample_weights,
**kwargs)
computed_metrics.append(metric_value)
logger.info("computed_metrics: %s" % str(computed_metrics))
if n_tasks == 1:
# FIXME: Incompatible types in assignment
computed_metrics = computed_metrics[0] # type: ignore
if not per_task_metrics:
return self.task_averager(computed_metrics)
else:
return self.task_averager(computed_metrics), computed_metrics
def compute_singletask_metric(self,
y_true: ArrayLike,
y_pred: ArrayLike,
w: Optional[ArrayLike] = None,
n_samples: Optional[int] = None,
use_sample_weights: bool = False,
**kwargs) -> float:
"""Compute a metric value.
Parameters
----------
y_true: ArrayLike
True values array. This array must be of shape `(N,
n_classes)` if classification and `(N,)` if regression.
y_pred: ArrayLike
Predictions array. This array must be of shape `(N, n_classes)`
if classification and `(N,)` if regression.
w: ArrayLike, default None
Sample weight array. This array must be of shape `(N,)`
n_samples: int, default None (DEPRECATED)
The number of samples in the dataset. This is `N`. This argument is
ignored.
use_sample_weights: bool, default False
If set, use per-sample weights `w`.
kwargs: dict
Will be passed on to self.metric
Returns
-------
metric_value: float
The computed value of the metric.
"""
if n_samples is not None:
logger.warning(
"n_samples is a deprecated argument which is ignored.")
# Attempt to convert both into the same type
y_true_arr = np.asarray(y_true)
y_pred_arr = np.asarray(y_pred)
if self.mode == "regression":
if len(y_true_arr.shape) != 1 or len(
y_pred_arr.shape
) != 1 or y_true_arr.shape != y_pred_arr.shape:
raise ValueError(
"For regression metrics, y_true and y_pred must both be of shape (N,)"
)
elif self.mode == "classification":
pass
# if len(y_true.shape) != 2 or len(y_pred.shape) != 2 or y_true.shape != y_pred.shape:
# raise ValueError("For classification metrics, y_true and y_pred must both be of shape (N, n_classes)")
else:
raise ValueError(
"Only classification and regression are supported for metrics calculations."
)
if use_sample_weights:
metric_value = self.metric(y_true_arr,
y_pred_arr,
sample_weight=w,
**kwargs)
else:
metric_value = self.metric(y_true_arr, y_pred_arr, **kwargs)
return metric_value
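# A minimal end-to-end sketch of the Metric wrapper with hypothetical labels and
# predictions (not taken from any real dataset). accuracy_score is recognized as
# a classification metric, so the class probabilities are thresholded and
# one-hot encoded internally before being handed to the underlying metric.
#
#   >>> import numpy as np
#   >>> import deepchem as dc
#   >>> acc = dc.metrics.Metric(dc.metrics.accuracy_score)
#   >>> y_true = np.array([0, 1, 1, 0])
#   >>> y_pred = np.array([[0.9, 0.1], [0.2, 0.8], [0.3, 0.7], [0.6, 0.4]])
#   >>> float(acc.compute_metric(y_true, y_pred, n_tasks=1, n_classes=2))
#   1.0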
<file_sep>"""
Script that trains progressive multitask models on Delaney dataset.
"""
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
import os
import shutil
import numpy as np
import deepchem as dc
from deepchem.molnet import load_delaney
# Only for debug!
np.random.seed(123)
# Load Delaney dataset
n_features = 1024
delaney_tasks, delaney_datasets, transformers = load_delaney()
train_dataset, valid_dataset, test_dataset = delaney_datasets
# Fit models
metric = dc.metrics.Metric(dc.metrics.pearson_r2_score, np.mean)
n_layers = 1
nb_epoch = 10
model = dc.models.ProgressiveMultitaskRegressor(
len(delaney_tasks),
n_features,
layer_sizes=[1000] * n_layers,
dropouts=[.25] * n_layers,
alpha_init_stddevs=[.02] * n_layers,
weight_init_stddevs=[.02] * n_layers,
bias_init_consts=[1.] * n_layers,
learning_rate=.001,
batch_size=100)
# Fit trained model
model.fit(train_dataset)
print("Evaluating model")
train_scores = model.evaluate(train_dataset, [metric], transformers)
valid_scores = model.evaluate(valid_dataset, [metric], transformers)
print("Train scores")
print(train_scores)
print("Validation scores")
print(valid_scores)
<file_sep># flake8: noqa
from deepchem.dock.pose_generation import PoseGenerator
from deepchem.dock.pose_generation import VinaPoseGenerator
from deepchem.dock.pose_generation import GninaPoseGenerator
from deepchem.dock.docking import Docker
from deepchem.dock.binding_pocket import ConvexHullPocketFinder
<file_sep>FROM nvidia/cuda:11.3.0-cudnn8-devel
# Install some utilities
RUN apt-get update && \
apt-get install -y -q wget git libxrender1 libsm6 bzip2 && \
rm -rf /var/lib/apt/lists/* /var/cache/apt/archives/*
# Install miniconda
RUN MINICONDA="Miniconda3-latest-Linux-x86_64.sh" && \
wget --quiet https://repo.continuum.io/miniconda/$MINICONDA && \
bash $MINICONDA -b -p /miniconda && \
rm -f $MINICONDA && \
echo ". /miniconda/etc/profile.d/conda.sh" >> ~/.bashrc
ENV PATH /miniconda/bin:$PATH
# install latest version deepchem
RUN conda update -n base conda && \
conda create -y --name deepchem python=3.7 && \
. /miniconda/etc/profile.d/conda.sh && \
conda activate deepchem && \
pip install tensorflow~=2.7 deepchem && \
conda clean -afy && \
rm -rf ~/.cache/pip
RUN echo "conda activate deepchem" >> ~/.bashrc
WORKDIR /root/mydir
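# A typical build-and-run sketch (the image tag "deepchem-gpu" is arbitrary, and
# running with --gpus requires the NVIDIA container toolkit on the host):
#   docker build -t deepchem-gpu .
#   docker run --gpus all -it deepchem-gpu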
<file_sep>"""
FACTOR dataset loader
"""
import os
import logging
import time
import deepchem
from deepchem.molnet.load_function.kaggle_features import merck_descriptors
from deepchem.utils import remove_missing_entries
logger = logging.getLogger(__name__)
TRAIN_URL = "https://deepchemdata.s3-us-west-1.amazonaws.com/datasets/FACTORS_training_disguised_combined_full.csv.gz"
VALID_URL = "https://deepchemdata.s3-us-west-1.amazonaws.com/datasets/FACTORS_test1_disguised_combined_full.csv.gz"
TEST_URL = "https://deepchemdata.s3-us-west-1.amazonaws.com/datasets/FACTORS_test2_disguised_combined_full.csv.gz"
TRAIN_FILENAME = "FACTORS_training_disguised_combined_full.csv.gz"
VALID_FILENAME = "FACTORS_test1_disguised_combined_full.csv.gz"
TEST_FILENAME = "FACTORS_test2_disguised_combined_full.csv.gz"
def get_transformers(train_dataset):
"""Gets transformers applied to the dataset"""
transformers = list()
# TODO: Check if anything needs to be added
return transformers
def gen_factors(FACTORS_tasks,
data_dir,
train_dir,
valid_dir,
test_dir,
shard_size=2000):
"""Loads the FACTORS dataset; does not do train/test split"""
time1 = time.time()
train_files = os.path.join(data_dir, TRAIN_FILENAME)
valid_files = os.path.join(data_dir, VALID_FILENAME)
test_files = os.path.join(data_dir, TEST_FILENAME)
if not os.path.exists(train_files):
logger.info("Downloading train file...")
deepchem.utils.data_utils.download_url(url=TRAIN_URL, dest_dir=data_dir)
logger.info("Training file download complete.")
logger.info("Downloading validation file...")
deepchem.utils.data_utils.download_url(url=VALID_URL, dest_dir=data_dir)
logger.info("Validation file download complete.")
logger.info("Downloading test file...")
deepchem.utils.data_utils.download_url(url=TEST_URL, dest_dir=data_dir)
logger.info("Test file download complete")
# Featurize the FACTORS dataset
logger.info("About to featurize the FACTORS dataset")
featurizer = deepchem.feat.UserDefinedFeaturizer(merck_descriptors)
loader = deepchem.data.UserCSVLoader(tasks=FACTORS_tasks,
id_field="Molecule",
featurizer=featurizer)
logger.info("Featurizing the train dataset...")
train_dataset = loader.featurize(train_files, shard_size=shard_size)
logger.info("Featurizing the validation dataset...")
valid_dataset = loader.featurize(valid_files, shard_size=shard_size)
logger.info("Featurizing the test dataset...")
test_dataset = loader.featurize(test_files, shard_size=shard_size)
logger.info("Remove missing entries from dataset")
remove_missing_entries(train_dataset)
remove_missing_entries(valid_dataset)
remove_missing_entries(test_dataset)
# Shuffle the training data
logger.info("Shuffling the training dataset")
train_dataset.sparse_shuffle()
# Apply transformations
logger.info("Transforming datasets with transformers")
transformers = get_transformers(train_dataset)
for transformer in transformers:
logger.info("Performing transformations with {}".format(
transformer.__class__.__name__))
logger.info("Transforming the training dataset...")
train_dataset = transformer.transform(train_dataset)
logger.info("Transforming the validation dataset...")
valid_dataset = transformer.transform(valid_dataset)
logger.info("Transforming the test dataset...")
test_dataset = transformer.transform(test_dataset)
logger.info("Transformations complete.")
logger.info("Moving datasets to corresponding directories")
train_dataset.move(train_dir)
logger.info("Train dataset moved.")
valid_dataset.move(valid_dir)
logger.info("Validation dataset moved.")
test_dataset.move(test_dir)
logger.info("Test dataset moved.")
time2 = time.time()
# TIMING
logger.info("TIMING: FACTORS fitting took %0.3f s" % (time2 - time1))
return train_dataset, valid_dataset, test_dataset
def load_factors(shard_size=2000, featurizer=None, split=None, reload=True):
"""Loads FACTOR dataset; does not do train/test split
The Factors dataset is an in-house dataset from Merck that was first introduced in the following paper:
<NAME>, et al. "Is multitask deep learning practical for pharma?." Journal of chemical information and modeling 57.8 (2017): 2068-2076.
It contains 1500 Merck in-house compounds that were measured
for IC50 of inhibition on 12 serine proteases. Unlike most of
the other datasets featured in MoleculeNet, the Factors
collection does not have structures for the compounds tested
since they were proprietary Merck compounds. However, the
collection does feature pre-computed descriptors for these
compounds.
Note that the original train/valid/test split from the source
data was preserved here, so this function doesn't allow for
alternate modes of splitting. Similarly, since the source data
came pre-featurized, it is not possible to apply alternative
featurizations.
Parameters
----------
shard_size: int, optional
Size of the DiskDataset shards to write on disk
featurizer: optional
Ignored since featurization pre-computed
split: optional
Ignored since split pre-computed
reload: bool, optional
Whether to automatically re-load from disk
"""
FACTORS_tasks = [
'T_00001', 'T_00002', 'T_00003', 'T_00004', 'T_00005', 'T_00006',
'T_00007', 'T_00008', 'T_00009', 'T_00010', 'T_00011', 'T_00012'
]
data_dir = deepchem.utils.data_utils.get_data_dir()
data_dir = os.path.join(data_dir, "factors")
if not os.path.exists(data_dir):
os.mkdir(data_dir)
train_dir = os.path.join(data_dir, "train_dir")
valid_dir = os.path.join(data_dir, "valid_dir")
test_dir = os.path.join(data_dir, "test_dir")
if (os.path.exists(train_dir) and os.path.exists(valid_dir) and
os.path.exists(test_dir)):
logger.info("Reloading existing datasets")
train_dataset = deepchem.data.DiskDataset(train_dir)
valid_dataset = deepchem.data.DiskDataset(valid_dir)
test_dataset = deepchem.data.DiskDataset(test_dir)
else:
logger.info("Featurizing datasets")
train_dataset, valid_dataset, test_dataset = gen_factors(
FACTORS_tasks=FACTORS_tasks,
data_dir=data_dir,
train_dir=train_dir,
valid_dir=valid_dir,
test_dir=test_dir,
shard_size=shard_size)
transformers = get_transformers(train_dataset)
return FACTORS_tasks, (train_dataset, valid_dataset,
test_dataset), transformers
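# A minimal usage sketch (note that the first call downloads and featurizes the
# full Merck FACTORS files, so this is not a quick doctest):
#
#   tasks, (train, valid, test), transformers = load_factors(shard_size=2000)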
<file_sep>import torch
import torch.nn as nn
from deepchem.models.torch_models.torch_model import TorchModel
from deepchem.models.losses import L2Loss
class LCNNBlock(nn.Module):
"""
The Lattice Convolution layer of LCNN
The following class implements the lattice convolution function which is
based on graph convolution networks where,
[1] Each atom is represented as a node
[2] Adjacent atoms, determined by distance, are considered neighbors.
Operations in Lattice Convolution:
[1] In the graph aggregation step, the node features of the neighbors are
concatenated and passed through a linear layer. Because different orderings
of the neighbors are possible, different permutations of the lattice
structure are considered along the different symmetry angles (0, 60, 120,
180, 240, 300).
[2] After the linear layer is applied to each permutation, the results are
summed for each node and each node is transformed into a vector.
Examples
--------
>>> import deepchem as dc
>>> from deepchem.models.torch_models.lcnn import LCNNBlock
>>> from deepchem.feat.graph_data import GraphData
>>> import numpy as np
>>> nodes = np.array([0, 1, 2])
>>> x = np.zeros((nodes.size, nodes.max()+1))
>>> x[np.arange(nodes.size),nodes] = 1
>>> v = np.array([ 0,0, 0,0, 1,1, 1,1, 2,2, 2,2 ])
>>> u = np.array([ 1,2, 2,1, 2,0, 0,2, 1,0, 0,1 ])
>>> graph = GraphData(node_features=x, edge_index=np.array([u, v]))
>>> model = LCNNBlock(3*2, 3, 2)
>>> G = graph.to_dgl_graph()
>>> x = G.ndata.pop('x')
>>> print(model(G, x).shape)
torch.Size([3, 3])
"""
def __init__(self,
input_feature: int,
output_feature: int = 19,
n_permutation_list: int = 6,
dropout: float = 0.2,
UseBN: bool = True):
"""
Lattice Convolution Layer used in the main model
Parameters
----------
input_feature: int
Dimension of the concatenated input vector: node_feature_size * number of neighbors
output_feature: int, default 19
Dimension of feature size of the convolution
n_permutation_list: int, default 6
Number of different permutations taken along different directions
dropout: float
p value for dropout between 0.0 to 1.0
UseBN: bool
To use batch normalisation
"""
super(LCNNBlock, self).__init__()
self.conv_weights = nn.Linear(input_feature, output_feature)
self.batch_norm = nn.BatchNorm1d(output_feature)
self.UseBN = UseBN
self.activation = Shifted_softplus()
self.dropout = Custom_dropout(dropout, n_permutation_list)
self.permutation = n_permutation_list
def reduce_func(self, nodes):
number_of_sites = nodes.mailbox['m'].shape[0]
return {
'X_site':
nodes.mailbox['m'].view(number_of_sites, self.permutation, -1)
}
def forward(self, G, node_feats):
"""
Update node representations.
Parameters
----------
G: DGLGraph
DGLGraph for a batch of graphs.
node_feats: torch.Tensor
The node features. The shape is `(N, Node_feature_size)`.
Returns
-------
node_feats: torch.Tensor
The updated node features. The shape is `(N, Node_feature_size)`.
"""
try:
import dgl.function as fn
except:
raise ImportError("This class requires DGL to be installed.")
G = G.local_var()
G.ndata['x'] = node_feats
G.update_all(fn.copy_u('x', 'm'), self.reduce_func)
X = self.conv_weights(G.ndata['X_site'])
X = torch.stack([self.batch_norm(X_i) for X_i in X])
node_feats = torch.stack([self.dropout(X_i).sum(axis=0) for X_i in X])
return node_feats
class Atom_Wise_Convolution(nn.Module):
"""
Performs self convolution to each node
"""
def __init__(self,
input_feature: int,
output_feature: int,
dropout: float = 0.2,
UseBN: bool = True):
"""
Parameters
----------
input_feature: int
Size of input feature size
output_feature: int
Size of output feature size
dropout: float, default 0.2
p value for dropout between 0.0 to 1.0
UseBN: bool
Setting it to True will perform Batch Normalisation
"""
super(Atom_Wise_Convolution, self).__init__()
self.conv_weights = nn.Linear(input_feature, output_feature)
self.batch_norm = nn.LayerNorm(output_feature)
self.UseBN = UseBN
self.activation = Shifted_softplus()
self.dropout = nn.Dropout(p=dropout)
def forward(self, node_feats):
"""
Update node representations.
Parameters
----------
node_feats: torch.Tensor
The node features. The shape is `(N, Node_feature_size)`.
Returns
-------
node_feats: torch.Tensor
The updated node features. The shape is `(N, Node_feature_size)`.
"""
node_feats = self.conv_weights(node_feats)
if self.UseBN:
node_feats = self.batch_norm(node_feats)
node_feats = self.activation(node_feats)
node_feats = self.dropout(node_feats)
return node_feats
class Shifted_softplus(nn.Module):
"""
Performs a shifted softplus activation, which shifts the softplus output down by log(2) so that an input of 0 maps to approximately 0
"""
def __init__(self):
super(Shifted_softplus, self).__init__()
self.act = nn.Softplus()
self.shift = nn.Parameter(torch.tensor([0.69310]), False)
def forward(self, X):
"""
Applies the Activation function
Parameters
----------
node_feats: torch.Tensor
The node features.
Returns
-------
node_feats: torch.Tensor
The updated node features.
"""
node_feats = self.act(X) - self.shift
return node_feats
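# A quick numeric sketch of Shifted_softplus: softplus(0) = log(2) ~ 0.6931, so
# subtracting the stored shift constant (0.69310) maps an input of 0 to a value
# very close to 0.
#
#   act = Shifted_softplus()
#   out = act(torch.zeros(3))  # each entry is softplus(0) - 0.69310, i.e. ~0.0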
class Custom_dropout(nn.Module):
"""
A dropout variant that zeroes out the contribution of entire rows. When a
row-wise sum of a 2-d matrix is taken, a randomly chosen subset of rows is
dropped from the sum.
Given a 2-d matrix X consisting of row vectors (1-d) x1, x2, ..., xn:
Sum = x1 + 0*x2 + ... + 0*xi + ... + xn
"""
def __init__(self, dp_rate: float, n_permutation: int):
"""
Parameters
----------
dp_rate: float
p value of dropout.
n_permutation: int
Number of rows expected in the input matrix (one per permutation).
"""
super(Custom_dropout, self).__init__()
self.dropout = nn.Dropout(p=dp_rate)
self.ones = nn.Parameter(torch.ones(n_permutation), requires_grad=False)
def forward(self, layer):
"""
Returns
-------
node_feats: torch.Tensor
Updated tensor.
"""
mask = self.dropout(self.ones).view(layer.shape[0],
1).repeat(1, layer.shape[1])
return mask * layer
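# A small sketch of the row-masking behaviour of Custom_dropout (shapes only;
# the exact values depend on the random dropout mask while in training mode):
#
#   drop = Custom_dropout(dp_rate=0.5, n_permutation=4)
#   x = torch.ones(4, 7)
#   masked = drop(x)  # shape (4, 7); whole rows are zeroed or rescaled by 1/(1-p)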
class LCNN(nn.Module):
"""
The Lattice Convolution Neural Network (LCNN)
This model takes lattice representation of Adsorbate Surface to predict
coverage effects taking into consideration the adjacent elements interaction
energies.
The model follows the following steps
[1] It performs n lattice convolution operations. For more details look at the LCNNBlock class
[2] Followed by Linear layer transforming into sitewise_n_feature
[3] Transformation to scalar value for each node.
[4] Average of properties per each element in a configuration
Examples
--------
>>> import deepchem as dc
>>> from pymatgen.core import Structure
>>> import numpy as np
>>> PRIMITIVE_CELL = {
... "lattice": [[2.818528, 0.0, 0.0],
... [-1.409264, 2.440917, 0.0],
... [0.0, 0.0, 25.508255]],
... "coords": [[0.66667, 0.33333, 0.090221],
... [0.33333, 0.66667, 0.18043936],
... [0.0, 0.0, 0.27065772],
... [0.66667, 0.33333, 0.36087608],
... [0.33333, 0.66667, 0.45109444],
... [0.0, 0.0, 0.49656991]],
... "species": ['H', 'H', 'H', 'H', 'H', 'He'],
... "site_properties": {'SiteTypes': ['S1', 'S1', 'S1', 'S1', 'S1', 'A1']}
... }
>>> PRIMITIVE_CELL_INF0 = {
... "cutoff": np.around(6.00),
... "structure": Structure(**PRIMITIVE_CELL),
... "aos": ['1', '0', '2'],
... "pbc": [True, True, False],
... "ns": 1,
... "na": 1
... }
>>> DATA_POINT = {
... "lattice": [[1.409264, -2.440917, 0.0],
... [4.227792, 2.440917, 0.0],
... [0.0, 0.0, 23.17559]],
... "coords": [[0.0, 0.0, 0.099299],
... [0.0, 0.33333, 0.198598],
... [0.5, 0.16667, 0.297897],
... [0.0, 0.0, 0.397196],
... [0.0, 0.33333, 0.496495],
... [0.5, 0.5, 0.099299],
... [0.5, 0.83333, 0.198598],
... [0.0, 0.66667, 0.297897],
... [0.5, 0.5, 0.397196],
... [0.5, 0.83333, 0.496495],
... [0.0, 0.66667, 0.54654766],
... [0.5, 0.16667, 0.54654766]],
... "species": ['H', 'H', 'H', 'H', 'H', 'H',
... 'H', 'H', 'H', 'H', 'He', 'He'],
... "site_properties": {
... "SiteTypes": ['S1', 'S1', 'S1', 'S1', 'S1',
... 'S1', 'S1', 'S1', 'S1', 'S1',
... 'A1', 'A1'],
... "oss": ['-1', '-1', '-1', '-1', '-1', '-1',
... '-1', '-1', '-1', '-1', '0', '2']
... }
... }
>>> featuriser = dc.feat.LCNNFeaturizer(**PRIMITIVE_CELL_INF0)
>>> lcnn_feat = featuriser._featurize(Structure(**DATA_POINT)).to_dgl_graph()
>>> print(type(lcnn_feat))
<class 'dgl.heterograph.DGLHeteroGraph'>
>>> model = LCNN()
>>> out = model(lcnn_feat)
>>> print(type(out))
<class 'torch.Tensor'>
References
----------
[1] <NAME>,<NAME>, <NAME> , and <NAME>
"Lattice Convolutional Neural Network Modeling of Adsorbate Coverage
Effects" The Journal of Physical Chemistry
[2] https://forum.deepchem.io/t/lattice-convolutional-neural-network-modeling-of-adsorbate-coverage-effects/124
Notes
-----
This class requires DGL and PyTorch to be installed.
"""
def __init__(self,
n_occupancy: int = 3,
n_neighbor_sites: int = 19,
n_permutation: int = 6,
n_task: int = 1,
dropout_rate: float = 0.2,
n_conv: int = 2,
n_features: int = 19,
sitewise_n_feature: int = 25):
"""
Parameters
----------
n_occupancy: int, default 3
number of possible occupancies
n_neighbor_sites: int, default 19
Number of neighbors of each site.
n_permutation: int, default 6
Number of different permutations taken along different directions.
n_task: int, default 1
Number of tasks
dropout_rate: float, default 0.2
p value for dropout between 0.0 to 1.0
n_conv: int, default 2
number of convolutions performed
n_features: int, default 19
number of features for each site
sitewise_n_feature: int, default 25
number of features for atoms for site-wise activation
"""
super(LCNN, self).__init__()
modules = [LCNNBlock(n_occupancy * n_neighbor_sites, n_features)]
for i in range(n_conv - 1):
modules.append(
LCNNBlock(n_features * n_neighbor_sites, n_features,
n_permutation))
self.LCNN_blocks = nn.Sequential(*modules)
self.Atom_wise_Conv = Atom_Wise_Convolution(n_features,
sitewise_n_feature)
self.Atom_wise_Lin = nn.Linear(sitewise_n_feature, sitewise_n_feature)
self.fc = nn.Linear(sitewise_n_feature, n_task)
self.activation = Shifted_softplus()
def forward(self, G):
"""
Parameters
----------
G: DGLGraph
DGLGraph for a batch of graphs.
Returns
-------
y: torch.Tensor
A single scalar value
"""
try:
import dgl
except:
raise ImportError("This class requires DGL to be installed.")
G = G.local_var()
node_feats = G.ndata.pop('x')
for conv in self.LCNN_blocks:
node_feats = conv(G, node_feats)
node_feats = self.Atom_wise_Conv(node_feats)
node_feats = self.Atom_wise_Lin(node_feats)
G.ndata['new'] = self.activation(node_feats)
y = dgl.mean_nodes(G, 'new')
y = self.fc(y)
return y
class LCNNModel(TorchModel):
"""
Lattice Convolutional Neural Network (LCNN).
Here is a simple example of code that uses the LCNNModel with
Platinum 2d Adsorption dataset.
This model takes arbitrary configurations of molecules on an adsorbate surface and predicts
their formation energy. These formation energies are normally obtained from DFT calculations, and
LCNNModel aims to automate that process. This model defines a crystal graph using the
distance between atoms. The crystal graph is an undirected regular graph (equal numbers of neighbours),
and different permutations of the neighbours are pre-computed using the LCNNFeaturizer.
On each node, for each permutation, the neighbour node features are concatenated and then further processed.
This model has only a node representation. Please see [1]_ for the detailed algorithm.
Examples
--------
>>>
>> import deepchem as dc
>> from pymatgen.core import Structure
>> import numpy as np
>> from deepchem.feat import LCNNFeaturizer
>> from deepchem.molnet import load_Platinum_Adsorption
>> PRIMITIVE_CELL = {
.. "lattice": [[2.818528, 0.0, 0.0],
.. [-1.409264, 2.440917, 0.0],
.. [0.0, 0.0, 25.508255]],
.. "coords": [[0.66667, 0.33333, 0.090221],
.. [0.33333, 0.66667, 0.18043936],
.. [0.0, 0.0, 0.27065772],
.. [0.66667, 0.33333, 0.36087608],
.. [0.33333, 0.66667, 0.45109444],
.. [0.0, 0.0, 0.49656991]],
.. "species": ['H', 'H', 'H', 'H', 'H', 'He'],
.. "site_properties": {'SiteTypes': ['S1', 'S1', 'S1', 'S1', 'S1', 'A1']}
.. }
>> PRIMITIVE_CELL_INF0 = {
.. "cutoff": np.around(6.00),
.. "structure": Structure(**PRIMITIVE_CELL),
.. "aos": ['1', '0', '2'],
.. "pbc": [True, True, False],
.. "ns": 1,
.. "na": 1
.. }
>> tasks, datasets, transformers = load_Platinum_Adsorption(
.. featurizer= LCNNFeaturizer( **PRIMITIVE_CELL_INF0)
.. )
>> train, val, test = datasets
>> model = LCNNModel(mode='regression',
.. batch_size=8,
.. learning_rate=0.001)
>> model.fit(train, nb_epoch=10)
References
----------
.. [1] <NAME> and <NAME>, J. Phys. Chem. C 2019, 123, 18951−18959.
Notes
-----
This class requires DGL and PyTorch to be installed.
"""
def __init__(self,
n_occupancy: int = 3,
n_neighbor_sites_list: int = 19,
n_permutation_list: int = 6,
n_task: int = 1,
dropout_rate: float = 0.4,
n_conv: int = 2,
n_features: int = 44,
sitewise_n_feature: int = 25,
**kwargs):
"""
This class accepts all the keyword arguments from TorchModel.
Parameters
----------
n_occupancy: int, default 3
number of possible occupancies.
n_neighbor_sites_list: int, default 19
Number of neighbors of each site.
n_permutation_list: int, default 6
Number of different permutations taken along different directions.
n_task: int, default 1
Number of tasks.
dropout_rate: float, default 0.4
Dropout probability, between 0.0 and 1.0.
n_conv: int, default 2
number of convolutions performed.
n_features: int, default 44
number of features for each site.
sitewise_n_feature: int, default 25
number of features for atoms for site-wise activation.
kwargs: Dict
This class accepts all the keyword arguments from TorchModel.
"""
def init_weights(m):
if isinstance(m, nn.Linear):
torch.nn.init.xavier_uniform_(m.weight)
model = LCNN(n_occupancy, n_neighbor_sites_list, n_permutation_list,
n_task, dropout_rate, n_conv, n_features,
sitewise_n_feature)
model.apply(init_weights)
loss = L2Loss()
output_types = ['prediction']
super(LCNNModel, self).__init__(model,
loss=loss,
output_types=output_types,
**kwargs)
def _prepare_batch(self, batch):
"""
Create batch data for LCNN.
Parameters
----------
batch: Tuple
The tuple are `(inputs, labels, weights)`.
Returns
-------
inputs: DGLGraph
DGLGraph for a batch of graphs.
labels: List[torch.Tensor] or None
The labels converted to torch.Tensor
weights: List[torch.Tensor] or None
The weights for each sample or sample/task pair converted to torch.Tensor
"""
try:
import dgl
except:
raise ImportError("This class requires DGL to be installed.")
inputs, labels, weights = batch
dgl_graphs = [graph.to_dgl_graph() for graph in inputs[0]]
inputs = dgl.batch(dgl_graphs).to(self.device)
_, labels, weights = super(LCNNModel, self)._prepare_batch(
([], labels, weights))
return inputs, labels, weights
<file_sep>"""
Implementation of MEGNet class
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
from deepchem.models.losses import Loss, L2Loss, SparseSoftmaxCrossEntropy
from deepchem.models.torch_models.layers import GraphNetwork as GN
from deepchem.models.torch_models import TorchModel
class MEGNet(nn.Module):
"""MatErials Graph Network
A model for predicting crystal and molecular properties using GraphNetworks.
Example
-------
>>> import numpy as np
>>> from torch_geometric.data import Batch
>>> from deepchem.feat import GraphData
>>> n_nodes, n_node_features = 5, 10
>>> n_edges, n_edge_attrs = 5, 2
>>> n_global_features = 4
>>> node_features = np.random.randn(n_nodes, n_node_features)
>>> edge_attrs = np.random.randn(n_edges, n_edge_attrs)
>>> edge_index = np.array([[0, 1, 2, 3, 4], [1, 2, 3, 4, 0]], dtype=np.int64)
>>> global_features = np.random.randn(1, n_global_features)
>>> graph = GraphData(node_features, edge_index, edge_attrs, global_features=global_features)
>>> batch = Batch()
>>> batch = batch.from_data_list([graph.to_pyg_graph()])
>>> model = MEGNet(n_node_features=n_node_features, n_edge_features=n_edge_attrs, n_global_features=n_global_features)
>>> pred = model(batch)
Note
----
This class requires torch-geometric to be installed.
"""
def __init__(self,
n_node_features: int = 32,
n_edge_features: int = 32,
n_global_features: int = 32,
n_blocks: int = 1,
is_undirected: bool = True,
residual_connection: bool = True,
mode: str = 'regression',
n_classes: int = 2,
n_tasks: int = 1):
"""
Parameters
----------
n_node_features: int
Number of features in a node
n_edge_features: int
Number of features in an edge
n_global_features: int
Number of global features
n_blocks: int
Number of GraphNetwork blocks to use in update
is_undirected: bool, optional (default True)
True when the graph is an undirected graph, otherwise False
residual_connection: bool, optional (default True)
If True, the layer uses a residual connection during training
n_tasks: int, default 1
The number of tasks
mode: str, default 'regression'
The model type - classification or regression
n_classes: int, default 2
The number of classes to predict (used only in classification mode).
"""
super(MEGNet, self).__init__()
try:
from torch_geometric.nn import Set2Set
except ModuleNotFoundError:
raise ImportError(
"MEGNet model requires torch_geometric to be installed")
if mode not in ['classification', 'regression']:
raise ValueError(
"mode must be either 'classification' or 'regression'")
self.n_node_features = n_node_features
self.n_edge_features = n_edge_features
self.n_global_features = n_global_features
self.megnet_blocks = nn.ModuleList()
self.n_blocks = n_blocks
for i in range(n_blocks):
self.megnet_blocks.append(
GN(n_node_features=n_node_features,
n_edge_features=n_edge_features,
n_global_features=n_global_features,
is_undirected=is_undirected,
residual_connection=residual_connection))
self.n_tasks = n_tasks
self.mode = mode
self.n_classes = n_classes
self.set2set_nodes = Set2Set(in_channels=n_node_features,
processing_steps=3,
num_layers=1)
self.set2set_edges = Set2Set(in_channels=n_edge_features,
processing_steps=3,
num_layers=1)
self.dense = nn.Sequential(
nn.Linear(in_features=2 * n_node_features + 2 * n_edge_features +
n_global_features,
out_features=32),
nn.Linear(in_features=32, out_features=16))
if self.mode == 'regression':
self.out = nn.Linear(in_features=16, out_features=n_tasks)
elif self.mode == 'classification':
self.out = nn.Linear(in_features=16,
out_features=n_tasks * n_classes)
def forward(self, pyg_batch):
"""
Parameters
----------
pyg_batch: torch_geometric.data.Batch
A pytorch-geometric batch of graphs where node attributes are stored
as pyg_batch['x'], edge_index in pyg_batch['edge_index'], edge features
in pyg_batch['edge_attr'], global features in pyg_batch['global_features']
Returns
-------
torch.Tensor: Predictions for the graph
"""
node_features = pyg_batch['x']
edge_index, edge_features = pyg_batch['edge_index'], pyg_batch[
'edge_attr']
global_features = pyg_batch['global_features']
batch = pyg_batch['batch']
for i in range(self.n_blocks):
node_features, edge_features, global_features = self.megnet_blocks[
i](node_features, edge_index, edge_features, global_features,
batch)
node_features = self.set2set_nodes(node_features, batch)
edge_features = self.set2set_edges(edge_features, batch[edge_index[0]])
out = torch.cat([node_features, edge_features, global_features], axis=1)
out = self.out(self.dense(out))
if self.mode == 'classification':
if self.n_tasks == 1:
logits = out.view(-1, self.n_classes)
softmax_dim = 1
else:
logits = out.view(-1, self.n_tasks, self.n_classes)
softmax_dim = 2
proba = F.softmax(logits, dim=softmax_dim)
return proba, logits
elif self.mode == 'regression':
return out
class MEGNetModel(TorchModel):
"""MatErials Graph Network for Molecules and Crystals
MatErials Graph Networks [1]_ are Graph Networks [2]_ which are used for property prediction
in molecules and crystals. The model implements multiple layers of Graph Network as
MEGNetBlocks and then combines the node properties and edge properties of all nodes
and edges via a Set2Set layer. The combined information is used together with the global
features of the material/molecule for property prediction tasks.
Example
-------
>>> import deepchem as dc
>>> from deepchem.models import MEGNetModel
>>> from deepchem.utils.fake_data_generator import FakeGraphGenerator as FGG
>>> graphs = FGG(global_features=4, num_classes=10).sample(n_graphs=20)
>>> model = dc.models.MEGNetModel(n_node_features=5, n_edge_features=3, n_global_features=4, n_blocks=3, is_undirected=True, residual_connection=True, mode='classification', n_classes=10, batch_size=16)
>>> training_loss = model.fit(graphs)
References
----------
.. [1] <NAME>, et al. "Graph networks as a universal machine learning framework for molecules and crystals." Chemistry of Materials 31.9 (2019): 3564-3572.
.. [2] Battaglia, <NAME>., et al. "Relational inductive biases, deep learning, and graph networks." arXiv preprint arXiv:1806.01261 (2018).
Note
----
The model requires PyTorch-Geometric to be installed.
"""
def __init__(self,
n_node_features: int = 32,
n_edge_features: int = 32,
n_global_features: int = 32,
n_blocks: int = 1,
is_undirected: bool = True,
residual_connection: bool = True,
mode: str = 'regression',
n_classes: int = 2,
n_tasks: int = 1,
**kwargs):
"""
Parameters
----------
n_node_features: int
Number of features in a node
n_edge_features: int
Number of features in an edge
n_global_features: int
Number of global features
n_blocks: int
Number of GraphNetwork blocks to use in update
is_undirected: bool, optional (default True)
True when the model is used on undirected graphs, otherwise False
residual_connection: bool, optional (default True)
If True, the layer uses a residual connection during training
n_tasks: int, default 1
The number of tasks
mode: str, default 'regression'
The model type - classification or regression
n_classes: int, default 2
The number of classes to predict (used only in classification mode).
kwargs: Dict
kwargs supported by TorchModel
"""
model = MEGNet(n_node_features=n_node_features,
n_edge_features=n_edge_features,
n_global_features=n_global_features,
n_blocks=n_blocks,
is_undirected=is_undirected,
residual_connection=residual_connection,
mode=mode,
n_classes=n_classes,
n_tasks=n_tasks)
if mode == 'regression':
loss: Loss = L2Loss()
output_types = ['prediction']
elif mode == 'classification':
loss = SparseSoftmaxCrossEntropy()
output_types = ['prediction', 'loss']
super(MEGNetModel, self).__init__(model,
loss=loss,
output_types=output_types,
**kwargs)
def _prepare_batch(self, batch):
"""Creates batch data for MEGNet model
Note
----
Ideally, we should only override default_generator method. But the problem
here is that the _prepare_batch of TorchModel only supports non-graph
data types. Hence, we are overriding it here. This should be fixed
some time in the future.
"""
try:
from torch_geometric.data import Batch
except ModuleNotFoundError:
raise ImportError("This module requires PyTorch Geometric")
# We convert deepchem.feat.GraphData to a PyG graph and then
# batch it.
graphs, labels, weights = batch
# The default_generator method returns an array of dc.feat.GraphData objects
# nested inside a list. To access the nested array of graphs, we are
# indexing by 0 here.
graph_list = [graph.to_pyg_graph() for graph in graphs[0]]
pyg_batch = Batch()
pyg_batch = pyg_batch.from_data_list(graph_list)
_, labels, weights = super(MEGNetModel, self)._prepare_batch(
([], labels, weights))
return pyg_batch, labels, weights
<file_sep>"""
Data Structures used to represented molecules for convolutions.
"""
# flake8: noqa
import csv
import random
import numpy as np
def cumulative_sum_minus_last(l, offset=0):
"""Returns cumulative sums for set of counts, removing last entry.
Returns the cumulative sums for a set of counts with the first returned value
starting at 0. I.e [3,2,4] -> [0, 3, 5]. Note last sum element 9 is missing.
Useful for reindexing
Parameters
----------
l: list
List of integers. Typically small counts.
"""
return np.delete(np.insert(np.cumsum(l, dtype=np.int32), 0, 0), -1) + offset
def cumulative_sum(l, offset=0):
"""Returns cumulative sums for set of counts.
Returns the cumulative sums for a set of counts with the first returned value
starting at 0. I.e [3,2,4] -> [0, 3, 5, 9]. Keeps final sum for searching.
Useful for reindexing.
Parameters
----------
l: list
List of integers. Typically small counts.
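
    Examples
    --------
    A minimal illustrative doctest, reusing the toy counts from the
    description above:

    >>> cumulative_sum([3, 2, 4]).tolist()
    [0, 3, 5, 9]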
"""
return np.insert(np.cumsum(l), 0, 0) + offset
class ConvMol(object):
"""Holds information about a molecules.
Resorts order of atoms internally to be in order of increasing degree. Note
that only heavy atoms (hydrogens excluded) are considered here.
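
    Examples
    --------
    A small illustrative sketch (the three-atom adjacency list below is an
    invented toy example, not taken from any dataset):

    >>> import numpy as np
    >>> mol = ConvMol(np.random.rand(3, 4), [[1], [0, 2], [1]])
    >>> mol.get_num_atoms()
    3
    >>> mol.get_atom_features().shape
    (3, 4)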
"""
def __init__(self, atom_features, adj_list, max_deg=10, min_deg=0):
"""
Parameters
----------
atom_features: np.ndarray
Has shape (n_atoms, n_feat)
adj_list: list
            List of length n_atoms, with neighbor indices of each atom.
max_deg: int, optional
Maximum degree of any atom.
min_deg: int, optional
Minimum degree of any atom.
"""
self.atom_features = atom_features
self.n_atoms, self.n_feat = atom_features.shape
self.deg_list = np.array([len(nbrs) for nbrs in adj_list],
dtype=np.int32)
self.canon_adj_list = adj_list
self.deg_adj_lists = []
self.deg_slice = []
self.max_deg = max_deg
self.min_deg = min_deg
self.membership = self.get_num_atoms() * [0]
self._deg_sort()
# Get the degree id list (which corrects for min_deg)
self.deg_id_list = np.array(self.deg_list) - min_deg
# Get the size of each degree block
deg_size = [
self.get_num_atoms_with_deg(deg)
for deg in range(self.min_deg, self.max_deg + 1)
]
self.degree_list = []
for i, deg in enumerate(range(self.min_deg, self.max_deg + 1)):
self.degree_list.extend([deg] * deg_size[i])
        # Get the start indices for items in each block
self.deg_start = cumulative_sum(deg_size)
        # Get the node indices, which reset when the degree changes
deg_block_indices = [
i - self.deg_start[self.deg_list[i]] for i in range(self.n_atoms)
]
# Convert to numpy array
self.deg_block_indices = np.array(deg_block_indices, dtype=np.int32)
def get_atoms_with_deg(self, deg):
"""Retrieves atom_features with the specific degree"""
start_ind = self.deg_slice[deg - self.min_deg, 0]
size = self.deg_slice[deg - self.min_deg, 1]
return self.atom_features[start_ind:(start_ind + size), :]
def get_num_atoms_with_deg(self, deg):
"""Returns the number of atoms with the given degree"""
return self.deg_slice[deg - self.min_deg, 1]
def get_num_atoms(self):
return self.n_atoms
def _deg_sort(self):
"""Sorts atoms by degree and reorders internal data structures.
Sort the order of the atom_features by degree, maintaining original order
whenever two atom_features have the same degree.
"""
old_ind = range(self.get_num_atoms())
deg_list = self.deg_list
new_ind = list(np.lexsort((old_ind, deg_list)))
num_atoms = self.get_num_atoms()
# Reorder old atom_features
self.atom_features = self.atom_features[new_ind, :]
# Reorder old deg lists
self.deg_list = [self.deg_list[i] for i in new_ind]
# Sort membership
self.membership = [self.membership[i] for i in new_ind]
        # Create old-to-new index dictionary (not exactly intuitive)
old_to_new = dict(zip(new_ind, old_ind))
# Reorder adjacency lists
self.canon_adj_list = [self.canon_adj_list[i] for i in new_ind]
self.canon_adj_list = [[old_to_new[k]
for k in self.canon_adj_list[i]]
for i in range(len(new_ind))]
# Get numpy version of degree list for indexing
deg_array = np.array(self.deg_list)
# Initialize adj_lists, which supports min_deg = 1 only
self.deg_adj_lists = (self.max_deg + 1 - self.min_deg) * [0]
# Parse as deg separated
for deg in range(self.min_deg, self.max_deg + 1):
# Get indices corresponding to the current degree
rng = np.array(range(num_atoms))
indices = rng[deg_array == deg]
# Extract and save adjacency list for the current degree
to_cat = [self.canon_adj_list[i] for i in indices]
if len(to_cat) > 0:
adj_list = np.vstack([self.canon_adj_list[i] for i in indices])
self.deg_adj_lists[deg - self.min_deg] = adj_list.astype(
np.int32)
else:
self.deg_adj_lists[deg - self.min_deg] = np.zeros(
[0, deg], dtype=np.int32)
# Construct the slice information
deg_slice = np.zeros([self.max_deg + 1 - self.min_deg, 2],
dtype=np.int32)
for deg in range(self.min_deg, self.max_deg + 1):
if deg == 0:
deg_size = np.sum(deg_array == deg)
else:
deg_size = self.deg_adj_lists[deg - self.min_deg].shape[0]
deg_slice[deg - self.min_deg, 1] = deg_size
# Get the cumulative indices after the first index
if deg > self.min_deg:
deg_slice[deg - self.min_deg,
0] = (deg_slice[deg - self.min_deg - 1, 0] +
deg_slice[deg - self.min_deg - 1, 1])
# Set indices with zero sized slices to zero to avoid indexing errors
deg_slice[:, 0] *= (deg_slice[:, 1] != 0)
self.deg_slice = deg_slice
def get_atom_features(self):
"""Returns canonicalized version of atom features.
Features are sorted by atom degree, with original order maintained when
degrees are same.
"""
return self.atom_features
def get_adjacency_list(self):
"""Returns a canonicalized adjacency list.
Canonicalized means that the atoms are re-ordered by degree.
Returns
-------
list
Canonicalized form of adjacency list.
"""
return self.canon_adj_list
def get_deg_adjacency_lists(self):
"""Returns adjacency lists grouped by atom degree.
Returns
-------
list
Has length (max_deg+1-min_deg). The element at position deg is
itself a list of the neighbor-lists for atoms with degree deg.
"""
return self.deg_adj_lists
def get_deg_slice(self):
"""Returns degree-slice tensor.
The deg_slice tensor allows indexing into a flattened version of the
molecule's atoms. Assume atoms are sorted in order of degree. Then
deg_slice[deg][0] is the starting position for atoms of degree deg in
flattened list, and deg_slice[deg][1] is the number of atoms with degree deg.
Note deg_slice has shape (max_deg+1-min_deg, 2).
Returns
-------
deg_slice: np.ndarray
Shape (max_deg+1-min_deg, 2)
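
        Examples
        --------
        Illustrative sketch with an invented three-atom molecule: the two
        degree-1 atoms start at row 0, so the degree-1 slice is (0, 2).

        >>> import numpy as np
        >>> mol = ConvMol(np.random.rand(3, 4), [[1], [0, 2], [1]])
        >>> mol.get_deg_slice()[1].tolist()
        [0, 2]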
"""
return self.deg_slice
# TODO(rbharath): Can this be removed?
@staticmethod
def get_null_mol(n_feat, max_deg=10, min_deg=0):
"""Constructs a null molecules
Get one molecule with one atom of each degree, with all the atoms
connected to themselves, and containing n_feat features.
Parameters
----------
n_feat : int
number of features for the nodes in the null molecule
"""
        # Use random instead of zeros to prevent weird issues with summing to zero
atom_features = np.random.uniform(0, 1, [max_deg + 1 - min_deg, n_feat])
canon_adj_list = [
deg * [deg - min_deg] for deg in range(min_deg, max_deg + 1)
]
return ConvMol(atom_features, canon_adj_list)
@staticmethod
def agglomerate_mols(mols, max_deg=10, min_deg=0):
"""Concatenates list of ConvMol's into one mol object that can be used to feed
into tensorflow placeholders. The indexing of the molecules are preseved during the
combination, but the indexing of the atoms are greatly changed.
Parameters
----------
mols: list
ConvMol objects to be combined into one molecule.
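
        Examples
        --------
        Illustrative sketch combining two invented two-atom molecules (random
        features, single bond each):

        >>> import numpy as np
        >>> feats = np.random.rand(2, 4)
        >>> adj = [[1], [0]]
        >>> multi = ConvMol.agglomerate_mols([ConvMol(feats, adj), ConvMol(feats, adj)])
        >>> multi.get_num_molecules()
        2
        >>> multi.get_num_atoms()
        4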
"""
num_mols = len(mols)
# Combine the features, then sort them by (atom_degree, mol_index)
atoms_by_deg = np.concatenate([x.atom_features for x in mols])
degree_vector = np.concatenate([x.degree_list for x in mols], axis=0)
        # Mergesort is a "stable" sort, so the array maintains its secondary sort of mol_index
order = degree_vector.argsort(kind='mergesort')
ordered = np.empty(order.shape, np.int32)
ordered[order] = np.arange(order.shape[0], dtype=np.int32)
all_atoms = atoms_by_deg[order]
# Create a map from the original atom indices within each molecule to the
# indices in the combined object.
mol_atom_map = []
index_start = 0
for mol in mols:
mol_atom_map.append(ordered[index_start:index_start +
mol.get_num_atoms()])
index_start += mol.get_num_atoms()
# Sort all atoms by degree.
# Get the size of each atom list separated by molecule id, then by degree
mol_deg_sz = np.zeros([max_deg - min_deg + 1, num_mols], dtype=np.int32)
for i, mol in enumerate(mols):
mol_deg_sz[:, i] += mol.deg_slice[:, 1]
# Get the final size of each degree block
deg_sizes = np.sum(mol_deg_sz, axis=1)
# Get the index at which each degree starts, not resetting after each degree
# And not stopping at any specific molecule
deg_start = cumulative_sum_minus_last(deg_sizes)
# Get the tensorflow object required for slicing (deg x 2) matrix, with the
# first column telling the start indices of each degree block and the
        # second column telling the size of each degree block
deg_slice = np.array(list(zip(deg_start, deg_sizes)))
# Determine the membership (atom i belongs to molecule membership[i])
membership = np.empty(all_atoms.shape[0], np.int32)
for i in range(num_mols):
membership[mol_atom_map[i]] = i
# Initialize the new degree separated adjacency lists
deg_adj_lists = [
np.empty([deg_sizes[deg], deg], dtype=np.int32)
for deg in range(min_deg, max_deg + 1)
]
# Update the old adjacency lists with the new atom indices and then combine
# all together
for deg in range(min_deg, max_deg + 1):
row = 0 # Initialize counter
deg_id = deg - min_deg # Get corresponding degree id
# Iterate through all the molecules
for mol_id in range(num_mols):
# Get the adjacency lists for this molecule and current degree id
nbr_list = mols[mol_id].deg_adj_lists[deg_id]
# Correct all atom indices to the final indices, and then save the
# results into the new adjacency lists
if nbr_list.shape[0] > 0:
if nbr_list.dtype == np.int32:
final_id = mol_atom_map[mol_id][nbr_list]
deg_adj_lists[deg_id][row:(
row + nbr_list.shape[0])] = final_id
row += nbr_list.shape[0]
else:
for i in range(nbr_list.shape[0]):
for j in range(nbr_list.shape[1]):
deg_adj_lists[deg_id][
row, j] = mol_atom_map[mol_id][nbr_list[i,
j]]
# Increment once row is done
row += 1
# Get the final aggregated molecule
concat_mol = MultiConvMol(all_atoms, deg_adj_lists, deg_slice,
membership, num_mols)
return concat_mol
class MultiConvMol(object):
"""Holds information about multiple molecules, for use in feeding information
into tensorflow. Generated using the agglomerate_mols function
"""
def __init__(self, nodes, deg_adj_lists, deg_slice, membership, num_mols):
self.nodes = nodes
self.deg_adj_lists = deg_adj_lists
self.deg_slice = deg_slice
self.membership = membership
self.num_mols = num_mols
self.num_atoms = nodes.shape[0]
def get_deg_adjacency_lists(self):
return self.deg_adj_lists
def get_atom_features(self):
return self.nodes
def get_num_atoms(self):
return self.num_atoms
def get_num_molecules(self):
return self.num_mols
class WeaveMol(object):
"""Molecular featurization object for weave convolutions.
These objects are produced by WeaveFeaturizer, and feed into
WeaveModel. The underlying implementation is inspired by [1]_.
References
----------
.. [1] Kearnes, Steven, et al. "Molecular graph convolutions: moving beyond fingerprints." Journal of computer-aided molecular design 30.8 (2016): 595-608.
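
    Examples
    --------
    Illustrative sketch; the array shapes below are placeholders chosen for
    this example rather than values prescribed by the class:

    >>> import numpy as np
    >>> wm = WeaveMol(np.zeros((2, 4)), np.zeros((4, 6)), np.zeros((2, 4), dtype=int))
    >>> wm.get_num_atoms()
    2
    >>> wm.get_num_features()
    4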
"""
def __init__(self, nodes, pairs, pair_edges):
self.nodes = nodes
self.pairs = pairs
self.num_atoms = self.nodes.shape[0]
self.n_features = self.nodes.shape[1]
self.pair_edges = pair_edges
def get_pair_edges(self):
return self.pair_edges
def get_pair_features(self):
return self.pairs
def get_atom_features(self):
return self.nodes
def get_num_atoms(self):
return self.num_atoms
def get_num_features(self):
return self.n_features
<file_sep>import os
import pytest
import numpy as np
import deepchem as dc
from deepchem.feat.molecule_featurizers import SNAPFeaturizer
def get_regression_dataset():
np.random.seed(123)
featurizer = SNAPFeaturizer()
dir = os.path.dirname(os.path.abspath(__file__))
input_file = os.path.join(dir, 'assets/example_regression.csv')
loader = dc.data.CSVLoader(tasks=["outcome"],
feature_field="smiles",
featurizer=featurizer)
dataset = loader.create_dataset(input_file)
metric = dc.metrics.Metric(dc.metrics.mean_absolute_error,
mode="regression")
return dataset, metric
def compare_weights(key, model1, model2):
import torch
return torch.all(
torch.eq(model1.components[key].weight,
model2.components[key].weight)).item()
def get_multitask_regression_dataset():
featurizer = SNAPFeaturizer()
dir = os.path.dirname(os.path.abspath(__file__))
input_file = os.path.join(dir, 'assets/multitask_regression.csv')
loader = dc.data.CSVLoader(tasks=['task0', 'task1', 'task2'],
feature_field="smiles",
featurizer=featurizer)
dataset = loader.create_dataset(input_file)
metric = dc.metrics.Metric(dc.metrics.mean_absolute_error,
mode="regression")
return dataset, metric
@pytest.mark.torch
def get_multitask_classification_dataset():
featurizer = SNAPFeaturizer()
dir = os.path.dirname(os.path.abspath(__file__))
input_file = os.path.join(dir, 'assets/multitask_example.csv')
loader = dc.data.CSVLoader(tasks=['task0', 'task1', 'task2'],
feature_field="smiles",
featurizer=featurizer)
dataset = loader.create_dataset(input_file)
metric = dc.metrics.Metric(dc.metrics.roc_auc_score,
np.mean,
mode="classification")
return dataset, metric
@pytest.mark.torch
def test_GNN_load_from_pretrained():
from deepchem.models.torch_models.gnn import GNNModular
dataset, _ = get_regression_dataset()
model = GNNModular(task="edge_pred")
model.fit(dataset, nb_epoch=1)
model2 = GNNModular(task="edge_pred")
model2.load_from_pretrained(model_dir=model.model_dir)
assert model.components.keys() == model2.components.keys()
keys_with_weights = [
key for key in model.components.keys()
if hasattr(model.components[key], 'weight')
]
assert all(compare_weights(key, model, model2) for key in keys_with_weights)
@pytest.mark.torch
def test_gnn_reload(tmpdir):
import torch
from deepchem.models.torch_models.gnn import GNNModular
model_config = {
'gnn_type': 'gin',
'num_layers': 3,
'emb_dim': 64,
'task': 'regression',
'mask_edge': True,
'model_dir': tmpdir,
'device': torch.device('cpu')
}
old_model = GNNModular(**model_config)
old_model._ensure_built()
old_model.save_checkpoint()
old_model_state = old_model.model.state_dict()
new_model = GNNModular(**model_config)
new_model.restore()
new_model_state = new_model.model.state_dict()
for key in new_model_state.keys():
assert torch.allclose(old_model_state[key], new_model_state[key])
@pytest.mark.torch
def test_GNN_edge_pred():
"""Tests the unsupervised edge prediction task"""
from deepchem.models.torch_models.gnn import GNNModular
dataset, _ = get_regression_dataset()
model = GNNModular(task="edge_pred")
loss1 = model.fit(dataset, nb_epoch=5)
loss2 = model.fit(dataset, nb_epoch=5)
assert loss2 < loss1
@pytest.mark.torch
def test_GNN_node_masking():
"""Tests the unsupervised node masking task"""
from deepchem.models.torch_models.gnn import GNNModular
dataset, _ = get_regression_dataset()
model = GNNModular(task="mask_nodes", device="cpu")
loss1 = model.fit(dataset, nb_epoch=5)
loss2 = model.fit(dataset, nb_epoch=5)
assert loss2 < loss1
@pytest.mark.torch
def test_GNN_edge_masking():
"""Tests the unsupervised node masking task"""
from deepchem.models.torch_models.gnn import GNNModular
dataset, _ = get_regression_dataset()
model = GNNModular(task="mask_edges")
loss1 = model.fit(dataset, nb_epoch=5)
loss2 = model.fit(dataset, nb_epoch=5)
assert loss2 < loss1
@pytest.mark.torch
def test_GNN_regression():
from deepchem.models.torch_models.gnn import GNNModular
dataset, metric = get_regression_dataset()
model = GNNModular(task="regression")
model.fit(dataset, nb_epoch=100)
scores = model.evaluate(dataset, [metric])
assert scores['mean_absolute_error'] < 0.2
@pytest.mark.torch
def test_GNN_multitask_regression():
from deepchem.models.torch_models.gnn import GNNModular
dataset, metric = get_multitask_regression_dataset()
model = GNNModular(task="regression", gnn_type="gcn", num_tasks=3)
model.fit(dataset, nb_epoch=100)
scores = model.evaluate(dataset, [metric])
assert scores['mean_absolute_error'] < 0.2
@pytest.mark.torch
def test_GNN_multitask_classification():
from deepchem.models.torch_models.gnn import GNNModular
dataset, metric = get_multitask_classification_dataset()
model = GNNModular(task="classification", gnn_type='sage', num_tasks=3)
model.fit(dataset, nb_epoch=200)
scores = model.evaluate(dataset, [metric])
assert scores['mean-roc_auc_score'] >= 0.8
@pytest.mark.torch
def test_GNN_infomax():
from deepchem.models.torch_models.gnn import GNNModular
dataset, _ = get_regression_dataset()
model = GNNModular(task="infomax", gnn_type='gat')
loss1 = model.fit(dataset, nb_epoch=5)
loss2 = model.fit(dataset, nb_epoch=5)
assert loss2 < loss1
@pytest.mark.torch
def test_GNN_context_pred():
from deepchem.models.torch_models.gnn import GNNModular
dataset, _ = get_regression_dataset()
model = GNNModular(task="context_pred",
context_mode="skipgram",
jump_knowledge="concat")
loss1 = model.fit(dataset, nb_epoch=5)
loss2 = model.fit(dataset, nb_epoch=5)
assert loss2 < loss1
model = GNNModular(task="context_pred",
context_mode="cbow",
jump_knowledge="last")
loss1 = model.fit(dataset, nb_epoch=5)
loss2 = model.fit(dataset, nb_epoch=5)
assert loss2 < loss1
<file_sep>import numpy as np
import logging
from deepchem.feat import MaterialStructureFeaturizer
from collections import defaultdict
from typing import List, Dict, Tuple, DefaultDict, Any
from deepchem.utils.typing import PymatgenStructure
from deepchem.feat.graph_data import GraphData
from scipy.spatial.distance import pdist, squareform, cdist
from scipy.spatial.transform import Rotation
class LCNNFeaturizer(MaterialStructureFeaturizer):
"""
    Calculates 2-D surface graph features in 6 different permutations,
    based on the implementation of the Lattice Graph Convolution Neural
    Network (LCNN). This method produces the atom-wise features (one-hot encoding)
    and the adjacent neighbors in the specified order of permutations. Neighbors are
    determined by first extracting a site's local environment from the primitive cell,
    and then performing graph matching and distance matching to find neighbors.
    First, the template of the primitive cell needs to be defined along with periodic
    boundary conditions and active and spectator site details. The structure (data point,
    i.e. a particular configuration of adsorbate atoms) is then passed for featurization.
    This particular featurization produces a regular graph (equal number of neighbors)
    along with its permutations along 6 symmetric axes. This transformation can be
    applied when the ordering of neighboring nodes around a site plays an important role
    in the property predictions. Because it considers the local neighbor environment,
    this implementation is useful for finding neighbors when calculating the formation
    energy of adsorption tasks, where the local environment matters. Adsorption is important
    in many applications such as catalyst and semiconductor design.
The permuted neighbors are calculated using the Primitive cells i.e periodic cells
in all the data points are built via lattice transformation of the primitive cell.
`Primitive cell Format:`
1. Pymatgen structure object with site_properties key value
- "SiteTypes" mentioning if it is a active site "A1" or spectator
site "S1".
    2. ns, the number of spectator type elements. For "S1" it is 1.
    3. na, the number of active type elements. For "A1" it is 1.
4. aos, the different species of active elements "A1".
5. pbc, the periodic boundary conditions.
`Data point Structure Format(Configuration of Atoms):`
1. Pymatgen structure object with site_properties with following key value.
- "SiteTypes", mentioning if it is a active site "A1" or spectator
site "S1".
- "oss", different occupational sites. For spectator sites make it -1.
    It is highly recommended that the data cells are redefined directly from
    the primitive cell; specifically, the relative coordinates between sites
    should be consistent so that the lattice is not deviated.
References
----------
.. [1] <NAME> and <NAME>, J. Phys. Chem. C 2019, 123, 18951−18959
Examples
--------
>>> import deepchem as dc
>>> from pymatgen.core import Structure
>>> import numpy as np
>>> PRIMITIVE_CELL = {
... "lattice": [[2.818528, 0.0, 0.0],
... [-1.409264, 2.440917, 0.0],
... [0.0, 0.0, 25.508255]],
... "coords": [[0.66667, 0.33333, 0.090221],
... [0.33333, 0.66667, 0.18043936],
... [0.0, 0.0, 0.27065772],
... [0.66667, 0.33333, 0.36087608],
... [0.33333, 0.66667, 0.45109444],
... [0.0, 0.0, 0.49656991]],
... "species": ['H', 'H', 'H', 'H', 'H', 'He'],
... "site_properties": {'SiteTypes': ['S1', 'S1', 'S1', 'S1', 'S1', 'A1']}
... }
>>> PRIMITIVE_CELL_INF0 = {
... "cutoff": np.around(6.00),
... "structure": Structure(**PRIMITIVE_CELL),
... "aos": ['1', '0', '2'],
... "pbc": [True, True, False],
... "ns": 1,
... "na": 1
... }
>>> DATA_POINT = {
... "lattice": [[1.409264, -2.440917, 0.0],
... [4.227792, 2.440917, 0.0],
... [0.0, 0.0, 23.17559]],
... "coords": [[0.0, 0.0, 0.099299],
... [0.0, 0.33333, 0.198598],
... [0.5, 0.16667, 0.297897],
... [0.0, 0.0, 0.397196],
... [0.0, 0.33333, 0.496495],
... [0.5, 0.5, 0.099299],
... [0.5, 0.83333, 0.198598],
... [0.0, 0.66667, 0.297897],
... [0.5, 0.5, 0.397196],
... [0.5, 0.83333, 0.496495],
... [0.0, 0.66667, 0.54654766],
... [0.5, 0.16667, 0.54654766]],
... "species": ['H', 'H', 'H', 'H', 'H', 'H',
... 'H', 'H', 'H', 'H', 'He', 'He'],
... "site_properties": {
... "SiteTypes": ['S1', 'S1', 'S1', 'S1', 'S1',
... 'S1', 'S1', 'S1', 'S1', 'S1',
... 'A1', 'A1'],
... "oss": ['-1', '-1', '-1', '-1', '-1', '-1',
... '-1', '-1', '-1', '-1', '0', '2']
... }
... }
>>> featuriser = dc.feat.LCNNFeaturizer(**PRIMITIVE_CELL_INF0)
>>> print(type(featuriser._featurize(Structure(**DATA_POINT))))
<class 'deepchem.feat.graph_data.GraphData'>
Notes
-----
This Class requires pymatgen , networkx , scipy installed.
"""
def __init__(self,
structure: PymatgenStructure,
aos: List[str],
pbc: List[bool],
ns: int = 1,
na: int = 1,
cutoff: float = 6.00):
"""
Parameters
----------
        structure: PymatgenStructure
            Pymatgen Structure object of the primitive cell used for calculating
            neighbors from lattice transformations. It also requires a site_properties
            attribute with "SiteTypes" (active or spectator site).
aos: List[str]
A list of all the active site species. For the Pt, N, NO configuration
set it as ['0', '1', '2']
pbc: List[bool]
Periodic Boundary Condition
ns: int (default 1)
            The number of spectator type elements. For "S1" it is 1.
        na: int (default 1)
            The number of active type elements. For "A1" it is 1.
        cutoff: float (default 6.00)
            Cutoff radius for getting the local environment. Only
            used down to 2 digits.
"""
try:
from pymatgen.core import Structure
except:
raise ImportError("This class requires pymatgen to be installed.")
if type(structure) is not Structure:
structure = Structure(**structure)
self.aos = aos
self.cutoff = np.around(cutoff, 2)
self.setup_env = _load_primitive_cell(structure, aos, pbc, ns, na,
cutoff)
def _featurize(self, datapoint: PymatgenStructure, **kwargs) -> GraphData:
"""
Parameters
----------
        datapoint: PymatgenStructure
Pymatgen Structure object of the surface configuration. It also requires
site_properties attribute with "Sitetypes"(Active or spectator site) and
"oss"(Species of Active site from the list of self.aos and "-1" for
spectator sites).
Returns
-------
graph: GraphData
            Node features and all edges for each node in different permutations
"""
if 'structure' in kwargs and datapoint is None:
datapoint = kwargs.get("structure")
raise DeprecationWarning(
'Structure is being phased out as a parameter, please pass "datapoint" instead.'
)
xSites, xNSs = self.setup_env.read_datum(datapoint)
config_size = xNSs.shape
v = np.arange(0, len(xSites)).repeat(config_size[2] * config_size[3])
u = xNSs.flatten()
graph = GraphData(node_features=xSites, edge_index=np.array([u, v]))
return graph
class _SiteEnvironment(object):
def __init__(self,
pos: np.ndarray,
sitetypes: List[str],
env2config: List[int],
permutations: List[List[int]],
cutoff: float = 6.00,
Grtol: float = 0.0,
Gatol: float = 0.01,
rtol: float = 0.01,
atol: float = 0.0,
tol: float = 0.01,
grtol: float = 1e-3):
"""
Initialize site environment
This class contains local site environment information. This is used
to find neighbor list in the datum.
Parameters
----------
pos : np.ndarray
n x 3 list or numpy array of (non-scaled) positions. n is the
            number of atoms.
sitetypes : List[str]
            n list of strings. Each string must be S or A followed by a
            number. S indicates a spectator site and A indicates an active
            site.
env2config: List[int]
A particular permutation of the neighbors around an active
site. These indexes will be used for lattice transformation.
permutations : List[List[int]]
p x n list of list of integer. p is the permutation
index and n is the number of sites.
cutoff : float
cutoff used for pooling neighbors.
Grtol : float (default 0.0)
relative tolerance in distance for forming an edge in graph
Gatol : float (default 0.01)
absolute tolerance in distance for forming an edge in graph
rtol : float (default 0.01)
relative tolerance in rmsd in distance for graph matching
atol : float (default 0.0)
absolute tolerance in rmsd in distance for graph matching
tol : float (default 0.01)
            maximum tolerance of position RMSD to decide whether two
            environments are the same
        grtol : float (default 1e-3)
tolerance for deciding symmetric nodes
"""
try:
import networkx.algorithms.isomorphism as iso
except:
raise ImportError("This class requires networkx to be installed.")
self.pos = pos
self.sitetypes = sitetypes
self.activesiteidx = [
i for i, s in enumerate(self.sitetypes) if 'A' in s
]
self.formula: DefaultDict[str, int] = defaultdict(int)
for s in sitetypes:
self.formula[s] += 1
self.permutations = permutations
self.env2config = env2config
self.cutoff = cutoff
# Set up site environment matcher
self.tol = tol
# Graphical option
self.Grtol = Grtol
self.Gatol = Gatol
# tolerance for grouping nodes
self.grtol = grtol
# determine minimum distance between sitetypes.
# This is used to determine the existence of an edge
dists = squareform(pdist(pos))
mindists = defaultdict(list)
for i, row in enumerate(dists):
row_dists = defaultdict(list)
for j in range(0, len(sitetypes)):
if i == j:
continue
# Sort by bond
row_dists[frozenset(
(sitetypes[i], sitetypes[j]))].append(dists[i, j])
for pair in row_dists:
mindists[pair].append(np.min(row_dists[pair]))
# You want to maximize this in order to make sure every node gets an edge
self.mindists = {}
for pair in mindists:
self.mindists[pair] = np.max(mindists[pair])
# construct graph
self.G = self._construct_graph(pos, sitetypes)
# matcher options
self._nm = iso.categorical_node_match('n', '')
self._em = iso.numerical_edge_match('d', 0, rtol, 0)
def _construct_graph(self, pos: np.ndarray, sitetypes: List[str]):
"""
Returns local environment graph using networkx and
tolerance specified.
Parameters
----------
        pos: np.ndarray
            ns x 3 coordinates of positions, where ns is the number of sites.
        sitetypes: List[str]
            List of length ns giving the site type for each site, indicating
            whether it is an active site "Ai" or a spectator site "Si".
Returns
------
G: networkx.classes.graph.Graph
networkx graph used for matching site positions in
datum.
"""
try:
import networkx as nx
except:
raise ImportError("This class requires networkx to be installed.")
# construct graph
G = nx.Graph()
dists = cdist([[0, 0, 0]], pos - np.mean(pos, 0))[0]
sdists = np.sort(dists)
uniquedists = sdists[~(
np.triu(np.abs(sdists[:, None] - sdists) <= self.grtol, 1)).any(0)]
orderfromcenter = np.digitize(dists, uniquedists)
# Add nodes
for i, o in enumerate(orderfromcenter):
G.add_node(i, n=str(o) + sitetypes[i])
# Add edge. distance is edge attribute
dists = pdist(pos)
n = 0
for i in range(len(sitetypes)):
for j in range(i + 1, len(sitetypes)):
if dists[n] < self.mindists[frozenset((sitetypes[i], sitetypes[j]))] or\
(abs(self.mindists[frozenset((sitetypes[i], sitetypes[j]))] - dists[n]) <= self.Gatol + self.Grtol * abs(dists[n])):
G.add_edge(i, j, d=dists[n])
n += 1
return G
def get_mapping(self, env: Dict[str, Any]) -> Dict[int, int]:
"""
        Returns the mapping of sites from the input to this object.
        Pymatgen's molecule_matcher unfortunately does not work, as it needs
        a reasonably physical molecule.
        Here, the graph is constructed by connecting the nearest neighbors,
        isomorphism is performed to find matches, and then the Kabsch algorithm
        is performed to make sure it is a match. NetworkX is used for portability.
Parameters
----------
env : Dict[str, Any]
dictionary that contains information of local environment of a
site in datum. See _get_SiteEnvironments definition in the class
_SiteEnvironments for what this variable should be.
Returns
-------
dict : Dict[int, int]
Atom mapping from Primitive cell to data point.
"""
try:
import networkx.algorithms.isomorphism as iso
except:
raise ImportError("This class requires networkx to be installed.")
# construct graph
G = self._construct_graph(env['pos'], env['sitetypes'])
if len(self.G.nodes) != len(G.nodes):
s = 'Number of nodes is not equal.\n'
raise ValueError(s)
elif len(self.G.edges) != len(G.edges):
logging.warning("Expected the number of edges to be equal",
len(self.G.edges), len(G.edges))
s = 'Number of edges is not equal.\n'
s += "- Is the data point's cell a redefined lattice of primitive cell?\n"
s += '- If relaxed structure is used, you may want to check structure or increase Gatol\n'
raise ValueError(s)
GM = iso.GraphMatcher(self.G, G, self._nm, self._em)
# Gets the isomorphic mapping. Also the most time consuming part of the code
ams = list(GM.isomorphisms_iter())
if not ams:
s = 'No isomorphism found.\n'
s += "- Is the data point's cell a redefined lattice of primitive cell?\n"
s += '- If relaxed structure is used, you may want to check structure or increase rtol\n'
raise ValueError(s)
rmsd = []
for am in ams: # Loop over isomorphism
            # reconstruct graph after aligning point order
xyz = np.zeros((len(self.pos), 3))
for i in am:
xyz[i, :] = env['pos'][am[i], :]
rotation, _ = Rotation.align_vectors(self.pos, xyz)
R = rotation.as_matrix()
# RMSD
rmsd.append(
np.sqrt(
np.mean(
np.linalg.norm(np.dot(self.pos, R) - xyz, axis=1)**2)))
mini = np.argmin(rmsd)
minrmsd = rmsd[mini]
if minrmsd < self.tol:
return ams[mini]
else:
s = 'No isomorphism found.\n'
s += '-Consider increasing neighbor finding tolerance'
raise ValueError(s)
class _SiteEnvironments(object):
def __init__(self, site_envs: List[_SiteEnvironment], ns: int, na: int,
aos: List[str], eigen_tol: float, pbc: np.typing.ArrayLike,
cutoff: float):
"""
Initialize
Use Load to initialize this class.
Parameters
----------
site_envs : List[_SiteEnvironment]
list of _SiteEnvironment object
ns : int
number of spectator sites types
na : int
number of active sites types
aos : List[str]
            Available occupational states for active sites.
            Each string should be the name of the occupancy (consistent with the input data).
eigen_tol : float
tolerance for eigenanalysis of point group analysis in pymatgen.
pbc : ArrayLike
Boolean array, periodic boundary condition.
cutoff : float
Cutoff radius in angstrom for pooling sites to construct local environment
"""
self.site_envs = site_envs
self.unique_site_types: List[str] = [
env.sitetypes[0] for env in self.site_envs
]
self.ns = ns
self.na = na
self.aos = aos
self.eigen_tol = eigen_tol
self.pbc = pbc
self.cutoff = cutoff
def read_datum(self,
struct,
cutoff_factor: float = 1.1) -> Tuple[np.ndarray, np.ndarray]:
"""
Load structure data and return neighbor information
Parameters
----------
        struct: PymatgenStructure
Pymatgen Structure object of the surface configuration. It also requires
site_properties attribute with "Sitetypes"(Active or spectator site) and
"oss"(Species of Active site from the list of self.aos and "-1" for
spectator sites).
cutoff_factor : float
            An extra buffer factor multiplied by the cutoff to
            ensure that all relevant sites are pooled.
        Returns
        -------
        XSites : np.ndarray
            One-hot encoding features of the sites.
        XNSs : np.ndarray
            Neighbors calculated in different permutations.
"""
oss = [
species for species in struct.site_properties["oss"]
if species != '-1'
]
# Construct one hot encoding
XSites = np.zeros((len(oss), len(self.aos)))
for i, o in enumerate(oss):
XSites[i, self.aos.index(o)] = 1
# get mapping between all site index to active site index
alltoactive = {}
n = 0
for i, s in enumerate(struct.site_properties["SiteTypes"]):
if 'A' in s:
alltoactive[i] = n
n += 1
# Get Neighbors
# Read Data
site_envs = _get_SiteEnvironments(struct,
self.cutoff * cutoff_factor,
self.pbc,
get_permutations=False,
eigen_tol=self.eigen_tol)
XNSs: List[list] = [[] for _ in range(len(self.site_envs))]
for env in site_envs:
i = self.unique_site_types.index(env['sitetypes'][0])
new_env = self._truncate(self.site_envs[i], env)
# get map between two environment
mapping = self.site_envs[i].get_mapping(new_env)
# align input to the primitive cell (reference)
aligned_idx = [
new_env['env2config'][mapping[i]]
for i in range(len(new_env['env2config']))
]
# apply permutations
nni_perm = np.take(aligned_idx, self.site_envs[i].permutations)
# remove spectators
nni_perm = nni_perm[:, self.site_envs[i].activesiteidx]
# map it to active sites
nni_perm = np.vectorize(alltoactive.__getitem__)(nni_perm)
XNSs[i].append(nni_perm.tolist())
return np.array(XSites), np.array(XNSs)
@classmethod
def _truncate(cls, env_ref: _SiteEnvironment,
env: Dict[str, Any]) -> Dict[str, Any]:
"""
        When cutoff_factor is used, more sites are pooled than the cutoff
        specifies. This method rules out non-relevant sites by distance.
Parameters
----------
env_ref: _SiteEnvironment
Site information of the primitive cell
env: Dict[str, Any]
Site information of the data point
Returns
-------
env: Dict[str, Union[list, np.ndarray]]
"""
# Extract the right number of sites by distance
dists = defaultdict(list)
for i, s in enumerate(env['sitetypes']):
dists[s].append([i, env['dist'][i]])
for s in dists:
dists[s] = sorted(dists[s], key=lambda x: x[1])
siteidx = []
for s in dists:
siteidx += [i[0] for i in dists[s][:env_ref.formula[s]]]
siteidx = sorted(siteidx)
env['pos'] = [
env['pos'][i] for i in range(len(env['pos'])) if i in siteidx
]
env['pos'] = np.subtract(env['pos'], np.mean(env['pos'], 0))
env['sitetypes'] = [
env['sitetypes'][i]
for i in range(len(env['sitetypes']))
if i in siteidx
]
env['env2config'] = [env['env2config'][i] for i in siteidx]
del env['dist']
return env
def _load_primitive_cell(struct: PymatgenStructure,
aos: List[str],
pbc: List[bool],
ns: int,
na: int,
cutoff: float,
eigen_tol: float = 1e-5) -> _SiteEnvironments:
"""
This loads the primitive cell, along with all the permutations
required for creating a neighbor. This produces the site environments of
the primitive cell.
Parameters
----------
struct: PymatgenStructure
Pymatgen Structure object of the primitive cell used for calculating
        neighbors from lattice transformations. It also requires a site_properties
        attribute with "SiteTypes" (active or spectator site).
aos: List[str]
A list of all the active site species. For the Pt, N, NO configuration
set it as ['0', '1', '2'].
pbc: List[bool]
Periodic Boundary Condition
ns: int (default 1)
        The number of spectator type elements. For "S1" it is 1.
    na: int (default 1)
        The number of active type elements. For "A1" it is 1.
    cutoff: float (default 6.00)
        Cutoff radius for getting the local environment. Only
        used down to 2 digits.
    eigen_tol : float (default 1e-5)
tolerance for eigenanalysis of point group analysis in
pymatgen.
Returns
-------
SiteEnvironments: _SiteEnvironments
Instance of the _SiteEnvironments object
"""
site_envs = _get_SiteEnvironments(struct,
cutoff,
pbc,
True,
eigen_tol=eigen_tol)
site_envs_format = [
_SiteEnvironment(e['pos'], e['sitetypes'], e['env2config'],
e['permutations'], cutoff) for e in site_envs
]
ust = [env.sitetypes[0] for env in site_envs_format]
usi = np.unique(ust, return_index=True)[1]
site_envs_format = [site_envs_format[i] for i in usi]
return _SiteEnvironments(site_envs_format, ns, na, aos, eigen_tol, pbc,
cutoff)
def _get_SiteEnvironments(struct: PymatgenStructure,
cutoff: float,
PBC: np.typing.ArrayLike,
get_permutations: bool = True,
eigen_tol: float = 1e-5) -> List[Dict[str, Any]]:
"""
    Used to extract information about both primitive cells and data points.
    Extracts local environments from a Structure object by calculating neighbors
    based on Gaussian distance. For the primitive cell, different permutations of the
    neighbors are calculated and will later be mapped onto the data point in the
    _SiteEnvironment.get_mapping() function.
Parameters
----------
struct: PymatgenStructure
Pymatgen Structure object of the primitive cell used for calculating
        neighbors from lattice transformations. It also requires a site_properties
        attribute with "SiteTypes" (active or spectator site).
cutoff : float
cutoff distance in angstrom for collecting local
environment.
    PBC : ArrayLike
Periodic boundary condition
get_permutations : bool (default True)
Whether to find permuted neighbor list or not.
eigen_tol : float (default 1e-5)
Tolerance for eigenanalysis of point group analysis in
pymatgen.
Returns
------
site_envs : List[Dict[str, Any]]
list of local_env class
"""
try:
from pymatgen.core import Molecule
from pymatgen.symmetry.analyzer import PointGroupAnalyzer
except:
raise ImportError("This class requires pymatgen to be installed.")
pbc = np.array(PBC)
structure = struct
neighbors = structure.get_all_neighbors(cutoff, include_index=True)
symbols = structure.species
site_idxs = [
i for i, sitetype in enumerate(structure.site_properties['SiteTypes'])
if sitetype == 'A1'
]
site_sym_map = {}
sym_site_map = {}
for i, new_ele in enumerate(structure.species):
sym_site_map[new_ele] = structure.site_properties['SiteTypes'][i]
site_sym_map[structure.site_properties['SiteTypes'][i]] = new_ele
site_envs = []
for site_idx in site_idxs:
local_env_sym = [symbols[site_idx]]
local_env_xyz = [structure[site_idx].coords]
local_env_dist = [0.0]
local_env_sitemap = [site_idx]
for n in neighbors[site_idx]:
# if PBC condition is fulfilled..
c = np.around(n[0].frac_coords, 10)
withinPBC = np.logical_and(0 <= c, c < 1)
if np.all(withinPBC[~pbc]):
local_env_xyz.append(n[0].coords)
local_env_sym.append(n[0].specie)
local_env_dist.append(n[1])
local_env_sitemap.append(n[2])
local_env_pos = np.subtract(local_env_xyz, np.mean(local_env_xyz, 0))
perm = []
if get_permutations:
finder = PointGroupAnalyzer(
Molecule(local_env_sym, local_env_pos), # type: ignore
eigen_tolerance=eigen_tol)
pg = finder.get_pointgroup()
for i, op in enumerate(pg):
newpos = op.operate_multi(local_env_pos)
perm.append(
np.argmin(cdist(local_env_pos, newpos), axis=1).tolist())
site_env = {
'pos': local_env_pos,
'sitetypes': [sym_site_map[s] for s in local_env_sym],
'env2config': local_env_sitemap,
'permutations': perm,
'dist': local_env_dist
}
site_envs.append(site_env)
return site_envs
<file_sep>import os
import pytest
import tempfile
import numpy as np
from deepchem.data import SDFLoader
from deepchem.feat import CoulombMatrix
from deepchem.utils import batch_coulomb_matrix_features
try:
import torch
from deepchem.models.torch_models import DTNN, DTNNModel
except ModuleNotFoundError:
pass
@pytest.mark.torch
def test_dtnn():
"""Tests DTNN for Shape and trainable parameter count.
- Used dataset files: qm9_mini.sdf, qm9_mini.sdf.csv (A subset of qm9 dataset.)
- Tasks selected are only of regression type.
"""
# Get Data
model_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
dataset_file = os.path.join(model_dir, 'tests/assets/qm9_mini.sdf')
TASKS = ["alpha", "homo"]
loader = SDFLoader(tasks=TASKS, featurizer=CoulombMatrix(29), sanitize=True)
data = loader.create_dataset(dataset_file, shard_size=100)
inputs = batch_coulomb_matrix_features(data.X)
atom_number, distance, atom_membership, distance_membership_i, distance_membership_j = inputs
inputs = [
torch.tensor(atom_number).to(torch.int64),
torch.tensor(distance).to(torch.float32),
torch.tensor(atom_membership).to(torch.int64),
torch.tensor(distance_membership_i).to(torch.int64),
torch.tensor(distance_membership_j).to(torch.int64)
]
n_tasks = data.y.shape[0]
model = DTNN(n_tasks)
pred = model(inputs)
# Check Shape
assert pred.shape == (21, 21)
# Check number of parameters
assert len(list(model.parameters())) == 17
@pytest.mark.torch
def test_dtnn_model():
"""Tests DTNN Model for Shape and prediction.
- Used dataset files: qm9_mini.sdf, qm9_mini.sdf.csv (A subset of qm9 dataset.)
- Tasks selected are only of regression type.
"""
current_dir = os.path.dirname(os.path.abspath(__file__))
dataset_file = os.path.join(current_dir, "assets/qm9_mini.sdf")
TASKS = ["alpha", "homo"]
loader = SDFLoader(tasks=TASKS, featurizer=CoulombMatrix(29), sanitize=True)
data = loader.create_dataset(dataset_file, shard_size=100)
model = DTNNModel(data.y.shape[1],
n_embedding=40,
n_distance=100,
learning_rate=0.8,
mode="regression")
model.fit(data, nb_epoch=1000)
# Eval model on train
pred = model.predict(data)
mean_rel_error = np.mean(np.abs(1 - pred / (data.y)))
assert mean_rel_error < 0.5
assert pred.shape == data.y.shape
@pytest.mark.torch
def test_dtnn_model_reload():
    """Test DTNNModel class for reloading the model"""
torch.manual_seed(0)
# load sample dataset
current_dir = os.path.dirname(os.path.abspath(__file__))
dataset_file = os.path.join(current_dir, "assets/qm9_mini.sdf")
TASKS = ["alpha", "homo"]
loader = SDFLoader(tasks=TASKS, featurizer=CoulombMatrix(29), sanitize=True)
data = loader.create_dataset(dataset_file, shard_size=100)
# initialize the model
model_dir = tempfile.mkdtemp()
model = DTNNModel(data.y.shape[1], model_dir=model_dir, batch_size=2)
# fit the model
model.fit(data, nb_epoch=10)
# reload the model
reloaded_model = DTNNModel(data.y.shape[1],
model_dir=model_dir,
batch_size=2)
reloaded_model.restore()
    original_predict = model.predict(data)
    reloaded_predict = reloaded_model.predict(data)
    assert np.all(original_predict == reloaded_predict)
<file_sep>import torch
import pytorch_lightning as pl # noqa
class DCLightningModule(pl.LightningModule):
"""DeepChem Lightning Module to be used with Lightning trainer.
TODO: Add dataloader, example code and fit, once datasetmodule
is ready
The lightning module is a wrapper over deepchem's torch model.
This module directly works with pytorch lightning trainer
which runs training for multiple epochs and also is responsible
for setting up and training models on multiple GPUs.
https://pytorch-lightning.readthedocs.io/en/stable/api/pytorch_lightning.core.LightningModule.html?highlight=LightningModule
Notes
-----
This class requires PyTorch to be installed.
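
    Examples
    --------
    Minimal illustrative sketch; the wrapped model and its sizes are
    placeholders, and the dataloader / trainer wiring is omitted (see the
    TODO above):

    >>> import deepchem as dc  # doctest: +SKIP
    >>> dc_model = dc.models.MultitaskClassifier(n_tasks=1, n_features=16)  # doctest: +SKIP
    >>> lit_model = DCLightningModule(dc_model)  # doctest: +SKIP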
"""
def __init__(self, dc_model):
"""Create a new DCLightningModule.
Parameters
----------
dc_model: deepchem.models.torch_models.torch_model.TorchModel
TorchModel to be wrapped inside the lightning module.
"""
super().__init__()
self.dc_model = dc_model
self.pt_model = self.dc_model.model
self.loss = self.dc_model._loss_fn
def configure_optimizers(self):
return self.dc_model.optimizer._create_pytorch_optimizer(
self.pt_model.parameters(),)
def training_step(self, batch, batch_idx):
"""Perform a training step.
Parameters
----------
batch: A tensor, tuple or list.
batch_idx: Integer displaying index of this batch
optimizer_idx: When using multiple optimizers, this argument will also be present.
Returns
-------
loss_outputs: outputs of losses.
"""
batch = batch.batch_list
inputs, labels, weights = self.dc_model._prepare_batch(batch)
if isinstance(inputs, list):
assert len(inputs) == 1
inputs = inputs[0]
outputs = self.pt_model(inputs)
if isinstance(outputs, torch.Tensor):
outputs = [outputs]
if self.dc_model._loss_outputs is not None:
outputs = [outputs[i] for i in self.dc_model._loss_outputs]
loss_outputs = self.loss(outputs, labels, weights)
self.log(
"train_loss",
loss_outputs,
on_epoch=True,
sync_dist=True,
reduce_fx="mean",
prog_bar=True,
batch_size=self.dc_model.batch_size,
)
return loss_outputs
<file_sep>from __future__ import annotations
from abc import abstractmethod
from typing import Union
import torch
import numpy as np
from dqc.qccalc.ks import KS
from dqc.utils.datastruct import SpinParam
from deepchem.feat.dft_data import DFTEntry, DFTSystem
from deepchem.utils.dftutils import KSCalc, hashstr
from deepchem.models.dft.nnxc import BaseNNXC, HybridXC
class XCNNSCF(torch.nn.Module):
"""
Exchange Correlation Neural Network - Self Consistent Iterations
In the Kohn-Sham theory, the inclusion of the noninteracting kinetic energy functional results in a set of one-particle equations with Kohn-Sham
orbitals as their solutions after functional differentiation. It is a
variational approach that determines the lowest energy and the related
molecular orbitals and orbital energies by using the electron-electron
interaction potential. To learn more about Density Functional Theory
and the Kohn-Sham approach please use the references below.
The XCNNSCF is used for performing self-consistent iterations. The
XC functional in the Kohn-Sham model implementation is replaced by a
neural network.
Examples
--------
>>> from deepchem.models.dft.scf import XCNNSCF
>>> import torch
>>> from deepchem.feat.dft_data import DFTEntry, DFTSystem
>>> from deepchem.models.dft.nnxc import HybridXC
>>> nnmodel = (torch.nn.Sequential(
... torch.nn.Linear(2, 10),
... torch.nn.Tanh(),
... torch.nn.Linear(10, 1))).to(torch.double)
>>> e_type = 'dm'
>>> true_val = 'deepchem/feat/tests/data/dftHF_output.npy'
>>> systems = [{
    ...     'moldesc': 'H 0.86625 0 0; F -0.86625 0 0',
    ...     'basis': '6-311++G(3df,3pd)'
    ... }]
>>> entry = DFTEntry.create(e_type, true_val, systems)
>>> evl = XCNNSCF(hybridxc, entry)
>>> system = DFTSystem(systems[0])
>>> run = evl.run(system)
>>> output = run.energy()
Notes
-----
This code is derived from https://github.com/mfkasim1/xcnn/blob/f2cb9777da2961ac553f256ecdcca3e314a538ca/xcdnn2/evaluator.py
References
----------
deepchem.models.dft.nnxc
    Kohn, W. and Sham, L.J., 1965. Self-consistent equations including
    exchange and correlation effects. Physical Review, 140(4A), p.A1133.
"""
def __init__(self, xc: Union[BaseNNXC, HybridXC], entry: DFTEntry):
super().__init__()
"""
Parameters
----------
xc: Union[BaseNNXC, HybridXC]
exchange correlation functional that has been replaced by a
neural network.
entry: DFTEntry
"""
self.xc = xc
@abstractmethod
def get_xc(self) -> HybridXC:
"""
Returns
-------
Exchange correlation functional that has been replaced by a
neural network, based on a BaseNNXC model.
"""
return self.xc
@abstractmethod
def run(self, system: DFTSystem) -> KSCalc:
"""
Kohn Sham Model
This method runs the Quantum Chemistry calculation (Differentiable
DFT) of the given system and returns the post-run object. This method
        starts with an initial density matrix; the new density matrix can be
obtained from the post-run object.
Parameters
----------
system: DFTSystem
Returns
-------
KSCalc object
"""
dm0, dmname = self._get_dm0(system)
mol = system.get_dqc_mol()
qc = KS(mol, xc=self.xc).run(dm0=dm0, bck_options={"max_niter": 50})
return KSCalc(qc)
def _dm0_name(self, obj) -> str:
"""
Returns
-------
dm0 followed by the name of the system
"""
return "dm0_" + hashstr(str(obj))
def _get_dm0(self, system: DFTSystem):
"""
        This method calculates and returns the density matrix of a system.
The matrix will vary depending on the atomic numbers, positions, and
spins.
Parameters
----------
system: DFTSystem
"""
dm_name = self._dm0_name(system)
dm0: torch.Tensor
get_dm = np.array(getattr(self, dm_name, None), dtype=bool)
dm0 = torch.Tensor(get_dm)
dm_exists = dm0 is not None
dm_written = dm_exists and torch.any(dm0 != 0.0)
if not dm_written:
dm0_res: Union[None, torch.Tensor, SpinParam[torch.Tensor]] = None
elif system.get_dqc_mol().spin != 0:
dm0_res = SpinParam(u=dm0[0].detach(), d=dm0[1].detach())
else:
dm0_res = dm0
return dm0_res, (dm_name if dm_exists else None)
<file_sep>"""
Testing construction of graph models.
"""
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
__author__ = "<NAME> and <NAME>"
__copyright__ = "Copyright 2016, Stanford University"
__license__ = "MIT"
import unittest
import tensorflow as tf
import deepchem as dc
from tensorflow.python.framework import test_util
from deepchem.models.tf_new_models.graph_models import SequentialGraph
from deepchem.models.tf_new_models.graph_models import SequentialSupportGraph
class TestGraphModels(test_util.TensorFlowTestCase):
"""
Test Container usage.
"""
def setUp(self):
super(TestGraphModels, self).setUp()
self.root = '/tmp'
def test_sequential_graph_model(self):
"""Simple test that SequentialGraph can be initialized."""
n_atoms = 5
n_feat = 10
batch_size = 3
graph_model = SequentialGraph(n_feat)
assert len(graph_model.layers) == 0
def test_sample_sequential_architecture(self):
"""Tests that a representative architecture can be created."""
n_atoms = 5
n_feat = 10
batch_size = 3
graph_model = SequentialGraph(n_feat)
graph_model.add(dc.nn.GraphConv(64, n_feat, activation='relu'))
graph_model.add(dc.nn.BatchNormalization(epsilon=1e-5, mode=1))
graph_model.add(dc.nn.GraphPool())
## Gather Projection
#graph_model.add(dc.nn.Dense(128, activation='relu'))
graph_model.add(dc.nn.BatchNormalization(epsilon=1e-5, mode=1))
graph_model.add(dc.nn.GraphGather(batch_size, activation="tanh"))
        # There should be 5 layers in graph_model
assert len(graph_model.layers) == 5
def test_sample_attn_lstm_architecture(self):
"""Tests that an attention architecture can be created without crash."""
max_depth = 5
n_test = 5
n_support = 11
n_feat = 10
batch_size = 3
support_model = SequentialSupportGraph(n_feat)
# Add layers
support_model.add(dc.nn.GraphConv(64, n_feat, activation='relu'))
# Need to add batch-norm separately to test/support due to differing
# shapes.
support_model.add_test(dc.nn.BatchNormalization(epsilon=1e-5, mode=1))
support_model.add_support(dc.nn.BatchNormalization(epsilon=1e-5, mode=1))
support_model.add(dc.nn.GraphPool())
# Apply an attention lstm layer
support_model.join(
dc.nn.AttnLSTMEmbedding(n_test, n_support, 64, max_depth))
# Gather Projection
support_model.add(dc.nn.Dense(128, 64))
support_model.add_test(dc.nn.BatchNormalization(epsilon=1e-5, mode=1))
support_model.add_support(dc.nn.BatchNormalization(epsilon=1e-5, mode=1))
support_model.add(dc.nn.GraphGather(batch_size, activation="tanh"))
def test_sample_resi_lstm_architecture(self):
"""Tests that an attention architecture can be created without crash."""
max_depth = 5
n_test = 5
n_support = 11
n_feat = 10
batch_size = 3
support_model = SequentialSupportGraph(n_feat)
# Add layers
support_model.add(dc.nn.GraphConv(64, n_feat, activation='relu'))
# Need to add batch-norm separately to test/support due to differing
# shapes.
support_model.add_test(dc.nn.BatchNormalization(epsilon=1e-5, mode=1))
support_model.add_support(dc.nn.BatchNormalization(epsilon=1e-5, mode=1))
support_model.add(dc.nn.GraphPool())
# Apply an attention lstm layer
support_model.join(
dc.nn.ResiLSTMEmbedding(n_test, n_support, 64, max_depth))
# Gather Projection
support_model.add(dc.nn.Dense(128, 64))
support_model.add_test(dc.nn.BatchNormalization(epsilon=1e-5, mode=1))
support_model.add_support(dc.nn.BatchNormalization(epsilon=1e-5, mode=1))
support_model.add(dc.nn.GraphGather(batch_size, activation="tanh"))
<file_sep>import pytest
from os.path import join, realpath, dirname
@pytest.mark.torch
def test_featurize():
"""Test that BertFeaturizer.featurize() correctly featurizes all sequences,
correctly outputs input_ids and attention_mask."""
from deepchem.feat.bert_tokenizer import BertFeaturizer
from transformers import BertTokenizerFast
sequences = [
'[CLS] D L I P T S S K L V [SEP]', '[CLS] V K K A F F A L V T [SEP]'
]
sequence_long = ['[CLS] D L I P T S S K L V V K K A F F A L V T [SEP]']
tokenizer = BertTokenizerFast.from_pretrained("Rostlab/prot_bert",
do_lower_case=False)
featurizer = BertFeaturizer(tokenizer)
feats = featurizer(sequences)
long_feat = featurizer(sequence_long)
assert (len(feats) == 2)
assert (all([len(f) == 3 for f in feats]))
assert (len(long_feat) == 1)
    assert (len(long_feat[0]) == 3)
@pytest.mark.torch
def test_loading():
"""Test that the FASTA loader can load with this featurizer."""
from transformers import BertTokenizerFast
from deepchem.feat.bert_tokenizer import BertFeaturizer
from deepchem.data.data_loader import FASTALoader
tokenizer = BertTokenizerFast.from_pretrained("Rostlab/prot_bert",
do_lower_case=False)
featurizer = BertFeaturizer(tokenizer)
loader = FASTALoader(featurizer=featurizer,
legacy=False,
auto_add_annotations=True)
file_loc = realpath(__file__)
directory = dirname(file_loc)
data = loader.create_dataset(
input_files=join(directory, "data/uniprot_truncated.fasta"))
assert data.X.shape == (61, 3, 5)
<file_sep>"""
Test reload for trained models.
"""
import os
import pytest
import tempfile
import numpy as np
import deepchem as dc
import scipy.io
from flaky import flaky
from sklearn.ensemble import RandomForestClassifier
from deepchem.molnet.load_function.chembl25_datasets import CHEMBL25_TASKS
from deepchem.feat import create_char_to_idx
try:
import tensorflow as tf
has_tensorflow = True
except:
has_tensorflow = False
try:
import torch # noqa: F401
has_torch = True
except:
has_torch = False
def test_sklearn_classifier_reload():
"""Test that trained model can be reloaded correctly."""
n_samples = 10
n_features = 3
n_tasks = 1
# Generate dummy dataset
np.random.seed(123)
ids = np.arange(n_samples)
X = np.random.rand(n_samples, n_features)
y = np.random.randint(2, size=(n_samples, n_tasks))
w = np.ones((n_samples, n_tasks))
dataset = dc.data.NumpyDataset(X, y, w, ids)
classification_metric = dc.metrics.Metric(dc.metrics.roc_auc_score)
sklearn_model = RandomForestClassifier()
model_dir = tempfile.mkdtemp()
model = dc.models.SklearnModel(sklearn_model, model_dir)
# Fit trained model
model.fit(dataset)
model.save()
# Load trained model
reloaded_model = dc.models.SklearnModel(None, model_dir)
reloaded_model.reload()
# Check predictions match on random sample
Xpred = np.random.rand(n_samples, n_features)
predset = dc.data.NumpyDataset(Xpred)
origpred = model.predict(predset)
reloadpred = reloaded_model.predict(predset)
assert np.all(origpred == reloadpred)
# Eval model on train
scores = reloaded_model.evaluate(dataset, [classification_metric])
assert scores[classification_metric.name] > .9
@pytest.mark.torch
def test_multitaskregressor_reload():
"""Test that MultitaskRegressor can be reloaded correctly."""
n_samples = 10
n_features = 3
n_tasks = 1
# Generate dummy dataset
np.random.seed(123)
ids = np.arange(n_samples)
X = np.random.rand(n_samples, n_features)
y = np.random.rand(n_samples, n_tasks)
w = np.ones((n_samples, n_tasks))
dataset = dc.data.NumpyDataset(X, y, w, ids)
regression_metric = dc.metrics.Metric(dc.metrics.mean_squared_error)
model_dir = tempfile.mkdtemp()
model = dc.models.MultitaskRegressor(
n_tasks,
n_features,
dropouts=[0.],
weight_init_stddevs=[np.sqrt(6) / np.sqrt(1000)],
batch_size=n_samples,
learning_rate=0.003,
model_dir=model_dir)
# Fit trained model
model.fit(dataset, nb_epoch=100)
# Eval model on train
scores = model.evaluate(dataset, [regression_metric])
assert scores[regression_metric.name] < .1
# Reload trained model
reloaded_model = dc.models.MultitaskRegressor(
n_tasks,
n_features,
dropouts=[0.],
weight_init_stddevs=[np.sqrt(6) / np.sqrt(1000)],
batch_size=n_samples,
learning_rate=0.003,
model_dir=model_dir)
reloaded_model.restore()
# Check predictions match on random sample
Xpred = np.random.rand(n_samples, n_features)
predset = dc.data.NumpyDataset(Xpred)
origpred = model.predict(predset)
reloadpred = reloaded_model.predict(predset)
assert np.all(origpred == reloadpred)
# Eval model on train
scores = reloaded_model.evaluate(dataset, [regression_metric])
assert scores[regression_metric.name] < 0.1
@pytest.mark.torch
def test_multitaskclassification_reload():
"""Test that MultitaskClassifier can be reloaded correctly."""
n_samples = 10
n_features = 3
n_tasks = 1
# Generate dummy dataset
np.random.seed(123)
ids = np.arange(n_samples)
X = np.random.rand(n_samples, n_features)
y = np.zeros((n_samples, n_tasks))
w = np.ones((n_samples, n_tasks))
dataset = dc.data.NumpyDataset(X, y, w, ids)
classification_metric = dc.metrics.Metric(dc.metrics.accuracy_score)
model_dir = tempfile.mkdtemp()
model = dc.models.MultitaskClassifier(n_tasks,
n_features,
dropouts=[0.],
weight_init_stddevs=[.1],
batch_size=n_samples,
optimizer=dc.models.optimizers.Adam(
learning_rate=0.0003,
beta1=0.9,
beta2=0.999),
model_dir=model_dir)
# Fit trained model
model.fit(dataset, nb_epoch=100)
# Reload trained model
reloaded_model = dc.models.MultitaskClassifier(
n_tasks,
n_features,
dropouts=[0.],
weight_init_stddevs=[.1],
batch_size=n_samples,
optimizer=dc.models.optimizers.Adam(learning_rate=0.0003,
beta1=0.9,
beta2=0.999),
model_dir=model_dir)
reloaded_model.restore()
# Check predictions match on random sample
Xpred = np.random.rand(n_samples, n_features)
predset = dc.data.NumpyDataset(Xpred)
origpred = model.predict(predset)
reloadpred = reloaded_model.predict(predset)
assert np.all(origpred == reloadpred)
# Eval model on train
scores = reloaded_model.evaluate(dataset, [classification_metric])
assert scores[classification_metric.name] > .9
@pytest.mark.torch
def test_residual_classification_reload():
"""Test that a residual network can reload correctly."""
n_samples = 10
n_features = 5
n_tasks = 1
# Generate dummy dataset
np.random.seed(123)
ids = np.arange(n_samples)
X = np.random.rand(n_samples, n_features)
y = np.random.randint(2, size=(n_samples, n_tasks))
w = np.ones((n_samples, n_tasks))
dataset = dc.data.NumpyDataset(X, y, w, ids)
classification_metric = dc.metrics.Metric(dc.metrics.accuracy_score)
model_dir = tempfile.mkdtemp()
model = dc.models.MultitaskClassifier(n_tasks,
n_features,
layer_sizes=[20] * 10,
dropouts=0.0,
batch_size=n_samples,
residual=True,
model_dir=model_dir)
# Fit trained model
model.fit(dataset, nb_epoch=500)
# Eval model on train
scores = model.evaluate(dataset, [classification_metric])
assert scores[classification_metric.name] > .9
# Reload trained model
reloaded_model = dc.models.MultitaskClassifier(n_tasks,
n_features,
layer_sizes=[20] * 10,
dropouts=0.0,
batch_size=n_samples,
residual=True,
model_dir=model_dir)
reloaded_model.restore()
# Check predictions match on random sample
Xpred = np.random.rand(n_samples, n_features)
predset = dc.data.NumpyDataset(Xpred)
origpred = model.predict(predset)
reloadpred = reloaded_model.predict(predset)
assert np.all(origpred == reloadpred)
# Eval model on train
scores = reloaded_model.evaluate(dataset, [classification_metric])
assert scores[classification_metric.name] > .9
@pytest.mark.tensorflow
def test_robust_multitask_classification_reload():
"""Test robust multitask overfits tiny data."""
n_tasks = 10
n_samples = 10
n_features = 3
# Generate dummy dataset
np.random.seed(123)
ids = np.arange(n_samples)
X = np.random.rand(n_samples, n_features)
y = np.zeros((n_samples, n_tasks))
w = np.ones((n_samples, n_tasks))
dataset = dc.data.NumpyDataset(X, y, w, ids)
classification_metric = dc.metrics.Metric(dc.metrics.accuracy_score,
task_averager=np.mean)
model_dir = tempfile.mkdtemp()
model = dc.models.RobustMultitaskClassifier(n_tasks,
n_features,
layer_sizes=[50],
bypass_layer_sizes=[10],
dropouts=[0.],
learning_rate=0.003,
weight_init_stddevs=[.1],
batch_size=n_samples,
model_dir=model_dir)
# Fit trained model
model.fit(dataset, nb_epoch=25)
# Eval model on train
scores = model.evaluate(dataset, [classification_metric])
assert scores[classification_metric.name] > .9
# Reloaded Trained Model
reloaded_model = dc.models.RobustMultitaskClassifier(
n_tasks,
n_features,
layer_sizes=[50],
bypass_layer_sizes=[10],
dropouts=[0.],
learning_rate=0.003,
weight_init_stddevs=[.1],
batch_size=n_samples,
model_dir=model_dir)
reloaded_model.restore()
# Check predictions match on random sample
Xpred = np.random.rand(n_samples, n_features)
predset = dc.data.NumpyDataset(Xpred)
origpred = model.predict(predset)
reloadpred = reloaded_model.predict(predset)
assert np.all(origpred == reloadpred)
# Eval model on train
scores = reloaded_model.evaluate(dataset, [classification_metric])
assert scores[classification_metric.name] > .9
@pytest.mark.tensorflow
def test_atomic_conv_model_reload():
from deepchem.models.atomic_conv import AtomicConvModel
from deepchem.data import NumpyDataset
model_dir = tempfile.mkdtemp()
batch_size = 1
N_atoms = 5
acm = AtomicConvModel(n_tasks=1,
batch_size=batch_size,
layer_sizes=[
1,
],
frag1_num_atoms=5,
frag2_num_atoms=5,
complex_num_atoms=10,
model_dir=model_dir)
features = []
frag1_coords = np.random.rand(N_atoms, 3)
frag1_nbr_list = {0: [], 1: [], 2: [], 3: [], 4: []}
frag1_z = np.random.randint(10, size=(N_atoms))
frag2_coords = np.random.rand(N_atoms, 3)
frag2_nbr_list = {0: [], 1: [], 2: [], 3: [], 4: []}
frag2_z = np.random.randint(10, size=(N_atoms))
system_coords = np.random.rand(2 * N_atoms, 3)
system_nbr_list = {
0: [],
1: [],
2: [],
3: [],
4: [],
5: [],
6: [],
7: [],
8: [],
9: []
}
system_z = np.random.randint(10, size=(2 * N_atoms))
features.append(
(frag1_coords, frag1_nbr_list, frag1_z, frag2_coords, frag2_nbr_list,
frag2_z, system_coords, system_nbr_list, system_z))
features = np.asarray(features)
labels = np.random.rand(batch_size)
dataset = NumpyDataset(features, labels)
acm.fit(dataset, nb_epoch=1)
reloaded_model = AtomicConvModel(n_tasks=1,
batch_size=batch_size,
layer_sizes=[
1,
],
frag1_num_atoms=5,
frag2_num_atoms=5,
complex_num_atoms=10,
model_dir=model_dir)
reloaded_model.restore()
# Check predictions match on random sample
origpred = acm.predict(dataset)
reloadpred = reloaded_model.predict(dataset)
assert np.all(origpred == reloadpred)
@pytest.mark.tensorflow
def test_normalizing_flow_model_reload():
"""Test that NormalizingFlowModel can be reloaded correctly."""
from deepchem.models.normalizing_flows import NormalizingFlow, NormalizingFlowModel
import tensorflow_probability as tfp
tfd = tfp.distributions
tfb = tfp.bijectors
model_dir = tempfile.mkdtemp()
Made = tfb.AutoregressiveNetwork(params=2,
hidden_units=[512, 512],
activation='relu',
dtype='float64')
flow_layers = [tfb.MaskedAutoregressiveFlow(shift_and_log_scale_fn=Made)]
    # 2D Multivariate Gaussian base distribution
nf = NormalizingFlow(base_distribution=tfd.MultivariateNormalDiag(
loc=np.zeros(2), scale_diag=np.ones(2)),
flow_layers=flow_layers)
nfm = NormalizingFlowModel(nf, model_dir=model_dir)
target_distribution = tfd.MultivariateNormalDiag(loc=np.array([1., 0.]))
dataset = dc.data.NumpyDataset(X=target_distribution.sample(96))
_ = nfm.fit(dataset, nb_epoch=1)
x = np.zeros(2)
lp1 = nfm.flow.log_prob(x).numpy()
assert nfm.flow.sample().numpy().shape == (2,)
reloaded_model = NormalizingFlowModel(nf, model_dir=model_dir)
reloaded_model.restore()
# Check that reloaded model can sample from the distribution
assert reloaded_model.flow.sample().numpy().shape == (2,)
lp2 = reloaded_model.flow.log_prob(x).numpy()
# Check that density estimation is same for reloaded model
assert np.all(lp1 == lp2)
@pytest.mark.tensorflow
def test_robust_multitask_regressor_reload():
"""Test that RobustMultitaskRegressor can be reloaded correctly."""
n_tasks = 10
n_samples = 10
n_features = 3
# Generate dummy dataset
np.random.seed(123)
ids = np.arange(n_samples)
X = np.random.rand(n_samples, n_features)
y = np.random.rand(n_samples, n_tasks)
w = np.ones((n_samples, n_tasks))
dataset = dc.data.NumpyDataset(X, y, w, ids)
regression_metric = dc.metrics.Metric(dc.metrics.mean_squared_error)
model_dir = tempfile.mkdtemp()
model = dc.models.RobustMultitaskRegressor(n_tasks,
n_features,
layer_sizes=[50],
bypass_layer_sizes=[10],
dropouts=[0.],
learning_rate=0.003,
weight_init_stddevs=[.1],
batch_size=n_samples,
model_dir=model_dir)
# Fit trained model
model.fit(dataset, nb_epoch=100)
# Eval model on train
scores = model.evaluate(dataset, [regression_metric])
assert scores[regression_metric.name] < .1
# Reload trained model
reloaded_model = dc.models.RobustMultitaskRegressor(
n_tasks,
n_features,
layer_sizes=[50],
bypass_layer_sizes=[10],
dropouts=[0.],
learning_rate=0.003,
weight_init_stddevs=[.1],
batch_size=n_samples,
model_dir=model_dir)
reloaded_model.restore()
# Check predictions match on random sample
Xpred = np.random.rand(n_samples, n_features)
predset = dc.data.NumpyDataset(Xpred)
origpred = model.predict(predset)
reloadpred = reloaded_model.predict(predset)
assert np.all(origpred == reloadpred)
# Eval model on train
scores = reloaded_model.evaluate(dataset, [regression_metric])
assert scores[regression_metric.name] < 0.1
@pytest.mark.tensorflow
def test_IRV_multitask_classification_reload():
"""Test IRV classifier can be reloaded."""
n_tasks = 5
n_samples = 10
n_features = 128
# Generate dummy dataset
np.random.seed(123)
ids = np.arange(n_samples)
X = np.random.randint(2, size=(n_samples, n_features))
y = np.ones((n_samples, n_tasks))
w = np.ones((n_samples, n_tasks))
dataset = dc.data.NumpyDataset(X, y, w, ids)
IRV_transformer = dc.trans.IRVTransformer(5, n_tasks, dataset)
dataset_trans = IRV_transformer.transform(dataset)
classification_metric = dc.metrics.Metric(dc.metrics.accuracy_score,
task_averager=np.mean)
model_dir = tempfile.mkdtemp()
model = dc.models.MultitaskIRVClassifier(n_tasks,
K=5,
learning_rate=0.01,
batch_size=n_samples,
model_dir=model_dir)
# Fit trained model
model.fit(dataset_trans)
# Eval model on train
scores = model.evaluate(dataset_trans, [classification_metric])
assert scores[classification_metric.name] > .9
# Reload Trained Model
reloaded_model = dc.models.MultitaskIRVClassifier(n_tasks,
K=5,
learning_rate=0.01,
batch_size=n_samples,
model_dir=model_dir)
reloaded_model.restore()
# Check predictions match on random sample
Xpred = np.random.random(dataset_trans.X.shape)
predset = dc.data.NumpyDataset(Xpred)
origpred = model.predict(predset)
reloadpred = reloaded_model.predict(predset)
assert np.all(origpred == reloadpred)
# Eval model on train
scores = reloaded_model.evaluate(dataset_trans, [classification_metric])
assert scores[classification_metric.name] > .9
@flaky
@pytest.mark.tensorflow
def test_progressive_classification_reload():
"""Test progressive multitask can reload."""
np.random.seed(123)
n_tasks = 5
n_samples = 10
n_features = 6
# Generate dummy dataset
np.random.seed(123)
ids = np.arange(n_samples)
X = np.random.rand(n_samples, n_features)
y = np.random.randint(2, size=(n_samples, n_tasks))
w = np.ones((n_samples, n_tasks))
dataset = dc.data.NumpyDataset(X, y, w, ids)
classification_metric = dc.metrics.Metric(dc.metrics.accuracy_score,
task_averager=np.mean)
model_dir = tempfile.mkdtemp()
model = dc.models.ProgressiveMultitaskClassifier(n_tasks,
n_features,
layer_sizes=[50],
bypass_layer_sizes=[10],
dropouts=[0.],
learning_rate=0.001,
weight_init_stddevs=[.1],
alpha_init_stddevs=[.02],
batch_size=n_samples,
model_dir=model_dir)
# Fit trained model
model.fit(dataset, nb_epoch=400)
# Eval model on train
scores = model.evaluate(dataset, [classification_metric])
assert scores[classification_metric.name] > .85
# Reload Trained Model
reloaded_model = dc.models.ProgressiveMultitaskClassifier(
n_tasks,
n_features,
layer_sizes=[50],
bypass_layer_sizes=[10],
dropouts=[0.],
learning_rate=0.001,
weight_init_stddevs=[.1],
alpha_init_stddevs=[.02],
batch_size=n_samples,
model_dir=model_dir)
reloaded_model.restore()
# Check predictions match on random sample
Xpred = np.random.rand(n_samples, n_features)
predset = dc.data.NumpyDataset(Xpred)
origpred = model.predict(predset)
reloadpred = reloaded_model.predict(predset)
assert np.all(origpred == reloadpred)
# Eval model on train
scores = reloaded_model.evaluate(dataset, [classification_metric])
assert scores[classification_metric.name] > .85
@pytest.mark.tensorflow
def test_progressivemultitaskregressor_reload():
"""Test that ProgressiveMultitaskRegressor can be reloaded correctly."""
n_samples = 10
n_features = 3
n_tasks = 1
# Generate dummy dataset
np.random.seed(123)
ids = np.arange(n_samples)
X = np.random.rand(n_samples, n_features)
y = np.random.rand(n_samples, n_tasks)
w = np.ones((n_samples, n_tasks))
dataset = dc.data.NumpyDataset(X, y, w, ids)
regression_metric = dc.metrics.Metric(dc.metrics.mean_squared_error)
model_dir = tempfile.mkdtemp()
model = dc.models.ProgressiveMultitaskRegressor(n_tasks,
n_features,
layer_sizes=[50],
bypass_layer_sizes=[10],
dropouts=[0.],
learning_rate=0.001,
weight_init_stddevs=[.1],
alpha_init_stddevs=[.02],
batch_size=n_samples,
model_dir=model_dir)
# Fit trained model
model.fit(dataset, nb_epoch=100)
# Eval model on train
scores = model.evaluate(dataset, [regression_metric])
assert scores[regression_metric.name] < .1
# Reload trained model
reloaded_model = dc.models.ProgressiveMultitaskRegressor(
n_tasks,
n_features,
layer_sizes=[50],
bypass_layer_sizes=[10],
dropouts=[0.],
learning_rate=0.001,
weight_init_stddevs=[.1],
alpha_init_stddevs=[.02],
batch_size=n_samples,
model_dir=model_dir)
reloaded_model.restore()
# Check predictions match on random sample
Xpred = np.random.rand(n_samples, n_features)
predset = dc.data.NumpyDataset(Xpred)
origpred = model.predict(predset)
reloadpred = reloaded_model.predict(predset)
assert np.all(origpred == reloadpred)
# Eval model on train
scores = reloaded_model.evaluate(dataset, [regression_metric])
assert scores[regression_metric.name] < 0.1
@pytest.mark.tensorflow
def test_DAG_regression_reload():
"""Test DAG regressor reloads."""
np.random.seed(123)
tf.random.set_seed(123)
n_tasks = 1
# Load mini log-solubility dataset.
featurizer = dc.feat.ConvMolFeaturizer()
mols = [
"CC", "CCO", "CC", "CCC", "CCCCO", "CO", "CC", "CCCCC", "CCC", "CCCO"
]
n_samples = len(mols)
X = featurizer(mols)
y = np.random.rand(n_samples, n_tasks)
dataset = dc.data.NumpyDataset(X, y)
regression_metric = dc.metrics.Metric(dc.metrics.pearson_r2_score,
task_averager=np.mean)
n_feat = 75
batch_size = 10
transformer = dc.trans.DAGTransformer(max_atoms=50)
dataset = transformer.transform(dataset)
model_dir = tempfile.mkdtemp()
model = dc.models.DAGModel(n_tasks,
max_atoms=50,
n_atom_feat=n_feat,
batch_size=batch_size,
learning_rate=0.001,
use_queue=False,
mode="regression",
model_dir=model_dir)
# Fit trained model
model.fit(dataset, nb_epoch=100)
# Eval model on train
scores = model.evaluate(dataset, [regression_metric])
assert scores[regression_metric.name] > .1
reloaded_model = dc.models.DAGModel(n_tasks,
max_atoms=50,
n_atom_feat=n_feat,
batch_size=batch_size,
learning_rate=0.001,
use_queue=False,
mode="regression",
model_dir=model_dir)
reloaded_model.restore()
# Check predictions match on random sample
predmols = ["CCCC", "CCCCCO", "CCCCC"]
Xpred = featurizer(predmols)
predset = dc.data.NumpyDataset(Xpred)
predset = transformer.transform(predset)
origpred = model.predict(predset)
reloadpred = reloaded_model.predict(predset)
assert np.all(origpred == reloadpred)
# Eval model on train
scores = reloaded_model.evaluate(dataset, [regression_metric])
assert scores[regression_metric.name] > .1
@flaky
@pytest.mark.tensorflow
def test_weave_classification_reload():
"""Test weave model can be reloaded."""
np.random.seed(123)
tf.random.set_seed(123)
n_tasks = 1
# Load mini log-solubility dataset.
featurizer = dc.feat.WeaveFeaturizer()
mols = ["CC", "CCCCC", "CCCCC", "CCC", "COOO", "COO", "OO"]
X = featurizer(mols)
y = [1, 1, 1, 1, 0, 0, 0]
dataset = dc.data.NumpyDataset(X, y)
classification_metric = dc.metrics.Metric(dc.metrics.roc_auc_score)
batch_size = 5
model_dir = tempfile.mkdtemp()
model = dc.models.WeaveModel(n_tasks,
batch_size=batch_size,
learning_rate=0.01,
mode="classification",
dropouts=0.0,
model_dir=model_dir)
# Fit trained model
model.fit(dataset, nb_epoch=100)
# Eval model on train
scores = model.evaluate(dataset, [classification_metric])
assert scores[classification_metric.name] > .6
# Check predictions match on random sample
predmols = ["CCCC", "CCCCCO", "CCCCC"]
Xpred = featurizer(predmols)
predset = dc.data.NumpyDataset(Xpred)
origpred = model.predict(predset)
reloaded_model = dc.models.WeaveModel(n_tasks,
batch_size=batch_size,
learning_rate=0.003,
mode="classification",
dropouts=0.0,
model_dir=model_dir)
reloaded_model.restore()
# Check predictions match on random sample
predmols = ["CCCC", "CCCCCO", "CCCCC"]
Xpred = featurizer(predmols)
predset = dc.data.NumpyDataset(Xpred)
origpred = model.predict(predset)
reloadpred = reloaded_model.predict(predset)
assert np.all(origpred == reloadpred)
# Eval model on train
scores = reloaded_model.evaluate(dataset, [classification_metric])
assert scores[classification_metric.name] > .6
@pytest.mark.tensorflow
def test_MPNN_regression_reload():
"""Test MPNN can reload datasets."""
np.random.seed(123)
tf.random.set_seed(123)
n_tasks = 1
# Load mini log-solubility dataset.
featurizer = dc.feat.WeaveFeaturizer()
mols = ["C", "CO", "CC"]
n_samples = len(mols)
X = featurizer(mols)
y = np.random.rand(n_samples, n_tasks)
dataset = dc.data.NumpyDataset(X, y)
regression_metric = dc.metrics.Metric(dc.metrics.pearson_r2_score,
task_averager=np.mean)
n_atom_feat = 75
n_pair_feat = 14
batch_size = 10
model_dir = tempfile.mkdtemp()
model = dc.models.MPNNModel(n_tasks,
n_atom_feat=n_atom_feat,
n_pair_feat=n_pair_feat,
T=2,
M=3,
batch_size=batch_size,
learning_rate=0.001,
use_queue=False,
mode="regression",
model_dir=model_dir)
# Fit trained model
model.fit(dataset, nb_epoch=50)
# Eval model on train
scores = model.evaluate(dataset, [regression_metric])
assert scores[regression_metric.name] > .8
# Reload trained model
reloaded_model = dc.models.MPNNModel(n_tasks,
n_atom_feat=n_atom_feat,
n_pair_feat=n_pair_feat,
T=2,
M=3,
batch_size=batch_size,
learning_rate=0.001,
use_queue=False,
mode="regression",
model_dir=model_dir)
reloaded_model.restore()
# Eval model on train
scores = reloaded_model.evaluate(dataset, [regression_metric])
assert scores[regression_metric.name] > .8
# Check predictions match on random sample
predmols = ["CCCC", "CCCCCO", "CCCCC"]
Xpred = featurizer(predmols)
predset = dc.data.NumpyDataset(Xpred)
origpred = model.predict(predset)
reloadpred = reloaded_model.predict(predset)
assert np.all(origpred == reloadpred)
@pytest.mark.tensorflow
def test_textCNN_classification_reload():
"""Test textCNN model reloadinng."""
np.random.seed(123)
tf.random.set_seed(123)
n_tasks = 1
featurizer = dc.feat.RawFeaturizer()
mols = ["C", "CO", "CC"]
n_samples = len(mols)
X = featurizer(mols)
y = np.random.randint(2, size=(n_samples, n_tasks))
dataset = dc.data.NumpyDataset(X, y, ids=mols)
classification_metric = dc.metrics.Metric(dc.metrics.roc_auc_score)
char_dict, length = dc.models.TextCNNModel.build_char_dict(dataset)
batch_size = 3
model_dir = tempfile.mkdtemp()
model = dc.models.TextCNNModel(n_tasks,
char_dict,
seq_length=length,
batch_size=batch_size,
learning_rate=0.001,
use_queue=False,
mode="classification",
model_dir=model_dir)
# Fit trained model
model.fit(dataset, nb_epoch=200)
# Eval model on train
scores = model.evaluate(dataset, [classification_metric])
assert scores[classification_metric.name] > .8
# Reload trained model
reloaded_model = dc.models.TextCNNModel(n_tasks,
char_dict,
seq_length=length,
batch_size=batch_size,
learning_rate=0.001,
use_queue=False,
mode="classification",
model_dir=model_dir)
reloaded_model.restore()
# Eval model on train
scores = reloaded_model.evaluate(dataset, [classification_metric])
assert scores[classification_metric.name] > .8
assert len(reloaded_model.model.get_weights()) == len(
model.model.get_weights())
for (reloaded, orig) in zip(reloaded_model.model.get_weights(),
model.model.get_weights()):
assert np.all(reloaded == orig)
# Check predictions match on random sample
predmols = ["CCCC", "CCCCCO", "CCCCC"]
Xpred = featurizer(predmols)
predset = dc.data.NumpyDataset(Xpred, ids=predmols)
origpred = model.predict(predset)
reloadpred = reloaded_model.predict(predset)
assert np.all(origpred == reloadpred)
assert len(model.model.layers) == len(reloaded_model.model.layers)
@pytest.mark.torch
def test_1d_cnn_regression_reload():
"""Test that a 1D CNN can reload."""
n_samples = 10
n_features = 3
n_tasks = 1
np.random.seed(123)
X = np.random.rand(n_samples, 10, n_features)
y = np.random.randint(2, size=(n_samples, n_tasks)).astype(np.float32)
dataset = dc.data.NumpyDataset(X, y)
regression_metric = dc.metrics.Metric(dc.metrics.mean_squared_error)
model_dir = tempfile.mkdtemp()
model = dc.models.CNN(n_tasks,
n_features,
dims=1,
dropouts=0,
kernel_size=3,
mode='regression',
learning_rate=0.003,
model_dir=model_dir)
# Fit trained model
model.fit(dataset, nb_epoch=200)
# Eval model on train
scores = model.evaluate(dataset, [regression_metric])
assert scores[regression_metric.name] < 0.1
# Reload trained model
reloaded_model = dc.models.CNN(n_tasks,
n_features,
dims=1,
dropouts=0,
kernel_size=3,
mode='regression',
learning_rate=0.003,
model_dir=model_dir)
reloaded_model.restore()
# Check predictions match on random sample
Xpred = np.random.rand(n_samples, 10, n_features)
predset = dc.data.NumpyDataset(Xpred)
origpred = model.predict(predset)
reloadpred = reloaded_model.predict(predset)
assert np.all(origpred == reloadpred)
# Eval model on train
scores = reloaded_model.evaluate(dataset, [regression_metric])
assert scores[regression_metric.name] < 0.1
@pytest.mark.tensorflow
def test_graphconvmodel_reload():
featurizer = dc.feat.ConvMolFeaturizer()
tasks = ["outcome"]
mols = ["C", "CO", "CC"]
X = featurizer(mols)
y = np.array([0, 1, 0])
dataset = dc.data.NumpyDataset(X, y)
classification_metric = dc.metrics.Metric(dc.metrics.roc_auc_score,
np.mean,
mode="classification")
batch_size = 10
model_dir = tempfile.mkdtemp()
model = dc.models.GraphConvModel(len(tasks),
batch_size=batch_size,
batch_normalize=False,
mode='classification',
model_dir=model_dir)
model.fit(dataset, nb_epoch=10)
scores = model.evaluate(dataset, [classification_metric])
assert scores[classification_metric.name] >= 0.6
# Reload trained Model
reloaded_model = dc.models.GraphConvModel(len(tasks),
batch_size=batch_size,
batch_normalize=False,
mode='classification',
model_dir=model_dir)
reloaded_model.restore()
# Check predictions match on random sample
predmols = ["CCCC", "CCCCCO", "CCCCC"]
Xpred = featurizer(predmols)
predset = dc.data.NumpyDataset(Xpred)
origpred = model.predict(predset)
reloadpred = reloaded_model.predict(predset)
assert np.allclose(origpred, reloadpred)
# Eval model on train
scores = reloaded_model.evaluate(dataset, [classification_metric])
assert scores[classification_metric.name] > .6
@pytest.mark.tensorflow
def test_chemception_reload():
"""Test that chemception models can be saved and reloaded."""
img_size = 80
img_spec = "engd"
res = 0.5
n_tasks = 1
featurizer = dc.feat.SmilesToImage(img_size=img_size,
img_spec=img_spec,
res=res)
data_points = 10
mols = ["CCCCCCCC"] * data_points
X = featurizer(mols)
y = np.random.randint(0, 2, size=(data_points, n_tasks))
w = np.ones(shape=(data_points, n_tasks))
dataset = dc.data.NumpyDataset(X, y, w, mols)
_ = dc.metrics.Metric(dc.metrics.roc_auc_score,
np.mean,
mode="classification")
model_dir = tempfile.mkdtemp()
model = dc.models.ChemCeption(n_tasks=n_tasks,
img_spec="engd",
model_dir=model_dir,
mode="classification")
model.fit(dataset, nb_epoch=3)
# Reload Trained Model
reloaded_model = dc.models.ChemCeption(n_tasks=n_tasks,
img_spec="engd",
model_dir=model_dir,
mode="classification")
reloaded_model.restore()
# Check predictions match on random sample
predmols = ["CCCC", "CCCCCO", "CCCCC"]
Xpred = featurizer(predmols)
predset = dc.data.NumpyDataset(Xpred)
origpred = model.predict(predset)
reloadpred = reloaded_model.predict(predset)
assert np.all(origpred == reloadpred)
# TODO: This test is a little awkward: the Smiles2Vec model depends on a dataset_file being available on disk. This needs to be cleaned up to match the standard model handling API.
@pytest.mark.tensorflow
def test_smiles2vec_reload():
"""Test that smiles2vec models can be saved and reloaded."""
dataset_file = os.path.join(os.path.dirname(__file__), "assets",
"chembl_25_small.csv")
max_len = 250
pad_len = 10
max_seq_len = 20
char_to_idx = create_char_to_idx(dataset_file,
max_len=max_len,
smiles_field="smiles")
feat = dc.feat.SmilesToSeq(char_to_idx=char_to_idx,
max_len=max_len,
pad_len=pad_len)
n_tasks = 5
data_points = 10
loader = dc.data.CSVLoader(tasks=CHEMBL25_TASKS,
smiles_field='smiles',
featurizer=feat)
dataset = loader.create_dataset(inputs=[dataset_file],
shard_size=10000,
data_dir=tempfile.mkdtemp())
y = np.random.randint(0, 2, size=(data_points, n_tasks))
w = np.ones(shape=(data_points, n_tasks))
dataset = dc.data.NumpyDataset(dataset.X[:data_points, :max_seq_len], y, w,
dataset.ids[:data_points])
_ = dc.metrics.Metric(dc.metrics.roc_auc_score,
np.mean,
mode="classification")
model_dir = tempfile.mkdtemp()
model = dc.models.Smiles2Vec(char_to_idx=char_to_idx,
max_seq_len=max_seq_len,
use_conv=True,
n_tasks=n_tasks,
model_dir=model_dir,
mode="classification")
model.fit(dataset, nb_epoch=3)
# Reload Trained Model
reloaded_model = dc.models.Smiles2Vec(char_to_idx=char_to_idx,
max_seq_len=max_seq_len,
use_conv=True,
n_tasks=n_tasks,
model_dir=model_dir,
mode="classification")
reloaded_model.restore()
# Check predictions match on original dataset
origpred = model.predict(dataset)
reloadpred = reloaded_model.predict(dataset)
assert np.all(origpred == reloadpred)
# TODO: We need a cleaner usage example for this
@pytest.mark.tensorflow
def test_DTNN_regression_reload():
"""Test DTNN can reload datasets."""
np.random.seed(123)
tf.random.set_seed(123)
n_tasks = 1
current_dir = os.path.dirname(os.path.abspath(__file__))
input_file = os.path.join(current_dir, "assets/example_DTNN.mat")
dataset = scipy.io.loadmat(input_file)
X = dataset['X']
y = dataset['T']
w = np.ones_like(y)
dataset = dc.data.NumpyDataset(X, y, w, ids=None)
n_tasks = y.shape[1]
model_dir = tempfile.mkdtemp()
model = dc.models.DTNNModel(n_tasks,
n_embedding=20,
n_distance=100,
learning_rate=1.0,
model_dir=model_dir,
mode="regression")
# Fit trained model
model.fit(dataset, nb_epoch=250)
reloaded_model = dc.models.DTNNModel(n_tasks,
n_embedding=20,
n_distance=100,
learning_rate=1.0,
model_dir=model_dir,
mode="regression")
reloaded_model.restore()
# Check predictions match on random sample
origpred = model.predict(dataset)
reloadpred = reloaded_model.predict(dataset)
assert np.all(origpred == reloadpred)
def generate_sequences(sequence_length, num_sequences):
for i in range(num_sequences):
seq = [
np.random.randint(10)
for x in range(np.random.randint(1, sequence_length + 1))
]
yield (seq, seq)
@pytest.mark.tensorflow
def test_seq2seq_reload():
"""Test reloading for seq2seq models."""
sequence_length = 8
tokens = list(range(10))
model_dir = tempfile.mkdtemp()
s = dc.models.SeqToSeq(tokens,
tokens,
sequence_length,
encoder_layers=2,
decoder_layers=2,
embedding_dimension=150,
learning_rate=0.01,
dropout=0.1,
model_dir=model_dir)
# Train the model on random sequences. We aren't training long enough to
# really make it reliable, but I want to keep this test fast, and it should
# still be able to reproduce a reasonable fraction of input sequences.
s.fit_sequences(generate_sequences(sequence_length, 25000))
# Test it out.
tests = [seq for seq, target in generate_sequences(sequence_length, 50)]
pred1 = s.predict_from_sequences(tests, beam_width=1)
pred4 = s.predict_from_sequences(tests, beam_width=4)
reloaded_s = dc.models.SeqToSeq(tokens,
tokens,
sequence_length,
encoder_layers=2,
decoder_layers=2,
embedding_dimension=150,
learning_rate=0.01,
dropout=0.1,
model_dir=model_dir)
reloaded_s.restore()
reloaded_pred1 = reloaded_s.predict_from_sequences(tests, beam_width=1)
assert len(pred1) == len(reloaded_pred1)
for (p1, r1) in zip(pred1, reloaded_pred1):
assert p1 == r1
reloaded_pred4 = reloaded_s.predict_from_sequences(tests, beam_width=4)
assert len(pred4) == len(reloaded_pred4)
for (p4, r4) in zip(pred4, reloaded_pred4):
assert p4 == r4
embeddings = s.predict_embeddings(tests)
pred1e = s.predict_from_embeddings(embeddings, beam_width=1)
pred4e = s.predict_from_embeddings(embeddings, beam_width=4)
reloaded_embeddings = reloaded_s.predict_embeddings(tests)
reloaded_pred1e = reloaded_s.predict_from_embeddings(reloaded_embeddings,
beam_width=1)
reloaded_pred4e = reloaded_s.predict_from_embeddings(reloaded_embeddings,
beam_width=4)
assert np.all(embeddings == reloaded_embeddings)
assert len(pred1e) == len(reloaded_pred1e)
for (p1e, r1e) in zip(pred1e, reloaded_pred1e):
assert p1e == r1e
assert len(pred4e) == len(reloaded_pred4e)
for (p4e, r4e) in zip(pred4e, reloaded_pred4e):
assert p4e == r4e
<file_sep>Licensing and Commercial Uses
=============================
DeepChem is licensed under the MIT License. We actively support
commercial users. Note that any novel molecules, materials, or other
discoveries powered by DeepChem belong entirely to the user and not to
DeepChem developers.
That said, we would very much appreciate a citation if you find our tools useful.
You can cite DeepChem with the following reference.
.. code-block::
@book{Ramsundar-et-al-2019,
title={Deep Learning for the Life Sciences},
author={<NAME> and <NAME> and <NAME> and <NAME> and <NAME> and <NAME>},
publisher={O'Reilly Media},
note={\url{https://www.amazon.com/Deep-Learning-Life-Sciences-Microscopy/dp/1492039837}},
year={2019}
}
<file_sep>'''
"""
Sanity tests on progressive models.
"""
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
__author__ = "<NAME>"
__copyright__ = "Copyright 2016, Stanford University"
__license__ = "MIT"
import os
import tempfile
import numpy as np
import unittest
import sklearn
import shutil
import tensorflow as tf
import deepchem as dc
from tensorflow.python.framework import test_util
class TestProgressive(test_util.TensorFlowTestCase):
"""
Test that progressive models satisfy basic sanity checks.
"""
def setUp(self):
super(TestProgressive, self).setUp()
self.current_dir = os.path.dirname(os.path.abspath(__file__))
def test_construction(self):
"""Test that progressive models can be constructed without crash."""
prog_model = dc.models.ProgressiveMultitaskRegressor(
n_tasks=1,
n_features=100,
alpha_init_stddevs=[.08],
dropouts=[0.],
learning_rate=0.003,
weight_init_stddevs=[np.sqrt(6) / np.sqrt(1000)],
batch_size=200)
def test_fit(self):
"""Test that progressive models can fit without crash."""
n_tasks = 2
n_samples = 10
n_features = 100
np.random.seed(123)
ids = np.arange(n_samples)
X = np.random.rand(n_samples, n_features)
y = np.zeros((n_samples, n_tasks))
w = np.ones((n_samples, n_tasks))
dataset = dc.data.NumpyDataset(X, y, w, ids)
prog_model = dc.models.ProgressiveMultitaskRegressor(
n_tasks=n_tasks,
n_features=n_features,
alpha_init_stddevs=[.08],
dropouts=[0.],
learning_rate=0.003,
weight_init_stddevs=[np.sqrt(6) / np.sqrt(1000)],
batch_size=2)
prog_model.fit(dataset)
def test_fit_lateral(self):
"""Test that multilayer model fits correctly.
Lateral connections and adapters are only added for multilayer models. Test
that fit functions with multilayer models.
"""
n_tasks = 2
n_samples = 10
n_features = 100
np.random.seed(123)
ids = np.arange(n_samples)
X = np.random.rand(n_samples, n_features)
y = np.zeros((n_samples, n_tasks))
w = np.ones((n_samples, n_tasks))
dataset = dc.data.NumpyDataset(X, y, w, ids)
n_layers = 3
prog_model = dc.models.ProgressiveMultitaskRegressor(
n_tasks=n_tasks,
n_features=n_features,
alpha_init_stddevs=[.08] * n_layers,
layer_sizes=[100] * n_layers,
weight_init_stddevs=[.02] * n_layers,
bias_init_consts=[1.] * n_layers,
dropouts=[0.] * n_layers,
learning_rate=0.003,
batch_size=2)
prog_model.fit(dataset)
def test_fit_lateral_multi(self):
"""Test that multilayer model fits correctly.
Test multilayer model with multiple tasks (> 2) to verify that lateral
connections of growing size work correctly.
"""
n_tasks = 3
n_samples = 10
n_features = 100
np.random.seed(123)
ids = np.arange(n_samples)
X = np.random.rand(n_samples, n_features)
y = np.zeros((n_samples, n_tasks))
w = np.ones((n_samples, n_tasks))
dataset = dc.data.NumpyDataset(X, y, w, ids)
n_layers = 3
prog_model = dc.models.ProgressiveMultitaskRegressor(
n_tasks=n_tasks,
n_features=n_features,
alpha_init_stddevs=[.08] * n_layers,
layer_sizes=[100] * n_layers,
weight_init_stddevs=[.02] * n_layers,
bias_init_consts=[1.] * n_layers,
dropouts=[0.] * n_layers,
learning_rate=0.003,
batch_size=2)
prog_model.fit(dataset)
def test_frozen_weights(self):
"""Test that fitting one task doesn't change predictions of another.
Tests that weights are frozen when training different tasks.
"""
n_tasks = 2
n_samples = 10
n_features = 100
np.random.seed(123)
ids = np.arange(n_samples)
X = np.random.rand(n_samples, n_features)
y = np.zeros((n_samples, n_tasks))
w = np.ones((n_samples, n_tasks))
dataset = dc.data.NumpyDataset(X, y, w, ids)
n_layers = 3
prog_model = dc.models.ProgressiveMultitaskRegressor(
n_tasks=n_tasks,
n_features=n_features,
alpha_init_stddevs=[.08] * n_layers,
layer_sizes=[100] * n_layers,
weight_init_stddevs=[.02] * n_layers,
bias_init_consts=[1.] * n_layers,
dropouts=[0.] * n_layers,
learning_rate=0.003,
batch_size=2)
# Fit just on task zero
# Notice that we keep the session open
prog_model.fit(dataset, tasks=[0], close_session=False)
y_pred_task_zero = prog_model.predict(dataset)[:, 0]
# Fit on task one
prog_model.fit(dataset, tasks=[1])
y_pred_task_zero_after = prog_model.predict(dataset)[:, 0]
# The predictions for task zero should not change after training
# on task one.
np.testing.assert_allclose(y_pred_task_zero, y_pred_task_zero_after)
'''
<file_sep>from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
import os
import sys
import deepchem as dc
import json
import numpy as np
import tensorflow as tf
from deepchem.models.atomic_conv import atomic_conv_model
sys.path.append("../../models")
from deepchem.models.tensorgraph.layers import Layer, Feature, Label, L2Loss, AtomicConvolution, Transpose, Dense
from deepchem.models import TensorGraph
import itertools
import time
seed = 123
np.random.seed(seed)
tf.set_random_seed(seed)
base_dir = os.getcwd()
batch_size = 24
splits = ["random", "scaffold", "stratified", "temporal"]
def params():
d2 = {
"name":
"hyper5",
"radial": [[
1.5, 2.5, 3.5, 4.5, 5.5, 6.0, 6.5, 7.0, 7.5, 8.0, 8.5, 9.0, 9.5, 10.0,
10.5
], [0.0, 2.0, 4.0], [0.05]],
"layer_sizes": [32, 16, 8],
"learning_rate":
0.001,
"epochs":
10,
}
yield d2
d2 = {
"name":
"hyper6",
"radial": [[
1.5, 2.0, 2.5, 3.5, 4.5, 5.0, 5.5, 6.0, 6.5, 7.0, 7.5, 8.0, 8.5, 9.0,
9.5, 10.0, 10.5
], [0.0, 2.0, 4.0], [0.1]],
"layer_sizes": [32, 32, 16],
"learning_rate":
0.001,
"epochs":
10,
}
yield d2
metric = [
dc.metrics.Metric(dc.metrics.mean_absolute_error, mode="regression"),
dc.metrics.Metric(dc.metrics.pearson_r2_score, mode="regression")
]
for split in splits:
data_dir = os.path.join(base_dir, "datasets")
train_dir = os.path.join(data_dir, "%s_train" % split)
test_dir = os.path.join(data_dir, "%s_test" % split)
train_dataset = dc.data.DiskDataset(train_dir)
test_dataset = dc.data.DiskDataset(test_dir)
pdbbind_tasks = ["-logKd/Ki"]
transformers = []
y_train = train_dataset.y
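  # Added note: the factor below, -(2.479 / 4.184), is RT at ~298 K in kJ/mol
  # divided by kJ per kcal; this presumably rescales the -logKd/Ki labels onto
  # an energy-like scale in kcal/mol. The same rescaling is applied to the
  # test labels further down.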
y_train *= -1 * 2.479 / 4.184
train_dataset = dc.data.DiskDataset.from_numpy(
train_dataset.X,
y_train,
train_dataset.w,
train_dataset.ids,
tasks=pdbbind_tasks)
y_test = test_dataset.y
y_test *= -1 * 2.479 / 4.184
test_dataset = dc.data.DiskDataset.from_numpy(
test_dataset.X,
y_test,
test_dataset.w,
test_dataset.ids,
tasks=pdbbind_tasks)
for param in params():
num_epochs = param['epochs']
del param['epochs']
name = param['name']
del param['name']
tg, feed_dict_generator, label = atomic_conv_model(**param)
tg.fit_generator(
feed_dict_generator(train_dataset, batch_size, epochs=num_epochs))
test_evaluator = dc.utils.evaluate.GeneratorEvaluator(
tg, feed_dict_generator(test_dataset, batch_size), transformers,
[label])
test_scores = test_evaluator.compute_model_performance(metric)
param.update(test_scores)
param['epochs'] = num_epochs
param['split'] = split
param['name'] = name
print("Results")
print(param)
with open('hyper_evaluation.txt', 'a') as fout:
fout.write(json.dumps(param))
fout.write("\n")
<file_sep>"""
Atomic coordinate featurizer.
"""
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
__author__ = "<NAME>"
__copyright__ = "Copyright 2016, Stanford University"
__license__ = "MIT"
import numpy as np
from rdkit import Chem
from deepchem.feat import Featurizer
from deepchem.feat import ComplexFeaturizer
from deepchem.utils import pad_array
def get_cells(coords, neighbor_cutoff):
"""Computes cells given molecular coordinates.
Parameters
----------
coords: np.array
    Cartesian coordinates [Angstrom]
neighbor_cutoff: float
Threshold distance [Angstroms] for counting neighbors.
Returns
-------
x_bins: list
List contains tuples of x_cell boundaries
y_bins: list
List contains tuples of y_cell boundaries
z_bins: list
List contains tuples of z_cell boundaries
"""
x_max, x_min = np.amax(coords[:, 0]), np.amin(coords[:, 0])
y_max, y_min = np.amax(coords[:, 1]), np.amin(coords[:, 1])
z_max, z_min = np.amax(coords[:, 2]), np.amin(coords[:, 2])
# Compute cells for this molecule. O(constant)
x_bins, y_bins, z_bins = [], [], []
x_current, y_current, z_current = x_min, y_min, z_min
# min == max if molecule is planar in some direction
# we should still create a bin
if not x_min == x_max:
while x_current < x_max:
x_bins.append((x_current, x_current + neighbor_cutoff))
x_current += neighbor_cutoff
else:
x_bins.append((x_current, x_current + neighbor_cutoff))
if not y_min == y_max:
while y_current < y_max:
y_bins.append((y_current, y_current + neighbor_cutoff))
y_current += neighbor_cutoff
else:
y_bins.append((y_current, y_current + neighbor_cutoff))
if not z_min == z_max:
while z_current < z_max:
z_bins.append((z_current, z_current + neighbor_cutoff))
z_current += neighbor_cutoff
else:
z_bins.append((z_current, z_current + neighbor_cutoff))
return x_bins, y_bins, z_bins
def put_atoms_in_cells(coords, x_bins, y_bins, z_bins):
"""Place each atom into cells. O(N) runtime.
Parameters
----------
coords: np.ndarray
(N, 3) array where N is number of atoms
x_bins: list
List of (cell_start, cell_end) for x-coordinate
y_bins: list
List of (cell_start, cell_end) for y-coordinate
z_bins: list
List of (cell_start, cell_end) for z-coordinate
Returns
-------
cell_to_atoms: dict
Dict elements contain atom indices for cell
atom_to_cell: dict
Dict elements contain cell indices for atom
"""
N = coords.shape[0]
cell_to_atoms = {}
atom_to_cell = {}
for x_ind in range(len(x_bins)):
for y_ind in range(len(y_bins)):
for z_ind in range(len(z_bins)):
cell_to_atoms[(x_ind, y_ind, z_ind)] = []
for atom in range(N):
x_coord, y_coord, z_coord = coords[atom]
x_ind, y_ind, z_ind = None, None, None
for ind, (x_cell_min, x_cell_max) in enumerate(x_bins):
if x_coord >= x_cell_min and x_coord <= x_cell_max:
x_ind = ind
break
if x_ind is None:
raise ValueError("No x-cell found!")
for ind, (y_cell_min, y_cell_max) in enumerate(y_bins):
if y_coord >= y_cell_min and y_coord <= y_cell_max:
y_ind = ind
break
if y_ind is None:
raise ValueError("No y-cell found!")
for ind, (z_cell_min, z_cell_max) in enumerate(z_bins):
if z_coord >= z_cell_min and z_coord <= z_cell_max:
z_ind = ind
break
if z_ind is None:
raise ValueError("No z-cell found!")
cell_to_atoms[(x_ind, y_ind, z_ind)].append(atom)
atom_to_cell[atom] = (x_ind, y_ind, z_ind)
return cell_to_atoms, atom_to_cell
def compute_neighbor_cell_map(N_x, N_y, N_z):
"""Compute neighbors of cells in grid.
Parameters
----------
N_x: int
Number of grid cells in x-dimension.
N_y: int
Number of grid cells in y-dimension.
N_z: int
Number of grid cells in z-dimension.
Returns
-------
neighbor_cell_map: dict
Dict elements contain neighbor cell indices
"""
#TODO(JSG): Implement non-PBC version. For now this seems fine ..
neighbor_cell_map = {}
for x_ind in range(N_x):
for y_ind in range(N_y):
for z_ind in range(N_z):
neighbors = []
offsets = [-1, 0, +1]
# Note neighbors contains self!
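        # Added note: the modulo arithmetic below wraps cell indices around the
        # grid edges, so neighbor cells are computed under periodic boundary
        # conditions (see the TODO above regarding a non-PBC variant).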
for x_offset in offsets:
for y_offset in offsets:
for z_offset in offsets:
neighbors.append(((x_ind + x_offset) % N_x, (y_ind + y_offset) %
N_y, (z_ind + z_offset) % N_z))
neighbor_cell_map[(x_ind, y_ind, z_ind)] = neighbors
return neighbor_cell_map
def get_coords(mol):
"""Gets coordinates in Angstrom for RDKit mol.
Parameters
----------
mol: rdkit.Chem.rdchem.mol
Molecule
Returns
-------
coords: np.array
    Cartesian coordinates [Angstrom]
"""
N = mol.GetNumAtoms()
coords = np.zeros((N, 3))
coords_raw = [mol.GetConformer(0).GetAtomPosition(i) for i in range(N)]
for atom in range(N):
coords[atom, 0] = coords_raw[atom].x
coords[atom, 1] = coords_raw[atom].y
coords[atom, 2] = coords_raw[atom].z
return coords
class NeighborListAtomicCoordinates(Featurizer):
"""
Adjacency List of neighbors in 3-space
Neighbors determined by user-defined distance cutoff [in Angstrom].
https://en.wikipedia.org/wiki/Cell_list
Ref: http://www.cs.cornell.edu/ron/references/1989/Calculations%20of%20a%20List%20of%20Neighbors%20in%20Molecular%20Dynamics%20Si.pdf
Example:
>>> n_atoms = 6
>>> n_neighbors = 6
>>> cutoff = 12.0
>>> boxsize = None
>>> input_file = "test.sdf"
>>> tasks = ["energy"]
>>> featurizers = NeighborListAtomicCoordinates(n_atoms, n_neighbors, cutoff, boxsize)
  >>> featurizer = dc.data.SDFLoader(tasks, smiles_field="smiles", mol_field="mol",
  ...                                featurizer=featurizers)
>>> dataset = featurizer.featurize(input_file)
"""
def __init__(self,
max_num_atoms,
max_num_neighbors,
neighbor_cutoff,
boxsize=None):
"""Initialize NeighborListAtomicCoordinates featurizer.
Parameters
----------
max_num_atoms: int
Maximum number of atoms.
max_num_neighbors: int
Maximum number of neighbors per atom.
neighbor_cutoff: float
Threshold distance [Angstroms] for counting neighbors.
boxsize: float, optional (default None)
Size of periodic box. If None, no periodic boundary conditions.
"""
if boxsize is not None and boxsize < 2 * neighbor_cutoff:
raise ValueError("boxsize must be greater than 2*neighbor_cutoff")
self.max_num_atoms = max_num_atoms
self.max_num_neighbors = max_num_neighbors
self.neighbor_cutoff = neighbor_cutoff
self.boxsize = boxsize
self.dtype = object
def _featurize(self, mol):
"""Compute neighbor list.
Parameters
----------
mol: rdkit.Chem.rdchem.mol
Molecule
"""
N = mol.GetNumAtoms()
coords = get_coords(mol)
x_bins, y_bins, z_bins = get_cells(coords, self.neighbor_cutoff)
# Associate each atom with cell it belongs to. O(N)
cell_to_atoms, atom_to_cell = put_atoms_in_cells(coords, x_bins, y_bins,
z_bins)
# Associate each cell with its neighbor cells. Assumes periodic boundary
# conditions, so does wrapround. O(constant)
N_x, N_y, N_z = len(x_bins), len(y_bins), len(z_bins)
neighbor_cell_map = compute_neighbor_cell_map(N_x, N_y, N_z)
# For each atom, loop through all atoms in its cell and neighboring cells.
# Accept as neighbors only those within threshold. This computation should be
# O(Nm), where m is the number of atoms within a set of neighboring-cells.
neighbor_list = {}
if self.boxsize is not None:
for atom in range(N):
cell = atom_to_cell[atom]
neighbor_cells = neighbor_cell_map[cell]
neighbor_list[atom] = set()
for neighbor_cell in neighbor_cells:
atoms_in_cell = cell_to_atoms[neighbor_cell]
for neighbor_atom in atoms_in_cell:
if neighbor_atom == atom:
continue
dist = np.linalg.norm(coords[atom] - coords[neighbor_atom])
dist = dist - self.boxsize * np.round(dist / self.boxsize)
if dist < self.neighbor_cutoff:
neighbor_list[atom].add((neighbor_atom, dist))
# Sort neighbors by distance
closest_neighbors = sorted(
list(neighbor_list[atom]), key=lambda elt: elt[1])
closest_neighbors = [nbr for (nbr, dist) in closest_neighbors]
# Pick up to max_num_neighbors
closest_neighbors = closest_neighbors[:self.max_num_neighbors]
neighbor_list[atom] = closest_neighbors
else:
for atom in range(N):
cell = atom_to_cell[atom]
neighbor_cells = neighbor_cell_map[cell]
neighbor_list[atom] = set()
for neighbor_cell in neighbor_cells:
atoms_in_cell = cell_to_atoms[neighbor_cell]
for neighbor_atom in atoms_in_cell:
if neighbor_atom == atom:
continue
dist = np.linalg.norm(coords[atom] - coords[neighbor_atom])
if dist < self.neighbor_cutoff:
neighbor_list[atom].add((neighbor_atom, dist))
closest_neighbors = sorted(
list(neighbor_list[atom]), key=lambda elt: elt[1])
closest_neighbors = [nbr for (nbr, dist) in closest_neighbors]
closest_neighbors = closest_neighbors[:self.max_num_neighbors]
neighbor_list[atom] = closest_neighbors
Z = pad_array(
np.array([atom.GetAtomicNum()
for atom in mol.GetAtoms()]), self.max_num_atoms)
coords = pad_array(coords, (self.max_num_atoms, 3))
return (coords, neighbor_list, Z)
class ComplexNeighborListFragmentAtomicCoordinates(ComplexFeaturizer):
"""
Adjacency list of neighbors for protein-ligand complexes in 3-space.
  Neighbors determined by user-defined distance cutoff.
Currently only compatible with pdb files.
Example:
>>> frag1_n_atoms = 3
>>> frag2_n_atoms = 3
>>> complex_n_atoms = 6
>>> n_neighbors = 6
>>> cutoff = 12.0
>>> boxsize = None
  >>> featurizer = ComplexNeighborListFragmentAtomicCoordinates(frag1_n_atoms,
  ...     frag2_n_atoms, complex_n_atoms, n_neighbors, cutoff, boxsize)
>>> frag1 = "frag1.pdb"
>>> frag2 = "frag2.pdb"
>>> feature = featurizer._featurize_complex(str(frag1), str(frag2))
"""
def __init__(self,
frag1_num_atoms,
frag2_num_atoms,
complex_num_atoms,
max_num_neighbors,
neighbor_cutoff=12.0,
boxsize=None):
"""Initialize ComplexNeighborListFragmentAtomicCoordinates featurizer
Parameters
----------
frag1_num_atoms: int
Maximum number of atoms in frag1
frag2_num_atoms: int
Maximum number of atoms in frag2
complex_num_atoms: int
Maximum number of atoms in complex
max_num_neighbors: int
Maximum number of neighbors per atom
neighbor_cutoff: float
Threshold distance [Angstroms] for counting neighbors.
boxsize: float, optional (default None)
Size of periodic box. If None, no periodic boundary conditions.
"""
self.frag1_num_atoms = frag1_num_atoms
self.frag2_num_atoms = frag2_num_atoms
self.complex_num_atoms = complex_num_atoms
self.max_num_neighbors = max_num_neighbors
self.neighbor_cutoff = neighbor_cutoff
self.boxsize = boxsize
# Type of data created by this featurizer
self.dtype = object
self.frag1_featurizer = NeighborListAtomicCoordinates(
self.frag1_num_atoms, self.max_num_neighbors, self.neighbor_cutoff,
self.boxsize)
self.frag2_featurizer = NeighborListAtomicCoordinates(
self.frag2_num_atoms, self.max_num_neighbors, self.neighbor_cutoff,
self.boxsize)
self.complex_featurizer = NeighborListAtomicCoordinates(
self.complex_num_atoms, self.max_num_neighbors, self.neighbor_cutoff,
self.boxsize)
def _featurize_complex(self, frag1_pdb_file, frag2_pdb_file):
"""Featurize fragments and complex.
Parameters
----------
frag1_pdb_file: string
Location of frag1_pdb_file.
frag2_pdb_file: string
Location of frag2_pdb_file.
Returns
-------
retval: tuple
Tuple containing coordinates, neighbor list, and atomic number for
fragment 1, fragment 2, and complex
"""
try:
frag1_mol = Chem.MolFromPDBFile(
frag1_pdb_file, sanitize=False, removeHs=False)
frag2_mol = Chem.MolFromPDBFile(
frag2_pdb_file, sanitize=False, removeHs=False)
except:
frag1_mol = None
frag2_mol = None
if frag1_mol and frag2_mol:
frag1_coords, frag1_neighbor_list, frag1_z = self.frag1_featurizer._featurize(
frag1_mol)
frag2_coords, frag2_neighbor_list, frag2_z = self.frag2_featurizer._featurize(
frag2_mol)
complex_mol = Chem.rdmolops.CombineMols(frag1_mol, frag2_mol)
complex_coords, complex_neighbor_list, complex_z = self.complex_featurizer._featurize(
complex_mol)
return (frag1_coords, frag1_neighbor_list, frag1_z, frag2_coords,
frag2_neighbor_list, frag2_z, complex_coords,
complex_neighbor_list, complex_z)
else:
print("failed to featurize")
return (None, None, None, None, None, None, None, None, None)
<file_sep>"""
Script that trains multitask models on Delaney dataset.
"""
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
import os
import shutil
import numpy as np
import deepchem as dc
from deepchem.molnet import load_delaney
# Only for debug!
np.random.seed(123)
# Load Delaney dataset
n_features = 1024
delaney_tasks, delaney_datasets, transformers = load_delaney()
train_dataset, valid_dataset, test_dataset = delaney_datasets
# Fit models
metric = dc.metrics.Metric(dc.metrics.pearson_r2_score, np.mean)
model = dc.models.MultitaskRegressor(
len(delaney_tasks),
n_features,
layer_sizes=[1000],
dropouts=[.25],
learning_rate=0.001,
batch_size=50,
verbosity="high")
# Fit trained model
model.fit(train_dataset)
print("Evaluating model")
train_scores = model.evaluate(train_dataset, [metric], transformers)
valid_scores = model.evaluate(valid_dataset, [metric], transformers)
print("Train scores")
print(train_scores)
print("Validation scores")
print(valid_scores)
<file_sep>import numpy as np
import deepchem as dc
dataset = dc.data.NumpyDataset(np.random.rand(500, 5))
print(dataset)
<file_sep>from deepchem.feat import Featurizer
from typing import List
import numpy as np
try:
from transformers import RobertaTokenizerFast
except ModuleNotFoundError:
raise ImportError(
'Transformers must be installed for RxnFeaturizer to be used!')
class RxnFeaturizer(Featurizer):
"""Reaction Featurizer.
RxnFeaturizer is a wrapper class for HuggingFace's RobertaTokenizerFast,
that is intended for featurizing chemical reaction datasets. The featurizer
computes the source and target required for a seq2seq task and applies the
RobertaTokenizer on them separately. Additionally, it can also separate or
mix the reactants and reagents before tokenizing.
Examples
--------
>>> from deepchem.feat import RxnFeaturizer
>>> from transformers import RobertaTokenizerFast
>>> tokenizer = RobertaTokenizerFast.from_pretrained("seyonec/PubChem10M_SMILES_BPE_450k")
>>> featurizer = RxnFeaturizer(tokenizer, sep_reagent=True)
>>> feats = featurizer.featurize(['CCS(=O)(=O)Cl.OCCBr>CCN(CC)CC.CCOCC>CCS(=O)(=O)OCCBr'])
Notes
-----
- The featurize method expects a List of reactions.
- Use the sep_reagent toggle to enable/disable reagent separation.
- True - Separate the reactants and reagents
- False - Mix the reactants and reagents
"""
def __init__(self, tokenizer: RobertaTokenizerFast, sep_reagent: bool):
"""Initialize a ReactionFeaturizer object.
Parameters
----------
tokenizer: RobertaTokenizerFast
HuggingFace Tokenizer to be used for featurization.
sep_reagent: bool
Toggle to separate or mix the reactants and reagents.
"""
if not isinstance(tokenizer, RobertaTokenizerFast):
raise TypeError(
f"""`tokenizer` must be a constructed `RobertaTokenizerFast`
object, not {type(tokenizer)}""")
else:
self.tokenizer = tokenizer
self.sep_reagent = sep_reagent
def _featurize(self, datapoint: str, **kwargs) -> List[List[List[int]]]:
"""Featurizes a datapoint.
Processes each entry in the dataset by first applying the reactant-reagent
mixing, the source/target separation and then the pretrained tokenizer on the
separated strings.
Parameters
----------
datapoint: str
the reaction SMILES to be processed.
Returns
-------
encoding: List
List containing two lists for the source and target encodings.
The encodings are lists containing two lists: `the input_ids` and the
`attention_mask`.
"""
datapoint_list = [datapoint]
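        # Added comment: a reaction SMILES has the general form
        # "reactant(s)>reagent(s)>product(s)", so splitting on '>' below
        # recovers the reactant, reagent and product strings.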
reactant = list(map(lambda x: x.split('>')[0], datapoint_list))
reagent = list(map(lambda x: x.split('>')[1], datapoint_list))
product = list(map(lambda x: x.split('>')[2], datapoint_list))
if self.sep_reagent:
source = [x + '>' + y for x, y in zip(reactant, reagent)]
else:
source = [
x + '.' + y + '>' if y else x + '>' + y
for x, y in zip(reactant, reagent)
]
target = product
source_encoding = list(
self.tokenizer(source, padding=True, **kwargs).values())
target_encoding = list(
self.tokenizer(target, padding=True, **kwargs).values())
return [source_encoding, target_encoding]
def __call__(self, *args, **kwargs) -> np.ndarray:
return self.featurize(*args, **kwargs)
def __str__(self) -> str:
"""Handles file name error.
Overrides the __str__ method of the Featurizer base class to avoid errors
while saving the dataset, due to the large default name of the HuggingFace
tokenizer.
"""
return 'RxnFeaturizer_' + str(self.sep_reagent)
<file_sep>"""
Test for Pytorch Normalizing Flow model and its transformations
"""
import pytest
import numpy as np
import unittest
try:
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.distributions import MultivariateNormal
from deepchem.models.torch_models.layers import Affine, RealNVPLayer
from deepchem.models.torch_models.normalizing_flows_pytorch import NormalizingFlow
has_torch = True
except:
has_torch = False
@unittest.skipIf(not has_torch, 'torch is not installed')
@pytest.mark.torch
def test_Affine():
"""
    This test evaluates whether the Affine transformation is applied
    correctly. The log-determinant of the Jacobian must be zero for any
    distribution on the first forward and inverse pass (i.e. immediately after
    initialization), since nothing has been learned yet.
input shape: (samples, dim)
output shape: (samples, dim)
"""
dim = 2
samples = 96
data = MultivariateNormal(torch.zeros(dim), torch.eye(dim))
tensor = data.sample(torch.Size((samples, dim)))
_, log_det_jacobian = Affine(dim).forward(tensor)
_, inverse_log_det_jacobian = Affine(dim).inverse(tensor)
# The first pass of the transformation should be 0
log_det_jacobian = log_det_jacobian.detach().numpy()
inverse_log_det_jacobian = inverse_log_det_jacobian.detach().numpy()
zeros = np.zeros((samples,))
assert np.array_equal(log_det_jacobian, zeros)
assert np.array_equal(inverse_log_det_jacobian, zeros)
@unittest.skipIf(not has_torch, 'torch is not installed')
@pytest.mark.torch
def test_normalizing_flow_pytorch():
"""
    This test evaluates whether the NormalizingFlow model is applied
    correctly: that sampling and its log_prob are computed after the
    transformation layers are applied, and that the log_prob of an input
    tensor is consistent with the NormalizingFlow model.
NormalizingFlow:
sample:
input shape: (samples)
output shape: ((samples, dim), (samples))
log_prob: Method used to learn parameter (optimizing loop)
input shape: (samples)
output shape: (samples)
"""
dim = 2
samples = 96
base_distribution = MultivariateNormal(torch.zeros(dim), torch.eye(dim))
tensor = base_distribution.sample(torch.Size((samples, dim)))
transformation = [Affine(dim)]
model = NormalizingFlow(transformation, base_distribution, dim)
# Test sampling method
sampling, log_prob_ = model.sample(samples)
# Test log_prob method (this method is used when inverse pass)
# Output must be a Nth zero array since nothing is being learned yet
log_prob = model.log_prob(tensor)
# Featurize to assert for tests
log_prob_ = log_prob_.detach().numpy()
log_prob = log_prob.detach().numpy()
zeros = np.zeros((samples,))
# Assert errors for sample method
assert log_prob_.any()
# Assert errors for log_prob method
assert np.array_equal(log_prob, zeros)
@unittest.skipIf(not has_torch, 'torch is not installed')
@pytest.mark.torch
def test_RealNVPLayer():
"""
    This test evaluates whether the RealNVP layer transformation is applied
    correctly. The log-determinant of the Jacobian must be zero for any
    distribution on the first forward and inverse pass (i.e. immediately after
    initialization), since nothing has been learned yet.
input shape: (samples, dim)
output shape: (samples, dim)
"""
dim = 2
samples = 96
data = MultivariateNormal(torch.zeros(dim), torch.eye(dim))
tensor = data.sample(torch.Size((samples, dim)))
layers = 4
hidden_size = 16
masks = F.one_hot(torch.tensor([i % 2 for i in range(layers)])).float()
layers = nn.ModuleList([RealNVPLayer(mask, hidden_size) for mask in masks])
for layer in layers:
_, inverse_log_det_jacobian = layer.inverse(tensor)
inverse_log_det_jacobian = inverse_log_det_jacobian.detach().numpy()
assert np.any(inverse_log_det_jacobian)
<file_sep>"""
Loads synthetic reaction datasets from USPTO.
This file contains loaders for synthetic reaction datasets from the US Patent Office. http://nextmovesoftware.com/blog/2014/02/27/unleashing-over-a-million-reactions-into-the-wild/.
"""
import os
import logging
from typing import List, Optional, Tuple, Union
import deepchem as dc
from deepchem.data import Dataset
from deepchem.molnet.load_function.molnet_loader import TransformerGenerator, _MolnetLoader
try:
from transformers import RobertaTokenizerFast
from deepchem.feat.reaction_featurizer import RxnFeaturizer
except ModuleNotFoundError:
pass
logger = logging.getLogger(__name__)
DEFAULT_DIR = dc.utils.data_utils.get_data_dir()
USPTO_MIT_URL = "https://deepchemdata.s3.us-west-1.amazonaws.com/datasets/USPTO_MIT.csv"
USPTO_STEREO_URL = "https://deepchemdata.s3.us-west-1.amazonaws.com/datasets/USPTO_STEREO.csv"
USPTO_50K_URL = "https://deepchemdata.s3.us-west-1.amazonaws.com/datasets/USPTO_50K.csv"
USPTO_FULL_URL = "https://deepchemdata.s3.us-west-1.amazonaws.com/datasets/USPTO_FULL.csv"
USPTO_TASK: List[str] = []
class _USPTOLoader(_MolnetLoader):
def __init__(self, *args, subset: str, sep_reagent: bool, **kwargs):
super(_USPTOLoader, self).__init__(*args, **kwargs)
self.subset = subset
self.sep_reagent = sep_reagent
self.name = 'USPTO_' + subset
def create_dataset(self) -> Dataset:
if self.subset not in ['MIT', 'STEREO', '50K', 'FULL']:
raise ValueError("Valid Subset names are MIT, STEREO and 50K.")
if self.subset == 'MIT':
dataset_url = USPTO_MIT_URL
if self.subset == 'STEREO':
dataset_url = USPTO_STEREO_URL
if self.subset == '50K':
dataset_url = USPTO_50K_URL
if self.subset == 'FULL':
dataset_url = USPTO_FULL_URL
if self.splitter == 'SpecifiedSplitter':
raise ValueError(
"There is no pre computed split for the full dataset, use a custom split instead!"
)
dataset_file = os.path.join(self.data_dir, self.name + '.csv')
if not os.path.exists(dataset_file):
logger.info("Downloading dataset...")
dc.utils.data_utils.download_url(url=dataset_url,
dest_dir=self.data_dir)
logger.info("Dataset download complete.")
loader = dc.data.CSVLoader(tasks=self.tasks,
feature_field="reactions",
featurizer=self.featurizer)
return loader.create_dataset(dataset_file, shard_size=8192)
def load_uspto(
featurizer: Union[dc.feat.Featurizer, str] = "RxnFeaturizer",
splitter: Union[dc.splits.Splitter, str, None] = None,
transformers: List[Union[TransformerGenerator, str]] = [],
reload: bool = True,
data_dir: Optional[str] = None,
save_dir: Optional[str] = None,
subset: str = "MIT",
sep_reagent: bool = True,
**kwargs
) -> Tuple[List[str], Tuple[Dataset, ...], List[dc.trans.Transformer]]:
"""Load USPTO Datasets.
The USPTO dataset consists of over 1.8 Million organic chemical reactions
extracted from US patents and patent applications. The dataset contains the
reactions in the form of reaction SMILES, which have the general format:
reactant>reagent>product.
Molnet provides ability to load subsets of the USPTO dataset namely MIT,
STEREO and 50K. The MIT dataset contains around 479K reactions, curated by
Jin et al. The STEREO dataset contains around 1 million reactions; it does
not have duplicates and the reactions include stereochemical information.
The 50K dataset contains 50,000 reactions and is the benchmark for
retrosynthesis predictions. The reactions are additionally classified into 10
reaction classes. The canonicalized version of the dataset used by the loader
is the same as that used by Somnath et. al.
The loader uses the SpecifiedSplitter to use the same splits as specified
by Schwaller et. al and Dai et. al. Custom splitters could also be used. There
is a toggle in the loader to skip the source/target transformation needed for
seq2seq tasks. There is an additional toggle to load the dataset with the
reagents and reactants separated or mixed. This alters the entries in source
by replacing the '>' with '.', effectively loading them as a unified
SMILES string.
Parameters
----------
featurizer: Featurizer or str
the featurizer to use for processing the data. Alternatively you can pass
one of the names from dc.molnet.featurizers as a shortcut.
splitter: Splitter or str
the splitter to use for splitting the data into training, validation, and
test sets. Alternatively you can pass one of the names from
dc.molnet.splitters as a shortcut. If this is None, all the data
will be included in a single dataset.
transformers: list of TransformerGenerators or strings
the Transformers to apply to the data. Each one is specified by a
TransformerGenerator or, as a shortcut, one of the names from
dc.molnet.transformers.
reload: bool
if True, the first call for a particular featurizer and splitter will cache
the datasets to disk, and subsequent calls will reload the cached datasets.
data_dir: str
a directory to save the raw data in
save_dir: str
a directory to save the dataset in
subset: str (default 'MIT')
Subset of dataset to download. 'FULL', 'MIT', 'STEREO', and '50K' are supported.
sep_reagent: bool (default True)
Toggle to load dataset with reactants and reagents either separated or mixed.
skip_transform: bool (default True)
Toggle to skip the source/target transformation.
Returns
-------
tasks, datasets, transformers: tuple
tasks : list
Column names corresponding to machine learning target variables.
datasets : tuple
train, validation, test splits of data as
``deepchem.data.datasets.Dataset`` instances.
transformers : list
``deepchem.trans.transformers.Transformer`` instances applied
to dataset.
References
----------
.. [1] <NAME>. Chemical reactions from US patents (1976-Sep2016)
(Version 1). figshare (2017). https://doi.org/10.6084/m9.figshare.5104873.v1
.. [2] Somnath, <NAME>, et al. "Learning graph models for retrosynthesis
prediction." arXiv preprint arXiv:2006.07038 (2020).
.. [3] Schwaller, Philippe, et al. "Molecular transformer: a model for
uncertainty-calibrated chemical reaction prediction."
ACS central science 5.9 (2019): 1572-1583.
.. [4] <NAME>, et al. "Retrosynthesis prediction with conditional
graph logic network." arXiv preprint arXiv:2001.01408 (2020).
"""
tokenizer = RobertaTokenizerFast.from_pretrained(
"seyonec/PubChem10M_SMILES_BPE_450k")
if featurizer == "plain":
featurizer = dc.feat.DummyFeaturizer()
else:
featurizer = RxnFeaturizer(tokenizer, sep_reagent=sep_reagent)
loader = _USPTOLoader(featurizer,
splitter,
transformers,
USPTO_TASK,
data_dir,
save_dir,
subset=subset,
sep_reagent=sep_reagent,
**kwargs)
return loader.load_dataset(loader.name, reload)
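# Hedged usage sketch (not part of the loader itself). It assumes load_uspto
# is exported via dc.molnet like the other MolNet loaders; note that the
# transformers library must be installed because the tokenizer above is
# created unconditionally, and that the "plain" featurizer maps to
# DummyFeaturizer.
#
#     import deepchem as dc
#     tasks, (train, valid, test), transformers = dc.molnet.load_uspto(
#         featurizer="plain",   # skip tokenization, keep raw reaction SMILES
#         splitter="random",
#         subset="50K",
#         sep_reagent=True)
#     print(train.X.shape)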
<file_sep># -*- coding: utf-8 -*-
"""
Join sweetfda and aacttox data
@author <NAME>
"""
import pandas as pd
##############################################################################
### build and save combined clintox dataset
##############################################################################
### load datasets
# load sweetfda
sweetfda_fn = 'sweetfda/sweetfda_approved_processed.csv'
sweetfda_df = pd.read_csv(sweetfda_fn, index_col=False, na_filter=False,
delimiter=',', lineterminator='\n')
# load aact
aact_fn = 'aacttox/aacttox_phase_multiclass.csv'
aact_df = pd.read_csv(aact_fn, index_col=False, na_filter=False,
delimiter=',', lineterminator='\n')
### fixup smiles for matching
def convert_smiles(s):
"""
convert smiles to a common format
"""
#return smiles
s = list(s)
s = [_.lower() for _ in s]
s = [_.replace('=', '') for _ in s]
return pd.Series(s)
# map original smiles to converted smiles
sweetfda_smiles = list(sweetfda_df['smiles'])
sweetfda_df['smiles'] = convert_smiles(sweetfda_df['smiles'])
sweetfda_smiles_map = dict(zip(list(sweetfda_df['smiles']), sweetfda_smiles))
aact_smiles = list(aact_df['smiles'])
aact_df['smiles'] = convert_smiles(aact_df['smiles'])
aact_smiles_map = dict(zip(list(aact_df['smiles']), aact_smiles))
### join dataframes, index on smiles
sweetfda_df.set_index('smiles', inplace=True)
aact_df.set_index('smiles', inplace=True)
df_join = sweetfda_df.join(aact_df, how='outer')
# map original smiles back
index_smiles = list(df_join.index)
for idx, smiles in enumerate(index_smiles):
if smiles in aact_smiles_map:
index_smiles[idx] = aact_smiles_map[smiles]
elif smiles in sweetfda_smiles_map:
index_smiles[idx] = sweetfda_smiles_map[smiles]
df_join.index = pd.Series(index_smiles)
### fill all nan with 0
df_join.fillna('0', inplace=True)
### construct datasets
datasets = [[], [], [], [], [], []]
for smiles in df_join.index:
def check_dtype(d):
"""
Convert a numeric value to str(int(value)); NaN maps to '' and non-numeric values fall back to str(value).
"""
if isinstance(d, pd.Series):
d = list(set(d))[0]
try:
d = str(float(d))
return '' if str(d).lower() == 'nan' else str(int(float(d)))
except (TypeError, ValueError):
return str(d)
fda = cto = check_dtype(df_join.FDA_APPROVED[smiles])
ct_tox = check_dtype(df_join.CT_TOX[smiles])
ct_tox_phase = check_dtype(df_join.CT_TOX_PHASE[smiles])
fda_tox = str(fda) if fda == ct_tox else ''
if not len(fda):
fda = cto = str(1 - int(ct_tox))
if not len(ct_tox):
ct_tox = str(1 - int(fda))
if not len(ct_tox_phase):
ct_tox_phase = '' if int(ct_tox) else '0'
ct_tox_phases = ['0', '0', '0', '0']
if ct_tox_phase.isdigit() and int(ct_tox_phase) > 0:
for phase, _ in enumerate(ct_tox_phases, start=1):
if phase >= int(ct_tox_phase):
ct_tox_phases[phase-1] = str(ct_tox)
print('\t'.join(["==>", fda, ct_tox, cto, ct_tox_phase,
'|'.join(ct_tox_phases), smiles]))
# store in new datasets
datasets[0].append([smiles, fda, ct_tox])
datasets[1].append([smiles, fda, ct_tox, ct_tox_phase])
datasets[2].append([smiles, cto])
datasets[3].append([smiles, cto, fda_tox])
datasets[4].append([smiles, fda, ct_tox] + ct_tox_phases)
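# Hedged worked example of the label logic above (illustrative values only):
# a compound found only in aacttox with CT_TOX='1' and CT_TOX_PHASE='2'
# ends up with FDA_APPROVED='0' and phase flags ['0', '1', '1', '1'],
# since phases 2, 3 and 4 are all >= CT_TOX_PHASE.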
### save datasets
fout = "clintox.csv"
cols = ['smiles', 'FDA_APPROVED', 'CT_TOX']
pd.DataFrame(datasets[0], columns=cols).to_csv(fout, index=False)
#fout = "aacttox_sweetfda_phase_multiclass.csv"
#cols = ['smiles', 'FDA_APPROVED', 'CT_TOX','CT_TOX_PHASE']
#pd.DataFrame(datasets[1], columns=cols).to_csv(fout, index=False)
#fout = "aacttox_sweetfda_cto_singletask.csv"
#cols = ['smiles', 'CLINICAL_TRIAL_OUTCOME']
#pd.DataFrame(datasets[2], columns=cols).to_csv(fout, index=False)
#fout = "aacttox_sweetfda_cto_fdatox.csv"
#cols = ['smiles', 'CLINICAL_TRIAL_OUTCOME', 'FDA_APPROVED_TOX']
#pd.DataFrame(datasets[3], columns=cols).to_csv(fout, index=False)
#fout = "aacttox_sweetfda_phase_multitask.csv"
#cols = ['smiles', 'FDA_APPROVED', 'CT_TOX',
# 'CT_TOX_PHASE_1', 'CT_TOX_PHASE_2',
# 'CT_TOX_PHASE_3', 'CT_TOX_PHASE_4']
#pd.DataFrame(datasets[4], columns=cols).to_csv(fout, index=False)
<file_sep># DeepCrystal Technologies 2017 - <NAME>
# MIT License - have fun!!
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
import os
import numpy as np
np.random.seed(123)
from sklearn.ensemble import RandomForestRegressor
from sklearn import svm
import tensorflow as tf
tf.random.set_seed(123)
import deepchem as dc
from deepchem.models.graph_models import GraphConvModel
BATCH_SIZE = 128
# Set to higher values to get better numbers
MAX_EPOCH = 1
LR = 1e-3
LMBDA = 1e-4
def retrieve_datasets():
os.system(
'wget -c %s' %
'https://s3-us-west-1.amazonaws.com/deep-crystal-california/az_logd.csv')
os.system(
'wget -c %s' %
'https://s3-us-west-1.amazonaws.com/deep-crystal-california/az_hppb.csv')
os.system(
'wget -c %s' %
'https://s3-us-west-1.amazonaws.com/deep-crystal-california/az_clearance.csv'
)
def load_dataset(dataset_file, featurizer='ECFP', split='index'):
tasks = ['exp']
if featurizer == 'ECFP':
featurizer = dc.feat.CircularFingerprint(size=1024)
elif featurizer == 'GraphConv':
featurizer = dc.feat.ConvMolFeaturizer()
loader = dc.data.CSVLoader(
tasks=tasks, smiles_field="smiles", featurizer=featurizer)
dataset = loader.featurize(dataset_file, shard_size=8192)
transformers = [
dc.trans.NormalizationTransformer(transform_y=True, dataset=dataset)
]
for transformer in transformers:
dataset = transformer.transform(dataset)
splitters = {
'index': dc.splits.IndexSplitter(),
'random': dc.splits.RandomSplitter(),
'scaffold': dc.splits.ScaffoldSplitter()
}
splitter = splitters[split]
train, valid, test = splitter.train_valid_test_split(dataset)
return tasks, (train, valid, test), transformers
def experiment(dataset_file, method='GraphConv', split='scaffold'):
featurizer = 'ECFP'
if method == 'GraphConv':
featurizer = 'GraphConv'
tasks, datasets, transformers = load_dataset(
dataset_file, featurizer=featurizer, split=split)
train, val, test = datasets
model = None
if method == 'GraphConv':
model = GraphConvModel(len(tasks), batch_size=BATCH_SIZE, mode="regression")
elif method == 'RF':
def model_builder_rf(model_dir):
sklearn_model = RandomForestRegressor(n_estimators=100)
return dc.models.SklearnModel(sklearn_model, model_dir)
model = dc.models.SingletaskToMultitask(tasks, model_builder_rf)
elif method == 'SVR':
def model_builder_svr(model_dir):
sklearn_model = svm.SVR(kernel='linear')
return dc.models.SklearnModel(sklearn_model, model_dir)
model = dc.models.SingletaskToMultitask(tasks, model_builder_svr)
return model, train, val, test, transformers
#======================================================================
# Run Benchmarks {GC-DNN, SVR, RF}
def main():
print("About to retrieve datasets")
retrieve_datasets()
MODEL = "GraphConv"
SPLIT = "scaffold"
DATASET = "az_hppb.csv"
metric = dc.metrics.Metric(dc.metrics.pearson_r2_score, np.mean)
print("About to build model")
model, train, val, test, transformers = experiment(
DATASET, method=MODEL, split=SPLIT)
if MODEL == 'GraphConv':
print("running GraphConv search")
best_val_score = 0.0
train_score = 0.0
for l in range(0, MAX_EPOCH):
print("epoch %d" % l)
model.fit(train, nb_epoch=1)
latest_train_score = model.evaluate(train, [metric],
transformers)['mean-pearson_r2_score']
latest_val_score = model.evaluate(val, [metric],
transformers)['mean-pearson_r2_score']
if latest_val_score > best_val_score:
best_val_score = latest_val_score
train_score = latest_train_score
print((MODEL, SPLIT, DATASET, train_score, best_val_score))
else:
model.fit(train)
train_score = model.evaluate(train, [metric],
transformers)['mean-pearson_r2_score']
val_score = model.evaluate(val, [metric],
transformers)['mean-pearson_r2_score']
print((MODEL, SPLIT, DATASET, train_score, val_score))
if __name__ == "__main__":
main()
<file_sep>import ftplib
import os
import time
import deepchem
def main():
ftp = ftplib.FTP("ftp.ncbi.nih.gov")
ftp.login("anonymous", "anonymous")
# First download all SDF files. We need these to get smiles
ftp.cwd("/pubchem/Compound/CURRENT-Full/SDF")
data_dir = deepchem.utils.get_data_dir()
sdf_dir = os.path.join(data_dir,"SDF")
if not os.path.exists(sdf_dir):
os.mkdir(sdf_dir)
filelist = ftp.nlst()
existingfiles = os.listdir(sdf_dir)
print("Downloading: {0} SDF files".format(len(filelist)))
i = 0
for filename in filelist:
local_filename = os.path.join(sdf_dir, filename)
if filename in existingfiles or "README" in filename:
i = i + 1
continue
with open(local_filename, 'wb') as file:
ftp.retrbinary('RETR ' + filename, file.write)
i = i + 1
# Next download all Bioassays
ftp.cwd("/pubchem/Bioassay/CSV/Data")
data_dir = deepchem.utils.get_data_dir()
bioassay_dir = os.path.join(data_dir, "Data")
if not os.path.exists(bioassay_dir):
os.mkdir(bioassay_dir)
filelist = ftp.nlst()
existingfiles = os.listdir(bioassay_dir)
print("Downloading: {0} Bioassay files".format(len(filelist)))
i = 0
for filename in filelist:
local_filename = os.path.join(bioassay_dir, filename)
if filename in existingfiles or "README" in filename:
i = i + 1
continue
with open(local_filename, 'wb') as file:
ftp.retrbinary('RETR ' + filename, file.write)
i = i + 1
print("Processed file {0} of {1}".format(i, len(filelist)))
ftp.quit()
if __name__ == "__main__":
main()
<file_sep>"""Derived from https://github.com/mfkasim1/xcnn/blob/f2cb9777da2961ac553f256ecdcca3e314a538ca/xcdnn2/litmodule.py"""
from deepchem.models.dft.scf import XCNNSCF
import torch
from deepchem.models.dft.nnxc import HybridXC
from deepchem.models.losses import Loss, L2Loss
from deepchem.models.torch_models.torch_model import TorchModel
from typing import Tuple, Optional, List, Any
import numpy as np
class DFTXC(torch.nn.Module):
"""
This layer initializes the neural network exchange correlation functional and
the hybrid functional. It is then used to run the Kohn Sham iterations.
Examples
--------
>>> import torch
>>> from deepchem.feat.dft_data import DFTEntry
>>> from deepchem.models.dft.dftxc import DFTXC
>>> e_type = 'ie'
>>> true_val = '0.53411947056'
>>> systems = [{'moldesc': 'N 0 0 0',
...             'basis': '6-311++G(3df,3pd)',
...             'spin': '3'},
...            {'moldesc': 'N 0 0 0',
...             'basis': '6-311++G(3df,3pd)',
...             'charge': 1,
...             'spin': '2'}]
>>> entry = DFTEntry.create(e_type, true_val, systems)
>>> nnmodel = _construct_nn_model(input_size=2, hidden_size=10, n_layers=1, modeltype=1).to(torch.double)
>>> model = DFTXC("lda_x", nnmodel)
>>> output = model([entry])
"""
def __init__(self, xcstr: str, nnmodel: torch.nn.Module):
"""
Parameters
----------
xcstr: str
The choice of xc to use. Some of the commonly used ones are:
lda_x, lda_c_pw, lda_c_ow, lda_c_pz, lda_xc_lp_a, lda_xc_lp_b.
nnmodel: torch.nn.Module
the PyTorch model implementing the calculation
Notes
-----
It is not necessary to use the default method (_construct_nn_model) with the XCModel.
"""
super(DFTXC, self).__init__()
self.xcstr = xcstr
self.nnmodel = nnmodel
def forward(self, inputs):
"""
Parameters
----------
inputs: list
list of entry objects that have been defined using DFTEntry
Returns
-------
output: list of torch.Tensor
Calculated value of the data point after running the Kohn Sham iterations
using the neural network XC functional.
"""
hybridxc = HybridXC(self.xcstr, self.nnmodel, aweight0=0.0)
output = []
for entry in inputs:
evl = XCNNSCF(hybridxc, entry)
qcs = []
for system in entry.get_systems():
qcs.append(evl.run(system))
if entry.entry_type == 'dm':
output.append((torch.as_tensor(entry.get_val(qcs)[0])))
else:
output.append(
torch.tensor(entry.get_val(qcs), requires_grad=True))
return output
class XCModel(TorchModel):
"""
This class is used to initialize and run Differentiable Quantum Chemistry (i.e,
DFT) calculations, using an exchange correlation functional that has been replaced
by a neural network. This model is based on the paper "Learning the exchange-correlation
functional from nature with fully differentiable density functional
theory." and is listed below for reference.
To read more about Density Functional Theory and the exchange
correlation functional please check the references below.
Examples
--------
>>> from deepchem.models.dft.dftxc import XCModel
>>> from deepchem.data.data_loader import DFTYamlLoader
>>> inputs = 'deepchem/models/tests/assets/test_dftxcdata.yaml'
>>> data = DFTYamlLoader()
>>> dataset = data.create_dataset(inputs)
>>> dataset.get_shape()
>>> model = XCModel("lda_x", batch_size=1)
>>> loss = model.fit(dataset, nb_epoch=1, checkpoint_interval=1)
Notes
-----
There are 4 types of DFT data object implementations that are used to determine the type
of calculation to be carried out on the entry object. These types are: "ae", "ie", "dm", "dens", that stand for atomization energy, ionization energy, density matrix and
density profile respectively.
The entry type "Density Matrix" cannot be used on model.evaluate as of now.
To run predictions on this data type, a dataset containing only "dm" entries must
be used.
References
----------
https://github.com/deepchem/deepchem/blob/3f06168a6c9c16fd90cde7f5246b94f484ea3890/deepchem/models/dft/nnxc.py
Encyclopedia of Condensed Matter Physics, 2005.
Kasim, <NAME>., and <NAME>. "Learning the exchange-correlation
functional from nature with fully differentiable density functional
theory." Physical Review Letters 127.12 (2021): 126403.
"""
def __init__(self,
xcstr: str,
nnmodel: Optional[torch.nn.Module] = None,
input_size: int = 2,
hidden_size: int = 10,
n_layers: int = 1,
modeltype: int = 1,
n_tasks: int = 0,
log_frequency: int = 0,
mode: str = 'classification',
device: Optional[torch.device] = None,
**kwargs) -> None:
"""
Parameters
----------
xcstr: str
The choice of xc to use.
nnmodel: torch.nn.Module
the PyTorch model implementing the calculation
input_size: int
size of neural network input
hidden_size: int
size of the hidden layers ; the number of hidden layers is fixed
in the default method.
n_layers: int
number of layers in the neural network
modeltype: int
model type 2 includes an activation layer whereas type 1 does not.
"""
if nnmodel is None:
nnmodel = _construct_nn_model(input_size, hidden_size, n_layers,
modeltype).to(torch.double)
model = (DFTXC(xcstr, nnmodel)).to(device)
self.xc = xcstr
loss: Loss = L2Loss()
output_types = ['loss', 'predict']
self.mode = mode
super(XCModel, self).__init__(model,
loss=loss,
output_types=output_types,
**kwargs)
def _prepare_batch(
self,
batch) -> Tuple[List[Any], List[torch.Tensor], List[torch.Tensor]]:
"""
Method to compute inputs, labels and weight for the Torch Model.
Parameters
----------
batch: Tuple[Any, Any, Any]
Returns
------
Tuple[List[torch.Tensor], List[torch.Tensor], List[torch.Tensor]]
"""
inputs, labels, weights = batch
if labels is not None:
labels = [
x.astype(np.float32) if x.dtype == np.float64 else x
for x in labels
]
label_tensors = [
torch.as_tensor(x, dtype=torch.float64,
device=self.device).requires_grad_()
for x in labels
]
else:
label_tensors = []
if weights is not None:
weights = [
x.astype(np.float32) if x.dtype == np.float64 else x
for x in weights
]
weight_tensors = [
torch.as_tensor(x, dtype=torch.float64, device=self.device)
for x in weights
]
else:
weight_tensors = []
return (inputs, label_tensors, weight_tensors)
class ExpM1Activation(torch.nn.Module):
"""
This class is an activation layer that is used with model_type 2.
Examples
--------
>>> from deepchem.models.dft.dftxc import ExpM1Activation
>>> import torch
>>> model = ExpM1Activation()
>>> x = torch.tensor(2.5)
>>> output = model(x)
"""
def forward(self, x: torch.Tensor) -> torch.Tensor:
return torch.exp(x) - 1
def _construct_nn_model(input_size: int, hidden_size: int, n_layers: int,
modeltype: int):
"""
Constructs Neural Network
Parameters
----------
input_size: int
size of neural network input
hidden_size: int
size of the hidden layers ; there are 3 hidden layers in this method
n_layers: int
number of layers in the neural network
modeltype: int
model type 2 includes an activation layer whereas type 1 does not.
Returns
-------
torch.nn.Sequential(*layers)
Notes
-----
It is not necessary to use this method with the XCModel, user defined pytorch
models will work.
"""
if modeltype == 1:
layers: List[Any]
layers = []
for i in range(n_layers):
n1 = input_size if i == 0 else hidden_size
layers.append(torch.nn.Linear(n1, hidden_size))
layers.append(torch.nn.Softplus())
layers.append(torch.nn.Linear(hidden_size, 1, bias=False))
return torch.nn.Sequential(*layers)
elif modeltype == 2:
layers = []
for i in range(n_layers):
n1 = input_size if i == 0 else hidden_size
layers.append(torch.nn.Linear(n1, hidden_size))
if i < n_layers - 1:
layers.append(torch.nn.Softplus())
else:
layers.append(ExpM1Activation())
layers.append(torch.nn.Linear(hidden_size, 1, bias=False))
return torch.nn.Sequential(*layers)
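# Hedged sketch (an assumption, not part of the shipped API): as noted in the
# docstrings above, a user-defined PyTorch module can replace the default
# network built by _construct_nn_model. The commented example below builds a
# small custom functional network with the default two-feature input size.
#
#     import torch
#     custom_nn = torch.nn.Sequential(
#         torch.nn.Linear(2, 16),
#         torch.nn.Softplus(),
#         torch.nn.Linear(16, 1, bias=False),
#     ).to(torch.double)
#     model = XCModel("lda_x", nnmodel=custom_nn, batch_size=1)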
<file_sep>"""
Script that trains graph-conv models on Tox21 dataset.
"""
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
import numpy as np
from deepchem.models import GraphConvModel
np.random.seed(123)
import tensorflow as tf
tf.random.set_seed(123)
import deepchem as dc
from deepchem.molnet import load_delaney
# Load Delaney dataset
delaney_tasks, delaney_datasets, transformers = load_delaney(
featurizer='GraphConv', split='index')
train_dataset, valid_dataset, test_dataset = delaney_datasets
# Fit models
metric = dc.metrics.Metric(dc.metrics.pearson_r2_score, np.mean)
# Do setup required for tf/keras models
# Number of features on conv-mols
n_feat = 75
# Batch size of models
batch_size = 128
model = GraphConvModel(
len(delaney_tasks), batch_size=batch_size, mode='regression')
# Fit trained model
model.fit(train_dataset, nb_epoch=20)
print("Evaluating model")
train_scores = model.evaluate(train_dataset, [metric], transformers)
valid_scores = model.evaluate(valid_dataset, [metric], transformers)
print("Train scores")
print(train_scores)
print("Validation scores")
print(valid_scores)
<file_sep>"""
Tests for JsonLoader class.
"""
import os
import numpy as np
from deepchem.data.data_loader import JsonLoader
from deepchem.feat import SineCoulombMatrix
def test_json_loader():
current_dir = os.path.dirname(os.path.abspath(__file__))
input_file = os.path.join(current_dir, 'inorganic_crystal_sample_data.json')
featurizer = SineCoulombMatrix(max_atoms=5)
loader = JsonLoader(tasks=['e_form'],
feature_field='structure',
id_field='formula',
label_field='e_form',
featurizer=featurizer)
dataset = loader.create_dataset(input_file, shard_size=1)
a = [4625.32086965, 6585.20209678, 61.00680193, 48.72230922, 48.72230922]
assert dataset.X.shape == (5, 5)
assert np.allclose(dataset.X[0], a, atol=.5)
dataset = loader.create_dataset(input_file, shard_size=None)
assert dataset.X.shape == (5, 5)
dataset = loader.create_dataset([input_file, input_file], shard_size=5)
assert dataset.X.shape == (10, 5)
<file_sep># Requriments - transformers, tokenizers
# Right now, the Smiles Tokenizer uses an exiesting vocab file from rxnfp that is fairly comprehensive and from the USPTO dataset.
# The vocab may be expanded in the near future
import collections
import os
import re
import pkg_resources
from typing import List, Optional
from transformers import BertTokenizer
from logging import getLogger
logger = getLogger(__name__)
"""
SMI_REGEX_PATTERN: str
SMILES regex pattern for tokenization. Designed by Schwaller et. al.
References
----------
.. [1] <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, and <NAME>
ACS Central Science 2019 5 (9): Molecular Transformer: A Model for Uncertainty-Calibrated Chemical Reaction Prediction
1572-1583 DOI: 10.1021/acscentsci.9b00576
"""
SMI_REGEX_PATTERN = r"""(\[[^\]]+]|Br?|Cl?|N|O|S|P|F|I|b|c|n|o|s|p|\(|\)|\.|=|#|-|\+|\\|\/|:|~|@|\?|>>?|\*|\$|\%[0-9]{2}|[0-9])"""
# add vocab_file dict
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}
def get_default_tokenizer():
default_vocab_path = (pkg_resources.resource_filename(
"deepchem", "feat/tests/vocab.txt"))
return SmilesTokenizer(default_vocab_path)
class SmilesTokenizer(BertTokenizer):
"""
Creates the SmilesTokenizer class. The tokenizer heavily inherits from the BertTokenizer
implementation found in Huggingface's transformers library. It runs a WordPiece tokenization
algorithm over SMILES strings using the tokenisation SMILES regex developed by Schwaller et. al.
Please see https://github.com/huggingface/transformers
and https://github.com/rxn4chemistry/rxnfp for more details.
Examples
--------
>>> import os
>>> from deepchem.feat.smiles_tokenizer import SmilesTokenizer
>>> current_dir = os.path.dirname(os.path.realpath(__file__))
>>> vocab_path = os.path.join(current_dir, 'tests/data', 'vocab.txt')
>>> tokenizer = SmilesTokenizer(vocab_path)
>>> print(tokenizer.encode("CC(=O)OC1=CC=CC=C1C(=O)O"))
[12, 16, 16, 17, 22, 19, 18, 19, 16, 20, 22, 16, 16, 22, 16, 16, 22, 16, 20, 16, 17, 22, 19, 18, 19, 13]
References
----------
.. [1] <NAME>; <NAME>; <NAME>.; <NAME>; <NAME>;
<NAME>; et al. (2019): Mapping the Space of Chemical Reactions using Attention-Based Neural
Networks. ChemRxiv. Preprint. https://doi.org/10.26434/chemrxiv.9897365.v3
Note
----
This class requires huggingface's transformers and tokenizers libraries to be installed.
"""
vocab_files_names = VOCAB_FILES_NAMES
def __init__(
self,
vocab_file: str = '',
# unk_token="[UNK]",
# sep_token="[SEP]",
# pad_token="[PAD]",
# cls_token="[CLS]",
# mask_token="[MASK]",
**kwargs):
"""Constructs a SmilesTokenizer.
Parameters
----------
vocab_file: str
Path to a SMILES character per line vocabulary file.
Default vocab file is found in deepchem/feat/tests/data/vocab.txt
"""
super().__init__(vocab_file, **kwargs)
if not os.path.isfile(vocab_file):
raise ValueError(
"Can't find a vocab file at path '{}'.".format(vocab_file))
self.vocab = load_vocab(vocab_file)
self.highest_unused_index = max([
i for i, v in enumerate(self.vocab.keys())
if v.startswith("[unused")
])
self.ids_to_tokens = collections.OrderedDict([
(ids, tok) for tok, ids in self.vocab.items()
])
self.basic_tokenizer = BasicSmilesTokenizer()
@property
def vocab_size(self):
return len(self.vocab)
@property
def vocab_list(self):
return list(self.vocab.keys())
def _tokenize(self, text: str, max_seq_length: int = 512, **kwargs):
"""Tokenize a string into a list of tokens.
Parameters
----------
text: str
Input string sequence to be tokenized.
"""
max_len_single_sentence = max_seq_length - 2
split_tokens = [
token for token in self.basic_tokenizer.tokenize(text)
[:max_len_single_sentence]
]
return split_tokens
def _convert_token_to_id(self, token: str):
"""Converts a token (str/unicode) in an id using the vocab.
Parameters
----------
token: str
String token from a larger sequence to be converted to a numerical id.
"""
return self.vocab.get(token, self.vocab.get(self.unk_token))
def _convert_id_to_token(self, index: int):
"""Converts an index (integer) in a token (string/unicode) using the vocab.
Parameters
----------
index: int
Integer index to be converted back to a string-based token as part of a larger sequence.
"""
return self.ids_to_tokens.get(index, self.unk_token)
def convert_tokens_to_string(self, tokens: List[str]):
"""Converts a sequence of tokens (string) in a single string.
Parameters
----------
tokens: List[str]
List of tokens for a given string sequence.
Returns
-------
out_string: str
Single string from combined tokens.
"""
out_string: str = " ".join(tokens).replace(" ##", "").strip()
return out_string
def add_special_tokens_ids_single_sequence(self,
token_ids: List[Optional[int]]):
"""Adds special tokens to the a sequence for sequence classification tasks.
A BERT sequence has the following format: [CLS] X [SEP]
Parameters
----------
token_ids: list[int]
list of tokenized input ids. Can be obtained using the encode or encode_plus methods.
"""
return [self.cls_token_id] + token_ids + [self.sep_token_id]
def add_special_tokens_single_sequence(self, tokens: List[str]):
"""Adds special tokens to the a sequence for sequence classification tasks.
A BERT sequence has the following format: [CLS] X [SEP]
Parameters
----------
tokens: List[str]
List of tokens for a given string sequence.
"""
return [self.cls_token] + tokens + [self.sep_token]
def add_special_tokens_ids_sequence_pair(
self, token_ids_0: List[Optional[int]],
token_ids_1: List[Optional[int]]) -> List[Optional[int]]:
"""Adds special tokens to a sequence pair for sequence classification tasks.
A BERT sequence pair has the following format: [CLS] A [SEP] B [SEP]
Parameters
----------
token_ids_0: List[int]
List of ids for the first string sequence in the sequence pair (A).
token_ids_1: List[int]
List of tokens for the second string sequence in the sequence pair (B).
"""
sep = [self.sep_token_id]
cls = [self.cls_token_id]
return cls + token_ids_0 + sep + token_ids_1 + sep
def add_padding_tokens(self,
token_ids: List[Optional[int]],
length: int,
right: bool = True) -> List[Optional[int]]:
"""Adds padding tokens to return a sequence of length max_length.
By default padding tokens are added to the right of the sequence.
Parameters
----------
token_ids: list[optional[int]]
list of tokenized input ids. Can be obtained using the encode or encode_plus methods.
length: int
the target total length of the returned sequence.
right: bool, default True
if True, padding tokens are appended to the right of the sequence; otherwise they are prepended.
Returns
-------
List[int]
the token ids padded with pad_token_id up to the requested length.
"""
padding = [self.pad_token_id] * (length - len(token_ids))
if right:
return token_ids + padding
else:
return padding + token_ids
def save_vocabulary(
self,
save_directory: str,
filename_prefix: Optional[str] = None
): # -> Tuple[str]: doctest issue raised with this return type annotation
"""Save the tokenizer vocabulary to a file.
Parameters
----------
save_directory: str
The directory in which to save the SMILES character per line vocabulary file.
Default vocab file is found in deepchem/feat/tests/data/vocab.txt
Returns
-------
vocab_file: Tuple
Paths to the files saved.
tuple with the path to the saved SMILES character per line vocabulary file.
Default vocab file is found in deepchem/feat/tests/data/vocab.txt
"""
index = 0
if os.path.isdir(save_directory):
vocab_file = os.path.join(save_directory,
VOCAB_FILES_NAMES["vocab_file"])
else:
vocab_file = save_directory
with open(vocab_file, "w", encoding="utf-8") as writer:
for token, token_index in sorted(self.vocab.items(),
key=lambda kv: kv[1]):
if index != token_index:
logger.warning(
"Saving vocabulary to {}: vocabulary indices are not consecutive."
" Please check that the vocabulary is not corrupted!".
format(vocab_file))
index = token_index
writer.write(token + "\n")
index += 1
return (vocab_file,)
class BasicSmilesTokenizer(object):
"""
Run basic SMILES tokenization using a regex pattern developed by Schwaller et. al.
Use this tokenizer when a tokenizer that does not depend on HuggingFace's transformers library is required.
Examples
--------
>>> from deepchem.feat.smiles_tokenizer import BasicSmilesTokenizer
>>> tokenizer = BasicSmilesTokenizer()
>>> print(tokenizer.tokenize("CC(=O)OC1=CC=CC=C1C(=O)O"))
['C', 'C', '(', '=', 'O', ')', 'O', 'C', '1', '=', 'C', 'C', '=', 'C', 'C', '=', 'C', '1', 'C', '(', '=', 'O', ')', 'O']
References
----------
.. [1] <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, and <NAME>
ACS Central Science 2019 5 (9): Molecular Transformer: A Model for Uncertainty-Calibrated Chemical Reaction Prediction
1572-1583 DOI: 10.1021/acscentsci.9b00576
"""
def __init__(self, regex_pattern: str = SMI_REGEX_PATTERN):
"""Constructs a BasicSMILESTokenizer.
Parameters
----------
regex_pattern: str
SMILES token regex pattern
"""
self.regex_pattern = regex_pattern
self.regex = re.compile(self.regex_pattern)
def tokenize(self, text):
"""Basic Tokenization of a SMILES.
"""
tokens = [token for token in self.regex.findall(text)]
return tokens
def load_vocab(vocab_file):
"""Loads a vocabulary file into a dictionary."""
vocab = collections.OrderedDict()
with open(vocab_file, "r", encoding="utf-8") as reader:
tokens = reader.readlines()
for index, token in enumerate(tokens):
token = token.rstrip("\n")
vocab[token] = index
return vocab
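# Hedged usage sketch (illustrative only; the vocab path and target length
# below are assumptions): combining the helpers above to produce a padded,
# special-token-wrapped id sequence of fixed length.
#
#     tokenizer = SmilesTokenizer("path/to/vocab.txt")       # hypothetical path
#     ids = tokenizer.encode("CC(=O)OC1=CC=CC=C1C(=O)O")     # adds [CLS]/[SEP]
#     padded = tokenizer.add_padding_tokens(ids, length=64)  # right-pad to 64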
<file_sep>"""
Gathers all models in one place for convenient imports
"""
# flake8: noqa
import logging
from deepchem.models.models import Model
from deepchem.models.multitask import SingletaskToMultitask
from deepchem.models.wandblogger import WandbLogger
from deepchem.models.callbacks import ValidationCallback
logger = logging.getLogger(__name__)
# Tensorflow Dependency Models
try:
from deepchem.models.keras_model import KerasModel
from deepchem.models.IRV import MultitaskIRVClassifier
from deepchem.models.robust_multitask import RobustMultitaskClassifier
from deepchem.models.robust_multitask import RobustMultitaskRegressor
from deepchem.models.progressive_multitask import ProgressiveMultitaskRegressor, ProgressiveMultitaskClassifier
from deepchem.models.graph_models import WeaveModel, DTNNModel, DAGModel, GraphConvModel, MPNNModel
from deepchem.models.scscore import ScScoreModel
from deepchem.models.seqtoseq import SeqToSeq
from deepchem.models.gan import GAN, WGAN
from deepchem.models.molgan import BasicMolGANModel
from deepchem.models.text_cnn import TextCNNModel
from deepchem.models.atomic_conv import AtomicConvModel
from deepchem.models.chemnet_models import Smiles2Vec, ChemCeption
except ModuleNotFoundError as e:
logger.warning(
f'Skipped loading some Tensorflow models, missing a dependency. {e}')
# scikit-learn model
from deepchem.models.sklearn_models import SklearnModel
from deepchem.models.gbdt_models import GBDTModel
# PyTorch models
try:
from deepchem.models.torch_models import TorchModel
from deepchem.models.torch_models import AttentiveFP, AttentiveFPModel
from deepchem.models.torch_models import CGCNN, CGCNNModel
from deepchem.models.torch_models import GAT, GATModel
from deepchem.models.torch_models import GCN, GCNModel
from deepchem.models.torch_models import LCNN, LCNNModel
from deepchem.models.torch_models import Pagtn, PagtnModel
from deepchem.models.fcnet import MultitaskRegressor, MultitaskClassifier, MultitaskFitTransformRegressor
from deepchem.models.torch_models import MEGNetModel
from deepchem.models.torch_models import CNN
from deepchem.models.torch_models import ScaledDotProductAttention, SelfAttention
from deepchem.models.torch_models import GroverReadout
except ModuleNotFoundError as e:
logger.warning(
f'Skipped loading some PyTorch models, missing a dependency. {e}')
try:
from deepchem.models.torch_models import HuggingFaceModel
from deepchem.models.torch_models import Chemberta
except ImportError as e:
logger.warning(e)
# Pytorch models with torch-geometric dependency
try:
# TODO We should clean up DMPNN and remove torch_geometric dependency during import
from deepchem.models.torch_models import DMPNN, DMPNNModel, GNNModular
except ImportError as e:
logger.warning(
f'Skipped loading modules with pytorch-geometric dependency, missing a dependency. {e}'
)
# Pytorch-lightning modules import
try:
from deepchem.models.lightning import DCLightningModule, DCLightningDatasetModule
except ModuleNotFoundError as e:
logger.warning(
f'Skipped loading modules with pytorch-lightning dependency, missing a dependency. {e}'
)
# Jax models
try:
from deepchem.models.jax_models import JaxModel
from deepchem.models.jax_models import PINNModel
except ModuleNotFoundError as e:
logger.warning(
f'Skipped loading some Jax models, missing a dependency. {e}')
#####################################################################################
# Compatibility imports for renamed XGBoost models. Remove below with DeepChem 3.0.
#####################################################################################
from deepchem.models.gbdt_models.gbdt_model import XGBoostModel
########################################################################################
# Compatibility imports for renamed TensorGraph models. Remove below with DeepChem 3.0.
########################################################################################
try:
from deepchem.models.text_cnn import TextCNNTensorGraph
from deepchem.models.graph_models import WeaveTensorGraph, DTNNTensorGraph, DAGTensorGraph, GraphConvTensorGraph, MPNNTensorGraph
from deepchem.models.IRV import TensorflowMultitaskIRVClassifier
except ModuleNotFoundError:
pass
<file_sep>import deepchem as dc
import numpy as np
def test_disk_generative_dataset():
"""Test for a hypothetical generative dataset."""
X = np.random.rand(100, 10, 10)
y = np.random.rand(100, 10, 10)
dataset = dc.data.DiskDataset.from_numpy(X, y)
assert (dataset.X == X).all()
assert (dataset.y == y).all()
def test_numpy_generative_dataset():
"""Test for a hypothetical generative dataset."""
X = np.random.rand(100, 10, 10)
y = np.random.rand(100, 10, 10)
dataset = dc.data.NumpyDataset(X, y)
assert (dataset.X == X).all()
assert (dataset.y == y).all()
<file_sep>import deepchem as dc
from deepchem.models import GCNModel
from deepchem.models.lightning.dc_lightning_module import DCLightningModule
from deepchem.models.lightning.dc_lightning_dataset_module import DCLightningDatasetModule, collate_dataset_wrapper
from deepchem.feat import MolGraphConvFeaturizer
import pytorch_lightning as pl
import argparse
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--device",
type=str,
required=True,
choices=["cpu", 'gpu'])
args = parser.parse_args()
featurizer = MolGraphConvFeaturizer()
tasks, datasets, transformers = dc.molnet.load_zinc15(featurizer=featurizer)
_, valid_dataset, test_dataset = datasets
n_tasks = len(tasks)
model = GCNModel(graph_conv_layers=[1024, 1024, 1024, 512, 512, 512],
mode='regression',
n_tasks=n_tasks,
number_atom_features=30,
batch_size=1024,
learning_rate=0.001)
gcnmodule = DCLightningModule(model)
smiles_datasetmodule = DCLightningDatasetModule(valid_dataset, 1024,
collate_dataset_wrapper)
trainer = pl.Trainer(
max_epochs=1,
profiler="simple",
devices=1,
accelerator=args.device,
)
trainer.fit(gcnmodule, smiles_datasetmodule)
if __name__ == "__main__":
main()
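# Hedged invocation sketch (the script's filename is not shown here, so the
# name below is a placeholder):
#
#     python zinc15_gcn_lightning.py --device cpu
#     python zinc15_gcn_lightning.py --device gpu   # with a single GPU available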
<file_sep>import time
import logging
import os
from collections.abc import Sequence as SequenceCollection
from typing import Any, Callable, Iterable, List, Optional, Tuple, Union, Sequence
import torch
import torch.nn as nn
from deepchem.models.torch_models.torch_model import TorchModel
from deepchem.models.optimizers import LearningRateSchedule
from deepchem.utils.typing import LossFn, OneOrMany
logger = logging.getLogger(__name__)
class ModularTorchModel(TorchModel):
"""ModularTorchModel is a subclass of TorchModel that allows for components to be
pretrained and then combined into a final model. It is designed to be subclassed
for specific models and is not intended to be used directly. There are 3 main differences
between ModularTorchModel and TorchModel:
- The build_components() method is used to define the components of the model.
- The components are combined into a final model with the build_model() method.
- The loss function is defined with the loss_func method. This may access the
components to compute the loss using intermediate values from the network, rather
than just the full forward pass output.
Here is an example of how to use ModularTorchModel to pretrain a linear layer, load
it into another network and then finetune that network:
>>> import numpy as np
>>> import deepchem as dc
>>> import torch
>>> n_samples = 6
>>> n_feat = 3
>>> n_hidden = 2
>>> n_tasks = 6
>>> pt_tasks = 3
>>> X = np.random.rand(n_samples, n_feat)
>>> y_pretrain = np.zeros((n_samples, pt_tasks)).astype(np.float32)
>>> dataset_pt = dc.data.NumpyDataset(X, y_pretrain)
>>> y_finetune = np.zeros((n_samples, n_tasks)).astype(np.float32)
>>> dataset_ft = dc.data.NumpyDataset(X, y_finetune)
>>> components = {'linear': torch.nn.Linear(n_feat, n_hidden),
... 'activation': torch.nn.ReLU(), 'head': torch.nn.Linear(n_hidden, n_tasks)}
>>> model = torch.nn.Sequential(components['linear'], components['activation'],
... components['head'])
>>> modular_model = dc.models.torch_models.modular.ModularTorchModel(model, components)
>>> def example_loss_func(inputs, labels, weights):
... return (torch.nn.functional.mse_loss(model(inputs), labels[0]) * weights[0]).mean()
>>> modular_model.loss_func = example_loss_func
>>> def example_model_build():
... return torch.nn.Sequential(components['linear'], components['activation'],
... components['head'])
>>> modular_model.build_model = example_model_build
>>> pretrain_components = {'linear': torch.nn.Linear(n_feat, n_hidden),
... 'activation': torch.nn.ReLU(), 'head': torch.nn.Linear(n_hidden, pt_tasks)}
>>> pretrain_model = torch.nn.Sequential(pretrain_components['linear'],
... pretrain_components['activation'], pretrain_components['head'])
>>> pretrain_modular_model = dc.models.torch_models.modular.ModularTorchModel(pretrain_model,
... pretrain_components)
>>> def example_pt_loss_func(inputs, labels, weights):
... return (torch.nn.functional.mse_loss(pretrain_model(inputs), labels[0]) * weights[0]).mean()
>>> pretrain_modular_model.loss_func = example_pt_loss_func
>>> pt_loss = pretrain_modular_model.fit(dataset_pt, nb_epoch=1)
>>> modular_model.load_from_pretrained(pretrain_modular_model, components=['linear'])
>>> ft_loss = modular_model.fit(dataset_ft, nb_epoch=1)
"""
def __init__(self, model: nn.Module, components: dict, **kwargs):
"""Create a ModularTorchModel.
Parameters
----------
model: nn.Module
The model to be trained.
components: dict
A dictionary of the components of the model. The keys are the names of the
components and the values are the components themselves.
"""
self.model = model
self.components = components
# FIXME self.loss_func is an incorrect argument for TorchModel.loss because
# it performs more than computing loss
super().__init__(self.model, self.loss_func, **kwargs)
self.model.to(self.device)
self.components = {
k: v.to(self.device) if isinstance(v, nn.Module) else v
for k, v in self.components.items()
}
def build_model(self) -> nn.Module:
"""Builds the final model from the components."""
raise NotImplementedError("Subclass must define the components")
def build_components(self) -> dict:
"""Creates the components dictionary, with the keys being the names of the
components and the values being torch.nn.module objects."""
raise NotImplementedError("Subclass must define the components")
def loss_func(self, inputs: OneOrMany[torch.Tensor], labels: Sequence,
weights: Sequence) -> torch.Tensor:
"""Defines the loss function for the model which can access the components
using self.components. The loss function should take the inputs, labels, and
weights as arguments and return the loss."""
raise NotImplementedError("Subclass must define the loss function")
def freeze_components(self, components: List[str]):
"""Freezes or unfreezes the parameters of the specified components.
Components string refers to keys in self.components.
Parameters
----------
components: List[str]
The components to freeze.
"""
for component in components:
for param in self.components[component].parameters():
param.requires_grad = False
def unfreeze_components(self, components: List[str]):
"""Unfreezes the parameters of the specified components.
Components string refers to keys in self.components.
Parameters
----------
components: List[str]
The components to unfreeze.
"""
for component in components:
for param in self.components[component].parameters():
param.requires_grad = True
def fit_generator(self,
generator: Iterable[Tuple[Any, Any, Any]],
max_checkpoints_to_keep: int = 5,
checkpoint_interval: int = 1000,
restore: bool = False,
variables: Optional[List[torch.nn.Parameter]] = None,
loss: Optional[LossFn] = None,
callbacks: Union[Callable, List[Callable]] = [],
all_losses: Optional[List[float]] = None) -> float:
"""Train this model on data from a generator. This method is similar to
the TorchModel implementation, but it passes the inputs directly to the
loss function, rather than passing them through the model first. This
enables the loss to be calculated from intermediate steps of the model
and not just the final output.
Parameters
----------
generator: generator
this should generate batches, each represented as a tuple of the form
(inputs, labels, weights).
max_checkpoints_to_keep: int
the maximum number of checkpoints to keep. Older checkpoints are discarded.
checkpoint_interval: int
the frequency at which to write checkpoints, measured in training steps.
Set this to 0 to disable automatic checkpointing.
restore: bool
if True, restore the model from the most recent checkpoint and continue training
from there. If False, retrain the model from scratch.
variables: list of torch.nn.Parameter
the variables to train. If None (the default), all trainable variables in
the model are used.
loss: function
a function of the form f(outputs, labels, weights) that computes the loss
for each batch. If None (the default), the model's standard loss function
is used.
callbacks: function or list of functions
one or more functions of the form f(model, step) that will be invoked after
every step. This can be used to perform validation, logging, etc.
all_losses: Optional[List[float]], optional (default None)
If specified, all logged losses are appended into this list. Note that
you can call `fit()` repeatedly with the same list and losses will
continue to be appended.
Returns
-------
The average loss over the most recent checkpoint interval
"""
if not isinstance(callbacks, SequenceCollection):
callbacks = [callbacks]
self._ensure_built()
self.model.train()
avg_loss = 0.0
last_avg_loss = 0.0
averaged_batches = 0
# FIXME This line is not needed as loss is computed inside the call to loss_func
if loss is None:
loss = self._loss_fn
if variables is None:
optimizer = self._pytorch_optimizer
lr_schedule = self._lr_schedule
else:
var_key = tuple(variables)
if var_key in self._optimizer_for_vars:
optimizer, lr_schedule = self._optimizer_for_vars[var_key]
else:
optimizer = self.optimizer._create_pytorch_optimizer(variables)
if isinstance(self.optimizer.learning_rate,
LearningRateSchedule):
lr_schedule = self.optimizer.learning_rate._create_pytorch_schedule(
optimizer)
else:
lr_schedule = None
self._optimizer_for_vars[var_key] = (optimizer, lr_schedule)
time1 = time.time()
# Main training loop.
for batch in generator:
if restore:
self.restore()
restore = False
inputs: OneOrMany[torch.Tensor]
inputs, labels, weights = self._prepare_batch(batch)
# Execute the loss function, accumulating the gradients.
if isinstance(inputs, list) and len(inputs) == 1:
inputs = inputs[0]
optimizer.zero_grad()
batch_loss = self.loss_func(inputs, labels, weights)
batch_loss.backward()
optimizer.step()
if lr_schedule is not None:
lr_schedule.step()
self._global_step += 1
current_step = self._global_step
avg_loss += float(batch_loss)
# Report progress and write checkpoints.
averaged_batches += 1
should_log = (current_step % self.log_frequency == 0)
if should_log:
avg_loss = float(avg_loss) / averaged_batches
logger.info('Ending global_step %d: Average loss %g' %
(current_step, avg_loss))
if all_losses is not None:
all_losses.append(avg_loss)
# Capture the last avg_loss in case of return since we're resetting to 0 now
last_avg_loss = avg_loss
avg_loss = 0.0
averaged_batches = 0
if checkpoint_interval > 0 and current_step % checkpoint_interval == checkpoint_interval - 1:
self.save_checkpoint(max_checkpoints_to_keep)
for c in callbacks:
c(self, current_step)
if self.tensorboard and should_log:
self._log_scalar_to_tensorboard('loss', batch_loss,
current_step)
if (self.wandb_logger is not None) and should_log:
all_data = dict({'train/loss': batch_loss})
self.wandb_logger.log_data(all_data, step=current_step)
# Report final results.
if averaged_batches > 0:
avg_loss = float(avg_loss) / averaged_batches
logger.info('Ending global_step %d: Average loss %g' %
(current_step, avg_loss))
if all_losses is not None:
all_losses.append(avg_loss)
last_avg_loss = avg_loss
if checkpoint_interval > 0:
self.save_checkpoint(max_checkpoints_to_keep)
time2 = time.time()
logger.info("TIMING: model fitting took %0.3f s" % (time2 - time1))
return last_avg_loss
def load_from_pretrained( # type: ignore
self,
source_model: Optional["ModularTorchModel"] = None,
components: Optional[List[str]] = None,
checkpoint: Optional[str] = None,
model_dir: Optional[str] = None,
inputs: Optional[Sequence[Any]] = None,
**kwargs) -> None:
"""Copies parameter values from a pretrained model. The pretrained model can be loaded as a source_model (ModularTorchModel object), checkpoint (pytorch .ckpt file) or a model_dir (directory with .ckpt files).
Specific components can be chosen by passing a list of strings with the desired component names. If both a source_model and a checkpoint/model_dir are loaded, the source_model weights will be loaded.
Parameters
----------
source_model: dc.ModularTorchModel, optional (default None)
source_model can either be the pretrained model or a dc.TorchModel with
the same architecture as the pretrained model. It is used to restore from
a checkpoint, if value_map is None and to create a default assignment map
if assignment_map is None
checkpoint: str, default None
the path to the checkpoint file to load. If this is None, the most recent
checkpoint will be chosen automatically. Call get_checkpoints() to get a
list of all available checkpoints
model_dir: str, default None
Restore source model from custom model directory if needed
inputs: List, default None
input tensors for the model; if not None, the weights are built for both the source model and self.
"""
if inputs is not None:
# Ensure weights for both models are built.
if source_model:
source_model.model(inputs)
self.model(inputs)
self._ensure_built()
if source_model is not None:
for name, module in source_model.components.items():
if components is None or name in components:
self.components[name].load_state_dict(module.state_dict())
self.build_model()
elif source_model is None:
self.restore(components=components,
checkpoint=checkpoint,
model_dir=model_dir)
def save_checkpoint(self, max_checkpoints_to_keep=5, model_dir=None):
"""
Saves the current state of the model and its components as a checkpoint file in the specified model directory.
It maintains a maximum number of checkpoint files, deleting the oldest one when the limit is reached.
Parameters
----------
max_checkpoints_to_keep: int, default 5
Maximum number of checkpoint files to keep.
model_dir: str, default None
The directory to save the checkpoint file in. If None, the model_dir specified in the constructor is used.
"""
if model_dir is None:
model_dir = self.model_dir
if not os.path.exists(model_dir):
os.makedirs(model_dir)
data = {
'model': self.model.state_dict(),
'optimizer_state_dict': self._pytorch_optimizer.state_dict(),
'global_step': self._global_step
}
for name, component in self.components.items():
if hasattr(component, 'state_dict'):
data[name] = component.state_dict()
temp_file = os.path.join(model_dir, 'temp_checkpoint.pt')
torch.save(data, temp_file)
# Rename and delete older files.
paths = [
os.path.join(model_dir, 'checkpoint%d.pt' % (i + 1))
for i in range(max_checkpoints_to_keep)
]
if os.path.exists(paths[-1]):
os.remove(paths[-1])
for i in reversed(range(max_checkpoints_to_keep - 1)):
if os.path.exists(paths[i]):
os.rename(paths[i], paths[i + 1])
os.rename(temp_file, paths[0])
def restore( # type: ignore
self,
components: Optional[List[str]] = None,
checkpoint: Optional[str] = None,
model_dir: Optional[str] = None) -> None:
"""
Restores the state of a ModularTorchModel from a checkpoint file.
If no checkpoint file is provided, it will use the latest checkpoint found in the model directory. If a list of component names is provided, only the state of those components will be restored.
Parameters
----------
components: Optional[List[str]]
A list of component names to restore. If None, all components will be restored.
checkpoint: Optional[str]
The path to the checkpoint file. If None, the latest checkpoint in the model directory will
be used.
model_dir: Optional[str]
The path to the model directory. If None, the model directory used to initialize the model will be used.
"""
logger.info('Restoring model')
if checkpoint is None:
checkpoints = sorted(self.get_checkpoints(model_dir))
if len(checkpoints) == 0:
raise ValueError('No checkpoint found')
checkpoint = checkpoints[0]
data = torch.load(checkpoint)
for name, state_dict in data.items():
if name != 'model' and name in self.components.keys():
if components is None or name in components:
self.components[name].load_state_dict(state_dict)
self.build_model()
self._ensure_built()
self._pytorch_optimizer.load_state_dict(data['optimizer_state_dict'])
self._global_step = data['global_step']
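# Hedged usage sketch (an assumption, extending the names from the class
# docstring example above): freezing a pretrained component so that only the
# remaining components are updated during finetuning.
#
#     modular_model.load_from_pretrained(pretrain_modular_model,
#                                        components=['linear'])
#     modular_model.freeze_components(['linear'])    # keep pretrained weights fixed
#     ft_loss = modular_model.fit(dataset_ft, nb_epoch=1)
#     modular_model.unfreeze_components(['linear'])  # optionally unfreeze afterwards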
<file_sep>#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Sun Jul 23 15:04:19 2017
@author: zqwu
"""
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
from sklearn.kernel_ridge import KernelRidge
import numpy as np
import deepchem as dc
import tempfile
# Only for debug!
np.random.seed(123)
# Load Delaney dataset
n_features = 1024
delaney_tasks, delaney_datasets, transformers = dc.molnet.load_delaney()
train_dataset, valid_dataset, test_dataset = delaney_datasets
metric = dc.metrics.Metric(dc.metrics.pearson_r2_score, np.mean)
def model_builder(model_dir):
sklearn_model = KernelRidge(kernel="rbf", alpha=1e-3, gamma=0.05)
return dc.models.SklearnModel(sklearn_model, model_dir)
model_dir = tempfile.mkdtemp()
model = dc.models.SingletaskToMultitask(delaney_tasks, model_builder, model_dir)
model.fit(train_dataset)
model.save()
print("Evaluating model")
train_scores = model.evaluate(train_dataset, [metric], transformers)
valid_scores = model.evaluate(valid_dataset, [metric], transformers)
print("Train scores")
print(train_scores)
print("Validation scores")
print(valid_scores)
<file_sep>import pandas as pd
import os
from rdkit import Chem
import time
import gzip
import pickle
import deepchem
def main():
print("Processing PubChem FTP Download")
data_dir = deepchem.utils.get_data_dir()
sdf_dir = os.path.join(data_dir, "SDF")
compound_read_count = 0
keys = list()
values = list()
overall_start = time.time()
all_paths = list()
for path, dirs, filenames in os.walk(sdf_dir):
for filename in filenames:
# RDKit consistently hangs when trying to read this file
if "102125001_102150000" in filename:
continue
file_path = os.path.join(sdf_dir, filename)
all_paths.append(file_path)
all_paths.sort()
for filepath in all_paths:
print("Processing: {0}".format(filepath))
start = time.time()
with gzip.open(filepath, 'rb') as myfile:
suppl = Chem.ForwardSDMolSupplier(myfile)
for mol in suppl:
if mol is None: continue
cid = mol.GetProp("PUBCHEM_COMPOUND_CID")
try:
smiles = Chem.MolToSmiles(mol)
keys.append(int(cid))
values.append(smiles)
except Exception:
continue
end = time.time()
print("Processed file, processed thru compound number: {0} in {1} seconds".
format(compound_read_count, end - start))
compound_read_count = compound_read_count + 1
overall_end = time.time()
secs_elapsed = overall_end - overall_start
print("Parsed all smiles in: {0} seconds, or {1} minutes, or {2} hours".
format(secs_elapsed, secs_elapsed / 60, secs_elapsed / 3600))
print("Total length of: {}".format(len(keys)))
with open(os.path.join(data_dir, "/pubchemsmiles_tuple.pickle"), "wb") as f:
pickle.dump((keys, values), f)
print("Done")
overall_end = time.time()
secs_elapsed = overall_end - overall_start
print("Sorted and saved smiles in: {0} seconds, or {1} minutes, or {2} hours".
format(secs_elapsed, secs_elapsed / 60, secs_elapsed / 3600))
if __name__ == '__main__':
main()
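# Hedged read-back sketch: the pickle written above stores a (keys, values)
# tuple of integer CIDs and SMILES strings, so it can be reloaded with:
#
#     with open(os.path.join(data_dir, "pubchemsmiles_tuple.pickle"), "rb") as f:
#         keys, values = pickle.load(f)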
<file_sep>"""
Created on Sat Oct 14 16:59:49 2017
@author: zqwu
This script evaluates how performances change with
different size of training set(training set fraction).
Default fractions evaluated are 0.1, 0.2, ..., 0.9.
The whole dataset is split into train set and valid set
with corresponding fractions (the test set is not used).
Models are trained on train set and evaluated on valid set.
Command line options are the same as `benchmark.py`
All results and train set fractions are stored in
'./results_frac_train_curve.csv'
"""
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
import os
import numpy as np
import deepchem as dc
import argparse
import pickle
import csv
from deepchem.molnet.run_benchmark_models import benchmark_classification, benchmark_regression
from deepchem.molnet.run_benchmark import load_dataset
from deepchem.molnet.check_availability import CheckFeaturizer, CheckSplit
from deepchem.molnet.preset_hyper_parameters import hps
# Evaluate performances with different training set fraction
frac_trains = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]
parser = argparse.ArgumentParser(
description='Deepchem benchmark: ' +
'giving performances of different learning models on datasets')
parser.add_argument(
'-s',
action='append',
dest='splitter_args',
default=[],
help='Choice of splitting function: index, random, scaffold, stratified')
parser.add_argument(
'-m',
action='append',
dest='model_args',
default=[],
help='Choice of model: tf, tf_robust, logreg, rf, irv, graphconv, xgb,' + \
' dag, weave, tf_regression, tf_regression_ft, rf_regression, ' + \
'graphconvreg, xgb_regression, dtnn, dag_regression, weave_regression')
parser.add_argument(
'-d',
action='append',
dest='dataset_args',
default=[],
help='Choice of dataset: bace_c, bace_r, bbbp, chembl, clearance, ' +
'clintox, delaney, hiv, hopv, kaggle, lipo, muv, nci, pcba, ' +
'pdbbind, ppb, qm7, qm7b, qm8, qm9, sampl, sider, tox21, toxcast')
parser.add_argument(
'--seed',
action='append',
dest='seed_args',
default=[],
help='Choice of random seed')
args = parser.parse_args()
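# Example invocation (sketch, not from the original source; the script filename is a placeholder):
#   python frac_train_curve.py -s random -m graphconv -d tox21 --seed 123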
#Datasets and models used in the benchmark test
splitters = args.splitter_args
models = args.model_args
datasets = args.dataset_args
if len(args.seed_args) > 0:
seed = int(args.seed_args[0])
else:
seed = 123
out_path = '.'
for dataset in datasets:
for split in splitters:
for model in models:
hyper_parameters = None
# Uncomment the two lines below if hyper_parameters are provided
#with open(os.path.join(out_path, dataset + model + '.pkl'), 'r') as f:
# hyper_parameters = pickle.load(f)
if dataset in [
'bace_c', 'bbbp', 'clintox', 'hiv', 'muv', 'pcba', 'sider', 'tox21',
'toxcast'
]:
mode = 'classification'
metric = [dc.metrics.Metric(dc.metrics.roc_auc_score, np.mean)]
elif dataset in [
'bace_r', 'chembl', 'clearance', 'delaney', 'hopv', 'kaggle', 'lipo',
'nci', 'pdbbind', 'ppb', 'qm7', 'qm7b', 'qm8', 'qm9', 'sampl'
]:
mode = 'regression'
metric = [dc.metrics.Metric(dc.metrics.pearson_r2_score, np.mean)]
pair = (dataset, model)
if pair in CheckFeaturizer:
featurizer = CheckFeaturizer[pair][0]
n_features = CheckFeaturizer[pair][1]
else:
supported_combinations = [
key for key in CheckFeaturizer.keys() if pair[0] == key[0]
]
supported_models = [k[1] for k in supported_combinations]
raise ValueError(
"Model %s not supported for %s dataset. Please choose from the following:\n%s"
% (pair[1], pair[0], " ".join(supported_models)))
tasks, all_dataset, transformers = load_dataset(
dataset, featurizer, split='index')
all_dataset = dc.data.DiskDataset.merge(all_dataset)
for frac_train in frac_trains:
        # use a separate name to avoid shadowing the `splitters` CLI list above,
        # which would otherwise change which splits run for subsequent datasets
        splitters_map = {
            'index': dc.splits.IndexSplitter(),
            'random': dc.splits.RandomSplitter(),
            'scaffold': dc.splits.ScaffoldSplitter(),
            'stratified': dc.splits.SingletaskStratifiedSplitter(task_number=0)
        }
        splitter = splitters_map[split]
np.random.seed(seed)
train, valid, test = splitter.train_valid_test_split(
all_dataset,
frac_train=frac_train,
frac_valid=1 - frac_train,
frac_test=0.)
test = valid
if mode == 'classification':
train_score, valid_score, test_score = benchmark_classification(
train,
valid,
test,
tasks,
transformers,
n_features,
metric,
model,
test=False,
hyper_parameters=hyper_parameters,
seed=seed)
elif mode == 'regression':
train_score, valid_score, test_score = benchmark_regression(
train,
valid,
test,
tasks,
transformers,
n_features,
metric,
model,
test=False,
hyper_parameters=hyper_parameters,
seed=seed)
with open(os.path.join(out_path, 'results_frac_train_curve.csv'),
'a') as f:
writer = csv.writer(f)
model_name = list(train_score.keys())[0]
for i in train_score[model_name]:
output_line = [
dataset,
str(split), mode, model_name, i, 'train',
train_score[model_name][i], 'valid', valid_score[model_name][i]
]
output_line.extend(['frac_train', frac_train])
writer.writerow(output_line)
<file_sep>#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Dec 8 16:48:05 2016
@author: <NAME>
Low data benchmark test
Giving performances of: Siamese, attention-based embedding, residual embedding
on datasets: muv, sider, tox21
time estimation listed in README file
"""
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
import numpy as np
import deepchem as dc
import argparse
np.random.seed(123)
parser = argparse.ArgumentParser(
description='Deepchem benchmark: ' +
'giving performances of different learning models on datasets')
parser.add_argument(
'-m',
action='append',
dest='model_args',
default=[],
help='Choice of model: siamese, attn, res')
parser.add_argument(
'-d',
action='append',
dest='dataset_args',
default=[],
help='Choice of dataset: tox21, sider, muv')
parser.add_argument(
'--cv',
action='store_true',
dest='cross_valid',
default=False,
help='whether to implement cross validation')
args = parser.parse_args()
#Datasets and models used in the benchmark test
models = args.model_args
datasets = args.dataset_args
cross_valid = args.cross_valid
if len(models) == 0:
models = ['siamese', 'attn', 'res']
if len(datasets) == 0:
datasets = ['tox21', 'sider', 'muv']
for dataset in datasets:
for model in models:
dc.molnet.run_benchmark_low_data(
[dataset], str(model), cross_valid=cross_valid)
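# Example invocation (sketch, not from the original source; the script filename is a placeholder):
#   python low_data_benchmark.py -m siamese -d tox21 --cv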
<file_sep>from typing import List, Tuple
import numpy as np
from deepchem.utils.typing import RDKitAtom, RDKitBond, RDKitMol
from deepchem.feat.graph_data import GraphData
from deepchem.feat.base_classes import MolecularFeaturizer
from deepchem.utils.molecule_feature_utils import one_hot_encode
from deepchem.utils.molecule_feature_utils import get_atom_type_one_hot
from deepchem.utils.molecule_feature_utils import construct_hydrogen_bonding_info
from deepchem.utils.molecule_feature_utils import get_atom_hydrogen_bonding_one_hot
from deepchem.utils.molecule_feature_utils import get_atom_hybridization_one_hot
from deepchem.utils.molecule_feature_utils import get_atom_total_num_Hs_one_hot
from deepchem.utils.molecule_feature_utils import get_atom_is_in_aromatic_one_hot
from deepchem.utils.molecule_feature_utils import get_atom_chirality_one_hot
from deepchem.utils.molecule_feature_utils import get_atom_formal_charge
from deepchem.utils.molecule_feature_utils import get_atom_partial_charge
from deepchem.utils.molecule_feature_utils import get_atom_total_degree_one_hot
from deepchem.utils.molecule_feature_utils import get_bond_type_one_hot
from deepchem.utils.molecule_feature_utils import get_bond_is_in_same_ring_one_hot
from deepchem.utils.molecule_feature_utils import get_bond_is_conjugated_one_hot
from deepchem.utils.molecule_feature_utils import get_bond_stereo_one_hot
from deepchem.utils.molecule_feature_utils import get_atom_formal_charge_one_hot
from deepchem.utils.molecule_feature_utils import get_atom_implicit_valence_one_hot
from deepchem.utils.molecule_feature_utils import get_atom_explicit_valence_one_hot
from deepchem.utils.rdkit_utils import compute_all_pairs_shortest_path
from deepchem.utils.rdkit_utils import compute_pairwise_ring_info
def _construct_atom_feature(atom: RDKitAtom, h_bond_infos: List[Tuple[int,
str]],
use_chirality: bool,
use_partial_charge: bool) -> np.ndarray:
"""Construct an atom feature from a RDKit atom object.
Parameters
----------
atom: rdkit.Chem.rdchem.Atom
RDKit atom object
h_bond_infos: List[Tuple[int, str]]
A list of tuple `(atom_index, hydrogen_bonding_type)`.
Basically, it is expected that this value is the return value of
`construct_hydrogen_bonding_info`. The `hydrogen_bonding_type`
value is "Acceptor" or "Donor".
use_chirality: bool
Whether to use chirality information or not.
use_partial_charge: bool
Whether to use partial charge data or not.
Returns
-------
np.ndarray
A one-hot vector of the atom feature.
"""
atom_type = get_atom_type_one_hot(atom)
formal_charge = get_atom_formal_charge(atom)
hybridization = get_atom_hybridization_one_hot(atom)
acceptor_donor = get_atom_hydrogen_bonding_one_hot(atom, h_bond_infos)
aromatic = get_atom_is_in_aromatic_one_hot(atom)
degree = get_atom_total_degree_one_hot(atom)
total_num_Hs = get_atom_total_num_Hs_one_hot(atom)
atom_feat = np.concatenate([
atom_type, formal_charge, hybridization, acceptor_donor, aromatic,
degree, total_num_Hs
])
if use_chirality:
chirality = get_atom_chirality_one_hot(atom)
atom_feat = np.concatenate([atom_feat, np.array(chirality)])
if use_partial_charge:
partial_charge = get_atom_partial_charge(atom)
atom_feat = np.concatenate([atom_feat, np.array(partial_charge)])
return atom_feat
def _construct_bond_feature(bond: RDKitBond) -> np.ndarray:
"""Construct a bond feature from a RDKit bond object.
Parameters
    ----------
bond: rdkit.Chem.rdchem.Bond
RDKit bond object
Returns
-------
np.ndarray
A one-hot vector of the bond feature.
"""
bond_type = get_bond_type_one_hot(bond)
same_ring = get_bond_is_in_same_ring_one_hot(bond)
conjugated = get_bond_is_conjugated_one_hot(bond)
stereo = get_bond_stereo_one_hot(bond)
return np.concatenate([bond_type, same_ring, conjugated, stereo])
class MolGraphConvFeaturizer(MolecularFeaturizer):
"""This class is a featurizer of general graph convolution networks for molecules.
The default node(atom) and edge(bond) representations are based on
`WeaveNet paper <https://arxiv.org/abs/1603.00856>`_. If you want to use your own representations,
you could use this class as a guide to define your original Featurizer. In many cases, it's enough
to modify return values of `construct_atom_feature` or `construct_bond_feature`.
The default node representation are constructed by concatenating the following values,
and the feature length is 30.
- Atom type: A one-hot vector of this atom, "C", "N", "O", "F", "P", "S", "Cl", "Br", "I", "other atoms".
- Formal charge: Integer electronic charge.
- Hybridization: A one-hot vector of "sp", "sp2", "sp3".
- Hydrogen bonding: A one-hot vector of whether this atom is a hydrogen bond donor or acceptor.
- Aromatic: A one-hot vector of whether the atom belongs to an aromatic ring.
- Degree: A one-hot vector of the degree (0-5) of this atom.
- Number of Hydrogens: A one-hot vector of the number of hydrogens (0-4) that this atom connected.
- Chirality: A one-hot vector of the chirality, "R" or "S". (Optional)
- Partial charge: Calculated partial charge. (Optional)
The default edge representation are constructed by concatenating the following values,
and the feature length is 11.
- Bond type: A one-hot vector of the bond type, "single", "double", "triple", or "aromatic".
- Same ring: A one-hot vector of whether the atoms in the pair are in the same ring.
- Conjugated: A one-hot vector of whether this bond is conjugated or not.
- Stereo: A one-hot vector of the stereo configuration of a bond.
If you want to know more details about features, please check the paper [1]_ and
utilities in deepchem.utils.molecule_feature_utils.py.
Examples
--------
>>> smiles = ["C1CCC1", "C1=CC=CN=C1"]
>>> featurizer = MolGraphConvFeaturizer(use_edges=True)
>>> out = featurizer.featurize(smiles)
>>> type(out[0])
<class 'deepchem.feat.graph_data.GraphData'>
>>> out[0].num_node_features
30
>>> out[0].num_edge_features
11
References
----------
.. [1] <NAME>, et al. "Molecular graph convolutions: moving beyond fingerprints."
Journal of computer-aided molecular design 30.8 (2016):595-608.
Note
----
This class requires RDKit to be installed.
"""
def __init__(self,
use_edges: bool = False,
use_chirality: bool = False,
use_partial_charge: bool = False):
"""
Parameters
----------
use_edges: bool, default False
Whether to use edge features or not.
use_chirality: bool, default False
Whether to use chirality information or not.
If True, featurization becomes slow.
use_partial_charge: bool, default False
Whether to use partial charge data or not.
If True, this featurizer computes gasteiger charges.
Therefore, there is a possibility to fail to featurize for some molecules
and featurization becomes slow.
"""
self.use_edges = use_edges
self.use_partial_charge = use_partial_charge
self.use_chirality = use_chirality
def _featurize(self, datapoint: RDKitMol, **kwargs) -> GraphData:
"""Calculate molecule graph features from RDKit mol object.
Parameters
----------
datapoint: rdkit.Chem.rdchem.Mol
RDKit mol object.
Returns
-------
graph: GraphData
A molecule graph with some features.
"""
assert datapoint.GetNumAtoms(
) > 1, "More than one atom should be present in the molecule for this featurizer to work."
if 'mol' in kwargs:
datapoint = kwargs.get("mol")
raise DeprecationWarning(
'Mol is being phased out as a parameter, please pass "datapoint" instead.'
)
if self.use_partial_charge:
try:
datapoint.GetAtomWithIdx(0).GetProp('_GasteigerCharge')
except:
# If partial charges were not computed
try:
from rdkit.Chem import AllChem
AllChem.ComputeGasteigerCharges(datapoint)
except ModuleNotFoundError:
raise ImportError(
"This class requires RDKit to be installed.")
# construct atom (node) feature
h_bond_infos = construct_hydrogen_bonding_info(datapoint)
atom_features = np.asarray(
[
_construct_atom_feature(atom, h_bond_infos, self.use_chirality,
self.use_partial_charge)
for atom in datapoint.GetAtoms()
],
dtype=float,
)
# construct edge (bond) index
src, dest = [], []
for bond in datapoint.GetBonds():
# add edge list considering a directed graph
start, end = bond.GetBeginAtomIdx(), bond.GetEndAtomIdx()
src += [start, end]
dest += [end, start]
# construct edge (bond) feature
        bond_features = None  # default None
if self.use_edges:
features = []
for bond in datapoint.GetBonds():
features += 2 * [_construct_bond_feature(bond)]
bond_features = np.asarray(features, dtype=float)
# load_sdf_files returns pos as strings but user can also specify
# numpy arrays for atom coordinates
pos = []
if 'pos_x' in kwargs and 'pos_y' in kwargs and 'pos_z' in kwargs:
if isinstance(kwargs['pos_x'], str):
pos_x = eval(kwargs['pos_x'])
elif isinstance(kwargs['pos_x'], np.ndarray):
pos_x = kwargs['pos_x']
if isinstance(kwargs['pos_y'], str):
pos_y = eval(kwargs['pos_y'])
elif isinstance(kwargs['pos_y'], np.ndarray):
pos_y = kwargs['pos_y']
if isinstance(kwargs['pos_z'], str):
pos_z = eval(kwargs['pos_z'])
elif isinstance(kwargs['pos_z'], np.ndarray):
pos_z = kwargs['pos_z']
for x, y, z in zip(pos_x, pos_y, pos_z):
pos.append([x, y, z])
node_pos_features = np.asarray(pos)
else:
node_pos_features = None
return GraphData(node_features=atom_features,
edge_index=np.asarray([src, dest], dtype=int),
edge_features=bond_features,
node_pos_features=node_pos_features)
class PagtnMolGraphFeaturizer(MolecularFeaturizer):
"""This class is a featuriser of PAGTN graph networks for molecules.
    The featurization is based on the `PAGTN model <https://arxiv.org/abs/1905.12712>`_. It is
    slightly more computationally intensive than the default graph convolution featurizer, but it
    builds a molecular graph connecting all atom pairs, accounting for interactions of an atom with
    every other atom in the molecule. According to the paper, the interaction between a pair
    of atoms depends on the relative distance between them, and hence the function needs
    to calculate the shortest path between them.
The default node representation is constructed by concatenating the following values,
and the feature length is 94.
- Atom type: One hot encoding of the atom type. It consists of the most possible elements in a chemical compound.
- Formal charge: One hot encoding of formal charge of the atom.
- Degree: One hot encoding of the atom degree
- Explicit Valence: One hot encoding of explicit valence of an atom. The supported possibilities
include ``0 - 6``.
- Implicit Valence: One hot encoding of implicit valence of an atom. The supported possibilities
include ``0 - 5``.
- Aromaticity: Boolean representing if an atom is aromatic.
The default edge representation is constructed by concatenating the following values,
and the feature length is 42. It builds a complete graph where each node is connected to
every other node. The edge representations are calculated based on the shortest path between two nodes
(choose any one if multiple exist). Each bond encountered in the shortest path is used to
calculate edge features.
- Bond type: A one-hot vector of the bond type, "single", "double", "triple", or "aromatic".
- Conjugated: A one-hot vector of whether this bond is conjugated or not.
- Same ring: A one-hot vector of whether the atoms in the pair are in the same ring.
- Ring Size and Aromaticity: One hot encoding of atoms in pair based on ring size and aromaticity.
- Distance: One hot encoding of the distance between pair of atoms.
Examples
--------
>>> from deepchem.feat import PagtnMolGraphFeaturizer
>>> smiles = ["C1CCC1", "C1=CC=CN=C1"]
>>> featurizer = PagtnMolGraphFeaturizer(max_length=5)
>>> out = featurizer.featurize(smiles)
>>> type(out[0])
<class 'deepchem.feat.graph_data.GraphData'>
>>> out[0].num_node_features
94
>>> out[0].num_edge_features
42
References
----------
.. [1] Chen, Barzilay, Jaakkola "Path-Augmented Graph Transformer Network"
10.26434/chemrxiv.8214422.
Note
----
This class requires RDKit to be installed.
"""
def __init__(self, max_length=5):
"""
Parameters
----------
max_length : int
Maximum distance up to which shortest paths must be considered.
Paths shorter than max_length will be padded and longer will be
truncated, default to ``5``.
"""
self.SYMBOLS = [
'C', 'N', 'O', 'S', 'F', 'Si', 'P', 'Cl', 'Br', 'Mg', 'Na', 'Ca',
'Fe', 'As', 'Al', 'I', 'B', 'V', 'K', 'Tl', 'Yb', 'Sb', 'Sn', 'Ag',
'Pd', 'Co', 'Se', 'Ti', 'Zn', 'H', 'Li', 'Ge', 'Cu', 'Au', 'Ni',
'Cd', 'In', 'Mn', 'Zr', 'Cr', 'Pt', 'Hg', 'Pb', 'W', 'Ru', 'Nb',
'Re', 'Te', 'Rh', 'Tc', 'Ba', 'Bi', 'Hf', 'Mo', 'U', 'Sm', 'Os',
'Ir', 'Ce', 'Gd', 'Ga', 'Cs', '*', 'UNK'
]
self.RING_TYPES = [(5, False), (5, True), (6, False), (6, True)]
self.ordered_pair = lambda a, b: (a, b) if a < b else (b, a)
self.max_length = max_length
def _pagtn_atom_featurizer(self, atom: RDKitAtom) -> np.ndarray:
"""Calculate Atom features from RDKit atom object.
Parameters
----------
        atom: rdkit.Chem.rdchem.Atom
            RDKit atom object.
Returns
-------
atom_feat: np.ndarray
numpy vector of atom features.
"""
atom_type = get_atom_type_one_hot(atom, self.SYMBOLS, False)
formal_charge = get_atom_formal_charge_one_hot(
atom, include_unknown_set=False)
degree = get_atom_total_degree_one_hot(atom, list(range(11)), False)
exp_valence = get_atom_explicit_valence_one_hot(atom, list(range(7)),
False)
imp_valence = get_atom_implicit_valence_one_hot(atom, list(range(6)),
False)
        aromaticity = get_atom_is_in_aromatic_one_hot(atom)
        atom_feat = np.concatenate([
            atom_type, formal_charge, degree, exp_valence, imp_valence,
            aromaticity
        ])
return atom_feat
def _edge_features(self, mol: RDKitMol, path_atoms: Tuple[int, ...],
ring_info) -> np.ndarray:
"""Computes the edge features for a given pair of nodes.
Parameters
----------
mol : : RDKitMol
RDKit molecule instance.
path_atoms: tuple
Shortest path between the given pair of nodes.
ring_info: list
Different rings that contain the pair of atoms
"""
features: List = []
path_bonds = []
path_length = len(path_atoms)
for path_idx in range(path_length - 1):
bond = mol.GetBondBetweenAtoms(path_atoms[path_idx],
path_atoms[path_idx + 1])
if bond is None:
import warnings
warnings.warn('Valid idx of bonds must be passed')
path_bonds.append(bond)
for path_idx in range(self.max_length):
if path_idx < len(path_bonds):
bond_type = get_bond_type_one_hot(path_bonds[path_idx])
conjugacy = get_bond_is_conjugated_one_hot(path_bonds[path_idx])
ring_attach = get_bond_is_in_same_ring_one_hot(
path_bonds[path_idx])
features.append(
np.concatenate([bond_type, conjugacy, ring_attach]))
else:
features.append(np.zeros(6))
if path_length + 1 > self.max_length:
path_length = self.max_length + 1
position_feature = np.zeros(self.max_length + 2)
position_feature[path_length] = 1
features.append(position_feature)
if ring_info:
rfeat = [
one_hot_encode(r, allowable_set=self.RING_TYPES)
for r in ring_info
]
# The 1.0 float value represents True Boolean
rfeat = [1.0] + np.any(rfeat, axis=0).tolist()
features.append(rfeat)
else:
# This will return a boolean vector with all entries False
features.append(
[0.0] +
one_hot_encode(ring_info, allowable_set=self.RING_TYPES))
return np.concatenate(features, axis=0)
def _pagtn_edge_featurizer(self,
mol: RDKitMol) -> Tuple[np.ndarray, np.ndarray]:
"""Calculate bond features from RDKit mol object.
Parameters
----------
mol: rdkit.Chem.rdchem.Mol
RDKit mol object.
Returns
-------
np.ndarray
Source and Destination node indexes of each bond.
np.ndarray
numpy vector of bond features.
"""
n_atoms = mol.GetNumAtoms()
# To get the shortest paths between two nodes.
paths_dict = compute_all_pairs_shortest_path(mol)
# To get info if two nodes belong to the same ring.
rings_dict = compute_pairwise_ring_info(mol)
# Featurizer
feats = []
src = []
dest = []
for i in range(n_atoms):
for j in range(n_atoms):
src.append(i)
dest.append(j)
if (i, j) not in paths_dict:
feats.append(np.zeros(7 * self.max_length + 7))
continue
ring_info = rings_dict.get(self.ordered_pair(i, j), [])
feats.append(
self._edge_features(mol, paths_dict[(i, j)], ring_info))
return np.array([src, dest], dtype=int), np.array(feats, dtype=float)
def _featurize(self, datapoint: RDKitMol, **kwargs) -> GraphData:
"""Calculate molecule graph features from RDKit mol object.
Parameters
----------
datapoint: rdkit.Chem.rdchem.Mol
RDKit mol object.
Returns
-------
graph: GraphData
A molecule graph with some features.
"""
if 'mol' in kwargs:
datapoint = kwargs.get("mol")
raise DeprecationWarning(
'Mol is being phased out as a parameter, please pass "datapoint" instead.'
)
node_features = np.asarray([
self._pagtn_atom_featurizer(atom) for atom in datapoint.GetAtoms()
],
dtype=float)
edge_index, edge_features = self._pagtn_edge_featurizer(datapoint)
graph = GraphData(node_features, edge_index, edge_features)
return graph
<file_sep>"""
Imports a number of useful deep learning primitives into one place.
"""
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
from deepchem.nn.model_ops import weight_decay
from deepchem.nn.model_ops import optimizer
from deepchem.nn.model_ops import add_bias
from deepchem.nn.model_ops import fully_connected_layer
from deepchem.nn.model_ops import multitask_logits
from deepchem.nn.model_ops import softmax_N
from deepchem.nn.objectives import mean_squared_error
<file_sep>from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
import os
import sys
import deepchem as dc
import json
import numpy as np
import tensorflow as tf
from deepchem.models.atomic_conv import atomic_conv_model
sys.path.append("../../models")
from deepchem.models.tensorgraph.layers import Layer, Feature, Label, L2Loss, AtomicConvolution, Transpose, Dense
from deepchem.models import TensorGraph
import itertools
import time
seed = 123
np.random.seed(seed)
tf.set_random_seed(seed)
base_dir = os.getcwd()
data_dir = os.path.join(base_dir, "datasets")
train_dir = os.path.join(data_dir, "scaffold_train")
test_dir = os.path.join(data_dir, "scaffold_test")
train_dataset = dc.data.DiskDataset(train_dir)
test_dataset = dc.data.DiskDataset(test_dir)
pdbbind_tasks = ["-logKd/Ki"]
transformers = []
y_train = train_dataset.y
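# NOTE (assumption, not stated in the original script): the scaling below appears to
# convert the -logKd/Ki labels toward binding free energies in kcal/mol
# (2.479 ~ RT in kJ/mol at 298 K, 4.184 kJ per kcal); the same factor is applied to y_test below.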
y_train *= -1 * 2.479 / 4.184
train_dataset = dc.data.DiskDataset.from_numpy(
train_dataset.X,
y_train,
train_dataset.w,
train_dataset.ids,
tasks=pdbbind_tasks)
y_test = test_dataset.y
y_test *= -1 * 2.479 / 4.184
test_dataset = dc.data.DiskDataset.from_numpy(
test_dataset.X,
y_test,
test_dataset.w,
test_dataset.ids,
tasks=pdbbind_tasks)
batch_size = 24
radial1 = [
[1.5, 2.5, 3.5, 4.5, 5.5, 6.5, 7.5, 8.5, 9.5, 10.5],
[
1.5, 2.5, 3.5, 4.5, 5.5, 6.0, 6.5, 7.0, 7.5, 8.0, 8.5, 9.0, 9.5, 10.0,
10.5
],
]
radial2 = [
[0.0, 2.0, 4.0],
[0.0, 1.0, 2.0],
[0.0, 1.5, 3.0],
]
radial3 = [
[0.075],
]
layer_sizes = [
[64, 32, 16],
[32, 16, 8],
]
learning_rates = [
0.001,
]
epochs = [10]
def params():
for values in itertools.product(radial1, radial2, radial3, layer_sizes,
learning_rates, epochs):
d = {
"frag1_num_atoms": 140,
"frag2_num_atoms": 821,
"complex_num_atoms": 908,
"radial": [values[0], values[1], values[2]],
"layer_sizes": values[3],
"learning_rate": values[4],
"epochs": values[5]
}
yield d
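# With the grids above, itertools.product enumerates
# len(radial1) * len(radial2) * len(radial3) * len(layer_sizes) * len(learning_rates) * len(epochs)
# = 2 * 3 * 1 * 2 * 1 * 1 = 12 hyperparameter settings.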
metric = [
dc.metrics.Metric(dc.metrics.mean_absolute_error, mode="regression"),
dc.metrics.Metric(dc.metrics.pearson_r2_score, mode="regression")
]
for param in params():
num_epochs = param['epochs']
del param['epochs']
tg, feed_dict_generator, label = atomic_conv_model(**param)
tg.fit_generator(
feed_dict_generator(train_dataset, batch_size, epochs=num_epochs))
test_evaluator = dc.utils.evaluate.GeneratorEvaluator(
tg, feed_dict_generator(test_dataset, batch_size), transformers, [label])
test_scores = test_evaluator.compute_model_performance(metric)
param.update(test_scores)
param['epochs'] = num_epochs
print("Results")
print(param)
with open('hyper_results.txt', 'a') as fout:
fout.write(json.dumps(param))
fout.write("\n")
<file_sep>"""
RDKit Utilities.
This file contains utilities that compute useful properties of
molecules. Some of these are simple cleanup utilities, and
others are more sophisticated functions that detect chemical
properties of molecules.
"""
import os
import logging
import itertools
import numpy as np
from io import StringIO
from deepchem.utils.pdbqt_utils import pdbqt_to_pdb
from deepchem.utils.pdbqt_utils import convert_mol_to_pdbqt
from deepchem.utils.pdbqt_utils import convert_protein_to_pdbqt
from deepchem.utils.geometry_utils import compute_pairwise_distances
from deepchem.utils.geometry_utils import compute_centroid
from deepchem.utils.fragment_utils import MolecularFragment
from deepchem.utils.fragment_utils import MoleculeLoadException
from typing import Any, List, Sequence, Tuple, Set, Optional, Dict, Union
from deepchem.utils.typing import OneOrMany, RDKitMol
logger = logging.getLogger(__name__)
def get_xyz_from_mol(mol):
"""Extracts a numpy array of coordinates from a molecules.
Returns a `(N, 3)` numpy array of 3d coords of given rdkit molecule
Parameters
----------
mol: rdkit Molecule
Molecule to extract coordinates for
Returns
-------
Numpy ndarray of shape `(N, 3)` where `N = mol.GetNumAtoms()`.
"""
xyz = np.zeros((mol.GetNumAtoms(), 3))
conf = mol.GetConformer()
for i in range(conf.GetNumAtoms()):
position = conf.GetAtomPosition(i)
xyz[i, 0] = position.x
xyz[i, 1] = position.y
xyz[i, 2] = position.z
return (xyz)
def add_hydrogens_to_mol(mol, is_protein=False):
"""
Add hydrogens to a molecule object
Parameters
----------
mol: Rdkit Mol
Molecule to hydrogenate
is_protein: bool, optional (default False)
Whether this molecule is a protein.
Returns
-------
Rdkit Mol
Note
----
This function requires RDKit and PDBFixer to be installed.
"""
return apply_pdbfixer(mol, hydrogenate=True, is_protein=is_protein)
def apply_pdbfixer(mol,
add_missing=True,
hydrogenate=True,
pH=7.4,
remove_heterogens=True,
is_protein=True):
"""
Apply PDBFixer to a molecule to try to clean it up.
Parameters
----------
mol: Rdkit Mol
Molecule to clean up.
add_missing: bool, optional
If true, add in missing residues and atoms
hydrogenate: bool, optional
If true, add hydrogens at specified pH
pH: float, optional
The pH at which hydrogens will be added if `hydrogenate==True`. Set to 7.4 by default.
remove_heterogens: bool, optional
Often times, PDB files come with extra waters and salts attached.
If this field is set, remove these heterogens.
is_protein: bool, optional
If false, then don't remove heterogens (since this molecule is
itself a heterogen).
Returns
-------
Rdkit Mol
Note
----
This function requires RDKit and PDBFixer to be installed.
"""
molecule_file = None
try:
from pdbfixer import PDBFixer
except ModuleNotFoundError:
raise ImportError("This function requires pdbfixer")
try:
import simtk
except ModuleNotFoundError:
raise ImportError("This function requires openmm")
try:
from rdkit import Chem
pdbblock = Chem.MolToPDBBlock(mol)
pdb_stringio = StringIO()
pdb_stringio.write(pdbblock)
pdb_stringio.seek(0)
fixer = PDBFixer(pdbfile=pdb_stringio)
if add_missing:
fixer.findMissingResidues()
fixer.findMissingAtoms()
fixer.addMissingAtoms()
if hydrogenate:
fixer.addMissingHydrogens(pH)
if is_protein and remove_heterogens:
# False here specifies that water is to be removed
fixer.removeHeterogens(False)
hydrogenated_io = StringIO()
simtk.openmm.app.PDBFile.writeFile(fixer.topology, fixer.positions,
hydrogenated_io)
hydrogenated_io.seek(0)
return Chem.MolFromPDBBlock(hydrogenated_io.read(),
sanitize=False,
removeHs=False)
except ValueError as e:
logger.warning("Unable to add hydrogens %s", e)
raise MoleculeLoadException(e)
finally:
try:
os.remove(molecule_file)
except (OSError, TypeError):
pass
def compute_charges(mol):
"""Attempt to compute Gasteiger Charges on Mol
This also has the side effect of calculating charges on mol. The
mol passed into this function has to already have been sanitized
Parameters
----------
mol: rdkit molecule
Returns
-------
No return since updates in place.
Note
----
This function requires RDKit to be installed.
"""
from rdkit.Chem import AllChem
try:
# Updates charges in place
AllChem.ComputeGasteigerCharges(mol)
except Exception as e:
logging.exception("Unable to compute charges for mol")
raise MoleculeLoadException(e)
def load_complex(molecular_complex: OneOrMany[str],
add_hydrogens: bool = True,
calc_charges: bool = True,
sanitize: bool = True) -> List[Tuple[np.ndarray, RDKitMol]]:
"""Loads a molecular complex.
Given some representation of a molecular complex, returns a list of
tuples, where each tuple contains (xyz coords, rdkit object) for
that constituent molecule in the complex.
For now, assumes that molecular_complex is a tuple of filenames.
Parameters
----------
molecular_complex: list or str
If list, each entry should be a filename for a constituent
molecule in complex. If str, should be the filename of a file that
holds the full complex.
add_hydrogens: bool, optional
If true, add hydrogens via pdbfixer
calc_charges: bool, optional
If true, add charges via rdkit
sanitize: bool, optional
If true, sanitize molecules via rdkit
Returns
-------
List of tuples (xyz, mol)
Note
----
This function requires RDKit to be installed.
"""
if isinstance(molecular_complex, str):
molecular_complex = [molecular_complex]
fragments: List = []
for mol in molecular_complex:
loaded = load_molecule(mol,
add_hydrogens=add_hydrogens,
calc_charges=calc_charges,
sanitize=sanitize)
if isinstance(loaded, list):
fragments += loaded
else:
fragments.append(loaded)
return fragments
def load_molecule(molecule_file,
add_hydrogens=True,
calc_charges=True,
sanitize=True,
is_protein=False):
"""Converts molecule file to (xyz-coords, obmol object)
Given molecule_file, returns a tuple of xyz coords of molecule
and an rdkit object representing that molecule in that order `(xyz,
rdkit_mol)`. This ordering convention is used in the code in a few
places.
Parameters
----------
molecule_file: str
filename for molecule
add_hydrogens: bool, optional (default True)
If True, add hydrogens via pdbfixer
calc_charges: bool, optional (default True)
If True, add charges via rdkit
    sanitize: bool, optional (default True)
If True, sanitize molecules via rdkit
is_protein: bool, optional (default False)
If True`, this molecule is loaded as a protein. This flag will
affect some of the cleanup procedures applied.
Returns
-------
Tuple (xyz, mol) if file contains single molecule. Else returns a
list of the tuples for the separate molecules in this list.
Note
----
This function requires RDKit to be installed.
"""
from rdkit import Chem
from_pdb = False
if ".mol2" in molecule_file:
my_mol = Chem.MolFromMol2File(molecule_file,
sanitize=False,
removeHs=False)
elif ".sdf" in molecule_file:
suppl = Chem.SDMolSupplier(str(molecule_file), sanitize=False)
# TODO: This is wrong. Should return all molecules
my_mol = suppl[0]
elif ".pdbqt" in molecule_file:
pdb_block = pdbqt_to_pdb(molecule_file)
my_mol = Chem.MolFromPDBBlock(str(pdb_block),
sanitize=False,
removeHs=False)
from_pdb = True
elif ".pdb" in molecule_file:
my_mol = Chem.MolFromPDBFile(str(molecule_file),
sanitize=False,
removeHs=False)
from_pdb = True # noqa: F841
else:
raise ValueError("Unrecognized file type for %s" % str(molecule_file))
if my_mol is None:
raise ValueError("Unable to read non None Molecule Object")
if add_hydrogens or calc_charges:
my_mol = apply_pdbfixer(my_mol,
hydrogenate=add_hydrogens,
is_protein=is_protein)
if sanitize:
try:
Chem.SanitizeMol(my_mol)
# TODO: Ideally we should catch AtomValenceException but Travis seems to choke on it for some reason.
except:
logger.warning("Mol %s failed sanitization" %
Chem.MolToSmiles(my_mol))
if calc_charges:
# This updates in place
compute_charges(my_mol)
xyz = get_xyz_from_mol(my_mol)
return xyz, my_mol
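# Usage sketch (assumption: "protein.pdb" is a local file; not part of the original module):
#   xyz, mol = load_molecule("protein.pdb", add_hydrogens=False, calc_charges=False,
#                            is_protein=True)
#   xyz.shape  # -> (mol.GetNumAtoms(), 3)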
def write_molecule(mol, outfile, is_protein=False):
"""Write molecule to a file
This function writes a representation of the provided molecule to
the specified `outfile`. Doesn't return anything.
Parameters
----------
mol: rdkit Mol
Molecule to write
outfile: str
Filename to write mol to
is_protein: bool, optional
Is this molecule a protein?
Note
----
This function requires RDKit to be installed.
Raises
------
ValueError: if `outfile` isn't of a supported format.
"""
from rdkit import Chem
if ".pdbqt" in outfile:
writer = Chem.PDBWriter(outfile)
writer.write(mol)
writer.close()
if is_protein:
convert_protein_to_pdbqt(mol, outfile)
else:
convert_mol_to_pdbqt(mol, outfile)
elif ".pdb" in outfile:
writer = Chem.PDBWriter(outfile)
writer.write(mol)
writer.close()
elif ".sdf" in outfile:
writer = Chem.SDWriter(outfile)
writer.write(mol)
writer.close()
else:
raise ValueError("Unsupported Format")
def merge_molecules_xyz(xyzs):
"""Merges coordinates of multiple molecules.
Parameters
----------
xyzs: List
        List of numpy arrays each of shape `(N_i, 3)` where `N_i` is the number of atoms in the i-th molecule.
"""
return np.array(np.vstack(np.vstack(xyzs)))
def merge_molecules(molecules):
"""Helper method to merge two molecules.
Parameters
----------
molecules: list
List of rdkit molecules
Returns
-------
merged: rdkit molecule
"""
from rdkit.Chem import rdmolops
if len(molecules) == 0:
return None
elif len(molecules) == 1:
return molecules[0]
else:
combined = molecules[0]
for nextmol in molecules[1:]:
combined = rdmolops.CombineMols(combined, nextmol)
return combined
def compute_all_ecfp(mol: RDKitMol,
indices: Optional[Set[int]] = None,
degree: int = 2) -> Dict[int, str]:
"""Obtain molecular fragment for all atoms emanating outward to given degree.
For each fragment, compute SMILES string (for now) and hash to
an int. Return a dictionary mapping atom index to hashed
SMILES.
Parameters
----------
mol: rdkit Molecule
Molecule to compute ecfp fragments on
indices: Optional[Set[int]]
List of atom indices for molecule. Default is all indices. If
specified will only compute fragments for specified atoms.
degree: int
Graph degree to use when computing ECFP fingerprints
Returns
----------
dict
Dictionary mapping atom index to hashed smiles.
"""
ecfp_dict = {}
from rdkit import Chem
for i in range(mol.GetNumAtoms()):
if indices is not None and i not in indices:
continue
env = Chem.FindAtomEnvironmentOfRadiusN(mol, degree, i, useHs=True)
submol = Chem.PathToSubmol(mol, env)
smile = Chem.MolToSmiles(submol)
ecfp_dict[i] = "%s,%s" % (mol.GetAtoms()[i].GetAtomicNum(), smile)
return ecfp_dict
def compute_ecfp_features(mol, ecfp_degree=2, ecfp_power=11):
"""Computes ECFP features for provided rdkit molecule.
Parameters
----------
mol: rdkit molecule
Molecule to featurize.
ecfp_degree: int
ECFP radius
ecfp_power: int
Number of bits to store ECFP features (2^ecfp_power will be length of
ECFP array)
Returns
-------
ecfp_array: np.ndarray
Returns an array of size 2^ecfp_power where array at index i has a 1 if
that ECFP fragment is found in the molecule and array at index j has a 0
if ECFP fragment not in molecule.
"""
from rdkit.Chem import AllChem
bv = AllChem.GetMorganFingerprintAsBitVect(mol,
ecfp_degree,
nBits=2**ecfp_power)
return np.array(bv)
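# Usage sketch (hypothetical aspirin example, not part of the original module):
#   from rdkit import Chem
#   mol = Chem.MolFromSmiles("CC(=O)Oc1ccccc1C(=O)O")
#   fp = compute_ecfp_features(mol, ecfp_degree=2, ecfp_power=11)
#   fp.shape  # -> (2048,) since 2**11 bits are used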
def compute_contact_centroid(molecular_complex: Any,
cutoff: float = 4.5) -> np.ndarray:
"""Computes the (x,y,z) centroid of the contact regions of this molecular complex.
For a molecular complex, it's necessary for various featurizations
that compute voxel grids to find a reasonable center for the
voxelization. This function computes the centroid of all the contact
atoms, defined as an atom that's within `cutoff` Angstroms of an
atom from a different molecule.
Parameters
----------
molecular_complex: Object
A representation of a molecular complex, produced by
`rdkit_util.load_complex`.
cutoff: float, optional
The distance in Angstroms considered for computing contacts.
"""
fragments = reduce_molecular_complex_to_contacts(molecular_complex, cutoff)
coords = [frag[0] for frag in fragments]
contact_coords = merge_molecules_xyz(coords)
centroid = np.mean(contact_coords, axis=0)
return (centroid)
def reduce_molecular_complex_to_contacts(fragments: List,
cutoff: float = 4.5) -> List:
"""Reduce a molecular complex to only those atoms near a contact.
Molecular complexes can get very large. This can make it unwieldy to
compute functions on them. To improve memory usage, it can be very
useful to trim out atoms that aren't close to contact regions. This
function takes in a molecular complex and returns a new molecular
complex representation that contains only contact atoms. The contact
atoms are computed by calling `get_contact_atom_indices` under the
hood.
Parameters
----------
fragments: List
As returned by `rdkit_util.load_complex`, a list of tuples of
`(coords, mol)` where `coords` is a `(N_atoms, 3)` array and `mol`
is the rdkit molecule object.
cutoff: float
The cutoff distance in angstroms.
Returns
-------
A list of length `len(molecular_complex)`. Each entry in this list
    is a tuple of `(coords, MolecularFragment)`. The coords are stripped down
    to `(N_contact_atoms, 3)` where `N_contact_atoms` is the number of
    contact atoms for this complex. `MolecularFragment` is used since it's
    tricky to make an RDKit sub-molecule.
"""
atoms_to_keep = get_contact_atom_indices(fragments, cutoff)
reduced_complex = []
for frag, keep in zip(fragments, atoms_to_keep):
contact_frag = get_mol_subset(frag[0], frag[1], keep)
reduced_complex.append(contact_frag)
return reduced_complex
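# Usage sketch (assumption: "protein.pdb" and "ligand.sdf" are local files; not part of
# the original module):
#   frags = load_complex(["protein.pdb", "ligand.sdf"], add_hydrogens=False, calc_charges=False)
#   contact_frags = reduce_molecular_complex_to_contacts(frags, cutoff=4.5)
#   # each entry is a (coords, MolecularFragment) tuple restricted to contact atoms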
def compute_ring_center(mol, ring_indices):
"""Computes 3D coordinates of a center of a given ring.
Parameters:
-----------
mol: rdkit.rdchem.Mol
Molecule containing a ring
ring_indices: array-like
Indices of atoms forming a ring
Returns:
--------
ring_centroid: np.ndarray
Position of a ring center
"""
conformer = mol.GetConformer()
ring_xyz = np.zeros((len(ring_indices), 3))
for i, atom_idx in enumerate(ring_indices):
atom_position = conformer.GetAtomPosition(atom_idx)
ring_xyz[i] = np.array(atom_position)
ring_centroid = compute_centroid(ring_xyz)
return ring_centroid
def get_contact_atom_indices(fragments: List, cutoff: float = 4.5) -> List:
"""Compute the atoms close to contact region.
Molecular complexes can get very large. This can make it unwieldy to
compute functions on them. To improve memory usage, it can be very
useful to trim out atoms that aren't close to contact regions. This
function computes pairwise distances between all pairs of molecules
in the molecular complex. If an atom is within cutoff distance of
any atom on another molecule in the complex, it is regarded as a
contact atom. Otherwise it is trimmed.
Parameters
----------
fragments: List
As returned by `rdkit_util.load_complex`, a list of tuples of
`(coords, mol)` where `coords` is a `(N_atoms, 3)` array and `mol`
is the rdkit molecule object.
cutoff: float
The cutoff distance in angstroms.
Returns
-------
A list of length `len(molecular_complex)`. Each entry in this list
is a list of atom indices from that molecule which should be kept, in
sorted order.
"""
# indices of atoms to keep
keep_inds: List[Set] = [set([]) for _ in fragments]
for (ind1, ind2) in itertools.combinations(range(len(fragments)), 2):
frag1, frag2 = fragments[ind1], fragments[ind2]
pairwise_distances = compute_pairwise_distances(frag1[0], frag2[0])
# contacts is of form (x_coords, y_coords), a tuple of 2 lists
contacts = np.nonzero((pairwise_distances < cutoff))
# contacts[0] is the x_coords, that is the frag1 atoms that have
# nonzero contact.
frag1_atoms = set([int(c) for c in contacts[0].tolist()])
# contacts[1] is the y_coords, the frag2 atoms with nonzero contacts
frag2_atoms = set([int(c) for c in contacts[1].tolist()])
keep_inds[ind1] = keep_inds[ind1].union(frag1_atoms)
keep_inds[ind2] = keep_inds[ind2].union(frag2_atoms)
keep_ind_lists = [sorted(list(keep)) for keep in keep_inds]
return keep_ind_lists
def get_mol_subset(coords, mol, atom_indices_to_keep):
"""Strip a subset of the atoms in this molecule
Parameters
----------
coords: Numpy ndarray
Must be of shape (N, 3) and correspond to coordinates of mol.
mol: Rdkit mol or `MolecularFragment`
The molecule to strip
atom_indices_to_keep: list
List of the indices of the atoms to keep. Each index is a unique
number between `[0, N)`.
Returns
-------
A tuple of (coords, mol_frag) where coords is a Numpy array of
coordinates with hydrogen coordinates. mol_frag is a
`MolecularFragment`.
"""
from rdkit import Chem
indexes_to_keep = []
atoms_to_keep = []
#####################################################
# Compute partial charges on molecule if rdkit
if isinstance(mol, Chem.Mol):
compute_charges(mol)
#####################################################
atoms = list(mol.GetAtoms())
for index in atom_indices_to_keep:
indexes_to_keep.append(index)
atoms_to_keep.append(atoms[index])
coords = coords[indexes_to_keep]
mol_frag = MolecularFragment(atoms_to_keep, coords)
return coords, mol_frag
def compute_ring_normal(mol, ring_indices):
"""Computes normal to a plane determined by a given ring.
Parameters:
-----------
mol: rdkit.rdchem.Mol
Molecule containing a ring
ring_indices: array-like
Indices of atoms forming a ring
Returns:
--------
normal: np.ndarray
Normal vector
"""
conformer = mol.GetConformer()
points = np.zeros((3, 3))
for i, atom_idx in enumerate(ring_indices[:3]):
atom_position = conformer.GetAtomPosition(atom_idx)
points[i] = np.array(atom_position)
v1 = points[1] - points[0]
v2 = points[2] - points[0]
normal = np.cross(v1, v2)
return normal
def compute_all_pairs_shortest_path(
mol) -> Dict[Tuple[int, int], Tuple[int, int]]:
"""Computes the All pair shortest between every pair of nodes
in terms of Rdkit Atom indexes.
Parameters:
-----------
mol: rdkit.rdchem.Mol
Molecule containing a ring
Returns:
--------
    paths_dict: Dict mapping each atom-atom pair `(i, j)` with `i < j` (RDKit atom indices)
        to the shortest path between the two atoms, given as a tuple of atom indices.
"""
try:
from rdkit import Chem
except:
raise ImportError("This class requires RDkit installed")
n_atoms = mol.GetNumAtoms()
paths_dict = {(i, j): Chem.rdmolops.GetShortestPath(mol, i, j)
for i in range(n_atoms) for j in range(n_atoms) if i < j}
return paths_dict
def compute_pairwise_ring_info(mol):
""" Computes all atom-atom pair belong to same ring with
its ring size and its aromaticity.
Parameters:
-----------
mol: rdkit.rdchem.Mol
Molecule containing a ring
Returns:
--------
    rings_dict: Dict keyed by each atom-atom pair that shares a ring, with a list of
        (ring size, aromaticity) tuples for the shared rings as the value.
"""
try:
from rdkit import Chem
except:
raise ImportError("This class requires RDkit installed")
rings_dict = {}
def ordered_pair(a, b):
return (a, b) if a < b else (b, a)
ssr = [list(x) for x in Chem.GetSymmSSSR(mol)]
for ring in ssr:
ring_sz = len(ring)
is_aromatic = True
for atom_idx in ring:
if not mol.GetAtoms()[atom_idx].GetIsAromatic():
is_aromatic = False
break
for ring_idx, atom_idx in enumerate(ring):
for other_idx in ring[ring_idx:]:
atom_pair = ordered_pair(atom_idx, other_idx)
if atom_pair not in rings_dict:
rings_dict[atom_pair] = [(ring_sz, is_aromatic)]
else:
if (ring_sz, is_aromatic) not in rings_dict[atom_pair]:
rings_dict[atom_pair].append((ring_sz, is_aromatic))
return rings_dict
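# Usage sketch (hypothetical benzene example, not part of the original module):
#   from rdkit import Chem
#   mol = Chem.MolFromSmiles("c1ccccc1")
#   compute_all_pairs_shortest_path(mol)[(0, 3)]  # one shortest path as a tuple of atom indices
#   compute_pairwise_ring_info(mol)[(0, 1)]       # -> [(6, True)]: shared aromatic six-membered ring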
class DescriptorsNormalizationParameters:
"""
A class for holding a dictionary of 200 RDKit descriptors and the corresponding distribution functions (based on `scipy.stats` module nomenclature)
and associated parameters required for creating normalized descriptor functions.
(The distribution functions and parameters data was collected from the source code of 'descriptastorus' library.
Link to the source of the parameters:
https://github.com/bp-kelley/descriptastorus/blob/baed6a56d63dd27c1bc5f6924a7c9e0d48594012/descriptastorus/descriptors/dists.py)
"""
try:
from math import inf
except ImportError:
inf = float('Inf')
desc_norm_params: Dict[str, Sequence[Union[str, Sequence[float], float]]]
desc_norm_params = {
'VSA_EState1': ('betaprime', (0.12979362790686721, 2.0084510281921832,
-1.7874327445880742e-34,
0.4022005514286884), 0.0, 0.0, 0.0, 0.0),
'Chi4n': ('mielke', (3.4334547302721328, 4.644325894559659,
-0.1540765028544061, 3.7724626101152783), 0.0,
60.15570624032009, 3.5583748823804937, 1.9340011133496022),
'EState_VSA3':
('pearson3', (1.2130333692507862, 20.490424246483514,
14.913598628728794), 0.0, 707.4193712196022,
20.490458921014422, 15.02692979610578),
'PEOE_VSA3':
('recipinvgauss', (2039990.2035692804, -1.514157558116536e-12,
5.862765691466683), 0.0, 278.49556054006456,
5.883620301814973, 7.114346140218968),
'PEOE_VSA10':
('ncx2', (1.2634981555275662, 2.1503143438355354,
-2.21123444897305e-31, 2.606409115395213), 0.0,
494.0556831191233, 9.763622525936078, 12.915305068064065),
'Chi2v': ('fisk', (5.416294252795936, -0.46711733318914683,
7.911730855234288), 0.0, 152.03415385974233,
7.97051218652611, 4.006279197425447),
'SMR_VSA8': ('betaprime', (0.12979362790686721, 2.0084510281921832,
-1.7874327445880742e-34, 0.4022005514286884),
0.0, 0.0, 0.0, 0.0),
'ExactMolWt':
('mielke', (6.030507225812184, 6.081069808326847,
-3.1905715544779594, 393.79789827541134), 7.01545597009,
7902.703267132, 413.2180535712111, 196.11660291127603),
'fr_imidazole': ('wald', (-0.017711130586117518, 0.05908774988990952),
0, 11, 0.10202714189993299, 0.35905461339251266),
'fr_aldehyde':
('halflogistic', (-2.2802084549638172e-10, 0.003260151958473212), 0,
2, 0.0032602282159751184, 0.057529151986179125),
'fr_Al_COO': ('beta', (0.6951891478660377, 401.3878921392054,
-1.8490162599417683e-28, 14.902552079575546), 0,
9, 0.05697398817917254, 0.2720833952111172),
'NumAliphaticHeterocycles':
('alpha', (5.571361455216543e-09, -0.07477286399593108,
0.10965986221560856), 0, 22, 0.7546628263978479,
0.9186038062617755),
'fr_Ar_NH': ('wald', (-0.01991313112984635, 0.06651081784403591), 0, 13,
0.11327792945506185, 0.37297284960554855),
'NumHAcceptors': ('logistic', (5.039523695264815, 1.2773064178194637),
0, 199, 5.285449981498705, 3.9293707904214674),
'fr_lactam':
('halflogistic', (-1.9994772259051099e-10, 0.0019599976691355514),
0, 2, 0.0019601372096046724, 0.04490322117641692),
'fr_NH2': ('wald', (-0.025886531735103906, 0.08666379088388962), 0, 17,
0.14403008210574741, 0.5080894197040707),
'fr_Ndealkylation1':
('wald', (-0.014170257884871005, 0.047146436951077536), 0, 8,
0.08431590211314792, 0.3528598050157884),
'SlogP_VSA7':
('recipinvgauss', (124626.03395996531, -5.039104162427062e-11,
0.4157942168226829), 0.0, 77.05538951387919,
1.1814079834441082, 2.9446812598365155),
'fr_Ar_N': ('halfgennorm', (0.32377704425495, -8.49433587779278e-22,
0.010554705354503424), 0, 66,
1.3546848279379557, 1.7700301662397686),
'NumSaturatedHeterocycles':
('halfgennorm', (0.39340463716320073, -3.084884622335595e-23,
0.007755844850523552), 0, 22, 0.545018151270589,
0.8423690871097294),
'NumAliphaticRings':
('gennorm', (0.16214999420806342, 1.0000000000000002,
1.2644170558638866e-06), 0, 24, 0.9787585130959167,
1.0878564569993276),
'SMR_VSA4':
('betaprime', (0.8177035716382387, 2.026000293708502,
-2.7076233813444817e-29, 0.8955328046106019), 0.0,
169.22417394169068, 3.521664764716257, 6.367910012270387),
'Chi0v': ('mielke', (5.8775750667785065, 5.969290153282742,
-0.051645183224216795, 16.255522569236142), 1.0,
310.3665679014367, 17.129538294841, 7.923870846730872),
'qed': ('johnsonsb', (-0.537683817552717, 0.9438392221113977,
-0.05971660981816428, 1.0270014571751256),
0.001610010104943233, 0.9484019712261345, 0.5707778636205341,
0.21314724659491038),
'fr_sulfonamd':
('betaprime', (0.6061868535729906, 2.47005272151398,
-1.7109734983680305e-30, 0.024136054923030247), 0, 2,
0.09929695078655505, 0.3106727570704293),
'fr_halogen':
('exponweib', (1.5936220372251417, 0.4773265592552294,
-6.305557196427465e-30, 0.11136163589207024), 0, 22,
0.6656565959617173, 1.1538784657180654),
'Chi4v': ('mielke', (3.641407704651825, 4.9160753250874905,
-0.19612721404766648, 4.272311768092421), 0.0,
80.31016831275534, 3.997197941292901, 2.1822145921791463),
'MolLogP':
('nct', (5.423133497140618, -0.2505422147848311, 3.787125066469563,
1.447521060093181), -27.121900000000043,
26.476990000000036, 3.357664737331615, 1.8518910248841818),
'Chi2n': ('burr', (5.323167832131418, 0.9614449953883716,
-0.5182229173193543, 7.403200388112394), 0.0,
140.4224584835206, 7.320378282785918, 3.6830713407241156),
'fr_Al_OH': ('pareto', (8.075644989366163, -0.6961711017351564,
0.6961711017065046), 0, 37, 0.18985328973028112,
0.5806908433990465),
'LabuteASA': ('mielke', (5.9033344150609555, 6.093254767597408,
-1.1647561264561102, 165.59359494140412),
19.375022664857827, 3138.810989711936, 172.78618537968595,
78.72241596326842),
'SMR_VSA5': ('johnsonsu', (-6.770383826447828, 1.5639816052567266,
-8.885737844117894, 0.8747218279195421), 0.0,
1059.7000355910607, 31.92088861776502, 31.701702832660054),
'fr_guanido':
('halflogistic', (-2.8948632876909134e-11, 0.012390710083318518), 0,
8, 0.012390867360715251, 0.1352692056473331),
'SlogP_VSA6':
('dweibull', (1.2651196828193871, 44.8855208417171,
19.999416617652344), 0.0, 425.50532391912986,
46.86150277566015, 23.994033852363053),
'NumRadicalElectrons':
('halfnorm', (-3.3556536839857015e-09, 0.047012245813331466), 0, 10,
0.0002900203014210995, 0.047011387972006546),
'HeavyAtomCount': ('mielke', (5.542942710744559, 6.0129203920305345,
-0.10475651052005365, 28.19145327714555),
1, 545, 29.187713139919794, 13.728021647131865),
'fr_Ar_COO': ('pearson3', (2.284860216852766, 0.009533409939207087,
0.010891254550521512), 0, 4,
0.019321352494674628, 0.14278703946614923),
'fr_ester': ('wald', (-0.021345006038942453, 0.07132669030690274), 0, 9,
0.12089846289240247, 0.3816605577678055),
'NumSaturatedCarbocycles':
('invweibull', (0.6897602654592729, -1.687045423250026e-28,
0.04100428545396396), 0, 9, 0.16625163761463302,
0.5048961994063688),
'MolMR': ('burr', (6.172170768729716, 0.8936060537538131,
-0.6260689145704982, 109.36170666360255), 0.0,
1943.4740000000081, 111.75295175062253, 49.21222833407792),
'fr_SH':
('halflogistic', (-5.627308315330754e-10, 0.002940168145955494), 0,
2, 0.0029402058144070084, 0.057719918625515),
'fr_ketone_Topliss':
('invweibull', (0.7845359779027771, -5.104827196981351e-29,
0.013651958761797665), 0, 5, 0.052113647955356876,
0.24762602874204098),
'MolWt': ('burr', (6.292103975443265, 0.9448356544126613,
-2.576132139748636, 398.4147809834958), 6.941,
7906.685999999983, 413.6754726605188, 196.24518387547647),
'Kappa1': ('fisk', (5.090249068250488, 1.5243046476343975,
17.457634086393984), 1.5974025974025972,
452.4780879471051, 20.511369181587643, 10.901150147628785),
'fr_term_acetylene':
('tukeylambda', (1.56196184906287, 0.005212203460720155,
0.008141262955574484), 0, 2, 0.004260298220875461,
0.06634877000162641),
'Chi0n': ('mielke', (4.913702328256858, 5.56535100301131,
0.892757720674525, 15.151908382349568), 1.0,
310.3665679014367, 16.565702068330904, 7.789694974456289),
'SMR_VSA9':
('betaprime', (0.6371524412800221, 0.1653001011937434,
-1.1096304834919027e-26, 0.008200829448013355), 0.0,
68.99414199940686, 6.711933474546377, 7.898303955057197),
'fr_hdrzine': ('genexpon', (2.2033965201556205, 4.581950155773536e-11,
1.9344067808226306, -6.071473724440753e-12,
0.021418241524934097), 0, 3,
0.009720680447631334, 0.10023082470639716),
'PEOE_VSA11':
('betaprime', (0.5343466300982029, 2.0391606100747115,
-5.644268412961732e-28, 1.3724974434068684), 0.0,
130.7949739456163, 3.9163487225567097, 6.085123241223355),
'PEOE_VSA2': ('genlogistic', (1025.7536859090253, -32.00326592275044,
5.426751909719114), 0.0,
436.5016359224807, 8.968372740727396, 10.688899230076526),
'fr_C_O': ('dweibull', (0.799241338494272, 1.0000000000000002,
1.1376361682177123), 0, 79, 1.3462142349964497,
1.7126177063621493),
'EState_VSA2': ('dgamma', (1.6208597357735999, 14.722163518402503,
5.927601342234055), 0.0, 348.2229478088118,
16.17693847024966, 14.300245340171864),
'fr_aryl_methyl': ('pareto', (2.2590951540541755, -0.03484369022048178,
0.03484369021605811), 0, 10,
0.42467972758093064, 0.7334626175897146),
'EState_VSA9':
('gompertz', (157762717330.9355, -8.297980815964564e-12,
1740272081706.4692), 0.0, 459.53012783207976,
9.405433826279932, 11.29251583636432),
'SlogP_VSA1':
('betaprime', (24.555424043054373, 8.676784435768978,
-8.426380550481028, 5.728625412480383), 0.0,
409.4953592555066, 9.984737467652149, 9.779830593415184),
'PEOE_VSA9': ('betaprime', (3.445175492809242, 31.144739014821592,
-4.51163967848681, 170.01655826137412), 0.0,
811.1436563731141, 14.95011048070105, 13.041254445054955),
'SMR_VSA2': ('wald', (-0.048767578220460746, 0.16186950174913545), 0.0,
43.27426884633218, 0.29905148573740525, 1.360442550951591),
'fr_quatN':
('halflogistic', (-1.8398640615463982e-10, 0.0022802512521409345),
0, 2, 0.002280159611172782, 0.05094097451844112),
'fr_dihydropyridine':
('tukeylambda', (1.5508163529247363, 0.002932569691259215,
0.004547877033296321), 0, 1, 0.002350164511515806,
0.04842149562213685),
'MinPartialCharge':
('johnsonsu', (-2.7149539980911284, 1.0038367476615098,
-0.5139064061681397, 0.008495052829614172),
-0.7539104058810929, 1.0, -0.4200780304196831, 0.07189165509088434
),
'fr_ketone': ('wald', (-0.01054998963908324, 0.035036217635414556), 0,
6, 0.06421449501465103, 0.27790680727297107),
'MaxAbsEStateIndex':
('t', (1.3108542430395254, 12.75352190648762, 0.7106673064513349),
0.0, 18.093289294329608, 12.009719840202807, 2.3499823939671187),
'MaxAbsPartialCharge':
('johnsonsu', (1.7917111012287341, 0.9884411996798824,
0.509455736564852, 0.019080208112214614),
0.044672166080629815, 3.0, 0.4246995864209025, 0.07158969964653725
),
'Chi1v': ('burr', (7.687954133980298, 1.4497345379886477,
-5.291458017141911, 14.007066785153341), 0.0,
193.52184470749225, 10.233294455235377, 4.9372866377359905),
'fr_benzodiazepine':
('tukeylambda', (1.5687789926087756, 0.0024158367934644202,
0.0037899140207594472), 0, 1,
0.0019601372096046724, 0.04423002455034584),
'EState_VSA5':
('exponweib', (0.2671814730479748, 1.165636178704992,
-1.382245994068274e-31, 22.76769085633479), 0.0,
352.5201628431787, 13.722297283129771, 14.369271762415691),
'VSA_EState7':
('loggamma', (0.00016582407490511742, 4.690543031567471e-07,
5.38971779436177e-08), -0.23935820315780676, 0.0,
-2.393749594049651e-06, 0.0007569398071209723),
'fr_C_O_noCOO':
('gennorm', (0.3061307987512025, 1.0, 0.0046024831025323395), 0, 71,
1.2699788985228966, 1.6247465287374963),
'Chi3v': ('mielke', (4.392997381604184, 5.27028468604366,
-0.32791696488665867, 5.890409366481655), 0.0,
106.26777041156456, 5.646449010813653, 2.881905780196624),
'PEOE_VSA5':
('gibrat', (-0.04993824025443838, 0.13664006325267003), 0.0,
73.11939707590946, 2.0398397458425537, 4.669463461453408),
'fr_epoxide':
('hypsecant', (9.775361773901804e-06, 0.004229322664644675), 0, 3,
0.0015001050073505146, 0.041205202260121296),
'fr_prisulfonamd':
('betaprime', (0.12979362790686721, 2.0084510281921832,
-1.7874327445880742e-34, 0.4022005514286884), 0, 0,
0.0, 0.0),
'fr_phenol': ('invweibull', (0.8135991555193818, -4.806156168974463e-28,
0.016229148422267782), 0, 8,
0.05026351844629124, 0.2534132017880052),
'fr_sulfide':
('gengamma', (1.5106281082392625, 0.5574261640341269,
-4.4413855396989186e-30, 0.0038351572717300027), 0, 6,
0.08101567109697679, 0.2882927997299278),
'fr_alkyl_halide': ('wald', (-0.04358050576953841, 0.1452321478197392),
0, 17, 0.25484783934875443, 0.9066667900753467),
'NumAromaticHeterocycles':
('halfgennorm', (0.19057145504745865, -1.897689882032624e-17,
2.1261316374019246e-05), 0, 33, 0.9458862120348425,
1.0262824322901387),
'fr_Ar_OH': ('wald', (-0.0100135027386673, 0.03324263418577556), 0, 8,
0.06124428710009701, 0.2766130074506664),
'fr_thiazole': ('wald', (-0.007257531762340398, 0.024068128495977677),
0, 6, 0.04502315162061344, 0.217339283746722),
'fr_imide': ('pearson3', (2.2221583002327714, 0.018063449381832734,
0.02006992198733707), 0, 6,
0.02752192653485744, 0.1778336591269578),
'NumSaturatedRings':
('halfgennorm', (0.23246838885007082, -2.4267394888596534e-25,
0.00026458005932038795), 0, 22, 0.711269788885222,
0.9971159900281824),
'fr_hdrzone': ('wald', (-0.003105521778228028, 0.010274411296672493), 0,
2, 0.019951396597761843, 0.14280545219170362),
'fr_lactone': ('tukeylambda', (1.527151454037369, 0.013585748455595358,
0.020747495508148833), 0, 6,
0.011000770053903774, 0.11242720454031359),
'FractionCSP3':
('gausshyper', (0.4771522405083861, 9.066071275571563,
-7.620494949081857, 3.1818106084013347,
-4.691817003086665e-28, 1.3215636376781599), 0.0,
1.0, 0.3432847243844843, 0.19586440800503926),
'HallKierAlpha': ('logistic', (-2.756871651517898, 0.6249482292466357),
-56.5600000000003, 3.0300000000000002,
-2.8236952794487813, 1.362245033459619),
'fr_para_hydroxylation':
('gibrat', (-0.00874073035192809, 0.023988673379858362), 0, 7,
0.2581980738651706, 0.5588920168597382),
'HeavyAtomMolWt':
('burr', (6.148243331396738, 0.9539656352826273,
-2.0597306351675164, 373.86535994513247), 6.941,
7542.798000000015, 389.1014159096795, 184.23918925686846),
'SlogP_VSA12': ('lomax', (1.3526140858007119, -3.8436281815982077e-13,
0.5306720480047502), 0.0, 199.36606648479187,
6.222469001946574, 9.773336061271038),
'fr_allylic_oxid':
('wald', (-0.012125985576272109, 0.04021754130884261), 0, 12,
0.07526526856879981, 0.43369194377652315),
'fr_alkyl_carbamate':
('wald', (-0.0028043421468934076, 0.009276348457582401), 0, 3,
0.018071264988549197, 0.13836470784532925),
'fr_HOCCN': ('wald', (-0.0019826051357043533, 0.006555328648504729), 0,
2, 0.01286090026301841, 0.11373440248499654),
'Chi1n': ('mielke', (4.960260518842145, 5.594233665274178,
-0.0629921400622475, 9.415745103559818), 0.0,
179.81742433151658, 9.668289027429788, 4.636496265706159),
'PEOE_VSA4': ('pareto', (1.764490302170505, -1.0130190528730951,
1.0130190526084408), 0.0, 74.63705581047195,
2.871044297114316, 5.252580084878072),
'NOCount': ('dgamma', (0.9114891050741081, 5.999999999999998,
2.360844051557576), 0, 237, 6.576580360625244,
5.165410541429347),
'EState_VSA4':
('foldnorm', (0.005911506485819916, -9.396020384268004e-10,
29.86848626045161), 0.0, 309.6802047001077,
23.73312663187225, 18.134786070135654),
'VSA_EState6': ('betaprime', (0.12979362790686721, 2.0084510281921832,
-1.7874327445880742e-34,
0.4022005514286884), 0.0, 0.0, 0.0, 0.0),
'Chi3n': ('mielke', (3.8419002158163336, 5.021067627998188,
-0.12843494993055915, 5.26846058565802), 0.0,
89.41912553532535, 5.1024891606395135, 2.6233324773423217),
'fr_barbitur':
('genhalflogistic', (0.0020825723750872178, -0.0014169372235491489,
0.005286684885304822), 0, 2,
0.0014100987069094837, 0.03805408163051986),
'fr_Al_OH_noTert':
('gompertz', (35118115.876645096, -1.5683266541399722e-10,
5712177.117752606), 0, 37, 0.16987189103237227,
0.5550559671673745),
'fr_COO2': ('wald', (-0.012755896711276589, 0.04241946965978133), 0, 9,
0.07634534417409218, 0.3067871977652909),
'fr_azo':
('genhalflogistic', (0.00040002675029834824, -2.796838513658869e-05,
0.0031033294350019196), 0, 2,
0.0006700469032832298, 0.027011148452530685),
'FpDensityMorgan1':
('t', (7.1357369753127795, 1.0971430458813116,
0.17213544486837606), 0.07971014492753623, 2.111111111111111,
1.0947677251482226, 0.20201147073315795),
'fr_aniline':
('halfgennorm', (0.1049174054668825, -0.0004210957634456598,
2.0561338188621596e-11), 0, 17, 0.6701569109837688,
0.8999932508248457),
'SMR_VSA3': ('dgamma', (1.7525536696583415, 12.349493875194277,
4.09734805299208), 0.0, 394.2610115631847,
12.654567199384484, 11.238213406272283),
'fr_tetrazole': ('wald', (-0.0018040357049599466, 0.005964316702018277),
0, 2, 0.011720820457432021, 0.1080900079828228),
'VSA_EState10':
('gennorm', (0.325862160652744, -1.5857598255070421e-27,
0.009249443355545232), -22.789260764991006,
74.77274927572002, 1.3375950132085297, 3.8197200206817126),
'fr_phenol_noOrthoHbond':
('invweibull', (0.8136034301945807, -5.137954972709019e-29,
0.01610798208070398), 0, 8, 0.049623473643155024,
0.2514798405151786),
'PEOE_VSA8': ('dgamma', (1.49159666908677, 21.468624511796396,
8.385920333308711), 0.0, 311.7903524102208,
25.24827589064641, 16.37842558375825),
'EState_VSA8': ('genexpon', (0.8492139229831794, 0.898456899940423,
1.8794539462657145, -7.998786049300654e-10,
28.40401980183114), 0.0, 364.8615531780818,
21.03371597445418, 18.393077792682497),
'BalabanJ':
('nct', (4.182658638749994, 2.0965263482828114, 1.1271767054584343,
0.2616489125636474), 0.0, 7.289359191119452,
1.8039183288289355, 0.47846986656304485),
'fr_C_S': ('tukeylambda', (1.373229786243365, 0.006142154117625993,
0.008434588986021351), 0, 2,
0.017351214585020952, 0.13360466287284203),
'fr_ArN': ('tukeylambda', (1.4424188577152934, 0.03329139576768536,
0.04802013705497249), 0, 16,
0.05239366755672897, 0.33593599906289534),
'NumAromaticRings': ('dgamma', (2.5851733541458035, 2.495448308226644,
0.3726216029742541), 0, 34,
2.480923664656526, 1.2619073213233434),
'fr_Imine': ('wald', (-0.004133581140781835, 0.013682691065125839), 0,
4, 0.02636184532917304, 0.17166002580284992),
'NumAliphaticCarbocycles':
('halfgennorm', (0.39096958306392793, -4.260236992450893e-24,
0.0012095685496975065), 0, 9, 0.22409568669806887,
0.5847294862795492),
'fr_piperzine':
('invweibull', (0.7886440189451933, -2.4256731163377907e-29,
0.02564081626730841), 0, 5, 0.08231576210334723,
0.28309044391366156),
'fr_nitroso': ('genhalflogistic',
(2.1084981882574176e-07, -4.9280506479729835e-05,
0.0027110708397517554), 0, 2, 0.00014000980068604803,
0.012648778519674015),
'FpDensityMorgan2':
('johnsonsu', (0.7111118816857295, 1.7569762969412164,
1.9820508520621796, 0.3566466873696017),
0.13043478260869565, 2.75, 1.807249299497176, 0.26404333001263824),
'SlogP_VSA3':
('genhalflogistic', (0.00150187910000416, -2.3112405176891454e-10,
9.57718508471936), 0.0, 486.412075845355,
13.207558092806261, 14.07198140237455),
'fr_urea': ('wald', (-0.011156011934882477, 0.03708223885227959), 0, 4,
0.06708469592871501, 0.2580011979049133),
'VSA_EState9':
('t', (2.778511535506783, 54.00468044177123,
14.841602340950482), -61.35386460040703, 1513.3333333333328,
58.298150276013196, 32.97223516741629),
'fr_nitro_arom':
('exponweib', (1.1347393331388103, 0.7642415443821742,
-9.540668167412522e-31, 0.00766605879556546), 0, 4,
0.03152220655445881, 0.18587317845266496),
'fr_amidine': ('gompertz', (1256395099718.369, -3.6322380930315925e-14,
17278550801.355766), 0, 4,
0.0163211424799736, 0.13976768727132605),
'fr_nitro_arom_nonortho':
('wald', (-0.002927735757054801, 0.009685114750495933), 0, 3,
0.01884131889232246, 0.14002286241970868),
'SlogP_VSA11':
('invweibull', (0.4147001922244138, -1.4325296088889312e-27,
0.4348351110420754), 0.0, 68.99414199940686,
3.2168374575889174, 5.0279560653331945),
'RingCount': ('dgamma', (2.186557671264268, 3.4836909786085286,
0.49776105041563334), 0, 57,
3.4596821777524425, 1.5775840075847496),
'fr_azide': ('hypsecant', (0.0011100250221481181, 0.002817992740312428),
0, 2, 0.0009900693048513396, 0.03238970929040505),
'Ipc': ('ncf', (2.2307091334463722, 0.10899090116091759,
1.0000000000366653, -1.1981582241278056e+221,
0.5596425180505273), 0.0, 9.476131257211451e+221,
        1.0245870093128289e+217, float('inf')),
'fr_benzene': ('dgamma', (3.3192814002631734, 1.5061477493525466,
0.23910537147076447), 0, 14,
1.5344274099186943, 0.9561701389956593),
'fr_thiocyan':
('gengamma', (0.5076950581215369, 1.2254477892001665,
-1.4543304035389683e-31, 0.00801926074913513), 0, 2,
0.0002900203014210995, 0.017605044440269665),
'PEOE_VSA14': ('pareto', (1.7596722287597166, -2.239860392602096,
2.239860392451776), 0.0, 416.4980987917504,
3.013749589704376, 6.883189641607028),
'PEOE_VSA7': ('dgamma', (1.4806417259357412, 39.98160154845938,
10.168406696469939), 0.0, 508.39601533563194,
41.78662552891026, 19.81428403790816),
'VSA_EState5':
('genhalflogistic', (4.73389199695264e-09, -0.030429032584450635,
0.09155583517487542), 0.0, 68.19071853741498,
0.008147278492018148, 0.5030871156371451),
'EState_VSA7':
('powerlaw', (0.2103250937511124, -1.031145487494383e-26,
231.78083977397324), 0.0, 225.3129516643085,
25.741886434607192, 21.953593999938157),
'fr_N_O': ('exponnorm', (5045.039118637744, -2.4181865220268798e-05,
5.7188007391269775e-06), 0, 6,
0.028882021741521907, 0.23864172665999558),
'VSA_EState4': ('betaprime', (0.12979362790686721, 2.0084510281921832,
-1.7874327445880742e-34,
0.4022005514286884), 0.0, 0.0, 0.0, 0.0),
'EState_VSA6': ('chi', (0.5386485681683548, -2.640567062882436e-29,
30.844792175705116), 0.0, 298.3055333422921,
18.147758868775043, 16.4171251936251),
'PEOE_VSA6': ('exponpow', (0.9016211226446249, -5.748343789415991e-27,
53.824180129747546), 0.0, 415.6833365808712,
30.51404957069927, 23.580292452344),
'fr_diazo':
('halfnorm', (-8.299739916759084e-10, 0.0031623214263548586), 0, 1,
1.000070004900343e-05, 0.0031623725326093326),
'MaxEStateIndex':
('t', (1.3108542430395254, 12.753521906487567, 0.7106673064513349),
0.0, 18.093289294329608, 12.009719840202806, 2.3499823939671187),
'fr_oxime': ('pearson3', (2.517850854435552, 0.009251874821651483,
0.011647420462412978), 0, 3,
0.015401078075465282, 0.1267436305065821),
'SlogP_VSA10':
('betaprime', (0.4375490925573042, 1.8760340999346696,
-1.050475196211794e-28, 1.367988164331459), 0.0,
99.49189265982736, 6.345391750680324, 7.373907457789322),
'fr_nitrile': ('invweibull', (0.813385754265167, -2.609225753111669e-30,
0.010587145126146093), 0, 5,
0.04477313411938836, 0.2211084100854278),
'fr_COO': ('wald', (-0.01274707189437067, 0.04238860957269042), 0, 9,
0.07629534067384718, 0.306587684326303),
'VSA_EState8': ('cauchy', (0.4615208647039756, 1.4176964379023667),
-4.311579244789392, 610.9052623831149,
9.971522100585924, 18.488170207714543),
'SlogP_VSA2': ('lognorm', (0.5117956692971417, -7.272585382702083,
42.859086084122595), 0.0, 1181.0895715112954,
41.959432071647804, 34.15538345697471),
'fr_priamide':
('exponweib', (1.0954611988211496, 0.7544974723445165,
-8.879663989913682e-32, 0.012058631113746431), 0, 6,
0.03770263918474293, 0.21274815822293996),
'SMR_VSA1': ('cauchy', (13.199049915613251, 5.44099244703784), 0.0,
611.7284512306612, 15.881198059745422, 15.324918915564249),
'FpDensityMorgan3':
('johnsonsu', (1.054397168130864, 1.7348730697701296,
2.7630213444641853, 0.3798207348262541),
0.18115942028985507, 3.5454545454545454, 2.4726320476023513,
0.32057971250733736),
'fr_bicyclic': ('beta', (0.5840060328350145, 474.4004348752039,
-2.2007759769553093e-31, 286.9088171814874), 0,
31, 0.7892552478673507, 1.2222619740000107),
'TPSA': ('johnsonsu', (-0.8579013079024209, 1.232465212393282,
51.250072658107214, 29.00917664083923), 0.0,
3183.790000000002, 83.57268198773914, 74.84752227939705),
'NumHeteroatoms':
('genlogistic', (22.144409538152043, -1.4241236643022548,
2.462142139013319), 0, 259, 7.61976338343684,
5.484367394332425),
'fr_pyridine': ('gibrat', (-0.006105798295798184, 0.016715004503743934),
0, 5, 0.22085545988219174, 0.48191548762954484),
'MinEStateIndex': ('cauchy', (-0.4065559618468691, 0.4370714902017909),
-9.858201857916416, 1.4242592592592593,
-1.1147863044659592, 1.550385821215674),
'NumHDonors':
('johnsonsu', (-6.303785805420036, 1.2935688877340756,
-0.44063513396543863, 0.023159008664878106), 0, 104,
1.6016621163481444, 2.2472252977995697),
'NumValenceElectrons':
('t', (2.854251470602213, 144.3027466924646, 32.00913472185999), 0,
2996, 153.57779044533117, 75.10240753520117),
'Chi0': ('fisk', (5.941141785557418, -0.07374276109511947,
19.65464437888598), 0.0, 405.82501416452726,
20.82806399367901, 9.928368342000695),
'Kappa2': ('fisk', (4.668432120081604, 0.2539009602536103,
7.822349728630153), 0.2713156408808583,
225.9686552554005, 8.912276904621457, 5.461845335100644),
'NHOHCount': ('invgamma', (3.8797587021158746, -1.1307664222598288,
8.227943634490117), 0, 117,
1.7429120038402688, 2.563177769965799),
'SMR_VSA10': ('dweibull', (1.1539393051818632, 20.846729374180775,
11.835976638334323), 0.0, 491.00242136254764,
23.906689304667765, 16.4741302013246),
'PEOE_VSA12': ('alpha', (0.5429249279739217, -1.1871493007688236,
2.145045390430375), 0.0, 419.4097607839573,
5.59007921551618, 9.564823801075985),
'PEOE_VSA1':
('burr', (3.9268049096073985, 0.48995232102675695,
-1.6407085406832138, 19.294424729301284), 0.0,
561.0838284951058, 14.999241834817811, 13.776374483985686),
'fr_ether': ('fisk', (0.8051168725131534, -2.567380610408963e-22,
0.23753230483392967), 0, 60, 0.8167371716020121,
1.2247008120931449),
'EState_VSA1':
('foldcauchy', (0.0067487067434300755, -2.744934012978528e-09,
4.288215504541297), 0.0, 1261.848165001823,
12.539067008486557, 28.935889287596773),
'VSA_EState3': ('betaprime', (0.12979362790686721, 2.0084510281921832,
-1.7874327445880742e-34,
0.4022005514286884), 0.0, 0.0, 0.0, 0.0),
'SlogP_VSA9': ('betaprime', (0.12979362790686721, 2.0084510281921832,
-1.7874327445880742e-34,
0.4022005514286884), 0.0, 0.0, 0.0, 0.0),
'MaxPartialCharge':
('dgamma', (0.8966427125022032, 0.26138963441733876,
0.06744166351965852), -0.03856361615904991, 3.0,
0.27854710402529237, 0.08364524848258846),
'BertzCT': ('beta', (18.188950676898635, 6538386706999.18,
-671.968519633414, 592927086280210.0), 0.0,
30826.22636467273, 987.5790655792254, 632.1534060688446),
'fr_isocyan':
('genhalflogistic', (2.176451524678801e-10, -8.586837508181029e-13,
0.003318792604734705), 0, 1,
0.00011000770053903773, 0.010487878662763976),
'fr_phos_ester':
('genhalflogistic', (3.2594653253264924e-11, -0.0012483281868383108,
0.02952659923331233), 0, 22,
0.007020491434400408, 0.3369253069398357),
'fr_Nhpyrrole': ('wald', (-0.01991313112984635, 0.06651081784403544), 0,
13, 0.11327792945506185, 0.3729728496055485),
'fr_sulfone': ('pearson3', (2.196096781694557, 0.009513879681788937,
0.01044670027530296), 0, 3,
0.021361495304671328, 0.14807178403739918),
'MinAbsPartialCharge':
('laplace', (0.26315857867884,
0.0561294628943012), 0.00017822665335796042, 1.0,
0.2739606546909672, 0.07533771658554175),
'SMR_VSA6': ('recipinvgauss', (0.5082650634594839, -6.086954132180281,
8.524443845355009), 0.0,
817.1820035633156, 19.209113622421707, 17.41580422042883),
'fr_thiophene':
('halfgennorm', (0.5766089104240131, -6.723634040192055e-19,
0.0018219011330323374), 0, 3, 0.050743552048643406,
0.22669113213778155),
'EState_VSA11':
('genhalflogistic', (0.0010872264517366357, -4.398093849911524e-07,
0.31899977022818193), 0.0, 163.01426425844207,
0.11426916104059913, 1.8841357303463615),
'fr_NH0': ('foldnorm', (0.05865369911318355, -2.4259159741023723e-09,
2.811848848966825), 0, 59, 2.174432210254718,
1.785860796591467),
'SlogP_VSA5': ('genexpon', (0.002852587331303075, 0.23353705964951516,
0.2144337777778184, -2.3573191470339223,
4.447211770059805), 0.0, 649.935656436369,
30.740541351785637, 23.895007786195226),
'EState_VSA10': ('genexpon', (2.705707701965304, 3.771883657629245,
5.375903761907104, -4.354556984551863e-07,
54.16500289129278), 0.0, 322.991248574887,
11.740129013811245, 11.161898269805397),
'fr_NH1': ('exponnorm', (2892.636726669909, -0.0012981492832112645,
0.00037957863319105643), 0, 70,
1.1072875101257087, 1.5590446880501752),
'SlogP_VSA4':
('halfgennorm', (0.13290288528045635, -2.16370553583873e-12,
4.03300081749346e-08), 0.0, 116.32363116174835,
6.358583799897668, 7.800100182361395),
'fr_Ndealkylation2':
('wald', (-0.031069214119069204, 0.10443777001623708), 0, 12,
0.16626163831468202, 0.4316735416648144),
'SMR_VSA7': ('dweibull', (1.215378550488066, 56.28465156519039,
23.02916244099079), 0.0, 551.7863436604832,
58.73997506473642, 28.427064508331743),
'fr_nitro': ('wald', (-0.006030983799621385, 0.019984499845659677), 0,
4, 0.03786265038552699, 0.20368952922550065),
'SlogP_VSA8': ('ncf', (0.010351345715274272, 2.238220060195106,
2.3979254167173853, -2.0305306869043397e-08,
0.00093545475888877), 0.0, 133.9023781814824,
6.766619840611257, 8.671848120645365),
'VSA_EState2':
('genlogistic', (1.415613774458108, -4.0305012625995696e-05,
0.001088572076970272), -0.7726899469689867,
0.0802671757431348, -3.2739988469591856e-05, 0.004083178209614408),
'NumAromaticCarbocycles':
('dgamma', (3.319688925350106, 1.5062285243564748,
0.23908502549666388), 0, 14, 1.5350374526216835,
0.956147979224174),
'fr_furan': ('halfgennorm', (0.5939855958815875, -5.726016095250532e-19,
0.001778526530384881), 0, 2,
0.038802716190133306, 0.19747704781754222),
'PEOE_VSA13':
('halfgennorm', (0.2726011593040699, -6.070628359122943e-24,
0.0023718323492315794), 0.0, 78.17764936502758,
3.30010114873348, 4.358778167233281),
'fr_oxazole': ('wald', (-0.0018806042805029088, 0.006217750403694874),
0, 2, 0.012210854759833188, 0.11073284872542345),
'Kappa3': ('foldnorm', (0.7381901404044835, 0.07499999952842873,
4.410636124980368), 0.07500000000000012,
1760.9964197530858, 4.873556896466643, 6.732045653350472),
'fr_morpholine':
('exponweib', (1.5683748664994073, 0.5053553495208175,
-2.1400950538163005e-30, 0.0033466480252323464), 0,
3, 0.046923284629924095, 0.21977656974074022),
'fr_unbrch_alkane':
('genhalflogistic', (0.0001735874860695949, -2.406745376333286e-08,
0.2558409046054385), 0, 167,
0.2555878911523807, 1.8216755193439447),
'fr_amide':
('recipinvgauss', (220103.5665089948, -1.4778017808899836e-11,
0.7451439305459917), 0, 71, 1.1422999609972697,
1.6398141260141192),
'NumRotatableBonds':
('mielke', (2.902960148717509, 4.199193993037961,
-0.9968227721720102, 8.267126836374882), 0, 304,
7.140309821687518, 6.233943765856294),
'Chi1': ('mielke', (5.372526418714403, 6.068367649928466,
-0.06790886881111757, 13.725737593680664), 0.0,
256.9697300670324, 14.033526281218712, 6.54332057037178),
'fr_phos_acid':
('genhalflogistic', (3.2594653253264924e-11, -0.0012483281868383108,
0.02952659923331233), 0, 22,
0.007130499134939446, 0.3375013082055841),
'fr_piperdine':
('halfgennorm', (0.4238794287379706, -1.2796542653568352e-22,
0.0018770831152461857), 0, 6, 0.14572020041402897,
0.39164760531240855),
'fr_isothiocyan':
('genhalflogistic', (8.886662035991823e-09, -0.0008924947259535538,
0.002685293974604489), 0, 2,
0.0002300161011270789, 0.015810268619422887),
'MinAbsEStateIndex':
('genpareto', (0.11654274500988507, -4.295406365103898e-11,
0.13245709790535898), 0.0, 2.5115740740740735,
0.15016751055787822, 0.17292082606243664),
'fr_methoxy': ('gibrat', (-0.01550907507397075, 0.042866902160229356),
0, 8, 0.31338193673557146, 0.6523713877037112)
}
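# The mapping above stores, for each RDKit descriptor name, a tuple that appears
# to be (scipy.stats distribution name, fitted shape/loc/scale parameters,
# observed minimum, observed maximum, sample mean, sample standard deviation).
# That layout is inferred from the values themselves rather than documented here,
# so the helper below is only an illustrative sketch built on that assumption.
def _frozen_distribution_from_entry(entry):
    """Rebuild a frozen scipy.stats distribution from one entry of the dict above.

    Illustrative sketch only: assumes ``entry == (dist_name, params, ...)`` where
    ``params`` lists the shape parameters followed by ``loc`` and ``scale`` in the
    positional order scipy expects.
    """
    from scipy import stats
    dist_name, params = entry[0], entry[1]
    return getattr(stats, dist_name)(*params)
# Hypothetical usage: percentile of a descriptor value under its fitted model,
# e.g. _frozen_distribution_from_entry(<some entry>).cdf(300.0).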
<file_sep>"""
Tests for binding pocket detection.
"""
import os
import logging
import unittest
import numpy as np
import deepchem as dc
from deepchem.utils import rdkit_utils
from deepchem.utils import coordinate_box_utils as box_utils
logger = logging.getLogger(__name__)
class TestBindingPocket(unittest.TestCase):
"""Does sanity checks on binding pocket generation."""
def test_convex_init(self):
"""Tests that ConvexHullPocketFinder can be initialized."""
dc.dock.ConvexHullPocketFinder()
def test_get_face_boxes_for_protein(self):
"""Tests that binding pockets are detected."""
current_dir = os.path.dirname(os.path.realpath(__file__))
protein_file = os.path.join(current_dir, "1jld_protein.pdb")
coords = rdkit_utils.load_molecule(protein_file)[0]
boxes = box_utils.get_face_boxes(coords)
assert isinstance(boxes, list)
# Pocket is of form ((x_min, x_max), (y_min, y_max), (z_min, z_max))
for pocket in boxes:
assert isinstance(pocket, box_utils.CoordinateBox)
def test_convex_find_pockets(self):
"""Test that some pockets are filtered out."""
current_dir = os.path.dirname(os.path.realpath(__file__))
protein_file = os.path.join(current_dir, "1jld_protein.pdb")
finder = dc.dock.ConvexHullPocketFinder()
all_pockets = finder.find_all_pockets(protein_file)
pockets = finder.find_pockets(protein_file)
        # Every detected pocket should be a CoordinateBox
for pocket in pockets:
assert isinstance(pocket, box_utils.CoordinateBox)
assert len(pockets) < len(all_pockets)
def test_extract_active_site(self):
"""Test that computed pockets have strong overlap with true binding pocket."""
current_dir = os.path.dirname(os.path.realpath(__file__))
protein_file = os.path.join(current_dir, "1jld_protein.pdb")
ligand_file = os.path.join(current_dir, "1jld_ligand.sdf")
active_site_box, active_site_coords = \
dc.dock.binding_pocket.extract_active_site(protein_file, ligand_file)
assert isinstance(active_site_box, box_utils.CoordinateBox)
assert isinstance(active_site_coords, np.ndarray)
<file_sep>import deepchem as dc
import numpy as np
import pytest
import os
try:
import tensorflow as tf
import deepchem.models.layers as layers
from tensorflow.python.framework import test_util # noqa: F401
has_tensorflow = True
except ModuleNotFoundError:
has_tensorflow = False
try:
import torch
import deepchem.models.torch_models.layers as torch_layers
has_torch = True
except ModuleNotFoundError:
has_torch = False
@pytest.mark.tensorflow
def test_cosine_dist():
"""Test invoking cosine_dist."""
x = tf.ones((5, 4), dtype=tf.dtypes.float32, name=None)
y_same = tf.ones((5, 4), dtype=tf.dtypes.float32, name=None)
# x and y are the same tensor (equivalent at every element)
# the pairwise inner product of the rows in x and y will always be 1
# the output tensor will be of shape (5,5)
cos_sim_same = layers.cosine_dist(x, y_same)
diff = cos_sim_same - tf.ones((5, 5), dtype=tf.dtypes.float32, name=None)
assert tf.abs(tf.reduce_sum(diff)) < 1e-5 # True
identity_tensor = tf.eye(
512, dtype=tf.dtypes.float32) # identity matrix of shape (512,512)
x1 = identity_tensor[0:256, :]
x2 = identity_tensor[256:512, :]
# each row in x1 is orthogonal to each row in x2
# the pairwise inner product of the rows in x and y will always be 0
# the output tensor will be of shape (256,256)
cos_sim_orth = layers.cosine_dist(x1, x2)
assert tf.abs(tf.reduce_sum(cos_sim_orth)) < 1e-5 # True
assert all([cos_sim_orth.shape[dim] == 256 for dim in range(2)]) # True
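# cosine_dist above behaves as a pairwise cosine similarity of rows: identical
# rows score 1 and orthogonal rows score 0.  A minimal NumPy reference for that
# check (an illustrative cross-check, not part of deepchem's API) could be:
def _reference_cosine_similarity(x, y):
    """Pairwise cosine similarity between the rows of x and the rows of y."""
    x_norm = x / np.linalg.norm(x, axis=1, keepdims=True)
    y_norm = y / np.linalg.norm(y, axis=1, keepdims=True)
    return x_norm @ y_norm.T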
@pytest.mark.tensorflow
def test_highway():
"""Test invoking Highway."""
width = 5
batch_size = 10
input = np.random.rand(batch_size, width).astype(np.float32)
layer = layers.Highway()
result = layer(input)
assert result.shape == (batch_size, width)
assert len(layer.trainable_variables) == 4
# Creating a second layer should produce different results, since it has
# different random weights.
layer2 = layers.Highway()
result2 = layer2(input)
assert not np.allclose(result, result2)
# But evaluating the first layer again should produce the same result as before.
result3 = layer(input)
assert np.allclose(result, result3)
@pytest.mark.tensorflow
def test_combine_mean_std():
"""Test invoking CombineMeanStd."""
mean = np.random.rand(5, 3).astype(np.float32)
std = np.random.rand(5, 3).astype(np.float32)
layer = layers.CombineMeanStd(training_only=True, noise_epsilon=0.01)
result1 = layer([mean, std], training=False)
assert np.array_equal(result1, mean) # No noise in test mode
result2 = layer([mean, std], training=True)
assert not np.array_equal(result2, mean)
assert np.allclose(result2, mean, atol=0.1)
@pytest.mark.tensorflow
def test_stack():
"""Test invoking Stack."""
input1 = np.random.rand(5, 4).astype(np.float32)
input2 = np.random.rand(5, 4).astype(np.float32)
result = layers.Stack()([input1, input2])
assert result.shape == (5, 2, 4)
assert np.array_equal(input1, result[:, 0, :])
assert np.array_equal(input2, result[:, 1, :])
@pytest.mark.tensorflow
def test_variable():
"""Test invoking Variable."""
value = np.random.rand(5, 4).astype(np.float32)
layer = layers.Variable(value)
layer.build([])
result = layer.call([]).numpy()
assert np.allclose(result, value)
assert len(layer.trainable_variables) == 1
@pytest.mark.tensorflow
def test_interatomic_l2_distances():
"""Test invoking InteratomicL2Distances."""
atoms = 5
neighbors = 2
coords = np.random.rand(atoms, 3)
neighbor_list = np.random.randint(0, atoms, size=(atoms, neighbors))
layer = layers.InteratomicL2Distances(atoms, neighbors, 3)
result = layer([coords, neighbor_list])
assert result.shape == (atoms, neighbors)
for atom in range(atoms):
for neighbor in range(neighbors):
delta = coords[atom] - coords[neighbor_list[atom, neighbor]]
dist2 = np.dot(delta, delta)
assert np.allclose(dist2, result[atom, neighbor])
@pytest.mark.tensorflow
def test_weave_layer():
"""Test invoking WeaveLayer."""
n_atoms = 4 # In CCC and C, there are 4 atoms
raw_smiles = ['CCC', 'C']
from rdkit import Chem
mols = [Chem.MolFromSmiles(s) for s in raw_smiles]
featurizer = dc.feat.WeaveFeaturizer()
mols = featurizer.featurize(mols)
weave = layers.WeaveLayer(init=tf.keras.initializers.TruncatedNormal(
stddev=0.03))
atom_feat = []
pair_feat = []
atom_to_pair = []
pair_split = []
start = 0
n_pair_feat = 14
for im, mol in enumerate(mols):
n_atoms = mol.get_num_atoms()
# index of pair features
C0, C1 = np.meshgrid(np.arange(n_atoms), np.arange(n_atoms))
atom_to_pair.append(
np.transpose(np.array([C1.flatten() + start,
C0.flatten() + start])))
# number of pairs for each atom
pair_split.extend(C1.flatten() + start)
start = start + n_atoms
# atom features
atom_feat.append(mol.get_atom_features())
# pair features
pair_feat.append(
np.reshape(mol.get_pair_features(),
(n_atoms * n_atoms, n_pair_feat)))
inputs = [
np.array(np.concatenate(atom_feat, axis=0), dtype=np.float32),
np.concatenate(pair_feat, axis=0),
np.array(pair_split),
np.concatenate(atom_to_pair, axis=0)
]
# Outputs should be [A, P]
outputs = weave(inputs)
assert len(outputs) == 2
@pytest.mark.tensorflow
def test_weave_gather():
"""Test invoking WeaveGather."""
n_atoms = 4 # In CCC and C, there are 4 atoms
raw_smiles = ['CCC', 'C']
from rdkit import Chem
mols = [Chem.MolFromSmiles(s) for s in raw_smiles]
featurizer = dc.feat.WeaveFeaturizer()
mols = featurizer.featurize(mols)
atom_feat = []
atom_split = []
for im, mol in enumerate(mols):
n_atoms = mol.get_num_atoms()
atom_split.extend([im] * n_atoms)
# atom features
atom_feat.append(mol.get_atom_features())
inputs = [
np.array(np.concatenate(atom_feat, axis=0), dtype=np.float32),
np.array(atom_split)
]
# Try without compression
gather = layers.WeaveGather(batch_size=2, n_input=75, gaussian_expand=True)
    # Outputs should be [mol1_vec, mol2_vec]
outputs = gather(inputs)
assert len(outputs) == 2
assert np.array(outputs[0]).shape == (11 * 75,)
assert np.array(outputs[1]).shape == (11 * 75,)
# Try with compression
gather = layers.WeaveGather(batch_size=2,
n_input=75,
gaussian_expand=True,
compress_post_gaussian_expansion=True)
    # Outputs should be [mol1_vec, mol2_vec]
outputs = gather(inputs)
assert len(outputs) == 2
assert np.array(outputs[0]).shape == (75,)
assert np.array(outputs[1]).shape == (75,)
@pytest.mark.tensorflow
def test_weave_gather_gaussian_histogram():
"""Test Gaussian Histograms."""
from rdkit import Chem
n_atoms = 4 # In CCC and C, there are 4 atoms
raw_smiles = ['CCC', 'C']
mols = [Chem.MolFromSmiles(s) for s in raw_smiles]
featurizer = dc.feat.WeaveFeaturizer()
mols = featurizer.featurize(mols)
gather = layers.WeaveGather(batch_size=2, n_input=75)
atom_feat = []
atom_split = []
for im, mol in enumerate(mols):
n_atoms = mol.get_num_atoms()
atom_split.extend([im] * n_atoms)
# atom features
atom_feat.append(mol.get_atom_features())
inputs = [
np.array(np.concatenate(atom_feat, axis=0), dtype=np.float32),
np.array(atom_split)
]
# per_mol_features = tf.math.segment_sum(inputs[0], inputs[1])
outputs = gather.gaussian_histogram(inputs[0])
# Gaussian histograms expands into 11 Gaussian buckets.
assert np.array(outputs).shape == (
4,
11 * 75,
)
# assert np.array(outputs[1]).shape == (11 * 75,)
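# The 11x feature expansion asserted above comes from soft-binning every feature
# value against a fixed set of Gaussian "buckets" and renormalizing the
# memberships.  The sketch below shows that idea generically; the bucket
# (mean, std) pairs are placeholders, not the ones WeaveGather actually uses.
def _gaussian_membership_sketch(values, buckets):
    """Expand each value into normalized Gaussian memberships (illustrative only)."""
    from scipy.stats import norm
    memberships = np.stack(
        [norm(mean, std).pdf(values) * std for mean, std in buckets], axis=-1)
    return memberships / np.sum(memberships, axis=-1, keepdims=True)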
@pytest.mark.tensorflow
def test_graph_conv():
"""Test invoking GraphConv."""
out_channels = 2
n_atoms = 4 # In CCC and C, there are 4 atoms
raw_smiles = ['CCC', 'C']
from rdkit import Chem
mols = [Chem.MolFromSmiles(s) for s in raw_smiles]
featurizer = dc.feat.graph_features.ConvMolFeaturizer()
mols = featurizer.featurize(mols)
multi_mol = dc.feat.mol_graphs.ConvMol.agglomerate_mols(mols)
atom_features = multi_mol.get_atom_features().astype(np.float32)
degree_slice = multi_mol.deg_slice
membership = multi_mol.membership
deg_adjs = multi_mol.get_deg_adjacency_lists()[1:]
args = [atom_features, degree_slice, membership] + deg_adjs
layer = layers.GraphConv(out_channels)
result = layer(args)
assert result.shape == (n_atoms, out_channels)
num_deg = 2 * layer.max_degree + (1 - layer.min_degree)
assert len(layer.trainable_variables) == 2 * num_deg
@pytest.mark.tensorflow
def test_graph_pool():
"""Test invoking GraphPool."""
n_atoms = 4 # In CCC and C, there are 4 atoms
raw_smiles = ['CCC', 'C']
from rdkit import Chem
mols = [Chem.MolFromSmiles(s) for s in raw_smiles]
featurizer = dc.feat.graph_features.ConvMolFeaturizer()
mols = featurizer.featurize(mols)
multi_mol = dc.feat.mol_graphs.ConvMol.agglomerate_mols(mols)
atom_features = multi_mol.get_atom_features().astype(np.float32)
degree_slice = multi_mol.deg_slice
membership = multi_mol.membership
deg_adjs = multi_mol.get_deg_adjacency_lists()[1:]
args = [atom_features, degree_slice, membership] + deg_adjs
result = layers.GraphPool()(args)
assert result.shape[0] == n_atoms
# TODO What should shape[1] be? It's not documented.
@pytest.mark.tensorflow
def test_graph_gather():
"""Test invoking GraphGather."""
batch_size = 2
n_features = 75
# n_atoms = 4 # In CCC and C, there are 4 atoms
raw_smiles = ['CCC', 'C']
from rdkit import Chem
mols = [Chem.MolFromSmiles(s) for s in raw_smiles]
featurizer = dc.feat.graph_features.ConvMolFeaturizer()
mols = featurizer.featurize(mols)
multi_mol = dc.feat.mol_graphs.ConvMol.agglomerate_mols(mols)
atom_features = multi_mol.get_atom_features().astype(np.float32)
degree_slice = multi_mol.deg_slice
membership = multi_mol.membership
deg_adjs = multi_mol.get_deg_adjacency_lists()[1:]
args = [atom_features, degree_slice, membership] + deg_adjs
result = layers.GraphGather(batch_size)(args)
# TODO(rbharath): Why is it 2*n_features instead of n_features?
assert result.shape == (batch_size, 2 * n_features)
@pytest.mark.tensorflow
def test_lstm_step():
"""Test invoking LSTMStep."""
n_test = 5
n_feat = 10
y = np.random.rand(n_test, 2 * n_feat).astype(np.float32)
state_zero = np.random.rand(n_test, n_feat).astype(np.float32)
state_one = np.random.rand(n_test, n_feat).astype(np.float32)
layer = layers.LSTMStep(n_feat, 2 * n_feat)
result = layer([y, state_zero, state_one])
h_out, h_copy_out, c_out = (result[0], result[1][0], result[1][1])
assert h_out.shape == (n_test, n_feat)
assert h_copy_out.shape == (n_test, n_feat)
assert c_out.shape == (n_test, n_feat)
assert len(layer.trainable_variables) == 1
@pytest.mark.tensorflow
def test_attn_lstm_embedding():
"""Test invoking AttnLSTMEmbedding."""
max_depth = 5
n_test = 5
n_support = 11
n_feat = 10
test = np.random.rand(n_test, n_feat).astype(np.float32)
support = np.random.rand(n_support, n_feat).astype(np.float32)
layer = layers.AttnLSTMEmbedding(n_test, n_support, n_feat, max_depth)
test_out, support_out = layer([test, support])
assert test_out.shape == (n_test, n_feat)
assert support_out.shape == (n_support, n_feat)
assert len(layer.trainable_variables) == 4
@pytest.mark.tensorflow
def test_iter_ref_lstm_embedding():
"""Test invoking IterRefLSTMEmbedding."""
max_depth = 5
n_test = 5
n_support = 11
n_feat = 10
test = np.random.rand(n_test, n_feat).astype(np.float32)
support = np.random.rand(n_support, n_feat).astype(np.float32)
layer = layers.IterRefLSTMEmbedding(n_test, n_support, n_feat, max_depth)
test_out, support_out = layer([test, support])
assert test_out.shape == (n_test, n_feat)
assert support_out.shape == (n_support, n_feat)
assert len(layer.trainable_variables) == 8
@pytest.mark.tensorflow
def test_vina_free_energy():
"""Test invoking VinaFreeEnergy."""
n_atoms = 5
m_nbrs = 1
ndim = 3
nbr_cutoff = 1
start = 0
stop = 4
X = np.random.rand(n_atoms, ndim).astype(np.float32)
Z = np.random.randint(0, 2, (n_atoms)).astype(np.float32)
layer = layers.VinaFreeEnergy(n_atoms, m_nbrs, ndim, nbr_cutoff, start,
stop)
result = layer([X, Z])
assert len(layer.trainable_variables) == 6
assert result.shape == tuple()
# Creating a second layer should produce different results, since it has
# different random weights.
layer2 = layers.VinaFreeEnergy(n_atoms, m_nbrs, ndim, nbr_cutoff, start,
stop)
result2 = layer2([X, Z])
assert not np.allclose(result, result2)
# But evaluating the first layer again should produce the same result as before.
result3 = layer([X, Z])
assert np.allclose(result, result3)
@pytest.mark.tensorflow
def test_weighted_linear_combo():
"""Test invoking WeightedLinearCombo."""
input1 = np.random.rand(5, 10).astype(np.float32)
input2 = np.random.rand(5, 10).astype(np.float32)
layer = layers.WeightedLinearCombo()
result = layer([input1, input2])
assert len(layer.trainable_variables) == 2
expected = input1 * layer.trainable_variables[
0] + input2 * layer.trainable_variables[1]
assert np.allclose(result, expected)
@pytest.mark.tensorflow
def test_neighbor_list():
"""Test invoking NeighborList."""
N_atoms = 5
start = 0
stop = 12
nbr_cutoff = 3
ndim = 3
M_nbrs = 2
coords = start + np.random.rand(N_atoms, ndim) * (stop - start)
coords = tf.cast(tf.stack(coords), tf.float32)
layer = layers.NeighborList(N_atoms, M_nbrs, ndim, nbr_cutoff, start, stop)
result = layer(coords)
assert result.shape == (N_atoms, M_nbrs)
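# A NeighborList layer like the one above can be sanity-checked against a
# brute-force O(N^2) search.  The sketch below (pure NumPy, illustrative only,
# and ignoring the cutoff/grid machinery) returns the indices of the M_nbrs
# closest other atoms for every atom.
def _brute_force_neighbors(coords, m_nbrs):
    """Indices of the m_nbrs nearest neighbors of each atom (sketch)."""
    deltas = coords[:, None, :] - coords[None, :, :]
    dists = np.sqrt(np.sum(deltas**2, axis=-1))
    np.fill_diagonal(dists, np.inf)  # an atom is not its own neighbor
    return np.argsort(dists, axis=1)[:, :m_nbrs]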
@pytest.mark.tensorflow
def test_atomic_convolution():
"""Test invoking AtomicConvolution."""
batch_size = 4
max_atoms = 5
max_neighbors = 2
dimensions = 3
params = [[5.0, 2.0, 0.5], [10.0, 2.0, 0.5]]
input1 = np.random.rand(batch_size, max_atoms,
dimensions).astype(np.float32)
input2 = np.random.randint(max_atoms,
size=(batch_size, max_atoms, max_neighbors))
input3 = np.random.randint(1,
10,
size=(batch_size, max_atoms, max_neighbors))
layer = layers.AtomicConvolution(radial_params=params)
result = layer([input1, input2, input3])
assert result.shape == (batch_size, max_atoms, len(params))
assert len(layer.trainable_variables) == 3
@pytest.mark.tensorflow
def test_alpha_share_layer():
"""Test invoking AlphaShareLayer."""
batch_size = 10
length = 6
input1 = np.random.rand(batch_size, length).astype(np.float32)
input2 = np.random.rand(batch_size, length).astype(np.float32)
layer = layers.AlphaShareLayer()
result = layer([input1, input2])
assert input1.shape == result[0].shape
assert input2.shape == result[1].shape
# Creating a second layer should produce different results, since it has
# different random weights.
layer2 = layers.AlphaShareLayer()
result2 = layer2([input1, input2])
assert not np.allclose(result[0], result2[0])
assert not np.allclose(result[1], result2[1])
# But evaluating the first layer again should produce the same result as before.
result3 = layer([input1, input2])
assert np.allclose(result[0], result3[0])
assert np.allclose(result[1], result3[1])
@pytest.mark.tensorflow
def test_sluice_loss():
"""Test invoking SluiceLoss."""
input1 = np.ones((3, 4)).astype(np.float32)
input2 = np.ones((2, 2)).astype(np.float32)
result = layers.SluiceLoss()([input1, input2])
assert np.allclose(result, 40.0)
@pytest.mark.tensorflow
def test_beta_share():
"""Test invoking BetaShare."""
batch_size = 10
length = 6
input1 = np.random.rand(batch_size, length).astype(np.float32)
input2 = np.random.rand(batch_size, length).astype(np.float32)
layer = layers.BetaShare()
result = layer([input1, input2])
assert input1.shape == result.shape
assert input2.shape == result.shape
# Creating a second layer should produce different results, since it has
# different random weights.
layer2 = layers.BetaShare()
result2 = layer2([input1, input2])
assert not np.allclose(result, result2)
# But evaluating the first layer again should produce the same result as before.
result3 = layer([input1, input2])
assert np.allclose(result, result3)
@pytest.mark.tensorflow
def test_ani_feat():
"""Test invoking ANIFeat."""
batch_size = 10
max_atoms = 5
input = np.random.rand(batch_size, max_atoms, 4).astype(np.float32)
layer = layers.ANIFeat(max_atoms=max_atoms)
result = layer(input) # noqa: F841
# TODO What should the output shape be? It's not documented, and there
# are no other test cases for it.
@pytest.mark.tensorflow
def test_graph_embed_pool_layer():
"""Test invoking GraphEmbedPoolLayer."""
V = np.random.uniform(size=(10, 100, 50)).astype(np.float32)
adjs = np.random.uniform(size=(10, 100, 5, 100)).astype(np.float32)
layer = layers.GraphEmbedPoolLayer(num_vertices=6)
result = layer([V, adjs])
assert result[0].shape == (10, 6, 50)
assert result[1].shape == (10, 6, 5, 6)
# Creating a second layer should produce different results, since it has
# different random weights.
layer2 = layers.GraphEmbedPoolLayer(num_vertices=6)
result2 = layer2([V, adjs])
assert not np.allclose(result[0], result2[0])
assert not np.allclose(result[1], result2[1])
# But evaluating the first layer again should produce the same result as before.
result3 = layer([V, adjs])
assert np.allclose(result[0], result3[0])
assert np.allclose(result[1], result3[1])
@pytest.mark.tensorflow
def test_graph_cnn():
"""Test invoking GraphCNN."""
V = np.random.uniform(size=(10, 100, 50)).astype(np.float32)
adjs = np.random.uniform(size=(10, 100, 5, 100)).astype(np.float32)
layer = layers.GraphCNN(num_filters=6)
result = layer([V, adjs])
assert result.shape == (10, 100, 6)
# Creating a second layer should produce different results, since it has
# different random weights.
layer2 = layers.GraphCNN(num_filters=6)
result2 = layer2([V, adjs])
assert not np.allclose(result, result2)
# But evaluating the first layer again should produce the same result as before.
result3 = layer([V, adjs])
assert np.allclose(result, result3)
@pytest.mark.tensorflow
def test_DAG_layer():
"""Test invoking DAGLayer."""
batch_size = 10
n_graph_feat = 30
n_atom_feat = 75
max_atoms = 50
layer_sizes = [100]
atom_features = np.random.rand(batch_size, n_atom_feat)
parents = np.random.randint(0,
max_atoms,
size=(batch_size, max_atoms, max_atoms))
calculation_orders = np.random.randint(0,
batch_size,
size=(batch_size, max_atoms))
calculation_masks = np.random.randint(0, 2, size=(batch_size, max_atoms))
# Recall that the DAG layer expects a MultiConvMol as input,
# so the "batch" is a pooled set of atoms from all the
# molecules in the batch, just as it is for the graph conv.
# This means that n_atoms is the batch-size
n_atoms = batch_size
# dropout_switch = False
layer = layers.DAGLayer(n_graph_feat=n_graph_feat,
n_atom_feat=n_atom_feat,
max_atoms=max_atoms,
layer_sizes=layer_sizes)
outputs = layer([ # noqa: F841
atom_features,
parents,
calculation_orders,
calculation_masks,
n_atoms,
# dropout_switch
])
# TODO(rbharath): What is the shape of outputs supposed to be?
# I'm getting (7, 30) here. Where does 7 come from??
@pytest.mark.tensorflow
def test_DAG_gather():
"""Test invoking DAGGather."""
# TODO(rbharath): We need more documentation about why
# these numbers work.
batch_size = 10
n_graph_feat = 30
n_atom_feat = 30
n_outputs = 75
max_atoms = 50
layer_sizes = [100]
layer = layers.DAGGather(n_graph_feat=n_graph_feat,
n_outputs=n_outputs,
max_atoms=max_atoms,
layer_sizes=layer_sizes)
atom_features = np.random.rand(batch_size, n_atom_feat)
membership = np.sort(np.random.randint(0, batch_size, size=(batch_size)))
outputs = layer([atom_features, membership]) # noqa: F841
@pytest.mark.torch
def test_scale_norm():
"""Test invoking ScaleNorm."""
input_ar = torch.tensor([[1., 99., 10000.], [0.003, 999.37, 23.]])
layer = torch_layers.ScaleNorm(0.35)
result1 = layer(input_ar)
output_ar = torch.tensor([[5.9157897e-05, 5.8566318e-03, 5.9157896e-01],
[1.7754727e-06, 5.9145141e-01, 1.3611957e-02]])
assert torch.allclose(result1, output_ar)
@pytest.mark.torch
def test_multi_headed_mat_attention():
"""Test invoking MultiHeadedMATAttention."""
feat = dc.feat.MATFeaturizer()
input_smile = "CC"
out = feat.featurize(input_smile)
node = torch.tensor(out[0].node_features).float().unsqueeze(0)
adj = torch.tensor(out[0].adjacency_matrix).float().unsqueeze(0)
dist = torch.tensor(out[0].distance_matrix).float().unsqueeze(0)
mask = torch.sum(torch.abs(node), dim=-1) != 0
layer = torch_layers.MultiHeadedMATAttention(dist_kernel='softmax',
lambda_attention=0.33,
lambda_distance=0.33,
h=16,
hsize=1024,
dropout_p=0.0)
op = torch_layers.MATEmbedding()(node)
output = layer(op, op, op, mask, adj, dist)
assert (output.shape == (1, 3, 1024))
@pytest.mark.torch
def test_position_wise_feed_forward():
"""Test invoking PositionwiseFeedForward."""
torch.manual_seed(0)
input_ar = torch.tensor([[1., 2.], [5., 6.]])
layer = torch_layers.PositionwiseFeedForward(d_input=2,
d_hidden=2,
d_output=2,
activation='relu',
n_layers=1,
dropout_p=0.0)
result = layer(input_ar)
output_ar = torch.tensor([[0.4810, 0.0000], [1.9771, 0.0000]])
assert torch.allclose(result, output_ar, rtol=1e-4)
@pytest.mark.torch
@pytest.mark.parametrize('skip_connection,batch_norm,expected',
[(False, False, [[0.2795, 0.4243], [0.2795, 0.4243]]),
(True, False, [[-0.9612, 2.3846], [-4.1104, 5.7606]]),
(False, True, [[0.2795, 0.4243], [0.2795, 0.4243]]),
(True, True, [[-0.9612, 2.3846], [-4.1104, 5.7606]])])
def test_multilayer_perceptron(skip_connection, batch_norm, expected):
"""Test invoking MLP."""
torch.manual_seed(0)
input_ar = torch.tensor([[1., 2.], [5., 6.]])
layer = torch_layers.MultilayerPerceptron(d_input=2,
d_output=2,
d_hidden=(2, 2),
activation_fn='relu',
dropout=0.0,
batch_norm=batch_norm,
skip_connection=skip_connection)
result = layer(input_ar)
output_ar = torch.tensor(expected)
assert torch.allclose(result, output_ar, atol=1e-4)
@pytest.mark.torch
def test_multilayer_perceptron_overfit():
import torch
import deepchem.models.torch_models.layers as torch_layers
from deepchem.data import NumpyDataset
from deepchem.models.torch_models.torch_model import TorchModel
from deepchem.models.losses import L1Loss
import numpy as np
torch.manual_seed(0)
x = torch.randn(10, 10)
y = torch.ones(10, 1)
data = NumpyDataset(x, y)
layer = torch_layers.MultilayerPerceptron(d_input=10,
d_output=1,
d_hidden=(2, 2),
activation_fn='relu')
model = TorchModel(layer, loss=L1Loss())
model.fit(data, nb_epoch=1000)
output = model.predict_on_batch(data.X)
assert np.allclose(output, y, atol=1e-2)
@pytest.mark.torch
def test_weighted_skip_multilayer_perceptron():
"Test for weighted skip connection from the input to the output"
seed = 123
torch.manual_seed(seed)
dim = 1
features = torch.Tensor([[0.8343], [1.2713], [1.2713], [1.2713], [1.2713]])
layer = dc.models.torch_models.layers.MultilayerPerceptron(
d_input=dim,
d_hidden=(dim,),
d_output=dim,
activation_fn='silu',
skip_connection=True,
weighted_skip=False)
output = layer(features)
output = output.detach().numpy()
result = np.array([[1.1032], [1.5598], [1.5598], [1.5598], [1.5598]])
assert np.allclose(output, result, atol=1e-04)
assert output.shape == (5, 1)
@pytest.mark.torch
def test_position_wise_feed_forward_dropout_at_input():
"""Test invoking PositionwiseFeedForward."""
torch.manual_seed(0)
input_ar = torch.tensor([[1., 2.], [5., 6.]])
layer = torch_layers.PositionwiseFeedForward(d_input=2,
d_hidden=2,
d_output=2,
activation='relu',
n_layers=1,
dropout_p=0.0,
dropout_at_input_no_act=True)
result = layer(input_ar)
output_ar = torch.tensor([[0.4810, -1.4331], [1.9771, -5.8426]])
assert torch.allclose(result, output_ar, rtol=1e-4)
@pytest.mark.torch
def test_sub_layer_connection():
"""Test invoking SublayerConnection."""
torch.manual_seed(0)
input_ar = torch.tensor([[1., 2.], [5., 6.]])
layer = torch_layers.SublayerConnection(2, 0.0)
result = layer(input_ar, input_ar)
output_ar = torch.tensor([[2.0027e-05, 3.0000e+00],
[4.0000e+00, 7.0000e+00]])
assert torch.allclose(result, output_ar)
@pytest.mark.torch
def test_mat_encoder_layer():
"""Test invoking MATEncoderLayer."""
input_smile = "CC"
    feat = dc.feat.MATFeaturizer()
out = feat.featurize(input_smile)
node = torch.tensor(out[0].node_features).float().unsqueeze(0)
adj = torch.tensor(out[0].adjacency_matrix).float().unsqueeze(0)
dist = torch.tensor(out[0].distance_matrix).float().unsqueeze(0)
mask = torch.sum(torch.abs(node), dim=-1) != 0
layer = torch_layers.MATEncoderLayer()
op = torch_layers.MATEmbedding()(node)
output = layer(op, mask, adj, dist)
assert (output.shape == (1, 3, 1024))
@pytest.mark.torch
def test_mat_embedding():
"""Test invoking MATEmbedding."""
torch.manual_seed(0)
input_ar = torch.tensor([1., 2., 3.])
layer = torch_layers.MATEmbedding(3, 1, 0.0)
result = layer(input_ar).detach()
output_ar = torch.tensor([-1.2353])
assert torch.allclose(result, output_ar, rtol=1e-4)
@pytest.mark.torch
def test_mat_generator():
"""Test invoking MATGenerator."""
torch.manual_seed(0)
input_ar = torch.tensor([1., 2., 3.])
layer = torch_layers.MATGenerator(3, 'mean', 1, 1, 0.0)
mask = torch.tensor([1., 1., 1.])
result = layer(input_ar, mask)
output_ar = torch.tensor([-1.4436])
assert torch.allclose(result, output_ar, rtol=1e-4)
@pytest.mark.torch
def test_dmpnn_encoder_layer():
"""Test invoking DMPNNEncoderLayer."""
torch.manual_seed(0)
input_smile = "CC"
feat = dc.feat.DMPNNFeaturizer(features_generators=['morgan'])
graph = feat.featurize(input_smile)
from deepchem.models.torch_models.dmpnn import _MapperDMPNN
mapper = _MapperDMPNN(graph[0])
atom_features, f_ini_atoms_bonds, atom_to_incoming_bonds, mapping, global_features = mapper.values
molecules_unbatch_key = [len(atom_features)]
atom_features = torch.from_numpy(atom_features).float()
f_ini_atoms_bonds = torch.from_numpy(f_ini_atoms_bonds).float()
atom_to_incoming_bonds = torch.from_numpy(atom_to_incoming_bonds)
mapping = torch.from_numpy(mapping)
global_features = torch.from_numpy(global_features).float()
layer = torch_layers.DMPNNEncoderLayer(d_hidden=2)
assert layer.W_i.__repr__(
) == 'Linear(in_features=147, out_features=2, bias=False)'
assert layer.W_h.__repr__(
) == 'Linear(in_features=2, out_features=2, bias=False)'
assert layer.W_o.__repr__(
) == 'Linear(in_features=135, out_features=2, bias=True)'
output = layer(atom_features, f_ini_atoms_bonds, atom_to_incoming_bonds,
mapping, global_features, molecules_unbatch_key)
readout_output = torch.tensor([[0.1116, 0.0470]])
assert output.shape == torch.Size([1, 2 + 2048])
assert torch.allclose(output[0][:2], readout_output, atol=1e-4)
@pytest.mark.torch
def test_torch_interatomic_l2_distances():
"""Test Invoking the torch equivalent of InteratomicL2Distances"""
atoms = 5
neighbors = 2
coords = np.random.rand(atoms, 3)
neighbor_list = np.random.randint(0, atoms, size=(atoms, neighbors))
layer = torch_layers.InteratomicL2Distances(atoms, neighbors, 3)
result = layer([coords, neighbor_list])
assert result.shape == (atoms, neighbors)
for atom in range(atoms):
for neighbor in range(neighbors):
delta = coords[atom] - coords[neighbor_list[atom, neighbor]]
dist2 = np.dot(delta, delta)
assert np.allclose(dist2, result[atom, neighbor])
@pytest.mark.torch
def test_torch_neighbor_list():
"""Test invoking the Torch equivalent of NeighborList."""
N_atoms = 5
start = 0
stop = 12
nbr_cutoff = 3
ndim = 3
M_nbrs = 2
coords = start + np.random.rand(N_atoms, ndim) * (stop - start)
coords = torch.as_tensor(coords, dtype=torch.float)
layer = torch_layers.NeighborList(N_atoms, M_nbrs, ndim, nbr_cutoff, start,
stop)
result = layer(coords)
assert result.shape == (N_atoms, M_nbrs)
@pytest.mark.torch
def test_torch_lstm_step():
"""Test invoking LSTMStep."""
n_test = 5
n_feat = 10
y = np.random.rand(n_test, 2 * n_feat).astype(np.float32)
state_zero = np.random.rand(n_test, n_feat).astype(np.float32)
state_one = np.random.rand(n_test, n_feat).astype(np.float32)
layer = torch_layers.LSTMStep(n_feat, 2 * n_feat)
result = layer([y, state_zero, state_one])
h_out, h_copy_out, c_out = (result[0], result[1][0], result[1][1])
assert h_out.shape == (n_test, n_feat)
assert h_copy_out.shape == (n_test, n_feat)
assert c_out.shape == (n_test, n_feat)
@pytest.mark.torch
def test_torch_gru():
n_hidden = 100
batch_size = 10
x = torch.tensor(np.random.rand(batch_size, n_hidden).astype(np.float32))
h_0 = torch.tensor(np.random.rand(batch_size, n_hidden).astype(np.float32))
init = 'xavier_uniform_'
layer = torch_layers.GatedRecurrentUnit(n_hidden, init)
y = layer([x, h_0])
assert y.shape == (batch_size, n_hidden)
@pytest.mark.torch
def test_torch_atomic_convolution():
"""Test invoking the Torch equivalent of AtomicConvolution"""
batch_size = 4
max_atoms = 5
max_neighbors = 2
dimensions = 3
radial_params = torch.tensor([[5.0, 2.0, 0.5], [10.0, 2.0, 0.5],
[5.0, 1.0, 0.2]])
input1 = np.random.rand(batch_size, max_atoms,
dimensions).astype(np.float32)
input2 = np.random.randint(max_atoms,
size=(batch_size, max_atoms, max_neighbors))
input3 = np.random.randint(1,
10,
size=(batch_size, max_atoms, max_neighbors))
layer = torch_layers.AtomicConvolution(radial_params=radial_params)
result = layer([input1, input2, input3])
assert result.shape == (batch_size, max_atoms, len(radial_params))
atom_types = [1, 2, 8]
layer = torch_layers.AtomicConvolution(radial_params=radial_params,
atom_types=atom_types)
result = layer([input1, input2, input3])
assert result.shape == (batch_size, max_atoms,
len(radial_params) * len(atom_types))
# By setting the `box_size` to effectively zero, the result should only contain `nan`.
box_size = [0.0, 0.0, 0.0]
layer = torch_layers.AtomicConvolution(radial_params=radial_params,
box_size=box_size)
result = layer([input1, input2, input3])
assert torch.all(result.isnan())
# Check that layer has three trainable parameters.
assert len(list(layer.parameters())) == 3
with pytest.raises(ValueError):
# Check when `box_size` is of wrong dimensionality.
dimensions = 2
box_size = torch.tensor([1.0, 1.0, 1.0])
input1 = np.random.rand(batch_size, max_atoms,
dimensions).astype(np.float32)
layer = torch_layers.AtomicConvolution(radial_params=radial_params,
box_size=box_size)
_ = layer([input1, input2, input3])
# Check when `inputs` is of wrong length.
layer = torch_layers.AtomicConvolution(radial_params=radial_params)
_ = layer([input1, input2])
@pytest.mark.torch
def test_torch_combine_mean_std():
"""Test invoking the Torch equivalent of CombineMeanStd."""
mean = np.random.rand(5, 3).astype(np.float32)
std = np.random.rand(5, 3).astype(np.float32)
layer = torch_layers.CombineMeanStd(training_only=True, noise_epsilon=0.01)
result1 = layer([mean, std], training=False)
assert np.array_equal(result1, mean) # No noise in test mode
result2 = layer([mean, std], training=True)
assert not np.array_equal(result2, mean)
assert np.allclose(result2, mean, atol=0.1)
assert result1.shape == mean.shape and result1.shape == std.shape
assert result2.shape == mean.shape and result2.shape == std.shape
@pytest.mark.torch
def test_torch_weighted_linear_combo():
"""Test invoking the Torch equivalent of WeightedLinearCombo."""
input1 = np.random.rand(5, 10).astype(np.float32)
input2 = np.random.rand(5, 10).astype(np.float32)
layer = torch_layers.WeightedLinearCombo(len([input1, input2]))
result = layer([input1, input2])
assert len(layer.input_weights) == 2
expected = torch.Tensor(input1) * layer.input_weights[0] + torch.Tensor(
input2) * layer.input_weights[1]
assert torch.allclose(result, expected)
@pytest.mark.torch
def test_local_global_discriminator():
import torch
from deepchem.models.torch_models.gnn import LocalGlobalDiscriminator
hidden_dim = 10
discriminator = LocalGlobalDiscriminator(hidden_dim=hidden_dim)
# Create random local node representations and global graph representations
batch_size = 6
x = torch.randn(batch_size, hidden_dim)
summary = torch.randn(batch_size, hidden_dim)
# Compute similarity scores using the discriminator
similarity_scores = discriminator(x, summary)
# Check if the output has the correct shape and dtype
assert similarity_scores.shape == (batch_size,)
assert similarity_scores.dtype == torch.float32
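# Discriminators of this kind are usually bilinear scorers in DGI-style models:
# score_i = x_i^T W s_i.  The sketch below illustrates that scoring rule; it is
# an assumption about the general technique, not necessarily the exact
# implementation inside LocalGlobalDiscriminator.
def _bilinear_score_sketch(x, summary, weight):
    """Bilinear similarity between local (x) and global (summary) representations."""
    return torch.sum(x * torch.matmul(summary, weight), dim=1)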
@pytest.mark.torch
def test_set_gather():
"""Test invoking the Torch Equivalent of SetGather."""
# total_n_atoms = 4
# n_atom_feat = 4
# atom_feat = np.random.rand(total_n_atoms, n_atom_feat)
atom_feat = np.load(
os.path.join(os.path.dirname(__file__), "assets",
"atom_feat_SetGather.npy"))
atom_split = np.array([0, 0, 1, 1], dtype=np.int32)
torch_layer = torch_layers.SetGather(2, 2, 4)
weights = np.load(
os.path.join(os.path.dirname(__file__), "assets",
"weights_SetGather_tf.npy"))
torch_layer.U = torch.nn.Parameter(torch.from_numpy(weights))
torch_result = torch_layer([atom_feat, atom_split])
tf_result = np.load(
os.path.join(os.path.dirname(__file__), "assets",
"result_SetGather_tf.npy"))
assert np.allclose(np.array(tf_result), np.array(torch_result), atol=1e-4)
@pytest.mark.torch
def test_dtnn_embedding():
"""Test invoking the Torch Equivalent of DTNNEmbedding."""
# Embeddings and results from Tensorflow implementation
embeddings_tf = [
[0.51979446, -0.43430394, -0.73670053, -0.443037, 0.6706989],
[0.21077824, -0.62696636, 0.66158307, -0.25795913, 0.31941652],
[-0.26653743, 0.15180665, 0.21961051, -0.7263894, -0.4521287],
[0.64324486, -0.66274744, 0.2814387, 0.5478991, -0.32046735],
[0.1925143, -0.5505201, -0.35381562, -0.7409675, 0.6427947]
]
results_tf = [[0.64324486, -0.66274744, 0.2814387, 0.5478991, -0.32046735],
[-0.26653743, 0.15180665, 0.21961051, -0.7263894, -0.4521287],
[0.1925143, -0.5505201, -0.35381562, -0.7409675, 0.6427947]]
embedding_layer_torch = torch_layers.DTNNEmbedding(5, 5, 'xavier_uniform_')
embedding_layer_torch.embedding_list = torch.nn.Parameter(
torch.tensor(embeddings_tf))
result_torch = embedding_layer_torch(torch.tensor([3, 2, 4]))
assert torch.allclose(torch.tensor(results_tf), result_torch)
assert result_torch.shape == (3, 5)
@pytest.mark.torch
def test_dtnn_step():
"""Test invoking the Torch Equivalent of DTNNEmbedding."""
# Weights and Embeddings from Tensorflow implementation
emb = [[-0.57997036, -0.54435134], [0.38634658, -0.7800591],
[0.48274183, 0.09290886], [0.72841835, -0.21489048]]
W_cf = [[
-0.6980064, -0.40244102, 0.12015277, -0.11236137, 0.44983745,
-0.7261406, 0.03590739, 0.18886101
],
[
0.38193417, 0.08161169, -0.19805211, 0.01473492, -0.21253234,
0.07730722, -0.25919884, -0.4723375
]]
W_fc = [[0.27500582, 0.19958842], [-0.07512283, -0.4402059],
[-0.6734804, -0.13714153], [-0.7683939, 0.04202372],
[0.61084986, 0.4715314], [0.3767004, -0.59029776],
[-0.1084643, 0.34647202], [-0.656258, -0.3710086]]
W_df = [[
-0.53450876, -0.49899083, 0.27615517, -0.15492862, 0.61964273,
0.18540198, 0.17524064, -0.3806646
],
[
0.24792421, -0.38151026, -0.50989795, -0.16949275, -0.1911948,
0.24427831, 0.3103531, -0.548931
],
[
0.5648807, -0.26876533, -0.4311456, 0.03692579, -0.04565948,
0.6494999, -0.489844, -0.6053973
],
[
-0.5715633, 0.5406003, -0.4798649, -0.6116994, 0.1802761,
-0.02659523, -0.14560652, -0.59008956
],
[
-0.64630675, -0.2756685, -0.43883026, 0.14410889, -0.13292378,
-0.17106324, -0.60326487, -0.25875738
],
[
-0.28023764, 0.54396844, -0.05222553, -0.6502703, -0.5865139,
-0.03999609, -0.16664535, 0.5127555
]]
output_tf = [[[-0.5800, -0.5444], [0.3863, -0.7801], [0.4827, 0.0929],
[0.7284, -0.2149]],
[[0.2256, -0.1123], [1.1920, -0.3480], [1.2884, 0.5249],
[1.5340, 0.2171]]]
step_layer = torch_layers.DTNNStep(4, 6, 8)
step_layer.W_fc = torch.nn.Parameter(torch.Tensor(W_fc))
step_layer.W_cf = torch.nn.Parameter(torch.Tensor(W_cf))
step_layer.W_df = torch.nn.Parameter(torch.Tensor(W_df))
output_torch = step_layer([
torch.Tensor(emb),
torch.Tensor([0, 1, 2, 3, 4, 5]).to(torch.float32),
torch.Tensor([1]).to(torch.int64),
torch.Tensor([[1]]).to(torch.int64)
])
assert torch.allclose(torch.tensor(output_tf), output_torch, atol=1e-4)
assert output_torch.shape == (2, 4, 2)
@pytest.mark.torch
def test_dtnn_gather():
"""Test invoking the Torch equivalent of EdgeNetwork."""
W_list_1 = [[
0.54732025, -0.627077, -0.2903021, -0.53665423, -0.00559229,
-0.32349566, 0.1962483, 0.5581455, 0.11647487, 0.13117266
],
[
-0.66846573, -0.28275022, 0.06701428, 0.43692493,
-0.24846172, 0.41073883, -0.04701298, -0.23764172,
-0.16597754, -0.23689681
],
[
-0.41830233, -0.2093746, 0.11161888, -0.61909866,
-0.07230109, 0.20211416, 0.07490742, -0.52804005,
-0.4896497, 0.63919294
]]
W_list_2 = [[-0.33358562, -0.5884317, 0.26542962],
[-0.6087704, -0.5719125, -0.05134851],
[0.19017327, -0.5240722, 0.28907597],
[0.09558785, 0.2324171, 0.395795],
[0.04189491, -0.2537845, 0.1019693],
[-0.27015388, -0.53264153, 0.04725528],
[-0.03956562, 0.678604, 0.37642324],
[0.3477502, 0.48643565, -0.48160803],
[0.29909176, -0.4186227, 0.53793466],
[0.05536985, 0.64485407, 0.5148499]]
result_tf = [[0.45788735, 0.9619317, -0.53767115]]
gather_layer_torch = torch_layers.DTNNGather(3, 3, [10])
gather_layer_torch.W_list = torch.nn.ParameterList()
gather_layer_torch.W_list.append(torch.tensor(W_list_1))
gather_layer_torch.W_list.append(torch.tensor(W_list_2))
result_torch = gather_layer_torch([
torch.Tensor([[3, 2, 1]]).to(torch.float32),
torch.Tensor([0]).to(torch.int64)
])
assert torch.allclose(result_torch, torch.tensor(result_tf), atol=1e-4)
assert result_torch.shape == (1, 3)
@pytest.mark.torch
def test_edge_network():
"""Test invoking the Torch equivalent of EdgeNetwork."""
# init parameters
n_pair_features = 14
n_hidden = 75 # based on weave featurizer
torch_init = 'xavier_uniform_'
# generate features for testing
mols = ["CCC"]
featurizer = dc.feat.WeaveFeaturizer()
features = featurizer.featurize(mols)
X_b = np.asarray([features[0]])
X_b = dc.data.pad_features(1, X_b)
atom_feat = []
pair_feat = []
atom_to_pair = []
start = 0
for mol in X_b:
n_atoms = mol.get_num_atoms()
# index of pair features
C0, C1 = np.meshgrid(np.arange(n_atoms), np.arange(n_atoms))
atom_to_pair.append(
np.transpose(np.array([C1.flatten() + start,
C0.flatten() + start])))
start = start + n_atoms
# atom features
atom_feat.append(mol.get_atom_features())
# pair features
pair_feat.append(
np.reshape(mol.get_pair_features(),
(n_atoms * n_atoms, n_pair_features)))
atom_features = np.concatenate(atom_feat, axis=0)
pair_features = np.concatenate(pair_feat, axis=0)
atom_to_pair_array = np.concatenate(atom_to_pair, axis=0)
# tensors for torch layer
torch_pair_features = torch.Tensor(pair_features)
torch_atom_features = torch.Tensor(atom_features)
torch_atom_to_pair = torch.Tensor(atom_to_pair_array)
torch_atom_to_pair = torch.squeeze(torch_atom_to_pair.to(torch.int64),
dim=0)
torch_inputs = [
torch_pair_features, torch_atom_features, torch_atom_to_pair
]
torch_layer = dc.models.torch_models.layers.EdgeNetwork(
n_pair_features, n_hidden, torch_init)
# assigning tensorflow layer weights to torch layer
torch_layer.W = torch.from_numpy(
np.load("deepchem/models/tests/assets/edgenetwork_weights.npy"))
torch_result = torch_layer(torch_inputs)
assert np.allclose(
np.array(torch_result),
np.load("deepchem/models/tests/assets/edgenetwork_result.npy"),
atol=1e-04)
@pytest.mark.torch
def test_mxmnet_envelope():
"""Test for _MXMNetEnvelope helper layer."""
env = dc.models.torch_models.layers._MXMNetEnvelope(exponent=2)
input_tensor = torch.tensor([0.5, 1.0, 2.0, 3.0])
output = env(input_tensor)
output = output.detach().numpy()
result = np.array([1.3125, 0.0000, 0.0000, 0.0000])
assert np.allclose(result, output, atol=1e-04)
@pytest.mark.torch
def test_mxmnet_global_message_passing():
""" Test for MXMNetGlobalMessagePassing Layer."""
seed = 123
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
dim = 1
node_features = torch.tensor([[0.8343], [1.2713], [1.2713], [1.2713],
[1.2713]])
edge_attr = torch.tensor([[1.0004], [1.0004], [1.0005], [1.0004], [1.0004],
[-0.2644], [-0.2644], [-0.2644], [1.0004],
[-0.2644], [-0.2644], [-0.2644], [1.0005],
[-0.2644], [-0.2644], [-0.2644], [1.0004],
[-0.2644], [-0.2644], [-0.2644]])
edge_indices = torch.tensor(
[[0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4],
[1, 2, 3, 4, 0, 2, 3, 4, 0, 1, 3, 4, 0, 1, 2, 4, 0, 1, 2, 3]])
out = dc.models.torch_models.layers.MXMNetGlobalMessagePassing(dim)
output = out(node_features, edge_attr, edge_indices)
output = output.detach().numpy()
result = np.array([[-0.27947044], [2.417905], [2.417905], [2.4178727],
[2.417905]])
print(output)
assert np.allclose(output, result, atol=1e-04)
assert output.shape == (5, 1)
@pytest.mark.torch
def test_mxmnet_besselbasis():
"""Test for MXMNetBesselBasisLayer"""
radial_layer = dc.models.torch_models.layers.MXMNetBesselBasisLayer(
num_radial=2, cutoff=2.0, envelope_exponent=2)
distances = torch.tensor([0.5, 1.0, 2.0, 3.0])
output = radial_layer(distances)
output = output.detach().numpy()
result = np.array([[2.6434e+00, 3.7383e+00], [1.3125e+00, -1.1474e-07],
[-0.0000e+00, 0.0000e+00], [-0.0000e+00, -0.0000e+00]])
assert np.allclose(result, output, atol=1e-04)
@pytest.mark.torch
def test_encoder_rnn():
"""Test for Encoder Layer of SeqToSeq Model"""
hidden_size = 7
num_input_token = 4
input = torch.tensor([[1, 0, 2, 3, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0]])
layer = torch_layers.EncoderRNN(num_input_token, hidden_size)
emb, hidden = layer(input)
assert emb.shape == (input.shape[0], input.shape[1], hidden_size)
assert hidden.shape == (1, input.shape[0], hidden_size)
@pytest.mark.torch
def test_FerminetElectronFeature():
"Test for FerminetElectronFeature layer."
electron_layer = dc.models.torch_models.layers.FerminetElectronFeature(
[32, 32, 32], [16, 16, 16], 4, 8, 10, [5, 5])
one_electron_test = torch.randn(8, 10, 4 * 4)
two_electron_test = torch.randn(8, 10, 10, 4)
one, two = electron_layer.forward(one_electron_test, two_electron_test)
assert one.size() == torch.Size([8, 10, 32])
assert two.size() == torch.Size([8, 10, 10, 16])
@pytest.mark.torch
def test_FerminetEnvelope():
"Test for FerminetEnvelope layer."
envelope_layer = dc.models.torch_models.layers.FerminetEnvelope(
[32, 32, 32], [16, 16, 16], 10, 8, [5, 5], 5, 16)
one_electron = torch.randn(8, 10, 32)
one_electron_permuted = torch.randn(8, 10, 5, 3)
psi_up, psi_down = envelope_layer.forward(one_electron,
one_electron_permuted)
assert psi_up.size() == torch.Size([8, 16, 5, 5])
assert psi_down.size() == torch.Size([8, 16, 5, 5])
@pytest.mark.torch
def test_mxmnet_local_message_passing():
""" Test for MXMNetLocalMessagePassing Layer."""
seed = 123
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
dim = 1
h = torch.tensor([[0.8343], [1.2713], [1.2713], [1.2713], [1.2713]])
rbf = torch.tensor([[-0.2628], [-0.2628], [-0.2628], [-0.2628], [-0.2629],
[-0.2629], [-0.2628], [-0.2628]])
sbf1 = torch.tensor([[-0.2767], [-0.2767], [-0.2767], [-0.2767], [-0.2767],
[-0.2767], [-0.2767], [-0.2767], [-0.2767], [-0.2767],
[-0.2767], [-0.2767]])
sbf2 = torch.tensor([[-0.0301], [-0.0301], [-0.1483], [-0.1486], [-0.1484],
[-0.0301], [-0.1483], [-0.0301], [-0.1485], [-0.1483],
[-0.0301], [-0.1486], [-0.1485], [-0.0301], [-0.1486],
[-0.0301], [-0.1484], [-0.1483], [-0.1486], [-0.0301]])
idx_kj = torch.tensor([3, 5, 7, 1, 5, 7, 1, 3, 7, 1, 3, 5])
idx_ji_1 = torch.tensor([0, 0, 0, 2, 2, 2, 4, 4, 4, 6, 6, 6])
idx_jj = torch.tensor(
[0, 1, 3, 5, 7, 2, 1, 3, 5, 7, 4, 1, 3, 5, 7, 6, 1, 3, 5, 7])
idx_ji_2 = torch.tensor(
[0, 1, 1, 1, 1, 2, 3, 3, 3, 3, 4, 5, 5, 5, 5, 6, 7, 7, 7, 7])
edge_index = torch.tensor([[0, 1, 0, 2, 0, 3, 0, 4],
[1, 0, 2, 0, 3, 0, 4, 0]])
layer = dc.models.torch_models.layers.MXMNetLocalMessagePassing(
dim, activation_fn='silu')
output = layer(h, rbf, sbf1, sbf2, idx_kj, idx_ji_1, idx_jj, idx_ji_2,
edge_index)
result0 = np.array([[0.7916], [1.2796], [1.2796], [1.2796], [1.2796]])
result1 = np.array([[0.3439], [0.3441], [0.3441], [0.3441], [0.3441]])
assert np.allclose(result0, output[0].detach().numpy(), atol=1e-04)
assert np.allclose(result1, output[1].detach().numpy(), atol=1e-04)
assert output[0].shape == (5, 1)
assert output[1].shape == (5, 1)
<file_sep>import numpy as np
import deepchem as dc
def test_y_property():
"""Test that dataset.y works."""
num_datapoints = 10
num_features = 10
num_tasks = 1
X = np.random.rand(num_datapoints, num_features)
y = np.random.randint(2, size=(num_datapoints, num_tasks))
w = np.ones((num_datapoints, num_tasks))
ids = np.array(["id"] * num_datapoints)
dataset = dc.data.DiskDataset.from_numpy(X, y, w, ids)
y_out = dataset.y
np.testing.assert_array_equal(y, y_out)
def test_w_property():
"""Test that dataset.y works."""
num_datapoints = 10
num_features = 10
num_tasks = 1
X = np.random.rand(num_datapoints, num_features)
y = np.random.randint(2, size=(num_datapoints, num_tasks))
w = np.ones((num_datapoints, num_tasks))
ids = np.array(["id"] * num_datapoints)
dataset = dc.data.DiskDataset.from_numpy(X, y, w, ids)
w_out = dataset.w
np.testing.assert_array_equal(w, w_out)
<file_sep>import numpy as np
import deepchem as dc
np.random.seed(123)
# Load delaney dataset
delaney_tasks, delaney_datasets, transformers = dc.molnet.load_delaney(
featurizer="GraphConv")
train, valid, test = delaney_datasets
# Fit models
metric = dc.metrics.Metric(dc.metrics.pearson_r2_score)
optimizer = dc.hyper.GaussianProcessHyperparamOpt(
lambda **p: dc.models.GraphConvModel(n_tasks=len(delaney_tasks), mode="regression", **p))
params_dict = {"dropout": 0.5}
best_model, best_params, all_results = optimizer.hyperparam_search(
params_dict, train, valid, metric, transformers, max_iter=2, search_range=2)
valid_score = best_model.evaluate(valid, [metric], transformers)
print("valid_score")
print(valid_score)
<file_sep>import pytest
import deepchem as dc
import tempfile
import numpy as np
import os
try:
import torch
has_torch = True
except ModuleNotFoundError:
has_torch = False
@pytest.mark.torch
def test_dmpnn_regression():
"""
Test DMPNN class for regression mode
"""
try:
from torch_geometric.data import Data, Batch
except ModuleNotFoundError:
raise ImportError(
"This test requires PyTorch Geometric to be installed.")
torch.manual_seed(0)
from deepchem.models.torch_models.dmpnn import _MapperDMPNN, DMPNN
# get data
input_smile = "CC"
feat = dc.feat.DMPNNFeaturizer(features_generators=['morgan'])
graph = feat.featurize(input_smile)
mapper = _MapperDMPNN(graph[0])
atom_features, f_ini_atoms_bonds, atom_to_incoming_bonds, mapping, global_features = mapper.values
atom_features = torch.from_numpy(atom_features).float()
f_ini_atoms_bonds = torch.from_numpy(f_ini_atoms_bonds).float()
atom_to_incoming_bonds = torch.from_numpy(atom_to_incoming_bonds)
mapping = torch.from_numpy(mapping)
global_features = torch.from_numpy(global_features).float()
data = [
Data(atom_features=atom_features,
f_ini_atoms_bonds=f_ini_atoms_bonds,
atom_to_incoming_bonds=atom_to_incoming_bonds,
mapping=mapping,
global_features=global_features)
]
# prepare batch (size 1)
pyg_batch = Batch()
pyg_batch = pyg_batch.from_data_list(data)
# initialize the model
number_of_tasks = 2
number_of_molecules = 1
morgan_feature_size = 2048
model = DMPNN(mode='regression',
global_features_size=morgan_feature_size,
n_tasks=number_of_tasks)
assert model.encoder.__repr__(
) == 'DMPNNEncoderLayer(\n (activation): ReLU()\n (dropout): Dropout(p=0.0, inplace=False)\n (W_i): Linear(in_features=147, out_features=300, bias=False)\n'\
' (W_h): Linear(in_features=300, out_features=300, bias=False)\n (W_o): Linear(in_features=433, out_features=300, bias=True)\n)'
assert model.ffn.__repr__(
) == 'PositionwiseFeedForward(\n (activation): ReLU()\n (linears): ModuleList(\n (0): Linear(in_features=2348, out_features=300, bias=True)\n '\
'(1): Linear(in_features=300, out_features=300, bias=True)\n (2): Linear(in_features=300, out_features=2, bias=True)\n )\n (dropout_p): ModuleList(\n (0): Dropout(p=0.0, inplace=False)\n '\
'(1): Dropout(p=0.0, inplace=False)\n (2): Dropout(p=0.0, inplace=False)\n )\n)'
# get output
output = model(pyg_batch)
assert output.shape == torch.Size([number_of_molecules, number_of_tasks])
required_output = torch.tensor([[0.0044, -0.0572]])
assert torch.allclose(output[0], required_output, atol=1e-4)
@pytest.mark.torch
def test_dmpnn_classification_single_task():
"""
Test DMPNN class for classification mode with 1 task
"""
try:
from torch_geometric.data import Data, Batch
except ModuleNotFoundError:
raise ImportError(
"This test requires PyTorch Geometric to be installed.")
torch.manual_seed(0)
from deepchem.models.torch_models.dmpnn import _MapperDMPNN, DMPNN
# get data
input_smile = "CC"
feat = dc.feat.DMPNNFeaturizer(features_generators=['morgan'])
graph = feat.featurize(input_smile)
mapper = _MapperDMPNN(graph[0])
atom_features, f_ini_atoms_bonds, atom_to_incoming_bonds, mapping, global_features = mapper.values
atom_features = torch.from_numpy(atom_features).float()
f_ini_atoms_bonds = torch.from_numpy(f_ini_atoms_bonds).float()
atom_to_incoming_bonds = torch.from_numpy(atom_to_incoming_bonds)
mapping = torch.from_numpy(mapping)
global_features = torch.from_numpy(global_features).float()
data = [
Data(atom_features=atom_features,
f_ini_atoms_bonds=f_ini_atoms_bonds,
atom_to_incoming_bonds=atom_to_incoming_bonds,
mapping=mapping,
global_features=global_features)
]
# prepare batch (size 1)
pyg_batch = Batch()
pyg_batch = pyg_batch.from_data_list(data)
# initialize the model
number_of_tasks = 1
number_of_classes = 2
number_of_molecules = 1
morgan_feature_size = 2048
model = DMPNN(mode='classification',
n_classes=number_of_classes,
global_features_size=morgan_feature_size,
n_tasks=number_of_tasks)
assert model.encoder.__repr__(
) == 'DMPNNEncoderLayer(\n (activation): ReLU()\n (dropout): Dropout(p=0.0, inplace=False)\n (W_i): Linear(in_features=147, out_features=300, bias=False)\n (W_h): Linear(in_features=300, out_features=300, bias=False)\n'\
' (W_o): Linear(in_features=433, out_features=300, bias=True)\n)'
assert model.ffn.__repr__(
) == 'PositionwiseFeedForward(\n (activation): ReLU()\n (linears): ModuleList(\n (0): Linear(in_features=2348, out_features=300, bias=True)\n (1): Linear(in_features=300, out_features=300, bias=True)\n '\
'(2): Linear(in_features=300, out_features=2, bias=True)\n )\n (dropout_p): ModuleList(\n (0): Dropout(p=0.0, inplace=False)\n (1): Dropout(p=0.0, inplace=False)\n (2): Dropout(p=0.0, inplace=False)\n )\n)'
# get output
output = model(pyg_batch)
assert len(output) == 2
assert output[0].shape == torch.Size(
[number_of_molecules, number_of_classes])
assert output[1].shape == torch.Size(
[number_of_molecules, number_of_classes])
required_output = torch.tensor([[0.5154,
0.4846]]), torch.tensor([[0.0044,
-0.0572]])
assert torch.allclose(output[0][0], required_output[0], atol=1e-4)
assert torch.allclose(output[1][0], required_output[1], atol=1e-4)
@pytest.mark.torch
def test_dmpnn_classification_multi_task():
"""
Test DMPNN class for classification mode with more than 1 task
"""
try:
from torch_geometric.data import Data, Batch
except ModuleNotFoundError:
raise ImportError(
"This test requires PyTorch Geometric to be installed.")
torch.manual_seed(0)
from deepchem.models.torch_models.dmpnn import _MapperDMPNN, DMPNN
# get data
input_smile = "CC"
feat = dc.feat.DMPNNFeaturizer(features_generators=['morgan'])
graph = feat.featurize(input_smile)
mapper = _MapperDMPNN(graph[0])
atom_features, f_ini_atoms_bonds, atom_to_incoming_bonds, mapping, global_features = mapper.values
atom_features = torch.from_numpy(atom_features).float()
f_ini_atoms_bonds = torch.from_numpy(f_ini_atoms_bonds).float()
atom_to_incoming_bonds = torch.from_numpy(atom_to_incoming_bonds)
mapping = torch.from_numpy(mapping)
global_features = torch.from_numpy(global_features).float()
data = [
Data(atom_features=atom_features,
f_ini_atoms_bonds=f_ini_atoms_bonds,
atom_to_incoming_bonds=atom_to_incoming_bonds,
mapping=mapping,
global_features=global_features)
]
# prepare batch (size 1)
pyg_batch = Batch()
pyg_batch = pyg_batch.from_data_list(data)
# initialize the model
number_of_tasks = 2
number_of_classes = 2
number_of_molecules = 1
morgan_feature_size = 2048
model = DMPNN(mode='classification',
n_classes=number_of_classes,
global_features_size=morgan_feature_size,
n_tasks=number_of_tasks)
assert model.encoder.__repr__(
) == 'DMPNNEncoderLayer(\n (activation): ReLU()\n (dropout): Dropout(p=0.0, inplace=False)\n (W_i): Linear(in_features=147, out_features=300, bias=False)\n (W_h): Linear(in_features=300, out_features=300, bias=False)\n (W_o): Linear(in_features=433, out_features=300, bias=True)\n)'
assert model.ffn.__repr__(
) == 'PositionwiseFeedForward(\n (activation): ReLU()\n (linears): ModuleList(\n (0): Linear(in_features=2348, out_features=300, bias=True)\n (1): Linear(in_features=300, out_features=300, bias=True)\n '\
'(2): Linear(in_features=300, out_features=4, bias=True)\n )\n (dropout_p): ModuleList(\n (0): Dropout(p=0.0, inplace=False)\n (1): Dropout(p=0.0, inplace=False)\n (2): Dropout(p=0.0, inplace=False)\n )\n)'
# get output
output = model(pyg_batch)
assert len(output) == 2
assert output[0].shape == torch.Size(
[number_of_molecules, number_of_tasks, number_of_classes])
assert output[1].shape == torch.Size(
[number_of_molecules, number_of_tasks, number_of_classes])
required_output = torch.tensor([[[0.5317, 0.4683], [0.4911, 0.5089]]
]), torch.tensor([[[0.0545, -0.0724],
[0.0204, 0.0558]]])
assert torch.allclose(output[0][0], required_output[0], atol=1e-4)
assert torch.allclose(output[1][0], required_output[1], atol=1e-4)
@pytest.mark.torch
def test_dmpnn_model_regression():
"""
Test DMPNNModel class for regression mode
"""
torch.manual_seed(0)
# load sample dataset
dir = os.path.dirname(os.path.abspath(__file__))
input_file = os.path.join(dir, 'assets/freesolv_sample_5.csv')
loader = dc.data.CSVLoader(tasks=['y'],
feature_field='smiles',
featurizer=dc.feat.DMPNNFeaturizer())
dataset = loader.create_dataset(input_file)
# initialize the model
from deepchem.models.torch_models.dmpnn import DMPNNModel
model = DMPNNModel(batch_size=2)
# overfit test
model.fit(dataset, nb_epoch=30)
metric = dc.metrics.Metric(dc.metrics.mean_absolute_error,
mode="regression")
scores = model.evaluate(dataset, [metric])
assert scores['mean_absolute_error'] < 0.5
@pytest.mark.torch
def test_dmpnn_model_classification():
"""
Test DMPNNModel class for classification mode
"""
torch.manual_seed(0)
# load sample dataset
dir = os.path.dirname(os.path.abspath(__file__))
input_file = os.path.join(dir, 'assets/example_classification.csv')
loader = dc.data.CSVLoader(tasks=["outcome"],
feature_field="smiles",
featurizer=dc.feat.DMPNNFeaturizer())
dataset = loader.create_dataset(input_file)
# initialize the model
from deepchem.models.torch_models.dmpnn import DMPNNModel
mode = 'classification'
classes = 2
tasks = 1
model = DMPNNModel(mode=mode,
n_classes=classes,
n_tasks=tasks,
batch_size=2)
# overfit test
model.fit(dataset, nb_epoch=30)
metric = dc.metrics.Metric(dc.metrics.accuracy_score, mode="classification")
scores = model.evaluate(dataset, [metric], n_classes=classes)
assert scores['accuracy_score'] > 0.9
@pytest.mark.torch
def test_dmpnn_model_reload():
"""
Test DMPNNModel class for reloading the model
"""
torch.manual_seed(0)
# load sample dataset
dir = os.path.dirname(os.path.abspath(__file__))
input_file = os.path.join(dir, 'assets/freesolv_sample_5.csv')
loader = dc.data.CSVLoader(tasks=['y'],
feature_field='smiles',
featurizer=dc.feat.DMPNNFeaturizer())
dataset = loader.create_dataset(input_file)
# initialize the model
from deepchem.models.torch_models.dmpnn import DMPNNModel
model_dir = tempfile.mkdtemp()
model = DMPNNModel(model_dir=model_dir, batch_size=2)
# fit the model
model.fit(dataset, nb_epoch=10)
# reload the model
reloaded_model = DMPNNModel(model_dir=model_dir, batch_size=2)
reloaded_model.restore()
orig_predict = model.predict(dataset)
reloaded_predict = reloaded_model.predict(dataset)
assert np.all(orig_predict == reloaded_predict)
<file_sep># HIV Dataset Examples
The HIV dataset was introduced by the Drug Therapeutics
Program (DTP) AIDS Antiviral Screen, which tested the ability
to inhibit HIV replication for over 40,000 compounds.
Screening results were evaluated and placed into three
categories: confirmed inactive (CI), confirmed active (CA) and
confirmed moderately active (CM). We further combine the
latter two labels, making it a classification task between
inactive (CI) and active (CA and CM).
The data file contains a CSV table, in which the columns below
are used:
- "smiles": SMILES representation of the molecular structure
- "activity": Three-class labels for screening results: CI/CM/CA
- "HIV_active": Binary labels for screening results: 1 (CA/CM) and 0 (CI)
References:
AIDS Antiviral Screen Data. https://wiki.nci.nih.gov/display/NCIDTPdata/AIDS+Antiviral+Screen+Data
In this example we train models on the HIV collection.
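A minimal sketch of such a run is shown below; the featurizer, model and
epoch count are illustrative choices rather than the exact settings used by
the scripts in this directory:

```python
import numpy as np
import deepchem as dc

# Load the HIV dataset with graph-convolution featurization.
tasks, (train, valid, test), transformers = dc.molnet.load_hiv(featurizer="GraphConv")

# Fit a graph-convolutional classifier on the HIV_active task.
model = dc.models.GraphConvModel(n_tasks=len(tasks), mode="classification")
model.fit(train, nb_epoch=10)

# ROC-AUC is the usual metric for this heavily imbalanced dataset.
metric = dc.metrics.Metric(dc.metrics.roc_auc_score, np.mean, mode="classification")
print(model.evaluate(valid, [metric], transformers))
```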
<file_sep>"""
Test Grover Featurizer
"""
import deepchem as dc
import numpy as np
from deepchem.feat import GroverFeaturizer
from deepchem.feat.molecule_featurizers.grover_featurizer import \
GROVER_RDKIT_PROPS
from rdkit import Chem
def test_grover_featurizer():
featurizer = GroverFeaturizer()
smiles = 'CC(=O)OC1=CC=CC=C1C(=O)O'
molgraph = featurizer.featurize(smiles)[0]
assert molgraph.num_nodes == 13
assert molgraph.num_edges == 26
# 151 = 133 + 18 (133 -> one hot encoding from _ATOM_FEATURES, 18 other features)
assert molgraph.node_features.shape == (13, 151)
assert molgraph.edge_index.shape == (2, 26)
assert molgraph.edge_features.shape == (26, 165)
assert molgraph.fg_labels.shape == (len(GROVER_RDKIT_PROPS),)
smiles = 'CCC'
mol = Chem.MolFromSmiles(smiles)
num_atoms, num_bonds = mol.GetNumAtoms(), mol.GetNumBonds()
cfp_size = 1024
featurizer = GroverFeaturizer(
features_generator=dc.feat.CircularFingerprint(size=cfp_size))
molgraph = featurizer.featurize(smiles)[0]
assert molgraph.num_nodes == num_atoms
assert molgraph.num_edges == num_bonds * 2
assert molgraph.node_features.shape == (num_atoms, 151)
np.testing.assert_array_equal(molgraph.edge_index,
np.asarray([[0, 2, 1, 2], [2, 0, 2, 1]]))
assert molgraph.additional_features.shape == (cfp_size,)
<file_sep># Table of Contents
<!-- toc -->
- [Contributing to DeepChem](#contributing-to-deepchem)
- [Getting Started](#getting-started)
- [Pull Request Process](#pull-request-process)
- [Coding Conventions](#coding-conventions)
- [Documentation Conventions](#documentation-conventions)
- [The Agreement](#the-agreement)
- [DeepChem Technical Steering Committee](#deepchem-technical-steering-committee)
<!-- tocstop -->
## Contributing to DeepChem
We actively encourage community contributions to DeepChem. The first
place to start getting involved is
[the tutorials](https://deepchem.readthedocs.io/en/latest/get_started/tutorials.html).
Afterwards, we encourage contributors to take a shot at improving our documentation.
While we make an effort to provide good docs, there's plenty of room
for improvement. All docs are hosted on GitHub, either in `README.md`
files or in the `docs/` directory.
Once you've got a sense of how the package works, we encourage the use
of Github issues to discuss more complex changes, raise requests for
new features or propose changes to the global architecture of DeepChem.
Once consensus is reached on the issue, please submit a PR with proposed
modifications. All contributed code to DeepChem will be reviewed by a member
of the DeepChem team, so please make sure your code style and documentation
style match our guidelines!
### Getting Started
To develop DeepChem on your machine, we recommend using Anaconda for managing
packages. If you want to manage multiple builds of DeepChem, you can make use of
[conda environments](https://conda.io/projects/conda/en/latest/user-guide/concepts/environments.html)
to maintain separate Python package environments, each of which can be tied
to a specific build of DeepChem. Here are some tips to get started:
1. Fork the [DeepChem](https://github.com/deepchem/deepchem/) repository
and clone the forked repository
```bash
git clone https://github.com/YOUR-USERNAME/deepchem.git
cd deepchem
```
1.1. If you already have DeepChem from source, update it by running
```bash
git fetch upstream
git rebase upstream/master
```
2. Set up a new conda environment for DeepChem
```bash
conda create -n deepchem python=3.8
conda activate deepchem
```
2.1. DeepChem provides backend support for deep
learning using TensorFlow, PyTorch or JAX. By default, the deep learning frameworks
are installed with CPU support only. If GPU support is required, make sure CUDA is installed
and then install the desired deep learning framework before installing DeepChem:
- [tensorflow](https://www.tensorflow.org/install)
- [pytorch](https://pytorch.org/get-started/locally/#start-locally)
- [jax](https://github.com/google/jax#installation)
3. Install DeepChem in `develop` mode
```bash
python setup.py develop
```
This mode will symlink the Python files from the current local source tree into
the Python install. Hence, if you modify a Python file, you do not need to
reinstall DeepChem again and again.
In case you need to reinstall, uninstall DeepChem first by running
`pip uninstall deepchem` until you see `Warning: Skipping deepchem
as it is not installed`; run `python setup.py clean` and install in `develop` mode again.
Some other tips:
- Every contribution must pass the unit tests. Some tests are
[marked](https://docs.pytest.org/en/6.2.x/example/markers.html) with custom
markers like `@pytest.mark.tensorflow`. This helps mainly in two ways: 1) restricting a test run
to only the part of the code covered by a given marker and 2) giving
[flexibility](https://docs.pytest.org/en/6.2.x/example/markers.html) in running
the unit tests depending on the environment (see the sketch after this list).
- DeepChem has a number of soft requirements which can be found [here](https://deepchem.readthedocs.io/en/latest/get_started/requirements.html).
- If a commit is simple and doesn't affect any code (keep in mind that some
docstrings contain code that is used in tests), you can add `[skip ci]`
(case sensitive) somewhere in your commit message to [skip all build /
test steps](https://github.blog/changelog/2021-02-08-github-actions-skip-pull-request-and-push-workflows-with-skip-ci/). Note that changing the pull request body or title on GitHub itself has no effect.
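As a quick illustration of how markers are used (the test below is purely hypothetical; marked tests can be selected with `pytest -m torch`):

```python
import pytest


@pytest.mark.torch
def test_my_torch_feature():
    # Runs only when the torch-marked subset is selected, e.g. `pytest -m torch`.
    import torch
    assert torch.ones(2).sum().item() == 2.0
```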
### Pull Request Process
Every contribution must be a pull request and must have adequate time for
review by other committers.
A member of the Technical Steering Committee will review the pull request.
The default path of every contribution should be to merge. The discussion,
review, and merge process should be designed as corrections that move the
contribution into the path to merge. Once there are no more corrections
(i.e. no remaining dissent), changes should merge without further process.
On a successful merge, the author will be added as a member of the DeepChem organization.
### Coding Conventions
DeepChem uses these tools or styles for keeping our codes healthy.
- [YAPF](https://github.com/google/yapf) (code format)
- [Flake8](https://flake8.pycqa.org/en/latest/) (code style check)
- [mypy](http://mypy-lang.org/) (type check)
- [doctest](https://docs.python.org/3/library/doctest.html) (interactive examples)
- [pytest](https://docs.pytest.org/en/6.2.x/index.html) (unit testing)
Before making a PR, please check your codes using them.
You can confirm how to check your codes from [Coding Conventions](https://deepchem.readthedocs.io/en/latest/development_guide/coding.html).
### Documentation Conventions
DeepChem uses [Sphinx](https://www.sphinx-doc.org/en/master/) to build
[the document](https://deepchem.readthedocs.io/en/latest/index.html).
The document is automatically built by
[Numpy style docstrings](https://numpydoc.readthedocs.io/en/latest/format.html#numpydoc-docstring-guide)
in source codes and [Napoleon extension](http://www.sphinx-doc.org/en/stable/ext/napoleon.html).
For any changes or modification to source code in a PR, please don't forget to add or modify Numpy style docstrings.
## The Agreement
Contributor offers to license certain software (a “Contribution” or multiple
“Contributions”) to DeepChem, and DeepChem agrees to accept said Contributions,
under the terms of the open source license [The MIT License](https://opensource.org/licenses/MIT)
The Contributor understands and agrees that DeepChem shall have the
irrevocable and perpetual right to make and distribute copies of any Contribution, as
well as to create and distribute collective works and derivative works of any Contribution,
under [The MIT License](https://opensource.org/licenses/MIT).
DeepChem understands and agrees that Contributor retains copyright in its Contributions.
Nothing in this Contributor Agreement shall be interpreted to prohibit Contributor
from licensing its Contributions under different terms from the
[The MIT License](https://opensource.org/licenses/MIT) or this Contributor Agreement.
## DeepChem Technical Steering Committee
The Technical Steering Committee admits and oversees all top-level projects and resources of DeepChem.
The TSC exercises autonomy in setting up and maintaining procedures, policies,
and management and administrative structures as it deems appropriate for the
maintenance and operation of these projects and resources.
Included in the responsibilities of the TSC are:
* Managing code and documentation creation and changes for the listed projects and resources
* Performing code reviews on incoming pull requests and merging suitable code changes.
* Setting and maintaining standards covering contributions of code, documentation and other materials
* Managing code and binary releases: types, schedules, frequency, delivery mechanisms
* Making decisions regarding dependencies of DeepChem, including what those
dependencies are and how they are bundled with source code and releases
* Creating new repositories and projects under the deepchem GitHub organization as required
* Setting overall technical direction for the DeepChem project, including
high-level goals and low-level specifics regarding features and functionality
* Setting and maintaining appropriate standards for community discourse via the various
mediums under TSC control (gitter, facebook, blog)
Members of the TSC will meet regularly (over phone or video conferencing)
to coordinate efforts. Minutes from the TSC meetings will be published publicly on an ongoing basis.
The current members of the TSC are (alphabetically)
* <NAME>
* <NAME>
* <NAME>
* <NAME>
If you want to join the technical steering committee, you will need to
submit an application. The application process is relatively lightweight: submit
a one page document discussing your past contributions to DeepChem and
propose potential projects you could commit to working on as a member of the
steering committee. Note that steering committee membership comes with responsibilities.
In particular, you will need to commit to spending about 10 hours a
week working on DeepChem. The committee will review your application, and
if suitable, will accept you as a probationary member of the TSC. Your
application will be posted publicly to the DeepChem blog if accepted. Membership
on the committee will be confirmed after 6 months if you’ve successfully
implemented some of your proposed projects and demonstrated your ability
to meet the necessary time commitment.
<file_sep>"""
Grover Featurizer.
The adaptation is based on https://github.com/tencent-ailab/grover/blob/0421d97a5e1bd1b59d1923e3afd556afbe4ff782/grover/data/molgraph.py
"""
from typing import Optional
import numpy as np
from deepchem.feat.graph_data import GraphData
from deepchem.feat.molecule_featurizers import RDKitDescriptors
from deepchem.feat.base_classes import MolecularFeaturizer
from deepchem.utils.molecule_feature_utils import one_hot_encode
from deepchem.utils.typing import RDKitMol
from rdkit import Chem
GROVER_RDKIT_PROPS = [
'fr_Al_COO', 'fr_Al_OH', 'fr_Al_OH_noTert', 'fr_ArN', 'fr_Ar_COO',
'fr_Ar_N', 'fr_Ar_NH', 'fr_Ar_OH', 'fr_COO', 'fr_COO2', 'fr_C_O',
'fr_C_O_noCOO', 'fr_C_S', 'fr_HOCCN', 'fr_Imine', 'fr_NH0', 'fr_NH1',
'fr_NH2', 'fr_N_O', 'fr_Ndealkylation1', 'fr_Ndealkylation2',
'fr_Nhpyrrole', 'fr_SH', 'fr_aldehyde', 'fr_alkyl_carbamate',
'fr_alkyl_halide', 'fr_allylic_oxid', 'fr_amide', 'fr_amidine',
'fr_aniline', 'fr_aryl_methyl', 'fr_azide', 'fr_azo', 'fr_barbitur',
'fr_benzene', 'fr_benzodiazepine', 'fr_bicyclic', 'fr_diazo',
'fr_dihydropyridine', 'fr_epoxide', 'fr_ester', 'fr_ether', 'fr_furan',
'fr_guanido', 'fr_halogen', 'fr_hdrzine', 'fr_hdrzone', 'fr_imidazole',
'fr_imide', 'fr_isocyan', 'fr_isothiocyan', 'fr_ketone',
'fr_ketone_Topliss', 'fr_lactam', 'fr_lactone', 'fr_methoxy',
'fr_morpholine', 'fr_nitrile', 'fr_nitro', 'fr_nitro_arom',
'fr_nitro_arom_nonortho', 'fr_nitroso', 'fr_oxazole', 'fr_oxime',
'fr_para_hydroxylation', 'fr_phenol', 'fr_phenol_noOrthoHbond',
'fr_phos_acid', 'fr_phos_ester', 'fr_piperdine', 'fr_piperzine',
'fr_priamide', 'fr_prisulfonamd', 'fr_pyridine', 'fr_quatN', 'fr_sulfide',
'fr_sulfonamd', 'fr_sulfone', 'fr_term_acetylene', 'fr_tetrazole',
'fr_thiazole', 'fr_thiocyan', 'fr_thiophene', 'fr_unbrch_alkane', 'fr_urea'
]
class GroverFeaturizer(MolecularFeaturizer):
"""Featurizer for GROVER Model
The Grover Featurizer is used to compute features suitable for the GROVER model.
It accepts an rdkit molecule of type `rdkit.Chem.rdchem.Mol` or a SMILES string
as input and computes the following sets of features:
1. a molecular graph from the input molecule
2. functional groups which are used **only** during pretraining
3. additional features which can **only** be used during finetuning
Parameters
----------
features_generator: dc.feat.Featurizer, optional
Given a molecular dataset, it is possible to extract additional molecular features in order
to train and finetune from the existing pretrained model. The `features_generator` can
be used to generate additional features for the molecule.
References
----------
.. [1] <NAME>, et al. "Self-supervised graph transformer on large-scale
molecular data." NeurIPS, 2020
Examples
--------
>>> import deepchem as dc
>>> from deepchem.feat import GroverFeaturizer
>>> feat = GroverFeaturizer(features_generator = dc.feat.CircularFingerprint())
>>> out = feat.featurize('CCC')
Note
----
This class requires RDKit to be installed.
"""
def __init__(self,
features_generator: Optional[MolecularFeaturizer] = None,
bond_drop_rate: float = 0.0):
self.featurizer = features_generator
self.functional_group_generator = RDKitDescriptors(
descriptors=GROVER_RDKIT_PROPS, labels_only=True)
self.bond_drop_rate = bond_drop_rate
def _get_atom_features(self, atom, mol):
from deepchem.feat.molecule_featurizers.dmpnn_featurizer import atom_features
features = atom_features(atom)
atom_idx = atom.GetIdx()
hydrogen_donor = Chem.MolFromSmarts(
"[$([N;!H0;v3,v4&+1]),$([O,S;H1;+0]),n&H1&+0]")
hydrogen_acceptor = Chem.MolFromSmarts(
"[$([O,S;H1;v2;!$(*-*=[O,N,P,S])]),$([O,S;H0;v2]),$([O,S;-]),$([N;v3;!$(N-*=[O,N,P,S])]),"
"n&H0&+0,$([o,s;+0;!$([o,s]:n);!$([o,s]:c:n)])]")
acidic = Chem.MolFromSmarts("[$([C,S](=[O,S,P])-[O;H1,-1])]")
basic = Chem.MolFromSmarts(
"[#7;+,$([N;H2&+0][$([C,a]);!$([C,a](=O))]),$([N;H1&+0]([$([C,a]);!$([C,a](=O))])[$([C,a]);"
"!$([C,a](=O))]),$([N;H0&+0]([C;!$(C(=O))])([C;!$(C(=O))])[C;!$(C(=O))])]"
)
hydrogen_donor_match = sum(mol.GetSubstructMatches(hydrogen_donor), ())
hydrogen_acceptor_match = sum(
mol.GetSubstructMatches(hydrogen_acceptor), ())
acidic_match = sum(mol.GetSubstructMatches(acidic), ())
basic_match = sum(mol.GetSubstructMatches(basic), ())
ring_info = mol.GetRingInfo()
features = features + \
one_hot_encode(atom.GetImplicitValence(), [0, 1, 2, 3, 4, 5, 6], include_unknown_set=True) + \
[atom_idx in hydrogen_acceptor_match] + \
[atom_idx in hydrogen_donor_match] + \
[atom_idx in acidic_match] + \
[atom_idx in basic_match] + \
[ring_info.IsAtomInRingOfSize(atom_idx, 3),
ring_info.IsAtomInRingOfSize(atom_idx, 4),
ring_info.IsAtomInRingOfSize(atom_idx, 5),
ring_info.IsAtomInRingOfSize(atom_idx, 6),
ring_info.IsAtomInRingOfSize(atom_idx, 7),
ring_info.IsAtomInRingOfSize(atom_idx, 8)]
return features
def _make_mol_graph(self, mol: RDKitMol) -> GraphData:
from deepchem.feat.molecule_featurizers.dmpnn_featurizer import bond_features
smiles = Chem.MolToSmiles(mol)
n_atoms = mol.GetNumAtoms() # number of atoms
f_atoms = [] # mapping from atom index to atom features
f_bonds = [
] # mapping from bond index to concat(from_atom, bond) features
edge_index = []
for _, atom in enumerate(mol.GetAtoms()):
f_atoms.append(self._get_atom_features(atom, mol))
for a1 in range(n_atoms):
for a2 in range(a1 + 1, n_atoms):
bond = mol.GetBondBetweenAtoms(a1, a2)
if bond is None:
continue
if np.random.binomial(1, self.bond_drop_rate):
continue
f_bond = bond_features(bond)
# Always treat the bond as directed.
f_bonds.append(f_atoms[a1] + f_bond)
f_bonds.append(f_atoms[a2] + f_bond)
edge_index.extend([[a1, a2], [a2, a1]])
molgraph = GraphData(node_features=np.asarray(f_atoms),
edge_index=np.asarray(edge_index).T,
edge_features=np.asarray(f_bonds),
smiles=smiles)
return molgraph
def _featurize(self, datapoint: RDKitMol, **kwargs) -> GraphData:
"""Featurize a single input molecule.
Parameters
----------
datapoint: RDKitMol
RDKit molecule to featurize.
Returns
-------
output: GraphData
Molecular graph generated by the Grover featurizer, with functional
group labels (and, optionally, additional features) attached as attributes.
"""
molgraph = self._make_mol_graph(datapoint)
setattr(molgraph, 'fg_labels',
self.functional_group_generator.featurize(datapoint)[0])
if self.featurizer:
setattr(molgraph, 'additional_features',
self.featurizer.featurize(datapoint)[0])
return molgraph
<file_sep>Layers Cheatsheet
-----------------
The "layers cheatsheet" lists various scientifically relevant differentiable layers implemented in DeepChem.
Note that some layers are implemented for specific model architectures such as :code:`GROVER`
and :code:`Attention` layers; this is indicated in the `Model` column of the table.
In order to use the layers, make sure that the corresponding backend (Keras with TensorFlow, PyTorch or JAX) is installed.
**Tensorflow Keras Layers**
These layers are subclasses of the :code:`tensorflow.keras.layers.Layer` class.
.. csv-table:: Custom Keras Layers
:file: ./keras_layers.csv
:width: 100%
:header-rows: 1
**PyTorch**
These layers are subclasses of the :code:`torch.nn.Module` class.
.. csv-table:: Custom PyTorch Layers
:file: ./torch_layers.csv
:width: 100%
:header-rows: 1
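As a quick orientation, the custom PyTorch layers can be imported from
:code:`deepchem.models.torch_models.layers` and called like any other
:code:`torch.nn.Module`. The snippet below is a minimal illustrative sketch using
:code:`MXMNetBesselBasisLayer`; the parameter values are arbitrary.

.. code-block:: python

    import torch
    import deepchem as dc

    layer = dc.models.torch_models.layers.MXMNetBesselBasisLayer(
        num_radial=2, cutoff=2.0, envelope_exponent=2)
    distances = torch.tensor([0.5, 1.0, 2.0, 3.0])
    output = layer(distances)  # tensor of shape (4, 2)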
<file_sep>"""
qm9 dataset loader.
"""
import os
import deepchem as dc
from deepchem.molnet.load_function.molnet_loader import TransformerGenerator, _MolnetLoader
from deepchem.data import Dataset
from typing import List, Optional, Tuple, Union
GDB9_URL = "https://deepchemdata.s3-us-west-1.amazonaws.com/datasets/gdb9.tar.gz"
QM9_CSV_URL = "https://deepchemdata.s3-us-west-1.amazonaws.com/datasets/qm9.csv"
QM9_TASKS = [
"mu", "alpha", "homo", "lumo", "gap", "r2", "zpve", "cv", "u0", "u298",
"h298", "g298"
]
class _QM9Loader(_MolnetLoader):
def create_dataset(self) -> Dataset:
dataset_file = os.path.join(self.data_dir, "gdb9.sdf")
if not os.path.exists(dataset_file):
dc.utils.data_utils.download_url(url=GDB9_URL,
dest_dir=self.data_dir)
dc.utils.data_utils.untargz_file(
os.path.join(self.data_dir, "gdb9.tar.gz"), self.data_dir)
loader = dc.data.SDFLoader(tasks=self.tasks,
featurizer=self.featurizer,
sanitize=True)
return loader.create_dataset(dataset_file, shard_size=4096)
def load_qm9(
featurizer: Union[dc.feat.Featurizer, str] = dc.feat.CoulombMatrix(29),
splitter: Union[dc.splits.Splitter, str, None] = 'random',
transformers: List[Union[TransformerGenerator, str]] = ['normalization'],
reload: bool = True,
data_dir: Optional[str] = None,
save_dir: Optional[str] = None,
**kwargs
) -> Tuple[List[str], Tuple[Dataset, ...], List[dc.trans.Transformer]]:
"""Load QM9 dataset
QM9 is a comprehensive dataset that provides geometric, energetic,
electronic and thermodynamic properties for a subset of GDB-17
database, comprising 134 thousand stable organic molecules with up
to 9 heavy atoms. All molecules are modeled using density
functional theory (B3LYP/6-31G(2df,p) based DFT).
Random splitting is recommended for this dataset.
The source data contain:
- qm9.sdf: molecular structures
- qm9.sdf.csv: tables for molecular properties
- "mol_id" - Molecule ID (gdb9 index) mapping to the .sdf file
- "A" - Rotational constant (unit: GHz)
- "B" - Rotational constant (unit: GHz)
- "C" - Rotational constant (unit: GHz)
- "mu" - Dipole moment (unit: D)
- "alpha" - Isotropic polarizability (unit: Bohr^3)
- "homo" - Highest occupied molecular orbital energy (unit: Hartree)
- "lumo" - Lowest unoccupied molecular orbital energy (unit: Hartree)
- "gap" - Gap between HOMO and LUMO (unit: Hartree)
- "r2" - Electronic spatial extent (unit: Bohr^2)
- "zpve" - Zero point vibrational energy (unit: Hartree)
- "u0" - Internal energy at 0K (unit: Hartree)
- "u298" - Internal energy at 298.15K (unit: Hartree)
- "h298" - Enthalpy at 298.15K (unit: Hartree)
- "g298" - Free energy at 298.15K (unit: Hartree)
- "cv" - Heat capavity at 298.15K (unit: cal/(mol*K))
- "u0_atom" - Atomization energy at 0K (unit: kcal/mol)
- "u298_atom" - Atomization energy at 298.15K (unit: kcal/mol)
- "h298_atom" - Atomization enthalpy at 298.15K (unit: kcal/mol)
- "g298_atom" - Atomization free energy at 298.15K (unit: kcal/mol)
"u0_atom" ~ "g298_atom" (used in MoleculeNet) are calculated from the
differences between "u0" ~ "g298" and sum of reference energies of all
atoms in the molecules, as given in
https://figshare.com/articles/Atomref%3A_Reference_thermochemical_energies_of_H%2C_C%2C_N%2C_O%2C_F_atoms./1057643
Parameters
----------
featurizer: Featurizer or str
the featurizer to use for processing the data. Alternatively you can pass
one of the names from dc.molnet.featurizers as a shortcut.
splitter: Splitter or str
the splitter to use for splitting the data into training, validation, and
test sets. Alternatively you can pass one of the names from
dc.molnet.splitters as a shortcut. If this is None, all the data
will be included in a single dataset.
transformers: list of TransformerGenerators or strings
the Transformers to apply to the data. Each one is specified by a
TransformerGenerator or, as a shortcut, one of the names from
dc.molnet.transformers.
reload: bool
if True, the first call for a particular featurizer and splitter will cache
the datasets to disk, and subsequent calls will reload the cached datasets.
data_dir: str
a directory to save the raw data in
save_dir: str
a directory to save the dataset in
Note
----
DeepChem 2.4.0 has turned on sanitization for this dataset by
default. For the QM9 dataset, this means that calling this
function will return 132480 compounds instead of 133885 in the
source dataset file. This appears to be due to valence
specification mismatches in the dataset that weren't caught in
earlier more lax versions of RDKit. Note that this may subtly
affect benchmarking results on this dataset.
References
----------
.. [1] Blum, <NAME>., and <NAME>. "970 million druglike small
molecules for virtual screening in the chemical universe database GDB-13."
Journal of the American Chemical Society 131.25 (2009): 8732-8733.
.. [2] Ramakrishnan, Raghunathan, et al. "Quantum chemistry structures and
properties of 134 kilo molecules." Scientific data 1 (2014): 140022.
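Examples
--------
A minimal usage sketch (not written as a doctest, since it downloads and
featurizes the full QM9 dataset):

.. code-block:: python

    import deepchem as dc

    tasks, (train, valid, test), transformers = dc.molnet.load_qm9()
    print(tasks, train.X.shape)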
"""
loader = _QM9Loader(featurizer, splitter, transformers, QM9_TASKS, data_dir,
save_dir, **kwargs)
return loader.load_dataset('qm9', reload)
<file_sep>"""
Feature calculations.
"""
import inspect
import logging
import numpy as np
from typing import Any, Dict, Iterable, Optional, Tuple, Union, cast
from deepchem.utils import get_print_threshold
from deepchem.utils.typing import PymatgenStructure
logger = logging.getLogger(__name__)
class Featurizer(object):
"""Abstract class for calculating a set of features for a datapoint.
This class is abstract and cannot be invoked directly. You'll
likely only interact with this class if you're a developer. In
that case, you might want to make a child class which
implements the `_featurize` method for calculating features for
a single datapoints if you'd like to make a featurizer for a
new datatype.
"""
def featurize(self,
datapoints: Iterable[Any],
log_every_n: int = 1000,
**kwargs) -> np.ndarray:
"""Calculate features for datapoints.
Parameters
----------
datapoints: Iterable[Any]
A sequence of objects that you'd like to featurize. Subclasses of
`Featurizer` should implement the `_featurize` method that featurizes
objects in the sequence.
log_every_n: int, default 1000
Logs featurization progress every `log_every_n` steps.
Returns
-------
np.ndarray
A numpy array containing a featurized representation of `datapoints`.
"""
datapoints = list(datapoints)
features = []
for i, point in enumerate(datapoints):
if i % log_every_n == 0:
logger.info("Featurizing datapoint %i" % i)
try:
features.append(self._featurize(point, **kwargs))
except Exception:
logger.warning(
"Failed to featurize datapoint %d. Appending empty array", i)
features.append(np.array([]))
return np.asarray(features)
def __call__(self, datapoints: Iterable[Any], **kwargs):
"""Calculate features for datapoints.
`**kwargs` will get passed directly to `Featurizer.featurize`
Parameters
----------
datapoints: Iterable[Any]
Any blob of data you like. Subclasses should specify this.
"""
return self.featurize(datapoints, **kwargs)
def _featurize(self, datapoint: Any, **kwargs):
"""Calculate features for a single datapoint.
Parameters
----------
datapoint: Any
Any blob of data you like. Subclasses should specify this.
"""
raise NotImplementedError('Featurizer is not defined.')
def __repr__(self) -> str:
"""Convert self to repr representation.
Returns
-------
str
The string represents the class.
Examples
--------
>>> import deepchem as dc
>>> dc.feat.CircularFingerprint(size=1024, radius=4)
CircularFingerprint[radius=4, size=1024, chiral=False, bonds=True, features=False, sparse=False, smiles=False, is_counts_based=False]
>>> dc.feat.CGCNNFeaturizer()
CGCNNFeaturizer[radius=8.0, max_neighbors=12, step=0.2]
"""
args_spec = inspect.getfullargspec(self.__init__) # type: ignore
args_names = [arg for arg in args_spec.args if arg != 'self']
args_info = ''
for arg_name in args_names:
value = self.__dict__[arg_name]
# for str
if isinstance(value, str):
value = "'" + value + "'"
# for list
if isinstance(value, list):
threshold = get_print_threshold()
value = np.array2string(np.array(value), threshold=threshold)
args_info += arg_name + '=' + str(value) + ', '
return self.__class__.__name__ + '[' + args_info[:-2] + ']'
def __str__(self) -> str:
"""Convert self to str representation.
Returns
-------
str
The string represents the class.
Examples
--------
>>> import deepchem as dc
>>> str(dc.feat.CircularFingerprint(size=1024, radius=4))
'CircularFingerprint_radius_4_size_1024'
>>> str(dc.feat.CGCNNFeaturizer())
'CGCNNFeaturizer'
"""
args_spec = inspect.getfullargspec(self.__init__) # type: ignore
args_names = [arg for arg in args_spec.args if arg != 'self']
args_num = len(args_names)
args_default_values = [None for _ in range(args_num)]
if args_spec.defaults is not None:
defaults = list(args_spec.defaults)
args_default_values[-len(defaults):] = defaults
override_args_info = ''
for arg_name, default in zip(args_names, args_default_values):
if arg_name in self.__dict__:
arg_value = self.__dict__[arg_name]
# validation
# skip list
if isinstance(arg_value, list):
continue
if isinstance(arg_value, str):
# skip path string
if "\\/." in arg_value or "/" in arg_value or '.' in arg_value:
continue
# main logic
if default != arg_value:
override_args_info += '_' + arg_name + '_' + str(arg_value)
return self.__class__.__name__ + override_args_info
class ComplexFeaturizer(Featurizer):
""""
Abstract class for calculating features for mol/protein complexes.
"""
def featurize(self,
datapoints: Optional[Iterable[Tuple[str, str]]] = None,
log_every_n: int = 100,
**kwargs) -> np.ndarray:
"""
Calculate features for mol/protein complexes.
Parameters
----------
datapoints: Iterable[Tuple[str, str]]
List of filenames (PDB, SDF, etc.) for ligand molecules and proteins.
Each element should be a tuple of the form (ligand_filename,
protein_filename).
Returns
-------
features: np.ndarray
Array of features
"""
if 'complexes' in kwargs:
datapoints = kwargs.get("complexes")
raise DeprecationWarning(
'Complexes is being phased out as a parameter, please pass "datapoints" instead.'
)
if not isinstance(datapoints, Iterable):
datapoints = [cast(Tuple[str, str], datapoints)]
features, failures, successes = [], [], []
for idx, point in enumerate(datapoints):
if idx % log_every_n == 0:
logger.info("Featurizing datapoint %i" % idx)
try:
features.append(self._featurize(point, **kwargs))
successes.append(idx)
except:
logger.warning(
"Failed to featurize datapoint %i. Appending empty array." %
idx)
features.append(np.zeros(1))
failures.append(idx)
# Find a successful featurization
try:
i = np.argmax([f.shape[0] for f in features])
dtype = features[i].dtype
shape = features[i].shape
dummy_array = np.zeros(shape, dtype=dtype)
except AttributeError:
dummy_array = features[successes[0]]
# Replace failed featurizations with appropriate array
for idx in failures:
features[idx] = dummy_array
return np.asarray(features)
def _featurize(self, datapoint: Optional[Tuple[str, str]] = None, **kwargs):
"""
Calculate features for single mol/protein complex.
Parameters
----------
datapoint: Tuple[str, str]
Filenames for molecule and protein.
"""
raise NotImplementedError('Featurizer is not defined.')
class MolecularFeaturizer(Featurizer):
"""Abstract class for calculating a set of features for a
molecule.
The defining feature of a `MolecularFeaturizer` is that it
uses SMILES strings and RDKit molecule objects to represent
small molecules. All other featurizers which are subclasses of
this class should plan to process input which comes as smiles
strings or RDKit molecules.
Child classes need to implement the _featurize method for
calculating features for a single molecule.
Note
----
The subclasses of this class require RDKit to be installed.
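Examples
--------
A minimal illustrative subclass; the name ``AtomCountFeaturizer`` is
hypothetical and only meant to show the pattern.

>>> import numpy as np
>>> import deepchem as dc
>>> class AtomCountFeaturizer(dc.feat.MolecularFeaturizer):
...     def _featurize(self, datapoint, **kwargs):
...         return np.array([datapoint.GetNumAtoms()])
>>> AtomCountFeaturizer().featurize(["CCO"]).shape
(1, 1)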
"""
def __init__(self, use_original_atoms_order=False):
"""
Parameters
----------
use_original_atoms_order: bool, default False
Whether to use original atom ordering or canonical ordering (default)
"""
self.use_original_atoms_order = use_original_atoms_order
def featurize(self, datapoints, log_every_n=1000, **kwargs) -> np.ndarray:
"""Calculate features for molecules.
Parameters
----------
datapoints: rdkit.Chem.rdchem.Mol / SMILES string / iterable
RDKit Mol, or SMILES string or iterable sequence of RDKit mols/SMILES
strings.
log_every_n: int, default 1000
Logging messages reported every `log_every_n` samples.
Returns
-------
features: np.ndarray
A numpy array containing a featurized representation of `datapoints`.
"""
try:
from rdkit import Chem
from rdkit.Chem import rdmolfiles
from rdkit.Chem import rdmolops
from rdkit.Chem.rdchem import Mol
except ModuleNotFoundError:
raise ImportError("This class requires RDKit to be installed.")
if 'molecules' in kwargs:
datapoints = kwargs.get("molecules")
raise DeprecationWarning(
'Molecules is being phased out as a parameter, please pass "datapoints" instead.'
)
# Special case handling of single molecule
if isinstance(datapoints, str) or isinstance(datapoints, Mol):
datapoints = [datapoints]
else:
# Convert iterables to list
datapoints = list(datapoints)
features: list = []
for i, mol in enumerate(datapoints):
if i % log_every_n == 0:
logger.info("Featurizing datapoint %i" % i)
try:
if isinstance(mol, str):
# condition if the original atom order is required
if hasattr(self, 'use_original_atoms_order'
) and self.use_original_atoms_order:
# mol must be a RDKit Mol object, so parse a SMILES
mol = Chem.MolFromSmiles(mol)
else:
# mol must be a RDKit Mol object, so parse a SMILES
mol = Chem.MolFromSmiles(mol)
# SMILES is unique, so set a canonical order of atoms
new_order = rdmolfiles.CanonicalRankAtoms(mol)
mol = rdmolops.RenumberAtoms(mol, new_order)
kwargs_per_datapoint = {}
for key in kwargs.keys():
kwargs_per_datapoint[key] = kwargs[key][i]
features.append(self._featurize(mol, **kwargs_per_datapoint))
except Exception as e:
if isinstance(mol, Chem.rdchem.Mol):
mol = Chem.MolToSmiles(mol)
logger.warning(
"Failed to featurize datapoint %d, %s. Appending empty array",
i, mol)
logger.warning("Exception message: {}".format(e))
features.append(np.array([]))
try:
return np.asarray(features)
except ValueError as e:
logger.warning("Exception message: {}".format(e))
return np.asarray(features, dtype=object)
class MaterialStructureFeaturizer(Featurizer):
"""
Abstract class for calculating a set of features for an
inorganic crystal structure.
The defining feature of a `MaterialStructureFeaturizer` is that it
operates on 3D crystal structures with periodic boundary conditions.
Inorganic crystal structures are represented by Pymatgen structure
objects. Featurizers for inorganic crystal structures that are subclasses of
this class should plan to process input which comes as pymatgen
structure objects.
This class is abstract and cannot be invoked directly. You'll
likely only interact with this class if you're a developer. Child
classes need to implement the _featurize method for calculating
features for a single crystal structure.
Note
----
Some subclasses of this class will require pymatgen and matminer to be
installed.
"""
def featurize(self,
datapoints: Optional[Iterable[Union[Dict[
str, Any], PymatgenStructure]]] = None,
log_every_n: int = 1000,
**kwargs) -> np.ndarray:
"""Calculate features for crystal structures.
Parameters
----------
datapoints: Iterable[Union[Dict, pymatgen.core.Structure]]
Iterable sequence of pymatgen structure dictionaries
or pymatgen.core.Structure. Please confirm the dictionary representations
of pymatgen.core.Structure from https://pymatgen.org/pymatgen.core.structure.html.
log_every_n: int, default 1000
Logging messages reported every `log_every_n` samples.
Returns
-------
features: np.ndarray
A numpy array containing a featurized representation of
`datapoints`.
"""
try:
from pymatgen.core import Structure
except ModuleNotFoundError:
raise ImportError("This class requires pymatgen to be installed.")
if 'structures' in kwargs:
datapoints = kwargs.get("structures")
raise DeprecationWarning(
'Structures is being phased out as a parameter, please pass "datapoints" instead.'
)
if not isinstance(datapoints, Iterable):
datapoints = [
cast(Union[Dict[str, Any], PymatgenStructure], datapoints)
]
datapoints = list(datapoints)
features = []
for idx, structure in enumerate(datapoints):
if idx % log_every_n == 0:
logger.info("Featurizing datapoint %i" % idx)
try:
if isinstance(structure, Dict):
structure = Structure.from_dict(structure)
features.append(self._featurize(structure, **kwargs))
except:
logger.warning(
"Failed to featurize datapoint %i. Appending empty array" %
idx)
features.append(np.array([]))
return np.asarray(features)
class MaterialCompositionFeaturizer(Featurizer):
"""
Abstract class for calculating a set of features for an
inorganic crystal composition.
The defining feature of a `MaterialCompositionFeaturizer` is that it
operates on 3D crystal chemical compositions.
Inorganic crystal compositions are represented by Pymatgen composition
objects. Featurizers for inorganic crystal compositions that are
subclasses of this class should plan to process input which comes as
Pymatgen composition objects.
This class is abstract and cannot be invoked directly. You'll
likely only interact with this class if you're a developer. Child
classes need to implement the _featurize method for calculating
features for a single crystal composition.
Note
----
Some subclasses of this class will require pymatgen and matminer to be
installed.
"""
def featurize(self,
datapoints: Optional[Iterable[str]] = None,
log_every_n: int = 1000,
**kwargs) -> np.ndarray:
"""Calculate features for crystal compositions.
Parameters
----------
datapoints: Iterable[str]
Iterable sequence of composition strings, e.g. "MoS2".
log_every_n: int, default 1000
Logging messages reported every `log_every_n` samples.
Returns
-------
features: np.ndarray
A numpy array containing a featurized representation of
`datapoints`.
"""
try:
from pymatgen.core import Composition
except ModuleNotFoundError:
raise ImportError("This class requires pymatgen to be installed.")
if 'compositions' in kwargs and datapoints is None:
datapoints = kwargs.get("compositions")
raise DeprecationWarning(
'Compositions is being phased out as a parameter, please pass "datapoints" instead.'
)
if not isinstance(datapoints, Iterable):
datapoints = [cast(str, datapoints)]
datapoints = list(datapoints)
features = []
for idx, composition in enumerate(datapoints):
if idx % log_every_n == 0:
logger.info("Featurizing datapoint %i" % idx)
try:
c = Composition(composition)
features.append(self._featurize(c, **kwargs))
except:
logger.warning(
"Failed to featurize datapoint %i. Appending empty array" %
idx)
features.append(np.array([]))
return np.asarray(features)
class UserDefinedFeaturizer(Featurizer):
"""Directs usage of user-computed featurizations."""
def __init__(self, feature_fields):
"""Creates user-defined-featurizer."""
self.feature_fields = feature_fields
class DummyFeaturizer(Featurizer):
"""Class that implements a no-op featurization.
This is useful when the raw dataset has to be used without featurizing the
examples. The MolNet loader requires a featurizer input and such datasets
can be used in their original form by passing this no-op featurizer.
Examples
--------
>>> import deepchem as dc
>>> smi_map = [["N#C[S-].O=C(CBr)c1ccc(C(F)(F)F)cc1>CCO.[K+]", "N#CSCC(=O)c1ccc(C(F)(F)F)cc1"], ["C1COCCN1.FCC(Br)c1cccc(Br)n1>CCN(C(C)C)C(C)C.CN(C)C=O.O", "FCC(c1cccc(Br)n1)N1CCOCC1"]]
>>> Featurizer = dc.feat.DummyFeaturizer()
>>> smi_feat = Featurizer.featurize(smi_map)
>>> smi_feat
array([['N#C[S-].O=C(CBr)c1ccc(C(F)(F)F)cc1>CCO.[K+]',
'N#CSCC(=O)c1ccc(C(F)(F)F)cc1'],
['C1COCCN1.FCC(Br)c1cccc(Br)n1>CCN(C(C)C)C(C)C.CN(C)C=O.O',
'FCC(c1cccc(Br)n1)N1CCOCC1']], dtype='<U55')
"""
def featurize(self,
datapoints: Iterable[Any],
log_every_n: int = 1000,
**kwargs) -> np.ndarray:
"""Passes through dataset, and returns the datapoint.
Parameters
----
datapoints: Iterable[Any]
A sequence of objects that you'd like to featurize.
Returns
----
datapoints: np.ndarray
A numpy array containing a featurized representation of
the datapoints.
"""
return np.asarray(datapoints)
<file_sep>import os
import unittest
import numpy as np
from deepchem.feat import create_char_to_idx, SmilesToSeq, SmilesToImage
class TestSmilesToSeq(unittest.TestCase):
"""Tests for SmilesToSeq featurizers."""
def setUp(self):
"""Setup."""
pad_len = 5
max_len = 35
filename = os.path.join(os.path.dirname(__file__), "data",
"chembl_25_small.csv")
char_to_idx = create_char_to_idx(filename, max_len=max_len)
self.feat = SmilesToSeq(char_to_idx=char_to_idx,
max_len=max_len,
pad_len=pad_len)
def test_smiles_to_seq_featurize(self):
"""Test SmilesToSeq featurization."""
smiles = ["Cn1c(=O)c2c(ncn2C)n(C)c1=O", "CC(=O)N1CN(C(C)=O)C(O)C1O"]
expected_seq_len = self.feat.max_len + 2 * self.feat.pad_len
features = self.feat.featurize(smiles)
assert features.shape[0] == len(smiles)
assert features.shape[-1] == expected_seq_len
def test_reconstruct_from_seq(self):
"""Test SMILES reconstruction from features."""
smiles = ["Cn1c(=O)c2c(ncn2C)n(C)c1=O"]
features = self.feat.featurize(smiles)
# smiles_from_seq does not support array-style (batched) inputs, so decode one sequence at a time
reconstructed_smile = self.feat.smiles_from_seq(features[0])
assert smiles[0] == reconstructed_smile
class TestSmilesToImage(unittest.TestCase):
"""Tests for SmilesToImage featurizers."""
def setUp(self):
"""Setup."""
self.smiles = [
"Cn1c(=O)c2c(ncn2C)n(C)c1=O", "CC(=O)N1CN(C(C)=O)C(O)C1O"
]
self.long_molecule_smiles = [
"CCCCCCCCCCCCCCCCCCCC(=O)OCCCNC(=O)c1ccccc1SSc1ccccc1C(=O)NCCCOC(=O)CCCCCCCCCCCCCCCCCCC"
]
def test_smiles_to_image(self):
"""Test default SmilesToImage"""
featurizer = SmilesToImage()
features = featurizer.featurize(self.smiles)
assert features.shape == (2, 80, 80, 1)
def test_smiles_to_image_with_res(self):
"""Test SmilesToImage with res"""
featurizer = SmilesToImage()
base_features = featurizer.featurize(self.smiles)
featurizer = SmilesToImage(res=0.6)
features = featurizer.featurize(self.smiles)
assert features.shape == (2, 80, 80, 1)
assert not np.allclose(base_features, features)
def test_smiles_to_image_with_image_size(self):
"""Test SmilesToImage with image_size"""
featurizer = SmilesToImage(img_size=100)
features = featurizer.featurize(self.smiles)
assert features.shape == (2, 100, 100, 1)
def test_smiles_to_image_with_max_len(self):
"""Test SmilesToImage with max_len"""
smiles_length = [len(s) for s in self.smiles]
assert smiles_length == [26, 25]
featurizer = SmilesToImage(max_len=25)
features = featurizer.featurize(self.smiles)
assert features[0].shape == (0,)
assert features[1].shape == (80, 80, 1)
def test_smiles_to_image_with_img_spec(self):
"""Test SmilesToImage with img_spec"""
featurizer = SmilesToImage()
base_features = featurizer.featurize(self.smiles)
featurizer = SmilesToImage(img_spec='engd')
features = featurizer.featurize(self.smiles)
assert features.shape == (2, 80, 80, 4)
assert not np.allclose(base_features, features)
def test_smiles_to_image_long_molecule(self):
"""Test SmilesToImage for a molecule which does not fit the image"""
featurizer = SmilesToImage(img_size=80,
res=0.5,
max_len=250,
img_spec="std")
features = featurizer.featurize(self.long_molecule_smiles)
assert features.shape == (1, 0)
<file_sep>"""
Script that trains graph-conv models on clintox dataset.
@author <NAME>
"""
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
import numpy as np
from deepchem.models import GraphConvModel
np.random.seed(123)
import tensorflow as tf
tf.random.set_seed(123)
import deepchem as dc
from deepchem.molnet import load_clintox
# Load clintox dataset
clintox_tasks, clintox_datasets, transformers = load_clintox(
featurizer='GraphConv', split='random')
train_dataset, valid_dataset, test_dataset = clintox_datasets
# Fit models
metric = dc.metrics.Metric(
dc.metrics.roc_auc_score, np.mean, mode="classification")
# Do setup required for tf/keras models
# Number of features on conv-mols
n_feat = 75
# Batch size of models
batch_size = 50
model = GraphConvModel(
len(clintox_tasks), batch_size=batch_size, mode='classification')
# Fit trained model
model.fit(train_dataset, nb_epoch=10)
print("Evaluating model")
train_scores = model.evaluate(train_dataset, [metric], transformers)
valid_scores = model.evaluate(valid_dataset, [metric], transformers)
print("Train scores")
print(train_scores)
print("Validation scores")
print(valid_scores)
<file_sep>"""
Script that trains multitask models on Tox21 dataset.
"""
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
import os
import shutil
import numpy as np
import deepchem as dc
import time
from deepchem.molnet import load_tox21
# Only for debug!
np.random.seed(123)
# Load Tox21 dataset
n_features = 1024
tox21_tasks, tox21_datasets, transformers = load_tox21()
train_dataset, valid_dataset, test_dataset = tox21_datasets
K = 10
# Fit models
metric = dc.metrics.Metric(dc.metrics.roc_auc_score, np.mean)
transformers = [dc.trans.IRVTransformer(K, len(tox21_tasks), train_dataset)]
for transformer in transformers:
train_dataset = transformer.transform(train_dataset)
valid_dataset = transformer.transform(valid_dataset)
test_dataset = transformer.transform(test_dataset)
model = dc.models.TensorflowMultitaskIRVClassifier(
len(tox21_tasks), K=K, learning_rate=0.001, penalty=0.05, batch_size=32)
# Fit trained model
model.fit(train_dataset, nb_epoch=10)
print("Evaluating model")
train_scores = model.evaluate(train_dataset, [metric], transformers)
valid_scores = model.evaluate(valid_dataset, [metric], transformers)
print("Train scores")
print(train_scores)
print("Validation scores")
print(valid_scores)
<file_sep>"""
Gathers all splitters in one place for convenient imports
"""
# flake8: noqa
# basic splitter
from deepchem.splits.splitters import Splitter
from deepchem.splits.splitters import RandomSplitter
from deepchem.splits.splitters import RandomStratifiedSplitter
from deepchem.splits.splitters import RandomGroupSplitter
from deepchem.splits.splitters import SingletaskStratifiedSplitter
from deepchem.splits.splitters import IndexSplitter
from deepchem.splits.splitters import SpecifiedSplitter
# molecule splitter
from deepchem.splits.splitters import ScaffoldSplitter
from deepchem.splits.splitters import MolecularWeightSplitter
from deepchem.splits.splitters import MaxMinSplitter
from deepchem.splits.splitters import FingerprintSplitter
from deepchem.splits.splitters import ButinaSplitter
# other splitter
from deepchem.splits.task_splitter import merge_fold_datasets
from deepchem.splits.task_splitter import TaskSplitter
#################################################################
# Removed API
#################################################################
import logging
logger = logging.getLogger(__name__)
class IndiceSplitter:
def __init__(self, valid_indices=None, test_indices=None):
raise ImportError(
"IndiceSplitter was renamed to SpecifiedSplitter.\n"
"Please use SpecifiedSplitter instead of IndiceSplitter.")
<file_sep># flake8: noqa
import numpy as np
import deepchem as dc
from deepchem.feat.base_classes import MolecularFeaturizer
from deepchem.feat.mol_graphs import ConvMol, WeaveMol
from deepchem.data import DiskDataset
import logging
from typing import Optional, List, Tuple, Union, Iterable
from deepchem.utils.typing import RDKitMol, RDKitAtom
from deepchem.utils.molecule_feature_utils import one_hot_encode
def one_of_k_encoding(x, allowable_set):
"""Encodes elements of a provided set as integers.
Parameters
----------
x: object
Must be present in `allowable_set`.
allowable_set: list
List of allowable quantities.
Example
-------
>>> import deepchem as dc
>>> dc.feat.graph_features.one_of_k_encoding("a", ["a", "b", "c"])
[True, False, False]
Raises
------
`ValueError` if `x` is not in `allowable_set`.
"""
if x not in allowable_set:
raise ValueError("input {0} not in allowable set{1}:".format(
x, allowable_set))
return list(map(lambda s: x == s, allowable_set))
def one_of_k_encoding_unk(x, allowable_set):
"""Maps inputs not in the allowable set to the last element.
Unlike `one_of_k_encoding`, if `x` is not in `allowable_set`, this method
pretends that `x` is the last element of `allowable_set`.
Parameters
----------
x: object
Must be present in `allowable_set`.
allowable_set: list
List of allowable quantities.
Examples
--------
>>> dc.feat.graph_features.one_of_k_encoding_unk("s", ["a", "b", "c"])
[False, False, True]
"""
if x not in allowable_set:
x = allowable_set[-1]
return list(map(lambda s: x == s, allowable_set))
def get_intervals(l):
"""For list of lists, gets the cumulative products of the lengths
Note that we add 1 to the lengths of all lists (to avoid an empty list
propagating a 0).
Parameters
----------
l: list of lists
Returns the cumulative product of these lengths.
Examples
--------
>>> dc.feat.graph_features.get_intervals([[1], [1, 2], [1, 2, 3]])
[1, 3, 12]
>>> dc.feat.graph_features.get_intervals([[1], [], [1, 2], [1, 2, 3]])
[1, 1, 3, 12]
"""
intervals = len(l) * [0]
# Initialize with 1
intervals[0] = 1
for k in range(1, len(l)):
intervals[k] = (len(l[k]) + 1) * intervals[k - 1]
return intervals
def safe_index(l, e):
"""Gets the index of e in l, providing an index of len(l) if not found
Parameters
----------
l: list
List of values
e: object
Object to check whether `e` is in `l`
Examples
--------
>>> dc.feat.graph_features.safe_index([1, 2, 3], 1)
0
>>> dc.feat.graph_features.safe_index([1, 2, 3], 7)
3
"""
try:
return l.index(e)
except ValueError:
return len(l)
class GraphConvConstants(object):
"""This class defines a collection of constants which are useful for graph convolutions on molecules."""
possible_atom_list = [
'C', 'N', 'O', 'S', 'F', 'P', 'Cl', 'Mg', 'Na', 'Br', 'Fe', 'Ca', 'Cu',
'Mc', 'Pd', 'Pb', 'K', 'I', 'Al', 'Ni', 'Mn'
]
"""Allowed Numbers of Hydrogens"""
possible_numH_list = [0, 1, 2, 3, 4]
"""Allowed Valences for Atoms"""
possible_valence_list = [0, 1, 2, 3, 4, 5, 6]
"""Allowed Formal Charges for Atoms"""
possible_formal_charge_list = [-3, -2, -1, 0, 1, 2, 3]
"""This is a placeholder for documentation. These will be replaced with corresponding values of the rdkit HybridizationType"""
possible_hybridization_list = ["SP", "SP2", "SP3", "SP3D", "SP3D2"]
"""Allowed number of radical electrons."""
possible_number_radical_e_list = [0, 1, 2]
"""Allowed types of Chirality"""
possible_chirality_list = ['R', 'S']
"""The set of all values allowed."""
reference_lists = [
possible_atom_list, possible_numH_list, possible_valence_list,
possible_formal_charge_list, possible_number_radical_e_list,
possible_hybridization_list, possible_chirality_list
]
"""The number of different values that can be taken. See `get_intervals()`"""
intervals = get_intervals(reference_lists)
"""Possible stereochemistry. We use E-Z notation for stereochemistry
https://en.wikipedia.org/wiki/E%E2%80%93Z_notation"""
possible_bond_stereo = ["STEREONONE", "STEREOANY", "STEREOZ", "STEREOE"]
"""Number of different bond types not counting stereochemistry."""
bond_fdim_base = 6
def get_feature_list(atom):
"""Returns a list of possible features for this atom.
Parameters
----------
atom: RDKit.Chem.rdchem.Atom
Atom to get features for
Examples
--------
>>> from rdkit import Chem
>>> mol = Chem.MolFromSmiles("C")
>>> atom = mol.GetAtoms()[0]
>>> features = dc.feat.graph_features.get_feature_list(atom)
>>> type(features)
<class 'list'>
>>> len(features)
6
Note
----
This method requires RDKit to be installed.
Returns
-------
features: list
List of length 6. The i-th value in this list provides the index of the
atom in the corresponding feature value list. The 6 feature value lists
for this function are `[GraphConvConstants.possible_atom_list,
GraphConvConstants.possible_numH_list,
GraphConvConstants.possible_valence_list,
GraphConvConstants.possible_formal_charge_list,
GraphConvConstants.possible_number_radical_e_list,
GraphConvConstants.possible_hybridization_list]`.
"""
possible_atom_list = GraphConvConstants.possible_atom_list
possible_numH_list = GraphConvConstants.possible_numH_list
possible_valence_list = GraphConvConstants.possible_valence_list
possible_formal_charge_list = GraphConvConstants.possible_formal_charge_list
possible_number_radical_e_list = GraphConvConstants.possible_number_radical_e_list
possible_hybridization_list = GraphConvConstants.possible_hybridization_list
# Replace the hybridization
from rdkit import Chem
#global possible_hybridization_list
possible_hybridization_list = [
Chem.rdchem.HybridizationType.SP, Chem.rdchem.HybridizationType.SP2,
Chem.rdchem.HybridizationType.SP3, Chem.rdchem.HybridizationType.SP3D,
Chem.rdchem.HybridizationType.SP3D2
]
features = 6 * [0]
features[0] = safe_index(possible_atom_list, atom.GetSymbol())
features[1] = safe_index(possible_numH_list, atom.GetTotalNumHs())
features[2] = safe_index(possible_valence_list, atom.GetImplicitValence())
features[3] = safe_index(possible_formal_charge_list,
atom.GetFormalCharge())
features[4] = safe_index(possible_number_radical_e_list,
atom.GetNumRadicalElectrons())
features[5] = safe_index(possible_hybridization_list,
atom.GetHybridization())
return features
def features_to_id(features, intervals):
"""Convert list of features into index using spacings provided in intervals
Parameters
----------
features: list
List of features as returned by `get_feature_list()`
intervals: list
List of intervals as returned by `get_intervals()`
Returns
-------
id: int
The index in a feature vector given by the given set of features.
"""
id = 0
for k in range(len(intervals)):
id += features[k] * intervals[k]
# Shift by 1 so that index 0 corresponds to the null molecule
id = id + 1
return id
def id_to_features(id, intervals):
"""Given an index in a feature vector, return the original set of features.
Parameters
----------
id: int
The index in a feature vector given by the given set of features.
intervals: list
List of intervals as returned by `get_intervals()`
Returns
-------
features: list
List of features as returned by `get_feature_list()`
"""
features = 6 * [0]
# Correct for null
id -= 1
for k in range(0, 6 - 1):
# print(6-k-1, id)
features[6 - k - 1] = id // intervals[6 - k - 1]
id -= features[6 - k - 1] * intervals[6 - k - 1]
# Correct for last one
features[0] = id
return features
def atom_to_id(atom):
"""Return a unique id corresponding to the atom type
Parameters
----------
atom: RDKit.Chem.rdchem.Atom
Atom to convert to ids.
Returns
-------
id: int
The index in a feature vector given by the given set of features.
"""
features = get_feature_list(atom)
return features_to_id(features, GraphConvConstants.intervals)
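# Illustrative sketch (comments only): the id machinery above is a mixed-radix
# encoding, so `features_to_id` and `id_to_features` invert one another.
#
#   from rdkit import Chem
#   atom = Chem.MolFromSmiles("CC").GetAtoms()[0]
#   feats = get_feature_list(atom)
#   idx = features_to_id(feats, GraphConvConstants.intervals)
#   assert id_to_features(idx, GraphConvConstants.intervals) == feats
#   atom_to_id(atom) == idx   # atom_to_id composes the two steps above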
def atom_features(atom,
bool_id_feat=False,
explicit_H=False,
use_chirality=False):
"""Helper method used to compute per-atom feature vectors.
Many different featurization methods compute per-atom features such as ConvMolFeaturizer, WeaveFeaturizer. This method computes such features.
Parameters
----------
atom: RDKit.Chem.rdchem.Atom
Atom to compute features on.
bool_id_feat: bool, optional
Return an array of unique identifiers corresponding to atom type.
explicit_H: bool, optional
If true, model hydrogens explicitly
use_chirality: bool, optional
If true, use chirality information.
Returns
-------
features: np.ndarray
An array of per-atom features.
Examples
--------
>>> from rdkit import Chem
>>> mol = Chem.MolFromSmiles('CCC')
>>> atom = mol.GetAtoms()[0]
>>> features = dc.feat.graph_features.atom_features(atom)
>>> type(features)
<class 'numpy.ndarray'>
>>> features.shape
(75,)
"""
if bool_id_feat:
return np.array([atom_to_id(atom)])
else:
from rdkit import Chem
results = one_of_k_encoding_unk(
atom.GetSymbol(),
[
'C',
'N',
'O',
'S',
'F',
'Si',
'P',
'Cl',
'Br',
'Mg',
'Na',
'Ca',
'Fe',
'As',
'Al',
'I',
'B',
'V',
'K',
'Tl',
'Yb',
'Sb',
'Sn',
'Ag',
'Pd',
'Co',
'Se',
'Ti',
'Zn',
'H', # H?
'Li',
'Ge',
'Cu',
'Au',
'Ni',
'Cd',
'In',
'Mn',
'Zr',
'Cr',
'Pt',
'Hg',
'Pb',
'Unknown'
]) + one_of_k_encoding(atom.GetDegree(),
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]) + \
one_of_k_encoding_unk(atom.GetImplicitValence(), [0, 1, 2, 3, 4, 5, 6]) + \
[atom.GetFormalCharge(), atom.GetNumRadicalElectrons()] + \
one_of_k_encoding_unk(atom.GetHybridization(), [
Chem.rdchem.HybridizationType.SP, Chem.rdchem.HybridizationType.SP2,
Chem.rdchem.HybridizationType.SP3, Chem.rdchem.HybridizationType.
SP3D, Chem.rdchem.HybridizationType.SP3D2
]) + [atom.GetIsAromatic()]
# In case of explicit hydrogen(QM8, QM9), avoid calling `GetTotalNumHs`
if not explicit_H:
results = results + one_of_k_encoding_unk(atom.GetTotalNumHs(),
[0, 1, 2, 3, 4])
if use_chirality:
try:
results = results + one_of_k_encoding_unk(
atom.GetProp('_CIPCode'),
['R', 'S']) + [atom.HasProp('_ChiralityPossible')]
except:
results = results + [False, False
] + [atom.HasProp('_ChiralityPossible')]
return np.array(results)
def bond_features(bond, use_chirality=False, use_extended_chirality=False):
"""Helper method used to compute bond feature vectors.
Many different featurization methods compute bond features
such as WeaveFeaturizer. This method computes such features.
Parameters
----------
bond: rdkit.Chem.rdchem.Bond
Bond to compute features on.
use_chirality: bool, optional
If true, use chirality information.
use_extended_chirality: bool, optional
If true, use chirality information with upto 6 different types.
Note
----
This method requires RDKit to be installed.
Returns
-------
bond_feats: np.ndarray
Array of bond features. This is a 1-D array of length 6 if `use_chirality`
is `False` else of length 10 with chirality encoded.
bond_feats: Sequence[Union[bool, int, float]]
List of bond features returned if `use_extended_chirality` is `True`.
Examples
--------
>>> from rdkit import Chem
>>> mol = Chem.MolFromSmiles('CCC')
>>> bond = mol.GetBonds()[0]
>>> bond_features = dc.feat.graph_features.bond_features(bond)
>>> type(bond_features)
<class 'numpy.ndarray'>
>>> bond_features.shape
(6,)
"""
try:
from rdkit import Chem
except ModuleNotFoundError:
raise ImportError("This method requires RDKit to be installed.")
bt = bond.GetBondType()
bond_feats = [
bt == Chem.rdchem.BondType.SINGLE, bt == Chem.rdchem.BondType.DOUBLE,
bt == Chem.rdchem.BondType.TRIPLE, bt == Chem.rdchem.BondType.AROMATIC,
bond.GetIsConjugated(),
bond.IsInRing()
]
if use_chirality:
bond_feats = bond_feats + one_of_k_encoding_unk(
str(bond.GetStereo()), GraphConvConstants.possible_bond_stereo)
if use_extended_chirality:
stereo = one_hot_encode(int(bond.GetStereo()), list(range(6)), True)
stereo = [int(feature) for feature in stereo]
bond_feats = bond_feats + stereo
return bond_feats
return np.array(bond_feats)
def max_pair_distance_pairs(mol: RDKitMol,
max_pair_distance: Optional[int] = None
) -> np.ndarray:
"""Helper method which finds atom pairs within max_pair_distance graph distance.
This helper method is used to find atoms which are within max_pair_distance
graph_distance of one another. This is done by using the fact that the
powers of an adjacency matrix encode path connectivity information. In
particular, if `adj` is the adjacency matrix, then `adj**k` has a nonzero
value at `(i, j)` if and only if there exists a path of graph distance `k`
between `i` and `j`. To find all atoms within `max_pair_distance` of each
other, we can compute the adjacency matrix powers `[adj, adj**2,
...,adj**max_pair_distance]` and find pairs which are nonzero in any of
these matrices. Since adjacency matrices and their powers are positive
numbers, this is simply the nonzero elements of `adj + adj**2 + ... +
adj**max_pair_distance`.
Parameters
----------
mol: rdkit.Chem.rdchem.Mol
RDKit molecules
max_pair_distance: Optional[int], (default None)
This value can be a positive integer or None. This
parameter determines the maximum graph distance at which pair
features are computed. For example, if `max_pair_distance==2`,
then pair features are computed only for atoms at most graph
distance 2 apart. If `max_pair_distance` is `None`, all pairs are
considered (effectively infinite `max_pair_distance`)
Examples
--------
>>> from rdkit import Chem
>>> mol = Chem.MolFromSmiles('CCC')
>>> features = dc.feat.graph_features.max_pair_distance_pairs(mol, 1)
>>> type(features)
<class 'numpy.ndarray'>
>>> features.shape # (2, num_pairs)
(2, 7)
Returns
-------
np.ndarray
Of shape `(2, num_pairs)` where `num_pairs` is the total number of pairs
within `max_pair_distance` of one another.
"""
from rdkit import Chem
from rdkit.Chem import rdmolops
N = len(mol.GetAtoms())
if (max_pair_distance is None or max_pair_distance >= N):
max_distance = N
elif max_pair_distance is not None and max_pair_distance <= 0:
raise ValueError(
"max_pair_distance must either be a positive integer or None")
elif max_pair_distance is not None:
max_distance = max_pair_distance
adj = rdmolops.GetAdjacencyMatrix(mol)
# Handle edge case of self-pairs (i, i)
sum_adj = np.eye(N)
for i in range(max_distance):
# Increment by 1 since we don't want 0-indexing
power = i + 1
sum_adj += np.linalg.matrix_power(adj, power)
nonzero_locs = np.where(sum_adj != 0)
num_pairs = len(nonzero_locs[0])
# This creates a matrix of shape (2, num_pairs)
pair_edges = np.reshape(np.array(list(zip(nonzero_locs))), (2, num_pairs))
return pair_edges
def pair_features(
mol: RDKitMol,
bond_features_map: dict,
bond_adj_list: List,
bt_len: int = 6,
graph_distance: bool = True,
max_pair_distance: Optional[int] = None
) -> Tuple[np.ndarray, np.ndarray]:
"""Helper method used to compute atom pair feature vectors.
Many different featurization methods compute atom pair features
such as WeaveFeaturizer. Note that atom pair features could be
for pairs of atoms which aren't necessarily bonded to one
another.
Parameters
----------
mol: RDKit Mol
Molecule to compute features on.
bond_features_map: dict
Dictionary that maps pairs of atom ids (say `(2, 3)` for a bond between
atoms 2 and 3) to the features for the bond between them.
bond_adj_list: list of lists
`bond_adj_list[i]` is a list of the atom indices that atom `i` shares a
bond with. This list is symmetrical so if `j in bond_adj_list[i]` then `i
in bond_adj_list[j]`.
bt_len: int, optional (default 6)
The number of different bond types to consider.
graph_distance: bool, optional (default True)
If true, use graph distance between molecules. Else use euclidean
distance. The specified `mol` must have a conformer. Atomic
positions will be retrieved by calling `mol.getConformer(0)`.
max_pair_distance: Optional[int], (default None)
This value can be a positive integer or None. This
parameter determines the maximum graph distance at which pair
features are computed. For example, if `max_pair_distance==2`,
then pair features are computed only for atoms at most graph
distance 2 apart. If `max_pair_distance` is `None`, all pairs are
considered (effectively infinite `max_pair_distance`)
Note
----
This method requires RDKit to be installed.
Returns
-------
features: np.ndarray
Of shape `(N_edges, bt_len + max_distance + 1)`. This is the array
of pairwise features for all atom pairs, where N_edges is the
number of edges within max_pair_distance of one another in this
molecule.
pair_edges: np.ndarray
Of shape `(2, num_pairs)` where `num_pairs` is the total number of
pairs within `max_pair_distance` of one another.
"""
if graph_distance:
max_distance = 7
else:
max_distance = 1
N = mol.GetNumAtoms()
pair_edges = max_pair_distance_pairs(mol, max_pair_distance)
num_pairs = pair_edges.shape[1]
N_edges = pair_edges.shape[1]
features = np.zeros((N_edges, bt_len + max_distance + 1))
# Get mapping
mapping = {}
for n in range(N_edges):
a1, a2 = pair_edges[:, n]
mapping[(int(a1), int(a2))] = n
num_atoms = mol.GetNumAtoms()
rings = mol.GetRingInfo().AtomRings()
for a1 in range(num_atoms):
for a2 in bond_adj_list[a1]:
# first `bt_len` features are bond features(if applicable)
if (int(a1), int(a2)) not in mapping:
raise ValueError(
"Malformed molecule with bonds not in specified graph distance."
)
else:
n = mapping[(int(a1), int(a2))]
features[n, :bt_len] = np.asarray(bond_features_map[tuple(
sorted((a1, a2)))],
dtype=float)
for ring in rings:
if a1 in ring:
for a2 in ring:
if (int(a1), int(a2)) not in mapping:
# For ring pairs outside max pairs distance continue
continue
else:
n = mapping[(int(a1), int(a2))]
# `bt_len`-th feature is if the pair of atoms are in the same ring
if a2 == a1:
features[n, bt_len] = 0
else:
features[n, bt_len] = 1
# graph distance between two atoms
if graph_distance:
# distance is a matrix of 1-hot encoded distances for all atoms
distance = find_distance(a1,
num_atoms,
bond_adj_list,
max_distance=max_distance)
for a2 in range(num_atoms):
if (int(a1), int(a2)) not in mapping:
# For ring pairs outside max pairs distance continue
continue
else:
n = mapping[(int(a1), int(a2))]
features[n, bt_len + 1:] = distance[a2]
# Euclidean distance between atoms
if not graph_distance:
coords = np.zeros((N, 3))
for atom in range(N):
pos = mol.GetConformer(0).GetAtomPosition(atom)
coords[atom, :] = [pos.x, pos.y, pos.z]
# `features` has one row per pair edge, so compute the full N x N distance
# matrix and assign each pair's Euclidean distance to its edge row.
pairwise_dist = np.sqrt(np.sum(np.square(
np.stack([coords] * N, axis=1) -
np.stack([coords] * N, axis=0)), axis=2))
features[:, -1] = pairwise_dist[pair_edges[0], pair_edges[1]]
return features, pair_edges
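# Illustrative sketch (comments only): building the inputs `pair_features`
# expects for propane, mirroring what WeaveFeaturizer does internally below.
#
#   from rdkit import Chem
#   mol = Chem.MolFromSmiles("CCC")
#   bond_map = {
#       tuple(sorted((b.GetBeginAtomIdx(), b.GetEndAtomIdx()))): bond_features(b)
#       for b in mol.GetBonds()
#   }
#   adj = [[] for _ in range(mol.GetNumAtoms())]
#   for i, j in bond_map:
#       adj[i].append(j)
#       adj[j].append(i)
#   feats, edges = pair_features(mol, bond_map, adj)
#   feats.shape   # (num_pairs, bt_len + max_distance + 1) -> (9, 14) here
#   edges.shape   # (2, 9): all ordered atom pairs, including self-pairs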
def find_distance(a1: RDKitAtom,
num_atoms: int,
bond_adj_list,
max_distance=7) -> np.ndarray:
"""Computes distances from provided atom.
Parameters
----------
a1: RDKit atom
The source atom to compute distances from.
num_atoms: int
The total number of atoms.
bond_adj_list: list of lists
`bond_adj_list[i]` is a list of the atom indices that atom `i` shares a
bond with. This list is symmetrical so if `j in bond_adj_list[i]` then `i in
bond_adj_list[j]`.
max_distance: int, optional (default 7)
The max distance to search.
Returns
-------
distances: np.ndarray
Of shape `(num_atoms, max_distance)`. Provides a one-hot encoding of the
distances. That is, `distances[i]` is a one-hot encoding of the distance
from `a1` to atom `i`.
"""
distance = np.zeros((num_atoms, max_distance))
radial = 0
# atoms `radial` bonds away from `a1`
adj_list = set(bond_adj_list[a1])
# atoms less than `radial` bonds away
all_list = set([a1])
while radial < max_distance:
distance[list(adj_list), radial] = 1
all_list.update(adj_list)
# find atoms `radial`+1 bonds away
next_adj = set()
for adj in adj_list:
next_adj.update(bond_adj_list[adj])
adj_list = next_adj - all_list
radial = radial + 1
return distance
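# Worked sketch (comments only): distances in propane C-C-C with atom 0 as the
# source. Note `a1` is used as an atom *index* into `bond_adj_list`.
#
#   bond_adj_list = [[1], [0, 2], [1]]      # bonds 0-1 and 1-2
#   d = find_distance(0, 3, bond_adj_list)  # shape (3, 7)
#   d[1]  # one-hot at column 0 -> atom 1 is one bond away
#   d[2]  # one-hot at column 1 -> atom 2 is two bonds away
#
# In general d[a][k] == 1 means atom `a` is k + 1 bonds from the source atom.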
class ConvMolFeaturizer(MolecularFeaturizer):
"""This class implements the featurization to implement Duvenaud graph convolutions.
Duvenaud graph convolutions [1]_ construct a vector of descriptors for each
atom in a molecule. The featurizer computes that vector of local descriptors.
Examples
---------
>>> import deepchem as dc
>>> smiles = ["C", "CCC"]
>>> featurizer=dc.feat.ConvMolFeaturizer(per_atom_fragmentation=False)
>>> f = featurizer.featurize(smiles)
>>> # Using ConvMolFeaturizer to create featurized fragments derived from molecules of interest.
... # This is used only in the context of performing interpretation of models using atomic
... # contributions (atom-based model interpretation)
... smiles = ["C", "CCC"]
>>> featurizer=dc.feat.ConvMolFeaturizer(per_atom_fragmentation=True)
>>> f = featurizer.featurize(smiles)
>>> len(f) # contains 2 lists with featurized fragments from 2 mols
2
See Also
--------
Detailed examples of `GraphConvModel` interpretation are provided in Tutorial #28
References
---------
.. [1] Duvenaud, <NAME>., et al. "Convolutional networks on graphs for
learning molecular fingerprints." Advances in neural information
processing systems. 2015.
Note
----
This class requires RDKit to be installed.
"""
name = ['conv_mol']
def __init__(self,
master_atom: bool = False,
use_chirality: bool = False,
atom_properties: Iterable[str] = [],
per_atom_fragmentation: bool = False):
"""
Parameters
----------
master_atom: Boolean
if true create a fake atom with bonds to every other atom.
the initialization is the mean of the other atom features in
the molecule. This technique is briefly discussed in
Neural Message Passing for Quantum Chemistry
https://arxiv.org/pdf/1704.01212.pdf
use_chirality: Boolean
if true then make the resulting atom features aware of the
chirality of the molecules in question
atom_properties: list of string or None
properties in the RDKit Mol object to use as additional
atom-level features in the larger molecular feature. If None,
then no atom-level properties are used. Properties should be in the
RDKit mol object should be in the form
atom XXXXXXXX NAME
where XXXXXXXX is a zero-padded 8 digit number corresponding to the
zero-indexed atom index of each atom and NAME is the name of the property
provided in atom_properties. So "atom 00000000 sasa" would be the
name of the molecule level property in mol where the solvent
accessible surface area of atom 0 would be stored.
per_atom_fragmentation: Boolean
If True, then multiple "atom-depleted" versions of each molecule will be created (using featurize() method).
For each molecule, atoms are removed one at a time and the resulting molecule is featurized.
The result is a list of ConvMol objects,
one with each heavy atom removed. This is useful for subsequent model interpretation: finding atoms
favorable/unfavorable for (modelled) activity. This option is typically used in combination
with a FlatteningTransformer to split the lists into separate samples.
Since ConvMol is an object and not a numpy array, need to set dtype to
object.
"""
self.dtype = object
self.master_atom = master_atom
self.use_chirality = use_chirality
self.atom_properties = list(atom_properties)
self.per_atom_fragmentation = per_atom_fragmentation
def featurize(self,
datapoints: Union[RDKitMol, str, Iterable[RDKitMol],
Iterable[str]],
log_every_n: int = 1000,
**kwargs) -> np.ndarray:
"""
Override parent: aim is to add handling atom-depleted molecules featurization
Parameters
----------
datapoints: rdkit.Chem.rdchem.Mol / SMILES string / iterable
RDKit Mol, or SMILES string or iterable sequence of RDKit mols/SMILES
strings.
log_every_n: int, default 1000
Logging messages reported every `log_every_n` samples.
Returns
-------
features: np.ndarray
A numpy array containing a featurized representation of `datapoints`.
"""
if 'molecules' in kwargs and datapoints is None:
datapoints = kwargs.get("molecules")
raise DeprecationWarning(
'Molecules is being phased out as a parameter, please pass "datapoints" instead.'
)
features = super(ConvMolFeaturizer, self).featurize(datapoints,
log_every_n=log_every_n)
if self.per_atom_fragmentation:
# Create temporary valid ids serving to filter out failed featurizations from every sublist
# of features (i.e. every molecule's fragment list), and also totally failed sublists.
# This makes the output digestible by Loaders.
valid_frag_inds = [[
True if np.array(elt).size > 0 else False for elt in f
] for f in features]
features = np.array(
[[elt for (is_valid, elt) in zip(l, m) if is_valid
] for (l, m) in zip(valid_frag_inds, features) if any(l)],
dtype=object)
return features
def _get_atom_properties(self, atom):
"""
For a given input RDKit atom return the values of the properties
requested when initializing the featurize. See the __init__ of the
class for a full description of the names of the properties
Parameters
----------
atom: RDKit.rdchem.Atom
Atom to get the properties of
returns a numpy lists of floats of the same size as self.atom_properties
"""
values = []
for prop in self.atom_properties:
mol_prop_name = str("atom %08d %s" % (atom.GetIdx(), prop))
try:
values.append(float(atom.GetOwningMol().GetProp(mol_prop_name)))
except KeyError:
raise KeyError("No property %s found in %s in %s" %
(mol_prop_name, atom.GetOwningMol(), self))
return np.array(values)
def _featurize(self, mol):
"""Encodes mol as a ConvMol object.
If per_atom_fragmentation is True,
then for each molecule a list of ConvMolObjects
will be created"""
def per_atom(n, a):
"""
Enumerates fragments resulting from mol object,
s.t. each fragment = mol with single atom removed (all possible removals are enumerated)
Goes over nodes, deletes one at a time and updates adjacency list of lists (removes connections to that node)
Parameters
----------
n: np.array of nodes (number_of_nodes X number_of_features)
a: list of nested lists of adjacent node pairs
"""
for i in range(n.shape[0]):
new_n = np.delete(n, (i), axis=0)
new_a = []
for j, node_pair in enumerate(a):
if i != j: # don't need this pair, no more connections to deleted node
tmp_node_pair = []
for v in node_pair:
if v < i:
tmp_node_pair.append(v)
elif v > i:
tmp_node_pair.append(
v - 1
) # renumber node, because of offset after node deletion
new_a.append(tmp_node_pair)
yield new_n, new_a
# Get the node features
idx_nodes = [(a.GetIdx(),
np.concatenate(
(atom_features(a, use_chirality=self.use_chirality),
self._get_atom_properties(a))))
for a in mol.GetAtoms()]
idx_nodes.sort()  # Sort by atom index to ensure the same order as RDKit
idx, nodes = list(zip(*idx_nodes))
# Stack nodes into an array
nodes = np.vstack(nodes)
if self.master_atom:
master_atom_features = np.expand_dims(np.mean(nodes, axis=0),
axis=0)
nodes = np.concatenate([nodes, master_atom_features], axis=0)
# Get the bond list; reverse edges are added below when building the adjacency list
edge_list = [
(b.GetBeginAtomIdx(), b.GetEndAtomIdx()) for b in mol.GetBonds()
]
# Get canonical adjacency list
canon_adj_list = [[] for mol_id in range(len(nodes))]
for edge in edge_list:
canon_adj_list[edge[0]].append(edge[1])
canon_adj_list[edge[1]].append(edge[0])
if self.master_atom:
fake_atom_index = len(nodes) - 1
for index in range(len(nodes) - 1):
canon_adj_list[index].append(fake_atom_index)
if not self.per_atom_fragmentation:
return ConvMol(nodes, canon_adj_list)
else:
return [ConvMol(n, a) for n, a in per_atom(nodes, canon_adj_list)]
def feature_length(self):
return 75 + len(self.atom_properties)
def __hash__(self):
atom_properties = tuple(self.atom_properties)
return hash((self.master_atom, self.use_chirality, atom_properties))
def __eq__(self, other):
if not isinstance(self, other.__class__):
return False
return self.master_atom == other.master_atom and \
self.use_chirality == other.use_chirality and \
tuple(self.atom_properties) == tuple(other.atom_properties)
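# Illustrative sketch (assumption-laden, not part of this module): using
# per_atom_fragmentation=True together with a FlatteningTransformer to split
# the per-molecule fragment lists into individual samples. Assumes
# dc.trans.FlatteningTransformer takes the dataset to flatten as its argument
# and that "molecules.csv" is a hypothetical input file.
#
#   import deepchem as dc
#   featurizer = dc.feat.ConvMolFeaturizer(per_atom_fragmentation=True)
#   loader = dc.data.CSVLoader(tasks=["task"], feature_field="smiles",
#                              featurizer=featurizer)
#   dataset = loader.create_dataset("molecules.csv")
#   dataset = dc.trans.FlatteningTransformer(dataset).transform(dataset)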
class WeaveFeaturizer(MolecularFeaturizer):
"""This class implements the featurization to implement Weave convolutions.
Weave convolutions were introduced in [1]_. Unlike Duvenaud graph
convolutions, weave convolutions require a quadratic matrix of interaction
descriptors for each pair of atoms. These extra descriptors may provide for
additional descriptive power but at the cost of a larger featurized dataset.
Examples
--------
>>> import deepchem as dc
>>> mols = ["CCC"]
>>> featurizer = dc.feat.WeaveFeaturizer()
>>> features = featurizer.featurize(mols)
>>> type(features[0])
<class 'deepchem.feat.mol_graphs.WeaveMol'>
>>> features[0].get_num_atoms() # 3 atoms in compound
3
>>> features[0].get_num_features() # feature size
75
>>> type(features[0].get_atom_features())
<class 'numpy.ndarray'>
>>> features[0].get_atom_features().shape
(3, 75)
>>> type(features[0].get_pair_features())
<class 'numpy.ndarray'>
>>> features[0].get_pair_features().shape
(9, 14)
References
----------
.. [1] Kearnes, Steven, et al. "Molecular graph convolutions: moving beyond
fingerprints." Journal of computer-aided molecular design 30.8 (2016):
595-608.
Note
----
This class requires RDKit to be installed.
"""
name = ['weave_mol']
def __init__(self,
graph_distance: bool = True,
explicit_H: bool = False,
use_chirality: bool = False,
max_pair_distance: Optional[int] = None):
"""Initialize this featurizer with set parameters.
Parameters
----------
graph_distance: bool, (default True)
If True, use graph distance for distance features. Otherwise, use
Euclidean distance. Note that this means that molecules that this
featurizer is invoked on must have valid conformer information if this
option is set.
explicit_H: bool, (default False)
If true, model hydrogens in the molecule.
use_chirality: bool, (default False)
If true, use chiral information in the featurization
max_pair_distance: Optional[int], (default None)
This value can be a positive integer or None. This
parameter determines the maximum graph distance at which pair
features are computed. For example, if `max_pair_distance==2`,
then pair features are computed only for atoms at most graph
distance 2 apart. If `max_pair_distance` is `None`, all pairs are
considered (effectively infinite `max_pair_distance`)
"""
# Distance is either graph distance (True) or Euclidean distance (False);
# Euclidean distance is only supported for inputs providing Cartesian coordinates.
self.graph_distance = graph_distance
# Set dtype
self.dtype = object
# If includes explicit hydrogens
self.explicit_H = explicit_H
# If uses use_chirality
self.use_chirality = use_chirality
if isinstance(max_pair_distance, int) and max_pair_distance <= 0:
raise ValueError(
"max_pair_distance must either be a positive integer or None")
self.max_pair_distance = max_pair_distance
if self.use_chirality:
self.bt_len = int(GraphConvConstants.bond_fdim_base) + len(
GraphConvConstants.possible_bond_stereo)
else:
self.bt_len = int(GraphConvConstants.bond_fdim_base)
def _featurize(self, mol):
"""Encodes mol as a WeaveMol object."""
# Atom features
idx_nodes = [(a.GetIdx(),
atom_features(a,
explicit_H=self.explicit_H,
use_chirality=self.use_chirality))
for a in mol.GetAtoms()]
idx_nodes.sort()  # Sort by atom index to ensure the same order as RDKit
idx, nodes = list(zip(*idx_nodes))
# Stack nodes into an array
nodes = np.vstack(nodes)
# Get bond lists
bond_features_map = {}
for b in mol.GetBonds():
bond_features_map[tuple(
sorted([b.GetBeginAtomIdx(),
b.GetEndAtomIdx()
]))] = bond_features(b, use_chirality=self.use_chirality)
# Get canonical adjacency list
bond_adj_list = [[] for mol_id in range(len(nodes))]
for bond in bond_features_map.keys():
bond_adj_list[bond[0]].append(bond[1])
bond_adj_list[bond[1]].append(bond[0])
# Calculate pair features
pairs, pair_edges = pair_features(
mol,
bond_features_map,
bond_adj_list,
bt_len=self.bt_len,
graph_distance=self.graph_distance,
max_pair_distance=self.max_pair_distance)
return WeaveMol(nodes, pairs, pair_edges)
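# Illustrative sketch (comments only): restricting pair features to nearby
# atoms with max_pair_distance, which shrinks the quadratic pair matrix for
# large molecules. For propane only bonded pairs plus self-pairs remain.
#
#   import deepchem as dc
#   featurizer = dc.feat.WeaveFeaturizer(max_pair_distance=1)
#   feats = featurizer.featurize(["CCC"])
#   feats[0].get_pair_features().shape   # (7, 14): 4 bonded pairs + 3 self-pairs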
<file_sep>from __future__ import absolute_import, division, print_function
from collections import OrderedDict
import numpy as np
import simdna
from simdna.synthetic import (
RepeatedEmbedder, SubstringEmbedder, ReverseComplementWrapper,
UniformPositionGenerator, InsideCentralBp,
LoadedEncodeMotifs, PwmSamplerFromLoadedMotifs,
UniformIntegerGenerator, ZeroOrderBackgroundGenerator,
EmbedInABackground, GenerateSequenceNTimes,
RandomSubsetOfEmbedders, IsInTraceLabelGenerator,
EmbeddableEmbedder, PairEmbeddableGenerator,
)
from simdna.util import DiscreteDistribution
loaded_motifs = LoadedEncodeMotifs(simdna.ENCODE_MOTIFS_PATH,
pseudocountProb=0.001)
def get_distribution(GC_fraction):
return DiscreteDistribution({
'A': (1 - GC_fraction) / 2, 'C': GC_fraction / 2,
'G': GC_fraction / 2, 'T': (1 - GC_fraction) / 2})
def simple_motif_embedding(motif_name, seq_length, num_seqs, GC_fraction):
"""
Simulates sequences with a motif embedded anywhere in the sequence.
Parameters
----------
motif_name : str
encode motif name
seq_length : int
length of sequence
num_seqs: int
number of sequences
GC_fraction : float
GC fraction in background sequence
Returns
-------
sequence_arr : 1darray
Array with sequence strings.
embedding_arr: 1darray
Array of embedding objects.
"""
if motif_name is None:
embedders = []
else:
substring_generator = PwmSamplerFromLoadedMotifs(
loaded_motifs, motif_name)
embedders = [SubstringEmbedder(
ReverseComplementWrapper(substring_generator))]
embed_in_background = EmbedInABackground(
ZeroOrderBackgroundGenerator(
seq_length, discreteDistribution=get_distribution(GC_fraction)),
embedders)
generated_sequences = tuple(GenerateSequenceNTimes(
embed_in_background, num_seqs).generateSequences())
sequence_arr = np.array([generated_seq.seq for generated_seq in generated_sequences])
embedding_arr = [generated_seq.embeddings for generated_seq in generated_sequences]
return sequence_arr, embedding_arr
def motif_density(motif_name, seq_length, num_seqs,
min_counts, max_counts, GC_fraction,
central_bp=None):
"""
Returns sequences with multiple motif instances (motif density), along with the embeddings array.
"""
substring_generator = PwmSamplerFromLoadedMotifs(loaded_motifs, motif_name)
if central_bp is not None:
position_generator = InsideCentralBp(central_bp)
else:
position_generator = UniformPositionGenerator()
quantity_generator = UniformIntegerGenerator(min_counts, max_counts)
embedders = [
RepeatedEmbedder(
SubstringEmbedder(
ReverseComplementWrapper(
substring_generator), position_generator),
quantity_generator)]
embed_in_background = EmbedInABackground(
ZeroOrderBackgroundGenerator(
seq_length, discreteDistribution=get_distribution(GC_fraction)),
embedders)
generated_sequences = tuple(GenerateSequenceNTimes(
embed_in_background, num_seqs).generateSequences())
sequence_arr = np.array([generated_seq.seq for generated_seq in generated_sequences])
embedding_arr = [generated_seq.embeddings for generated_seq in generated_sequences]
return sequence_arr, embedding_arr
def simulate_single_motif_detection(motif_name, seq_length,
num_pos, num_neg, GC_fraction):
"""
Simulates two classes of sequences:
- Positive class sequences with a motif
embedded anywhere in the sequence
- Negative class sequences without the motif
Parameters
----------
motif_name : str
encode motif name
seq_length : int
length of sequence
num_pos : int
number of positive class sequences
num_neg : int
number of negative class sequences
GC_fraction : float
GC fraction in background sequence
Returns
-------
sequence_arr : 1darray
Array with sequence strings.
y : 1darray
Array with positive/negative class labels.
embedding_arr: 1darray
Array of embedding objects.
"""
motif_sequence_arr, positive_embedding_arr = simple_motif_embedding(
motif_name, seq_length, num_pos, GC_fraction)
random_sequence_arr, negative_embedding_arr = simple_motif_embedding(
None, seq_length, num_neg, GC_fraction)
sequence_arr = np.concatenate((motif_sequence_arr, random_sequence_arr))
y = np.array([[True]] * num_pos + [[False]] * num_neg)
embedding_arr = positive_embedding_arr + negative_embedding_arr
return sequence_arr, y, embedding_arr
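# Illustrative sketch (comments only; the motif name is an assumed ENCODE
# motif id from the loaded motif set):
#
#   seqs, y, embeddings = simulate_single_motif_detection(
#       "CTCF_known1", seq_length=100, num_pos=50, num_neg=50, GC_fraction=0.4)
#   seqs.shape        # (100,) array of sequence strings
#   y.shape           # (100, 1) boolean labels, positives first
#   len(embeddings)   # 100 lists of simdna embedding objects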
def simulate_motif_counting(motif_name, seq_length, pos_counts, neg_counts,
num_pos, num_neg, GC_fraction):
"""
Generates data for motif counting task.
Parameters
----------
motif_name : str
seq_length : int
pos_counts : list
(min_counts, max_counts) for positive set.
neg_counts : list
(min_counts, max_counts) for negative set.
num_pos : int
num_neg : int
GC_fraction : float
Returns
-------
sequence_arr : 1darray
Contains sequence strings.
y : 1darray
Contains labels.
embedding_arr: 1darray
Array of embedding objects.
"""
pos_count_sequence_array, positive_embedding_arr = motif_density(
motif_name, seq_length, num_pos,
pos_counts[0], pos_counts[1], GC_fraction)
neg_count_sequence_array, negative_embedding_arr = motif_density(
motif_name, seq_length, num_neg,
neg_counts[0], neg_counts[1], GC_fraction)
sequence_arr = np.concatenate(
(pos_count_sequence_array, neg_count_sequence_array))
y = np.array([[True]] * num_pos + [[False]] * num_neg)
embedding_arr = positive_embedding_arr + negative_embedding_arr
return sequence_arr, y, embedding_arr
def simulate_motif_density_localization(
motif_name, seq_length, center_size, min_motif_counts,
max_motif_counts, num_pos, num_neg, GC_fraction):
"""
Simulates two classes of sequences:
- Positive class sequences with multiple motif instances
in center of the sequence.
- Negative class sequences with multiple motif instances
anywhere in the sequence.
The number of motif instances is uniformly sampled
between minimum and maximum motif counts.
Parameters
----------
motif_name : str
encode motif name
seq_length : int
length of sequence
center_size : int
length of central part of the sequence where motifs can be positioned
min_motif_counts : int
minimum number of motif instances
max_motif_counts : int
maximum number of motif instances
num_pos : int
number of positive class sequences
num_neg : int
number of negative class sequences
GC_fraction : float
GC fraction in background sequence
Returns
-------
sequence_arr : 1darray
Contains sequence strings.
y : 1darray
Contains labels.
embedding_arr: 1darray
Array of embedding objects.
"""
localized_density_sequence_array, positive_embedding_arr = motif_density(
motif_name, seq_length, num_pos,
min_motif_counts, max_motif_counts, GC_fraction, center_size)
unlocalized_density_sequence_array, negative_embedding_arr = motif_density(
motif_name, seq_length, num_neg,
min_motif_counts, max_motif_counts, GC_fraction)
sequence_arr = np.concatenate(
(localized_density_sequence_array, unlocalized_density_sequence_array))
y = np.array([[True]] * num_pos + [[False]] * num_neg)
embedding_arr = positive_embedding_arr + negative_embedding_arr
return sequence_arr, y, embedding_arr
def simulate_multi_motif_embedding(motif_names, seq_length, min_num_motifs,
max_num_motifs, num_seqs, GC_fraction):
"""
Generates data for multi motif recognition task.
Parameters
----------
motif_names : list
List of strings.
seq_length : int
min_num_motifs : int
max_num_motifs : int
num_seqs : int
GC_fraction : float
Returns
-------
sequence_arr : 1darray
Contains sequence strings.
y : ndarray
Contains labels for each motif.
embedding_arr: 1darray
Array of embedding objects.
"""
def get_embedder(motif_name):
substring_generator = PwmSamplerFromLoadedMotifs(
loaded_motifs, motif_name)
return SubstringEmbedder(
ReverseComplementWrapper(substring_generator),
name=motif_name)
embedders = [get_embedder(motif_name) for motif_name in motif_names]
quantity_generator = UniformIntegerGenerator(
min_num_motifs, max_num_motifs)
combined_embedder = [RandomSubsetOfEmbedders(
quantity_generator, embedders)]
embed_in_background = EmbedInABackground(
ZeroOrderBackgroundGenerator(
seq_length, discreteDistribution=get_distribution(GC_fraction)),
combined_embedder)
generated_sequences = tuple(GenerateSequenceNTimes(
embed_in_background, num_seqs).generateSequences())
sequence_arr = np.array([generated_seq.seq for generated_seq in generated_sequences])
label_generator = IsInTraceLabelGenerator(np.asarray(motif_names))
y = np.array([label_generator.generateLabels(generated_seq)
for generated_seq in generated_sequences], dtype=bool)
embedding_arr = [generated_seq.embeddings for generated_seq in generated_sequences]
return sequence_arr, y, embedding_arr
def simulate_differential_accessibility(
pos_motif_names, neg_motif_names, seq_length,
min_num_motifs, max_num_motifs, num_pos, num_neg, GC_fraction):
"""
Generates data for differential accessibility task.
Parameters
----------
pos_motif_names : list
List of strings.
neg_motif_names : list
List of strings.
seq_length : int
min_num_motifs : int
max_num_motifs : int
num_pos : int
num_neg : int
GC_fraction : float
Returns
-------
sequence_arr : 1darray
Contains sequence strings.
y : 1darray
Contains labels.
embedding_arr: 1darray
Array of embedding objects.
"""
pos_motif_sequence_arr, _, positive_embedding_arr = simulate_multi_motif_embedding(
pos_motif_names, seq_length,
min_num_motifs, max_num_motifs, num_pos, GC_fraction)
neg_motif_sequence_arr, _, negative_embedding_arr = simulate_multi_motif_embedding(
neg_motif_names, seq_length,
min_num_motifs, max_num_motifs, num_neg, GC_fraction)
sequence_arr = np.concatenate(
(pos_motif_sequence_arr, neg_motif_sequence_arr))
y = np.array([[True]] * num_pos + [[False]] * num_neg)
embedding_arr = positive_embedding_arr + negative_embedding_arr
return sequence_arr, y, embedding_arr
def simulate_heterodimer_grammar(
motif1, motif2, seq_length,
min_spacing, max_spacing, num_pos, num_neg, GC_fraction):
"""
Simulates two classes of sequences with motif1 and motif2:
- Positive class sequences with motif1 and motif2 positioned
between min_spacing and max_spacing base pairs apart
- Negative class sequences with independent motif1 and motif2 positioned
anywhere in the sequence, not as a heterodimer grammar
Parameters
----------
seq_length : int, length of sequence
GC_fraction : float, GC fraction in background sequence
num_pos : int, number of positive class sequences
num_neg : int, number of negative class sequences
motif1 : str, encode motif name
motif2 : str, encode motif name
min_spacing : int, minimum inter motif spacing
max_spacing : int, maximum inter motif spacing
Returns
-------
sequence_arr : 1darray
Array with sequence strings.
y : 1darray
Array with positive/negative class labels.
embedding_arr: list
List of embedding objects.
"""
motif1_generator = ReverseComplementWrapper(PwmSamplerFromLoadedMotifs(loaded_motifs, motif1))
motif2_generator = ReverseComplementWrapper(PwmSamplerFromLoadedMotifs(loaded_motifs, motif2))
separation_generator = UniformIntegerGenerator(min_spacing, max_spacing)
embedder = EmbeddableEmbedder(PairEmbeddableGenerator(
motif1_generator, motif2_generator, separation_generator))
embed_in_background = EmbedInABackground(ZeroOrderBackgroundGenerator(
seq_length, discreteDistribution=get_distribution(GC_fraction)), [embedder])
generated_sequences = tuple(GenerateSequenceNTimes(
embed_in_background, num_pos).generateSequences())
grammar_sequence_arr = np.array([generated_seq.seq for generated_seq in generated_sequences])
positive_embedding_arr = [generated_seq.embeddings for generated_seq in generated_sequences]
nongrammar_sequence_arr, _, negative_embedding_arr = simulate_multi_motif_embedding(
[motif1, motif2], seq_length, 2, 2, num_neg, GC_fraction)
sequence_arr = np.concatenate(
(grammar_sequence_arr, nongrammar_sequence_arr))
y = np.array([[True]] * num_pos + [[False]] * num_neg)
embedding_arr = positive_embedding_arr + negative_embedding_arr
return sequence_arr, y, embedding_arr
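# Illustrative sketch (comments only; motif names are assumed ENCODE motif
# ids from the loaded motif set):
#
#   seqs, y, embeddings = simulate_heterodimer_grammar(
#       motif1="CTCF_known1", motif2="IRF_known1", seq_length=200,
#       min_spacing=2, max_spacing=10, num_pos=100, num_neg=100,
#       GC_fraction=0.4)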
<file_sep>"""K-Hop Layer and Adaptive Filter Module from https://arxiv.org/pdf/1706.09916.pdf"""
from __future__ import division
from __future__ import unicode_literals
__author__ = "<NAME>"
__license__ = "MIT"
import numpy as np
import tensorflow as tf
import os
import sys
import logging
from deepchem.models.tensorgraph.layers import Layer, convert_to_layers
from deepchem.models.tensorgraph import initializations, model_ops
from deepchem.models.tensorgraph import activations
class AdaptiveFilter(Layer):
def __init__(self,
num_nodes,
num_node_features,
batch_size=64,
init='glorot_uniform',
combine_method='linear',
**kwargs):
"""
Parameters
----------
num_nodes: int
Number of nodes in the graph
num_node_features: int
Number of features per node in the graph
batch_size: int, optional
Batch size used for training
init: str, optional
Initialization method for the weights
combine_method: str, optional
How to combine adjacency matrix and node features
"""
if combine_method not in ['linear', 'prod']:
raise ValueError("combine_method needs to be one of 'linear' or 'prod'")
self.num_nodes = num_nodes
self.num_node_features = num_node_features
self.batch_size = batch_size
self.init = initializations.get(init)
self.combine_method = combine_method
super(AdaptiveFilter, self).__init__(**kwargs)
def _build(self):
if self.combine_method == "linear":
self.Q = self.init(
shape=(self.num_nodes + self.num_node_features, self.num_nodes))
else:
self.Q = self.init(shape=(self.num_node_features, self.num_nodes))
self.trainable_weights = [self.Q]
def create_tensor(self, in_layers=None, set_tensors=True, **kwargs):
act_fn = activations.get('sigmoid')
if in_layers is None:
in_layers = self.in_layers
in_layers = convert_to_layers(in_layers)
self._build()
A_tilda_k = in_layers[0].out_tensor
X = in_layers[1].out_tensor
if self.combine_method == "linear":
concatenated = tf.concat([A_tilda_k, X], axis=2)
adp_fn_val = act_fn(
tf.tensordot(concatenated, self.trainable_weights[0], axes=1))
else:
adp_fn_val = act_fn(tf.matmul(A_tilda_k, tf.tensordot(X, self.Q, axes=1)))
out_tensor = adp_fn_val
if set_tensors:
self.variables = self.trainable_weights
self.out_tensor = out_tensor
return out_tensor
def none_tensors(self):
Q = self.Q
self.Q = None
out_tensor, trainable_weights, variables = self.out_tensor, self.trainable_weights, self.variables
self.out_tensor, self.trainable_weights, self.variables = None, [], []
return Q, out_tensor, trainable_weights, variables
def set_tensors(self, tensors):
self.Q, self.out_tensor, self.trainable_weights, self.variables = tensors
class KOrderGraphConv(Layer):
name = ['KOrderGraphConv']
def __init__(self,
num_nodes,
num_node_features,
batch_size=64,
init='glorot_uniform',
**kwargs):
"""
Parameters
----------
num_nodes: int
Number of nodes in the graph
num_node_features: int
Number of features per node in the graph
batch_size: int, optional
Batch size used for training
init: str, optional
Initialization method for the weights
"""
self.num_nodes = num_nodes
self.num_node_features = num_node_features
self.batch_size = batch_size
self.init = initializations.get(init)
super(KOrderGraphConv, self).__init__(**kwargs)
def _build(self):
self.W = self.init(shape=(self.num_nodes, self.num_nodes))
self.b = model_ops.zeros(shape=[
self.num_nodes,
])
self.trainable_weights = [self.W, self.b]
def create_tensor(self, in_layers=None, set_tensors=True, **kwargs):
if in_layers is None:
in_layers = self.in_layers
in_layers = convert_to_layers(in_layers)
self._build()
A_tilda_k = in_layers[0].out_tensor
X = in_layers[1].out_tensor
adp_fn_val = in_layers[2].out_tensor
attn_weights = tf.multiply(adp_fn_val, self.W)
wt_adjacency = attn_weights * A_tilda_k
out = tf.matmul(wt_adjacency, X) + tf.expand_dims(self.b, axis=1)
out_tensor = out
if set_tensors:
self.variables = self.trainable_weights
self.out_tensor = out_tensor
return out_tensor
def none_tensors(self):
W, b = self.W, self.b
self.W, self.b = None, None
out_tensor, trainable_weights, variables = self.out_tensor, self.trainable_weights, self.variables
self.out_tensor, self.trainable_weights, self.variables = None, [], []
return W, b, out_tensor, trainable_weights, variables
def set_tensors(self, tensors):
self.W, self.b, self.out_tensor, self.trainable_weights, self.variables = tensors
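# Shape sketch (illustrative, derived from the two layers above): with batch
# size B, N nodes and F node features,
#   A_tilda_k : (B, N, N)  k-hop (power of the) adjacency matrix
#   X         : (B, N, F)  node feature matrix
# AdaptiveFilter, combine_method='linear':
#   adp = sigmoid(concat([A_tilda_k, X], axis=2) @ Q),  Q : (N + F, N)
# AdaptiveFilter, combine_method='prod':
#   adp = sigmoid(A_tilda_k @ (X @ Q)),                 Q : (F, N)
# KOrderGraphConv then gates the adjacency with the learned filter:
#   out = ((adp * W) * A_tilda_k) @ X + b[:, None],     W : (N, N), b : (N,)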
<file_sep>import pytest
import deepchem as dc
import numpy as np
import unittest
try:
import tensorflow as tf # noqa: F401
has_tensorflow = True
except:
has_tensorflow = False
def generate_sequences(sequence_length, num_sequences):
for i in range(num_sequences):
seq = [
np.random.randint(10)
for x in range(np.random.randint(1, sequence_length + 1))
]
yield (seq, seq)
class TestSeqToSeq(unittest.TestCase):
@pytest.mark.tensorflow
def test_int_sequence(self):
"""Test learning to reproduce short sequences of integers."""
sequence_length = 8
tokens = list(range(10))
s = dc.models.SeqToSeq(tokens,
tokens,
sequence_length,
encoder_layers=2,
decoder_layers=2,
embedding_dimension=150,
learning_rate=0.01,
dropout=0.1)
# Train the model on random sequences. We aren't training long enough to
# really make it reliable, but I want to keep this test fast, and it should
# still be able to reproduce a reasonable fraction of input sequences.
s.fit_sequences(generate_sequences(sequence_length, 25000))
# Test it out.
tests = [seq for seq, target in generate_sequences(sequence_length, 50)]
pred1 = s.predict_from_sequences(tests, beam_width=1)
pred4 = s.predict_from_sequences(tests, beam_width=4)
embeddings = s.predict_embeddings(tests)
pred1e = s.predict_from_embeddings(embeddings, beam_width=1)
pred4e = s.predict_from_embeddings(embeddings, beam_width=4)
count1 = 0
count4 = 0
for i in range(len(tests)):
if pred1[i] == tests[i]:
count1 += 1
if pred4[i] == tests[i]:
count4 += 1
assert pred1[i] == pred1e[i]
assert pred4[i] == pred4e[i]
# Check that it got at least a quarter of them correct.
assert count1 >= 12
assert count4 >= 12
@pytest.mark.tensorflow
def test_aspuru_guzik(self):
"""Test that the aspuru_guzik encoder doesn't hard error.
This model takes too long to fit to do an overfit test
"""
train_smiles = [
'Cc1cccc(N2CCN(C(=O)C34CC5CC(CC(C5)C3)C4)CC2)c1C',
'Cn1ccnc1SCC(=O)Nc1ccc(Oc2ccccc2)cc1',
'COc1cc2c(cc1NC(=O)CN1C(=O)NC3(CCc4ccccc43)C1=O)oc1ccccc12',
'O=C1/C(=C/NC2CCS(=O)(=O)C2)c2ccccc2C(=O)N1c1ccccc1',
'NC(=O)NC(Cc1ccccc1)C(=O)O', 'CCn1c(CSc2nccn2C)nc2cc(C(=O)O)ccc21',
'CCc1cccc2c1NC(=O)C21C2C(=O)N(Cc3ccccc3)C(=O)C2C2CCCN21',
'COc1ccc(C2C(C(=O)NCc3ccccc3)=C(C)N=C3N=CNN32)cc1OC',
'CCCc1cc(=O)nc(SCC(=O)N(CC(C)C)C2CCS(=O)(=O)C2)[nH]1',
'CCn1cnc2c1c(=O)n(CC(=O)Nc1cc(C)on1)c(=O)n2Cc1ccccc1'
]
tokens = set()
for s in train_smiles:
tokens = tokens.union(set(c for c in s))
tokens = sorted(list(tokens))
max_length = max(len(s) for s in train_smiles) + 1
s = dc.models.seqtoseq.AspuruGuzikAutoEncoder(tokens, max_length)
def generate_sequences(smiles, epochs):
for i in range(epochs):
for s in smiles:
yield (s, s)
s.fit_sequences(generate_sequences(train_smiles, 100))
# Test it out.
pred1 = s.predict_from_sequences(train_smiles, beam_width=1)
pred4 = s.predict_from_sequences(train_smiles, beam_width=4)
embeddings = s.predict_embeddings(train_smiles)
pred1e = s.predict_from_embeddings(embeddings, beam_width=1)
pred4e = s.predict_from_embeddings(embeddings, beam_width=4)
for i in range(len(train_smiles)):
assert pred1[i] == pred1e[i]
assert pred4[i] == pred4e[i]
@pytest.mark.tensorflow
def test_variational(self):
"""Test using a SeqToSeq model as a variational autoenconder."""
sequence_length = 10
tokens = list(range(10))
s = dc.models.SeqToSeq(tokens,
tokens,
sequence_length,
encoder_layers=2,
decoder_layers=2,
embedding_dimension=128,
learning_rate=0.01,
variational=True)
# Actually training a VAE takes far too long for a unit test. Just run a
# few steps of training to make sure nothing crashes, then check that the
# results are at least internally consistent.
s.fit_sequences(generate_sequences(sequence_length, 1000))
for sequence, target in generate_sequences(sequence_length, 10):
pred1 = s.predict_from_sequences([sequence], beam_width=1)
embedding = s.predict_embeddings([sequence])
assert pred1 == s.predict_from_embeddings(embedding, beam_width=1)
<file_sep>"""
Metal vs non-metal classification for inorganic crystals from Materials Project.
"""
import os
import deepchem as dc
from deepchem.molnet.load_function.molnet_loader import TransformerGenerator, _MolnetLoader
from deepchem.data import Dataset
from typing import List, Optional, Tuple, Union
MPMETAL_URL = 'https://deepchemdata.s3-us-west-1.amazonaws.com/datasets/mp_is_metal.tar.gz'
MPMETAL_TASKS = ['is_metal']
class _MPMetallicityLoader(_MolnetLoader):
def create_dataset(self) -> Dataset:
dataset_file = os.path.join(self.data_dir, 'mp_is_metal.json')
targz_file = os.path.join(self.data_dir, 'mp_is_metal.tar.gz')
if not os.path.exists(dataset_file):
if not os.path.exists(targz_file):
dc.utils.data_utils.download_url(url=MPMETAL_URL,
dest_dir=self.data_dir)
dc.utils.data_utils.untargz_file(targz_file, self.data_dir)
loader = dc.data.JsonLoader(tasks=self.tasks,
feature_field="structure",
label_field="is_metal",
featurizer=self.featurizer)
return loader.create_dataset(dataset_file)
def load_mp_metallicity(
featurizer: Union[dc.feat.Featurizer, str] = dc.feat.SineCoulombMatrix(),
splitter: Union[dc.splits.Splitter, str, None] = 'random',
transformers: List[Union[TransformerGenerator, str]] = ['balancing'],
reload: bool = True,
data_dir: Optional[str] = None,
save_dir: Optional[str] = None,
**kwargs
) -> Tuple[List[str], Tuple[Dataset, ...], List[dc.trans.Transformer]]:
"""Load mp formation energy dataset.
Contains 106113 inorganic crystal structures from the Materials
Project database labeled as metals or nonmetals. In benchmark
studies, random forest models achieved a mean ROC-AUC of
0.9 during five-fold nested cross validation on this
dataset.
For more details on the dataset see [1]_. For more details
on previous benchmarks for this dataset, see [2]_.
Parameters
----------
featurizer: Featurizer or str
the featurizer to use for processing the data. Alternatively you can pass
one of the names from dc.molnet.featurizers as a shortcut.
splitter: Splitter or str
the splitter to use for splitting the data into training, validation, and
test sets. Alternatively you can pass one of the names from
dc.molnet.splitters as a shortcut. If this is None, all the data
will be included in a single dataset.
transformers: list of TransformerGenerators or strings
the Transformers to apply to the data. Each one is specified by a
TransformerGenerator or, as a shortcut, one of the names from
dc.molnet.transformers.
reload: bool
if True, the first call for a particular featurizer and splitter will cache
the datasets to disk, and subsequent calls will reload the cached datasets.
data_dir: str
a directory to save the raw data in
save_dir: str
a directory to save the dataset in
Returns
-------
tasks, datasets, transformers : tuple
tasks : list
Column names corresponding to machine learning target variables.
datasets : tuple
train, validation, test splits of data as
``deepchem.data.datasets.Dataset`` instances.
transformers : list
``deepchem.trans.transformers.Transformer`` instances applied
to dataset.
References
----------
.. [1] <NAME>*, <NAME>*, et al. (*=equal contributions) The Materials Project:
A materials genome approach to accelerating materials innovation APL Materials,
2013, 1(1), 011002. doi:10.1063/1.4812323 (2013).
.. [2] <NAME>. et al. "Benchmarking Materials Property Prediction Methods: The Matbench
Test Set and Automatminer Reference Algorithm." https://arxiv.org/abs/2005.00707 (2020)
Examples
--------
>>>
>> import deepchem as dc
>> tasks, datasets, transformers = dc.molnet.load_mp_metallicity()
>> train_dataset, val_dataset, test_dataset = datasets
>> n_tasks = len(tasks)
>> n_features = train_dataset.get_data_shape()[0]
>> model = dc.models.MultitaskRegressor(n_tasks, n_features)
"""
loader = _MPMetallicityLoader(featurizer, splitter, transformers,
MPMETAL_TASKS, data_dir, save_dir, **kwargs)
return loader.load_dataset('mp-metallicity', reload)
<file_sep>#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Mon Sep 10 06:12:10 2018
@author: zqwu
"""
import numpy as np
import tensorflow as tf
from deepchem.data import NumpyDataset, pad_features
from deepchem.metrics import to_one_hot
from deepchem.models.tensorgraph.layers import Layer, Dense, SoftMax, Reshape, \
SparseSoftMaxCrossEntropy, BatchNorm, Conv2D, MaxPool2D, WeightedError, \
Dropout, ReLU, Stack, Flatten, ReduceMax, WeightDecay
from deepchem.models.tensorgraph.layers import L2Loss, Label, Weights, Feature
from deepchem.models.tensorgraph.tensor_graph import TensorGraph
from deepchem.trans import undo_transforms
from deepchem.data.data_loader import ImageLoader
from sklearn.metrics import confusion_matrix, accuracy_score
class DRModel(TensorGraph):
def __init__(self,
n_tasks=1,
image_size=512,
n_downsample=6,
n_init_kernel=16,
n_fully_connected=[1024],
n_classes=5,
augment=False,
**kwargs):
"""
Parameters
----------
n_tasks: int
Number of tasks
image_size: int
Resolution of the input images (square)
n_downsample: int
Downsample ratio in power of 2
n_init_kernel: int
Kernel size for the first convolutional layer
n_fully_connected: list of int
Shape of FC layers after convolutions
n_classes: int
Number of classes to predict (only used in classification mode)
augment: bool
If to use data augmentation
"""
self.n_tasks = n_tasks
self.image_size = image_size
self.n_downsample = n_downsample
self.n_init_kernel = n_init_kernel
self.n_fully_connected = n_fully_connected
self.n_classes = n_classes
self.augment = augment
super(DRModel, self).__init__(**kwargs)
self.build_graph()
def build_graph(self):
# inputs placeholder
self.inputs = Feature(
shape=(None, self.image_size, self.image_size, 3), dtype=tf.float32)
# data preprocessing and augmentation
in_layer = DRAugment(
self.augment,
self.batch_size,
size=(self.image_size, self.image_size),
in_layers=[self.inputs])
# first conv layer
in_layer = Conv2D(
self.n_init_kernel,
kernel_size=7,
activation_fn=None,
in_layers=[in_layer])
in_layer = BatchNorm(in_layers=[in_layer])
in_layer = ReLU(in_layers=[in_layer])
# downsample by max pooling
res_in = MaxPool2D(
ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], in_layers=[in_layer])
for ct_module in range(self.n_downsample - 1):
# each module is a residual convolutional block
# followed by a convolutional downsample layer
in_layer = Conv2D(
self.n_init_kernel * 2**(ct_module - 1),
kernel_size=1,
activation_fn=None,
in_layers=[res_in])
in_layer = BatchNorm(in_layers=[in_layer])
in_layer = ReLU(in_layers=[in_layer])
in_layer = Conv2D(
self.n_init_kernel * 2**(ct_module - 1),
kernel_size=3,
activation_fn=None,
in_layers=[in_layer])
in_layer = BatchNorm(in_layers=[in_layer])
in_layer = ReLU(in_layers=[in_layer])
in_layer = Conv2D(
self.n_init_kernel * 2**ct_module,
kernel_size=1,
activation_fn=None,
in_layers=[in_layer])
res_a = BatchNorm(in_layers=[in_layer])
res_out = res_in + res_a
res_in = Conv2D(
self.n_init_kernel * 2**(ct_module + 1),
kernel_size=3,
stride=2,
in_layers=[res_out])
res_in = BatchNorm(in_layers=[res_in])
# max pooling over the final outcome
in_layer = ReduceMax(axis=(1, 2), in_layers=[res_in])
for layer_size in self.n_fully_connected:
# fully connected layers
in_layer = Dense(
layer_size, activation_fn=tf.nn.relu, in_layers=[in_layer])
# dropout for dense layers
#in_layer = Dropout(0.25, in_layers=[in_layer])
logit_pred = Dense(
self.n_tasks * self.n_classes, activation_fn=None, in_layers=[in_layer])
logit_pred = Reshape(
shape=(None, self.n_tasks, self.n_classes), in_layers=[logit_pred])
weights = Weights(shape=(None, self.n_tasks))
labels = Label(shape=(None, self.n_tasks), dtype=tf.int32)
output = SoftMax(logit_pred)
self.add_output(output)
loss = SparseSoftMaxCrossEntropy(in_layers=[labels, logit_pred])
weighted_loss = WeightedError(in_layers=[loss, weights])
# weight decay regularizer
# weighted_loss = WeightDecay(0.1, 'l2', in_layers=[weighted_loss])
self.set_loss(weighted_loss)
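# Hedged usage sketch (not part of the original file): DRModel is a TensorGraph
# subclass, so it is constructed and fit like other TensorGraph models. The
# dataset name below is a placeholder for an image Dataset of fundus photos.
#
#   model = DRModel(n_init_kernel=32, augment=True, batch_size=32,
#                   learning_rate=1e-5, model_dir='./dr_model')
#   model.fit(train_dataset, nb_epoch=10)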
def DRAccuracy(y, y_pred):
y_pred = np.argmax(y_pred, 1)
return accuracy_score(y, y_pred)
def DRSpecificity(y, y_pred):
y_pred = (np.argmax(y_pred, 1) > 0) * 1
y = (y > 0) * 1
TN = sum((1 - y_pred) * (1 - y))
N = sum(1 - y)
return float(TN) / N
def DRSensitivity(y, y_pred):
y_pred = (np.argmax(y_pred, 1) > 0) * 1
y = (y > 0) * 1
TP = sum(y_pred * y)
P = sum(y)
return float(TP) / P
def ConfusionMatrix(y, y_pred):
y_pred = np.argmax(y_pred, 1)
return confusion_matrix(y, y_pred)
def QuadWeightedKappa(y, y_pred):
y_pred = np.argmax(y_pred, 1)
cm = confusion_matrix(y, y_pred)
classes_y, counts_y = np.unique(y, return_counts=True)
classes_y_pred, counts_y_pred = np.unique(y_pred, return_counts=True)
E = np.zeros((classes_y.shape[0], classes_y.shape[0]))
for i, c1 in enumerate(classes_y):
for j, c2 in enumerate(classes_y_pred):
E[c1, c2] = counts_y[i] * counts_y_pred[j]
E = E / np.sum(E) * np.sum(cm)
w = np.zeros((classes_y.shape[0], classes_y.shape[0]))
for i in range(classes_y.shape[0]):
for j in range(classes_y.shape[0]):
w[i, j] = float((i - j)**2) / (classes_y.shape[0] - 1)**2
re = 1 - np.sum(w * cm) / np.sum(w * E)
return re
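# Hedged example (not in the original script) of the metric helpers above.
# `y` holds integer class labels and `y_pred` holds per-class probabilities,
# matching the signatures used throughout this file.
#
#   y = np.array([0, 1, 2, 2])
#   y_pred = np.eye(5)[[0, 1, 2, 1]]   # one-hot rows standing in for probabilities
#   DRAccuracy(y, y_pred)              # -> 0.75
#   DRSensitivity(y, y_pred)           # -> 1.0 (all nonzero labels predicted nonzero)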
class DRAugment(Layer):
def __init__(self,
augment,
batch_size,
distort_color=True,
central_crop=True,
size=(512, 512),
**kwargs):
"""
Parameters
----------
augment: bool
If to use data augmentation
batch_size: int
Number of images in the batch
distort_color: bool
If to apply random distortion on the color
central_crop: bool
If to randomly crop the sample around the center
size: int
Resolution of the input images (square)
"""
self.augment = augment
self.batch_size = batch_size
self.distort_color = distort_color
self.central_crop = central_crop
self.size = size
super(DRAugment, self).__init__(**kwargs)
def create_tensor(self, in_layers=None, set_tensors=True, **kwargs):
inputs = self._get_input_tensors(in_layers)
parent_tensor = inputs[0]
training = kwargs['training'] if 'training' in kwargs else 1.0
parent_tensor = parent_tensor / 255.0
if not self.augment:
out_tensor = parent_tensor
else:
def preprocess(img):
img = tf.image.random_flip_left_right(img)
img = tf.image.random_flip_up_down(img)
img = tf.image.rot90(img, k=np.random.randint(0, 4))
if self.distort_color:
img = tf.image.random_brightness(img, max_delta=32. / 255.)
img = tf.image.random_saturation(img, lower=0.5, upper=1.5)
img = tf.clip_by_value(img, 0.0, 1.0)
if self.central_crop:
# sample cut ratio from a clipped gaussian
img = tf.image.central_crop(img,
np.clip(
np.random.normal(1., 0.06), 0.8, 1.))
img = tf.image.resize_bilinear(
tf.expand_dims(img, 0), tf.convert_to_tensor(self.size))[0]
return img
outs = tf.map_fn(preprocess, parent_tensor)
# train/valid differences
out_tensor = training * outs + (1 - training) * parent_tensor
if set_tensors:
self.out_tensor = out_tensor
return out_tensor
<file_sep>"""
Script that trains graph-conv models on ChEMBL dataset.
"""
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
import numpy as np
from deepchem.models import GraphConvModel
np.random.seed(123)
import tensorflow as tf
tf.random.set_seed(123)
import deepchem as dc
from deepchem.molnet import load_chembl
# Load ChEMBL dataset
chembl_tasks, datasets, transformers = load_chembl(
shard_size=2000, featurizer="GraphConv", set="5thresh", split="random")
train_dataset, valid_dataset, test_dataset = datasets
# Fit models
metric = dc.metrics.Metric(dc.metrics.pearson_r2_score, np.mean)
# Do setup required for tf/keras models
# Number of features on conv-mols
n_feat = 75
# Batch size of models
batch_size = 128
model = GraphConvModel(
len(chembl_tasks), batch_size=batch_size, mode='regression')
# Fit trained model
model.fit(train_dataset, nb_epoch=20)
print("Evaluating model")
train_scores = model.evaluate(train_dataset, [metric], transformers)
valid_scores = model.evaluate(valid_dataset, [metric], transformers)
test_scores = model.evaluate(test_dataset, [metric], transformers)
print("Train scores")
print(train_scores)
print("Validation scores")
print(valid_scores)
print("Test scores")
print(test_scores)
<file_sep>import numpy as np
import copy
import random
import deepchem
class TicTacToeEnvironment(deepchem.rl.Environment):
"""
Play tictactoe against a randomly acting opponent
"""
X = np.array([1.0, 0.0])
O = np.array([0.0, 1.0]) # noqa: E741
EMPTY = np.array([0.0, 0.0])
ILLEGAL_MOVE_PENALTY = -3.0
LOSS_PENALTY = -3.0
NOT_LOSS = 0.1
DRAW_REWARD = 5.0
WIN_REWARD = 10.0
def __init__(self):
super(TicTacToeEnvironment, self).__init__([(3, 3, 2)], 9)
self.reset()
def reset(self):
self._terminated = False
self._state = [np.zeros(shape=(3, 3, 2), dtype=np.float32)]
# Randomize who goes first
if random.randint(0, 1) == 1:
move = self.get_O_move()
self._state[0][move[0]][move[1]] = TicTacToeEnvironment.O
def step(self, action):
self._state = copy.deepcopy(self._state)
row = action // 3
col = action % 3
# Illegal move -- the square is not empty
if not np.all(self._state[0][row][col] == TicTacToeEnvironment.EMPTY):
self._terminated = True
return TicTacToeEnvironment.ILLEGAL_MOVE_PENALTY
# Move X
self._state[0][row][col] = TicTacToeEnvironment.X
# Did X Win
if self.check_winner(TicTacToeEnvironment.X):
self._terminated = True
return TicTacToeEnvironment.WIN_REWARD
if self.game_over():
self._terminated = True
return TicTacToeEnvironment.DRAW_REWARD
move = self.get_O_move()
self._state[0][move[0]][move[1]] = TicTacToeEnvironment.O
# Did O Win
if self.check_winner(TicTacToeEnvironment.O):
self._terminated = True
return TicTacToeEnvironment.LOSS_PENALTY
if self.game_over():
self._terminated = True
return TicTacToeEnvironment.DRAW_REWARD
return TicTacToeEnvironment.NOT_LOSS
def get_O_move(self):
empty_squares = []
for row in range(3):
for col in range(3):
if np.all(
self._state[0][row][col] == TicTacToeEnvironment.EMPTY):
empty_squares.append((row, col))
return random.choice(empty_squares)
def check_winner(self, player):
for i in range(3):
row = np.sum(self._state[0][i][:], axis=0)
if np.all(row == player * 3):
return True
col = np.sum(self._state[0][:, i], axis=0)
if np.all(col == player * 3):
return True
diag1 = self._state[0][0][0] + self._state[0][1][1] + self._state[0][2][
2]
if np.all(diag1 == player * 3):
return True
diag2 = self._state[0][0][2] + self._state[0][1][1] + self._state[0][2][
0]
if np.all(diag2 == player * 3):
return True
return False
def game_over(self):
for i in range(3):
for j in range(3):
if np.all(self._state[0][i][j] == TicTacToeEnvironment.EMPTY):
return False
return True
def display(self):
state = self._state[0]
s = ""
for row in range(3):
for col in range(3):
if np.all(state[row][col] == TicTacToeEnvironment.EMPTY):
s += "_"
if np.all(state[row][col] == TicTacToeEnvironment.X):
s += "X"
if np.all(state[row][col] == TicTacToeEnvironment.O):
s += "O"
s += "\n"
return s
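if __name__ == "__main__":
    # Minimal usage sketch (not part of the original module): play one episode
    # with uniformly random moves for X against the random O opponent. Only
    # attributes defined in this file (`step`, `display`, `_terminated`) are used.
    env = TicTacToeEnvironment()
    total_reward = 0.0
    while not env._terminated:
        total_reward += env.step(random.randint(0, 8))
    print(env.display())
    print("Episode reward: %.1f" % total_reward)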
<file_sep>from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
import warnings
import time
import numpy as np
import tensorflow as tf
from deepchem.utils.save import log
from deepchem.metrics import to_one_hot
from deepchem.metrics import from_one_hot
from deepchem.nn import model_ops
# NOTE: the legacy TensorFlow-graph classes used below are assumed to live at
# their DeepChem 1.x paths; adjust these imports if your layout differs.
from deepchem.data import pad_features
from deepchem.models.tensorflow_models import TensorflowGraph
from deepchem.models.tensorflow_models.fcnet import TensorflowMultiTaskRegressor
class ProgressiveJointRegressor(TensorflowMultiTaskRegressor):
"""Implements a progressive multitask neural network.
Progressive Networks: https://arxiv.org/pdf/1606.04671v3.pdf
Progressive networks allow for multitask learning where each task
gets a new column of weights. As a result, there is no exponential
forgetting where previous tasks are ignored.
TODO(rbharath): This class is unnecessarily complicated. Can we simplify the
structure of the code here?
"""
def __init__(self, n_tasks, n_features, alpha_init_stddevs=[.02], **kwargs):
"""Creates a progressive network.
Only listing parameters specific to progressive networks here.
Parameters
----------
n_tasks: int
Number of tasks
n_features: int
Number of input features
alpha_init_stddevs: list
List of standard-deviations for alpha in adapter layers.
"""
warnings.warn(
"ProgressiveJointRegressor is deprecated. "
"Will be removed in DeepChem 1.4.", DeprecationWarning)
self.alpha_init_stddevs = alpha_init_stddevs
super(ProgressiveJointRegressor, self).__init__(n_tasks, n_features,
**kwargs)
# Consistency check
lengths_set = {
len(self.layer_sizes),
len(self.weight_init_stddevs),
len(self.alpha_init_stddevs),
len(self.bias_init_consts),
len(self.dropouts),
}
assert len(lengths_set) == 1, "All layer params must have same length."
def build(self, graph, name_scopes, training):
"""Constructs the graph architecture as specified in its config.
This method creates the following Placeholders:
mol_features: Molecule descriptor (e.g. fingerprint) tensor with shape
batch_size x n_features.
"""
n_features = self.n_features
placeholder_scope = TensorflowGraph.get_placeholder_scope(
graph, name_scopes)
with graph.as_default():
with placeholder_scope:
self.mol_features = tf.placeholder(
tf.float32, shape=[None, n_features], name='mol_features')
layer_sizes = self.layer_sizes
weight_init_stddevs = self.weight_init_stddevs
bias_init_consts = self.bias_init_consts
dropouts = self.dropouts
lengths_set = {
len(layer_sizes),
len(weight_init_stddevs),
len(bias_init_consts),
len(dropouts),
}
assert len(lengths_set) == 1, 'All layer params must have same length.'
n_layers = lengths_set.pop()
assert n_layers > 0, 'Must have some layers defined.'
prev_layer = self.mol_features
prev_layer_size = n_features
all_layers = {}
for i in range(n_layers):
for task in range(self.n_tasks):
task_scope = TensorflowGraph.shared_name_scope(
"task%d" % task, graph, name_scopes)
print("Adding weights for task %d, layer %d" % (task, i))
with task_scope as scope:
if i == 0:
prev_layer = self.mol_features
prev_layer_size = self.n_features
else:
prev_layer = all_layers[(i - 1, task)]
prev_layer_size = layer_sizes[i - 1]
if task > 0:
lateral_contrib = self.add_adapter(all_layers, task, i)
print("Creating W_layer_%d_task%d of shape %s" %
(i, task, str([prev_layer_size, layer_sizes[i]])))
W = tf.Variable(
tf.truncated_normal(
shape=[prev_layer_size, layer_sizes[i]],
stddev=self.weight_init_stddevs[i]),
name='W_layer_%d_task%d' % (i, task),
dtype=tf.float32)
print("Creating b_layer_%d_task%d of shape %s" %
(i, task, str([layer_sizes[i]])))
b = tf.Variable(
tf.constant(
value=self.bias_init_consts[i], shape=[layer_sizes[i]]),
name='b_layer_%d_task%d' % (i, task),
dtype=tf.float32)
layer = tf.matmul(prev_layer, W) + b
if i > 0 and task > 0:
layer = layer + lateral_contrib
layer = tf.nn.relu(layer)
layer = model_ops.dropout(layer, dropouts[i], training)
all_layers[(i, task)] = layer
output = []
for task in range(self.n_tasks):
prev_layer = all_layers[(i, task)]
prev_layer_size = layer_sizes[i]
task_scope = TensorflowGraph.shared_name_scope("task%d" % task, graph,
name_scopes)
with task_scope as scope:
if task > 0:
lateral_contrib = tf.squeeze(
self.add_adapter(all_layers, task, i + 1))
weight_init = tf.truncated_normal(
shape=[prev_layer_size, 1], stddev=weight_init_stddevs[i])
bias_init = tf.constant(value=bias_init_consts[i], shape=[1])
print("Creating W_output_task%d of shape %s" %
(task, str([prev_layer_size, 1])))
w = tf.Variable(
weight_init, name='W_output_task%d' % task, dtype=tf.float32)
print("Creating b_output_task%d of shape %s" % (task, str([1])))
b = tf.Variable(
bias_init, name='b_output_task%d' % task, dtype=tf.float32)
layer = tf.squeeze(tf.matmul(prev_layer, w) + b)
if i > 0 and task > 0:
layer = layer + lateral_contrib
output.append(layer)
return output
def add_adapter(self, all_layers, task, layer_num):
"""Add an adapter connection for given task/layer combo"""
i = layer_num
prev_layers = []
# Handle output layer
if i < len(self.layer_sizes):
layer_sizes = self.layer_sizes
alpha_init_stddev = self.alpha_init_stddevs[i]
weight_init_stddev = self.weight_init_stddevs[i]
bias_init_const = self.bias_init_consts[i]
elif i == len(self.layer_sizes):
layer_sizes = self.layer_sizes + [1]
alpha_init_stddev = self.alpha_init_stddevs[-1]
weight_init_stddev = self.weight_init_stddevs[-1]
bias_init_const = self.bias_init_consts[-1]
else:
raise ValueError("layer_num too large for add_adapter.")
# Iterate over all previous tasks.
for prev_task in range(task):
prev_layers.append(all_layers[(i - 1, prev_task)])
# prev_layers is a list with elements of size
# (batch_size, layer_sizes[i-1])
prev_layer = tf.concat(axis=1, values=prev_layers)
alpha = tf.Variable(tf.truncated_normal([
1,
], stddev=alpha_init_stddev))
prev_layer = tf.multiply(alpha, prev_layer)
prev_layer_size = task * layer_sizes[i - 1]
print("Creating V_layer_%d_task%d of shape %s" %
(i, task, str([prev_layer_size, layer_sizes[i - 1]])))
V = tf.Variable(
tf.truncated_normal(
shape=[prev_layer_size, layer_sizes[i - 1]],
stddev=weight_init_stddev),
name="V_layer_%d_task%d" % (i, task),
dtype=tf.float32)
print("Creating b_lat_layer_%d_task%d of shape %s" %
(i, task, str([layer_sizes[i - 1]])))
b_lat = tf.Variable(
tf.constant(value=bias_init_const, shape=[layer_sizes[i - 1]]),
name='b_lat_layer_%d_task%d' % (i, task),
dtype=tf.float32)
prev_layer = tf.matmul(prev_layer, V) + b_lat
print("Creating U_layer_%d_task%d of shape %s" %
(i, task, str([layer_sizes[i - 1], layer_sizes[i]])))
U = tf.Variable(
tf.truncated_normal(
shape=[layer_sizes[i - 1], layer_sizes[i]],
stddev=weight_init_stddev),
name="U_layer_%d_task%d" % (i, task),
dtype=tf.float32)
return tf.matmul(prev_layer, U)
def fit(self,
dataset,
nb_epoch=10,
max_checkpoints_to_keep=5,
log_every_N_batches=50,
checkpoint_interval=10,
**kwargs):
"""Fit the model.
Parameters
----------
dataset: dc.data.Dataset
Dataset object holding training data
nb_epoch: int
Number of training epochs.
max_checkpoints_to_keep: int
Maximum number of checkpoints to keep; older checkpoints will be deleted.
log_every_N_batches: int
Report every N batches. Useful for training on very large datasets,
where epochs can take a long time to finish.
checkpoint_interval: int
Frequency at which to write checkpoints, measured in epochs
Raises
------
AssertionError
If model is not in training mode.
"""
############################################################## TIMING
time1 = time.time()
############################################################## TIMING
log("Training for %d epochs" % nb_epoch, self.verbose)
with self.train_graph.graph.as_default():
train_op = self.get_training_op(self.train_graph.graph,
self.train_graph.loss)
with self._get_shared_session(train=True) as sess:
sess.run(tf.global_variables_initializer())
saver = tf.train.Saver(max_to_keep=max_checkpoints_to_keep)
# Save an initial checkpoint.
saver.save(sess, self._save_path, global_step=0)
for epoch in range(nb_epoch):
avg_loss, n_batches = 0., 0
for ind, (X_b, y_b, w_b, ids_b) in enumerate(
# Turns out there are valid cases where we don't want pad-batches
# on by default.
#dataset.iterbatches(batch_size, pad_batches=True)):
dataset.iterbatches(
self.batch_size, pad_batches=self.pad_batches)):
if ind % log_every_N_batches == 0:
log("On batch %d" % ind, self.verbose)
# Run training op.
feed_dict = self.construct_feed_dict(X_b, y_b, w_b, ids_b)
fetches = self.train_graph.output + [
train_op, self.train_graph.loss
]
fetched_values = sess.run(fetches, feed_dict=feed_dict)
output = fetched_values[:len(self.train_graph.output)]
loss = fetched_values[-1]
avg_loss += loss
y_pred = np.squeeze(np.array(output))
y_b = y_b.flatten()
n_batches += 1
if epoch % checkpoint_interval == checkpoint_interval - 1:
saver.save(sess, self._save_path, global_step=epoch)
avg_loss = float(avg_loss) / n_batches
log('Ending epoch %d: Average loss %g' % (epoch, avg_loss),
self.verbose)
# Always save a final checkpoint when complete.
saver.save(sess, self._save_path, global_step=epoch + 1)
############################################################## TIMING
time2 = time.time()
print("TIMING: model fitting took %0.3f s" % (time2 - time1), self.verbose)
############################################################## TIMING
def get_training_op(self, graph, loss):
"""Get training op for applying gradients to variables.
Subclasses that need to do anything fancy with gradients should override
this method.
Returns:
A training op.
"""
with graph.as_default():
opt = model_ops.optimizer(self.optimizer, self.learning_rate,
self.momentum)
return opt.minimize(loss, name='train')
def add_training_costs(self, graph, name_scopes, output, labels, weights):
with graph.as_default():
epsilon = 1e-3 # small float to avoid dividing by zero
weighted_costs = [] # weighted costs for each example
gradient_costs = [] # costs used for gradient calculation
with TensorflowGraph.shared_name_scope('costs', graph, name_scopes):
for task in range(self.n_tasks):
task_str = str(task).zfill(len(str(self.n_tasks)))
with TensorflowGraph.shared_name_scope('cost_{}'.format(task_str),
graph, name_scopes):
with tf.name_scope('weighted'):
weighted_cost = self.cost(output[task], labels[task],
weights[task])
weighted_costs.append(weighted_cost)
with tf.name_scope('gradient'):
# Note that we divide by the batch size and not the number of
# non-zero weight examples in the batch. Also, instead of using
# tf.reduce_mean (which can put ops on the CPU) we explicitly
# calculate with div/sum so it stays on the GPU.
gradient_cost = tf.math.divide(
tf.reduce_sum(weighted_cost), self.batch_size)
gradient_costs.append(gradient_cost)
# aggregated costs
with TensorflowGraph.shared_name_scope('aggregated', graph,
name_scopes):
with tf.name_scope('gradient'):
loss = tf.add_n(gradient_costs)
# weight decay
if self.penalty != 0.0:
penalty = model_ops.weight_decay(self.penalty_type, self.penalty)
loss += penalty
return loss
def construct_feed_dict(self, X_b, y_b=None, w_b=None, ids_b=None):
"""Construct a feed dictionary from minibatch data.
TODO(rbharath): ids_b is not used here. Can we remove it?
Args:
X_b: np.ndarray of shape (batch_size, n_features)
y_b: np.ndarray of shape (batch_size, n_tasks)
w_b: np.ndarray of shape (batch_size, n_tasks)
ids_b: List of length (batch_size) with datapoint identifiers.
"""
orig_dict = {}
orig_dict["mol_features"] = X_b
for task in range(self.n_tasks):
if y_b is not None:
orig_dict["labels_%d" % task] = y_b[:, task]
else:
# Dummy placeholders
orig_dict["labels_%d" % task] = np.squeeze(np.zeros((self.batch_size,)))
if w_b is not None:
orig_dict["weights_%d" % task] = w_b[:, task]
else:
# Dummy placeholders
orig_dict["weights_%d" % task] = np.ones((self.batch_size,))
return TensorflowGraph.get_feed_dict(orig_dict)
def predict_on_batch(self, X):
"""Return model output for the provided input.
Restore(checkpoint) must have previously been called on this object.
Args:
dataset: dc.data.Dataset object.
Returns:
Tuple of three numpy arrays with shape n_examples x n_tasks (x ...):
output: Model outputs.
labels: True labels.
weights: Example weights.
Note that the output and labels arrays may be more than 2D, e.g. for
classifier models that return class probabilities.
Raises:
AssertionError: If model is not in evaluation mode.
ValueError: If output and labels are not both 3D or both 2D.
"""
len_unpadded = len(X)
if self.pad_batches:
X = pad_features(self.batch_size, X)
if not self._restored_model:
self.restore()
with self.eval_graph.graph.as_default():
# run eval data through the model
n_tasks = self.n_tasks
outputs = []
with self._get_shared_session(train=False).as_default():
n_samples = len(X)
feed_dict = self.construct_feed_dict(X)
data = self._get_shared_session(train=False).run(
self.eval_graph.output, feed_dict=feed_dict)
batch_outputs = np.asarray(data[:n_tasks], dtype=float)
# reshape to batch_size x n_tasks x ...
if batch_outputs.ndim == 3:
batch_outputs = batch_outputs.transpose((1, 0, 2))
elif batch_outputs.ndim == 2:
batch_outputs = batch_outputs.transpose((1, 0))
# Handle edge case when batch-size is 1.
elif batch_outputs.ndim == 1:
n_samples = len(X)
batch_outputs = batch_outputs.reshape((n_samples, n_tasks))
else:
raise ValueError('Unrecognized rank combination for output: %s' %
(batch_outputs.shape))
# Prune away any padding that was added
batch_outputs = batch_outputs[:n_samples]
outputs.append(batch_outputs)
outputs = np.squeeze(np.concatenate(outputs))
outputs = np.copy(outputs)
return outputs[:len_unpadded]
def _get_shared_session(self, train):
# allow_soft_placement=True allows ops without a GPU implementation
# to run on the CPU instead.
if train:
if not self.train_graph.session:
config = tf.ConfigProto(allow_soft_placement=True)
self.train_graph.session = tf.Session(config=config)
return self.train_graph.session
else:
if not self.eval_graph.session:
config = tf.ConfigProto(allow_soft_placement=True)
self.eval_graph.session = tf.Session(config=config)
return self.eval_graph.session
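# Hedged usage sketch (not part of the original file): the deprecated class was
# driven like the other legacy TensorFlow regressors. All per-layer lists must
# have the same length (see the consistency check in __init__).
#
#   model = ProgressiveJointRegressor(n_tasks=12, n_features=1024,
#                                     layer_sizes=[500],
#                                     weight_init_stddevs=[.02],
#                                     alpha_init_stddevs=[.02],
#                                     bias_init_consts=[1.],
#                                     dropouts=[.25])
#   model.fit(train_dataset, nb_epoch=10)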
<file_sep>"""
Script that trains textCNN models on delaney dataset.
"""
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
import numpy as np
np.random.seed(123)
import tensorflow as tf
tf.random.set_seed(123)
import deepchem as dc
# Load Delaney dataset
delaney_tasks, delaney_datasets, transformers = dc.molnet.load_delaney(
featurizer='Raw', split='index')
train_dataset, valid_dataset, test_dataset = delaney_datasets
# Fit models
metric = dc.metrics.Metric(dc.metrics.pearson_r2_score, np.mean)
char_dict, length = dc.models.TextCNNModel.build_char_dict(train_dataset)
# Batch size of models
batch_size = 64
model = dc.models.TextCNNModel(
len(delaney_tasks),
char_dict,
seq_length=length,
mode='regression',
learning_rate=1e-3,
batch_size=batch_size,
use_queue=False)
# Fit trained model
model.fit(train_dataset, nb_epoch=100)
print("Evaluating model")
train_scores = model.evaluate(train_dataset, [metric], transformers)
valid_scores = model.evaluate(valid_dataset, [metric], transformers)
print("Train scores")
print(train_scores)
print("Validation scores")
print(valid_scores)
<file_sep>from unittest import TestCase
import pytest
from rdkit import Chem
import deepchem as dc
from deepchem.data import DiskDataset
from deepchem.feat.one_hot import zinc_charset
from deepchem.models.autoencoder_models.autoencoder import TensorflowMoleculeEncoder, TensorflowMoleculeDecoder
class TestTensorflowEncoders(TestCase):
@pytest.mark.slow
def test_fit(self):
tf_enc = TensorflowMoleculeEncoder.zinc_encoder()
smiles = [
"Cn1cnc2c1c(=O)n(C)c(=O)n2C", "O=C(O)[C@@H]1/C(=C/CO)O[C@@H]2CC(=O)N21",
"Cn1c2nncnc2c(=O)n(C)c1=O", "Cn1cnc2c1c(=O)[nH]c(=O)n2C",
"NC(=O)c1ncc[nH]c1=O", "O=C1OCc2c1[nH]c(=O)[nH]c2=O",
"Cn1c(N)c(N)c(=O)n(C)c1=O", "CNc1nc2c([nH]1)c(=O)[nH]c(=O)n2C",
"CC(=O)N1CN(C(C)=O)[C@@H](O)[C@@H]1O",
"CC(=O)N1CN(C(C)=O)[C@H](O)[C@H]1O", "Cc1[nH]c(=O)[nH]c(=O)c1CO",
"O=C1NCCCc2c1no[n+]2[O-]", "Cc1nc(C(N)=O)c(N)n1CCO",
"O=c1[nH]cc(N2CCOCC2)c(=O)[nH]1"
]
featurizer = dc.feat.one_hot.OneHotFeaturizer(zinc_charset, 120)
mols = [Chem.MolFromSmiles(x) for x in smiles]
features = featurizer.featurize(mols)
dataset = DiskDataset.from_numpy(features, features)
prediction = tf_enc.predict_on_batch(dataset.X)
tf_de = TensorflowMoleculeDecoder.zinc_decoder()
one_hot_decoded = tf_de.predict_on_batch(prediction)
decoded_smiles = featurizer.untransform(one_hot_decoded)
assert len(decoded_smiles) == len(smiles)
<file_sep># DeepChem Example Suite
This directory contains the DeepChem example suite. The examples fall into a few
broad categories:
- API Examples: These examples show how to do little things with DeepChem's API
that you might not have realized were possible.
- Case Study Examples: These show how to analyze interesting datasets with DeepChem.
- Tutorial Notebooks: These IPython notebooks provide walkthroughs of using
DeepChem on interesting problems in practice.
<file_sep>Examples
========
We show a bunch of examples for DeepChem by the doctest style.
- We match against doctest's :code:`...` wildcard where the exact output doesn't matter
- We often use threshold assertions (e.g. :code:`score['mean-pearson_r2_score'] > 0.92`),
  as this is what matters for model training code.
.. contents:: Contents
:local:
Before jumping in to examples, we'll import our libraries and ensure our doctests are reproducible:
.. doctest:: *
>>> import numpy as np
>>> import tensorflow as tf
>>> import deepchem as dc
>>>
>>> # Run before every test for reproducibility
>>> def seed_all():
... np.random.seed(123)
... tf.random.set_seed(123)
.. testsetup:: *
import numpy as np
import tensorflow as tf
import deepchem as dc
# Run before every test for reproducibility
def seed_all():
np.random.seed(123)
tf.random.set_seed(123)
Delaney (ESOL)
----------------
Examples of training models on the Delaney (ESOL) dataset included in `MoleculeNet <./moleculenet.html>`_.
We'll be using its :code:`smiles` field to train models to predict its experimentally measured aqueous solubility.
MultitaskRegressor
^^^^^^^^^^^^^^^^^^
First, we'll load the dataset with :func:`load_delaney() <deepchem.molnet.load_delaney>` and fit a :class:`MultitaskRegressor <deepchem.models.MultitaskRegressor>`:
.. doctest:: delaney
>>> seed_all()
>>> # Load dataset with default 'scaffold' splitting
>>> tasks, datasets, transformers = dc.molnet.load_delaney()
>>> tasks
['measured log solubility in mols per litre']
>>> train_dataset, valid_dataset, test_dataset = datasets
>>>
>>> # We want to know the pearson R squared score, averaged across tasks
>>> avg_pearson_r2 = dc.metrics.Metric(dc.metrics.pearson_r2_score, np.mean)
>>>
>>> # We'll train a multitask regressor (fully connected network)
>>> model = dc.models.MultitaskRegressor(
... len(tasks),
... n_features=1024,
... layer_sizes=[500])
>>>
>>> model.fit(train_dataset)
0...
>>>
>>> # We now evaluate our fitted model on our training and validation sets
>>> train_scores = model.evaluate(train_dataset, [avg_pearson_r2], transformers)
>>> assert train_scores['mean-pearson_r2_score'] > 0.7, train_scores
>>>
>>> valid_scores = model.evaluate(valid_dataset, [avg_pearson_r2], transformers)
>>> assert valid_scores['mean-pearson_r2_score'] > 0.3, valid_scores
GraphConvModel
^^^^^^^^^^^^^^
The default `featurizer <./featurizers.html>`_ for Delaney is :code:`ECFP`, short for
`"Extended-connectivity fingerprints." <./featurizers.html#circularfingerprint>`_
For a :class:`GraphConvModel <deepchem.models.GraphConvModel>`, we'll reload our datasets with :code:`featurizer='GraphConv'`:
.. doctest:: delaney
>>> seed_all()
>>> tasks, datasets, transformers = dc.molnet.load_delaney(featurizer='GraphConv')
>>> train_dataset, valid_dataset, test_dataset = datasets
>>>
>>> model = dc.models.GraphConvModel(len(tasks), mode='regression', dropout=0.5)
>>>
>>> model.fit(train_dataset, nb_epoch=30)
0...
>>>
>>> # We now evaluate our fitted model on our training and validation sets
>>> train_scores = model.evaluate(train_dataset, [avg_pearson_r2], transformers)
>>> assert train_scores['mean-pearson_r2_score'] > 0.5, train_scores
>>>
>>> valid_scores = model.evaluate(valid_dataset, [avg_pearson_r2], transformers)
>>> assert valid_scores['mean-pearson_r2_score'] > 0.3, valid_scores
ChEMBL
------
Examples of training models on `ChEMBL`_ dataset included in MoleculeNet.
ChEMBL is a manually curated database of bioactive molecules with drug-like properties.
It brings together chemical, bioactivity and genomic data to aid the translation
of genomic information into effective new drugs.
.. _`ChEMBL`: https://www.ebi.ac.uk/chembl
MultitaskRegressor
^^^^^^^^^^^^^^^^^^
.. doctest:: chembl
>>> seed_all()
>>> # Load ChEMBL 5thresh dataset with random splitting
>>> chembl_tasks, datasets, transformers = dc.molnet.load_chembl(
... shard_size=2000, featurizer="ECFP", set="5thresh", split="random")
>>> train_dataset, valid_dataset, test_dataset = datasets
>>> len(chembl_tasks)
691
>>> f'Compound train/valid/test split: {len(train_dataset)}/{len(valid_dataset)}/{len(test_dataset)}'
'Compound train/valid/test split: 19096/2387/2388'
>>>
>>> # We want to know the RMS, averaged across tasks
>>> avg_rms = dc.metrics.Metric(dc.metrics.rms_score, np.mean)
>>>
>>> # Create our model
>>> n_layers = 3
>>> model = dc.models.MultitaskRegressor(
... len(chembl_tasks),
... n_features=1024,
... layer_sizes=[1000] * n_layers,
... dropouts=[.25] * n_layers,
... weight_init_stddevs=[.02] * n_layers,
... bias_init_consts=[1.] * n_layers,
... learning_rate=.0003,
... weight_decay_penalty=.0001,
... batch_size=100)
>>>
>>> model.fit(train_dataset, nb_epoch=5)
0...
>>>
>>> # We now evaluate our fitted model on our training and validation sets
>>> train_scores = model.evaluate(train_dataset, [avg_rms], transformers)
>>> assert train_scores['mean-rms_score'] < 10.00
>>>
>>> valid_scores = model.evaluate(valid_dataset, [avg_rms], transformers)
>>> assert valid_scores['mean-rms_score'] < 10.00
GraphConvModel
^^^^^^^^^^^^^^
.. doctest:: chembl
>>> # Load ChEMBL dataset
>>> chembl_tasks, datasets, transformers = dc.molnet.load_chembl(
... shard_size=2000, featurizer="GraphConv", set="5thresh", split="random")
>>> train_dataset, valid_dataset, test_dataset = datasets
>>>
>>> # RMS, averaged across tasks
>>> avg_rms = dc.metrics.Metric(dc.metrics.rms_score, np.mean)
>>>
>>> model = dc.models.GraphConvModel(
... len(chembl_tasks), batch_size=128, mode='regression')
>>>
>>> # Fit trained model
>>> model.fit(train_dataset, nb_epoch=5)
0...
>>>
>>> # We now evaluate our fitted model on our training and validation sets
>>> train_scores = model.evaluate(train_dataset, [avg_rms], transformers)
>>> assert train_scores['mean-rms_score'] < 10.00
>>>
>>> valid_scores = model.evaluate(valid_dataset, [avg_rms], transformers)
>>> assert valid_scores['mean-rms_score'] < 10.00
<file_sep>"""Utilities for handling PDBQT files."""
from typing import Dict, List, Optional, Set, Tuple
from deepchem.utils.typing import RDKitMol
def pdbqt_to_pdb(filename: Optional[str] = None,
pdbqt_data: Optional[List[str]] = None) -> str:
"""Extracts the PDB part of a pdbqt file as a string.
Either `filename` or `pdbqt_data` must be provided. This function
strips PDBQT charge information from the provided input.
Parameters
----------
filename: str, optional (default None)
Filename of PDBQT file
pdbqt_data: List[str], optional (default None)
Raw list of lines containing data from PDBQT file.
Returns
-------
pdb_block: str
String containing the PDB portion of pdbqt file.
"""
if filename is not None and pdbqt_data is not None:
raise ValueError("Only one of filename or pdbqt_data can be provided")
elif filename is None and pdbqt_data is None:
raise ValueError("Either filename or pdbqt_data must be provided")
elif filename is not None:
pdbqt_data = open(filename).readlines()
pdb_block = ""
# FIXME: Item "None" of "Optional[List[str]]" has no attribute "__iter__" (not iterable)
for line in pdbqt_data: # type: ignore
pdb_block += "%s\n" % line[:66]
return pdb_block
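# Hedged usage sketch (not part of the original module): strip the PDBQT charge
# columns from a docked pose and write a plain PDB block. The filenames are
# placeholders.
#
#   pdb_block = pdbqt_to_pdb(filename="pose.pdbqt")
#   with open("pose.pdb", "w") as fout:
#       fout.write(pdb_block)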
def convert_protein_to_pdbqt(mol: RDKitMol, outfile: str) -> None:
"""Convert a protein PDB file into a pdbqt file.
Writes the extra PDBQT terms directly to `outfile`.
Parameters
----------
mol: rdkit.Chem.rdchem.Mol
Protein molecule
outfile: str
filename which already has a valid pdb representation of mol
"""
lines = [x.strip() for x in open(outfile).readlines()]
out_lines = []
for line in lines:
if "ROOT" in line or "ENDROOT" in line or "TORSDOF" in line:
out_lines.append("%s\n" % line)
continue
if not line.startswith("ATOM"):
continue
line = line[:66]
atom_index = int(line[6:11])
atom = mol.GetAtoms()[atom_index - 1]
line = "%s +0.000 %s\n" % (line, atom.GetSymbol().ljust(2))
out_lines.append(line)
with open(outfile, 'w') as fout:
for line in out_lines:
fout.write(line)
def _mol_to_graph(mol: RDKitMol):
"""Convert RDKit Mol to NetworkX graph
Convert mol into a graph representation in which atoms are nodes and bonds
are edges.
Parameters
----------
mol: rdkit.Chem.rdchem.Mol
The molecule to convert into a graph.
Returns
-------
graph: networkx.Graph
Contains atoms indices as nodes, edges as bonds.
Notes
-----
This function requires NetworkX to be installed.
"""
try:
import networkx as nx
except ModuleNotFoundError:
raise ImportError("This function requires NetworkX to be installed.")
G = nx.Graph()
num_atoms = mol.GetNumAtoms()
G.add_nodes_from(range(num_atoms))
for i in range(mol.GetNumBonds()):
from_idx = mol.GetBonds()[i].GetBeginAtomIdx()
to_idx = mol.GetBonds()[i].GetEndAtomIdx()
G.add_edge(from_idx, to_idx)
return G
def _get_rotatable_bonds(mol: RDKitMol) -> List[Tuple[int, int]]:
"""
https://github.com/rdkit/rdkit/blob/f4529c910e546af590c56eba01f96e9015c269a6/Code/GraphMol/Descriptors/Lipinski.cpp#L107
Taken from RDKit source to find which bonds are rotatable. Rotatable
bonds are stored as (from_atom, to_atom) tuples.
Parameters
----------
mol: rdkit.Chem.rdchem.Mol
Ligand molecule
Returns
-------
rotatable_bonds: List[Tuple[int, int]]
List of rotatable bonds in molecule
Notes
-----
This function requires RDKit to be installed.
"""
try:
from rdkit import Chem
from rdkit.Chem import rdmolops
except ModuleNotFoundError:
raise ImportError("This function requires RDKit to be installed.")
pattern = Chem.MolFromSmarts(
"[!$(*#*)&!D1&!$(C(F)(F)F)&!$(C(Cl)(Cl)Cl)&!$(C(Br)(Br)Br)&!$(C([CH3])("
"[CH3])[CH3])&!$([CD3](=[N,O,S])-!@[#7,O,S!D1])&!$([#7,O,S!D1]-!@[CD3]="
"[N,O,S])&!$([CD3](=[N+])-!@[#7!D1])&!$([#7!D1]-!@[CD3]=[N+])]-!@[!$(*#"
"*)&!D1&!$(C(F)(F)F)&!$(C(Cl)(Cl)Cl)&!$(C(Br)(Br)Br)&!$(C([CH3])([CH3])"
"[CH3])]")
rdmolops.FastFindRings(mol)
rotatable_bonds = mol.GetSubstructMatches(pattern)
return rotatable_bonds
def convert_mol_to_pdbqt(mol: RDKitMol, outfile: str) -> None:
"""Writes the provided ligand molecule to specified file in pdbqt format.
Creates a torsion tree and write to pdbqt file. The torsion tree
represents rotatable bonds in the molecule.
Parameters
----------
mol: rdkit.Chem.rdchem.Mol
The molecule whose PDB representation is already stored in `outfile`
outfile: str
Filename of a valid pdb file with the extension .pdbqt
Notes
-----
This function requires NetworkX to be installed.
"""
try:
import networkx as nx
except ModuleNotFoundError:
raise ImportError("This function requires NetworkX to be installed.")
# Walk through the original file and extract ATOM/HETATM lines and
# add PDBQT charge annotations.
pdb_map = _create_pdb_map(outfile)
graph = _mol_to_graph(mol)
rotatable_bonds = _get_rotatable_bonds(mol)
# Remove rotatable bonds from this molecule
for bond in rotatable_bonds:
graph.remove_edge(bond[0], bond[1])
# Get the connected components now that the rotatable bonds have
# been removed.
components = [x for x in nx.connected_components(graph)]
comp_map = _create_component_map(mol, components)
used_partitions = set()
lines = []
# The root is the largest connected component.
root = max(enumerate(components), key=lambda x: len(x[1]))[0]
# Write the root component
lines.append("ROOT\n")
for atom in components[root]:
lines.append(pdb_map[atom])
lines.append("ENDROOT\n")
# We've looked at the root, so take note of that
used_partitions.add(root)
for bond in rotatable_bonds:
valid, next_partition = _valid_bond(used_partitions, bond, root,
comp_map)
if not valid:
continue
_dfs(used_partitions, next_partition, bond, components, rotatable_bonds,
lines, pdb_map, comp_map)
lines.append("TORSDOF %s" % len(rotatable_bonds))
with open(outfile, 'w') as fout:
for line in lines:
fout.write(line)
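# Hedged usage sketch (not part of the original module): preparing a ligand.
# The outfile must already contain a PDB representation of the molecule before
# the PDBQT torsion tree is appended; RDKit is assumed to be installed.
#
#   from rdkit import Chem
#   from rdkit.Chem import AllChem
#   mol = Chem.AddHs(Chem.MolFromSmiles("CCO"))
#   AllChem.EmbedMolecule(mol)
#   Chem.MolToPDBFile(mol, "ligand.pdbqt")
#   convert_mol_to_pdbqt(mol, "ligand.pdbqt")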
def _create_pdb_map(outfile: str) -> Dict[int, str]:
"""Create a mapping from atom numbers to lines to write to pdbqt
This is a map from rdkit atom number to its line in the pdb
file. We also add the two additional columns required for
pdbqt (charge, symbol).
Note rdkit atoms are 0 indexed and pdb files are 1 indexed
Parameters
----------
outfile: str
filename which already has a valid pdb representation of mol
Returns
-------
pdb_map: Dict[int, str]
Maps rdkit atom numbers to lines to be written to PDBQT file.
"""
lines = [x.strip() for x in open(outfile).readlines()]
lines = list(
filter(lambda x: x.startswith("HETATM") or x.startswith("ATOM"), lines))
lines = [x[:66] for x in lines]
pdb_map = {}
for line in lines:
my_values = line.split()
atom_number = int(my_values[1])
atom_symbol = my_values[2]
atom_symbol = ''.join([i for i in atom_symbol if not i.isdigit()])
line = line.replace("HETATM", "ATOM ")
line = "%s +0.000 %s\n" % (line, atom_symbol.ljust(2))
pdb_map[atom_number - 1] = line
return pdb_map
def _create_component_map(mol: RDKitMol,
components: List[List[int]]) -> Dict[int, int]:
"""Creates a map from atom ids to disconnected component id
For each atom in `mol`, maps it to the id of the component in the
molecule. The intent is that this is used on a molecule whose
rotatable bonds have been removed. `components` is a list of the
connected components after this surgery.
Parameters
----------
mol: rdkit.Chem.rdchem.Mol
The molecule to find disconnected components in
components: List[List[int]]
List of connected components
Returns
-------
comp_map: Dict[int, int]
Maps atom ids to component ids
"""
comp_map = {}
for i in range(mol.GetNumAtoms()):
for j in range(len(components)):
if i in components[j]:
comp_map[i] = j
break
return comp_map
def _dfs(used_partitions: Set[int], current_partition: int,
bond: Tuple[int, int], components: List[List[int]],
rotatable_bonds: List[Tuple[int, int]], lines: List[str],
pdb_map: Dict[int, str], comp_map: Dict[int, int]) -> List[str]:
"""
This function does a depth first search through the torsion tree
Parameters
----------
used_partitions: Set[int]
Partitions which have already been used
current_partition: int
The current partition to expand
bond: Tuple[int, int]
the bond which goes from the previous partition into this partition
components: List[List[int]]
List of connected components
rotatable_bonds: List[Tuple[int, int]]
List of rotatable bonds. This tuple is (from_atom, to_atom).
lines: List[str]
List of lines to write
pdb_map: Dict[int, str]
Maps atom numbers to PDBQT lines to write
comp_map: Dict[int, int]
Maps atom numbers to component numbers
Returns
-------
lines: List[str]
List of lines to write. This has more appended lines.
"""
if comp_map[bond[1]] != current_partition:
bond = (bond[1], bond[0])
used_partitions.add(comp_map[bond[0]])
used_partitions.add(comp_map[bond[1]])
lines.append("BRANCH %4s %4s\n" % (bond[0] + 1, bond[1] + 1))
for atom in components[current_partition]:
lines.append(pdb_map[atom])
for b in rotatable_bonds:
valid, next_partition = \
_valid_bond(used_partitions, b, current_partition, comp_map)
if not valid:
continue
lines = _dfs(used_partitions, next_partition, b, components,
rotatable_bonds, lines, pdb_map, comp_map)
lines.append("ENDBRANCH %4s %4s\n" % (bond[0] + 1, bond[1] + 1))
return lines
def _valid_bond(used_partitions: Set[int], bond: Tuple[int, int],
current_partition: int,
comp_map: Dict[int, int]) -> Tuple[bool, int]:
"""Helper method to find next partition to explore.
Used to check if a bond goes from the current partition into a
partition that is not yet explored
Parameters
----------
used_partitions: Set[int]
Partitions which have already been used
bond: Tuple[int, int]
The bond to check if it goes to an unexplored partition.
This tuple is (from_atom, to_atom).
current_partition: int
The current partition of the DFS
comp_map: Dict[int, int]
Maps atom ids to component ids
Returns
-------
is_valid: bool
Whether the bond leads into a partition that has not been explored yet
next_partition: int
The next partition to explore
"""
part1 = comp_map[bond[0]]
part2 = comp_map[bond[1]]
if part1 != current_partition and part2 != current_partition:
return False, 0
if part1 == current_partition:
next_partition = part2
else:
next_partition = part1
return next_partition not in used_partitions, next_partition
<file_sep># -*- coding: utf-8 -*-
"""
Created on Mon Mar 06 14:25:40 2017
@author: <NAME>
"""
'''
import os
import time
import csv
import numpy as np
import tensorflow as tf
import deepchem
from deepchem.molnet.run_benchmark_models import low_data_benchmark_classification
from deepchem.molnet.check_availability import CheckFeaturizer
def run_benchmark_low_data(datasets,
model,
split='task',
metric=None,
featurizer=None,
n_features=0,
out_path='.',
K=4,
hyper_parameters=None,
cross_valid=False,
seed=123):
"""
Run low data benchmark test on designated datasets
with deepchem(or user-defined) model
Parameters
----------
datasets: list of string
choice of which datasets to use, should be: muv, tox21, sider
model: string or user-defined model structure
choice of which model to use, should be: siamese, attn, res
split: string, optional (default='task')
choice of splitter function, only task splitter supported
metric: string, optional (default=None)
choice of evaluation metrics, None = using the default metrics(AUC)
featurizer: string or dc.feat.Featurizer, optional (default=None)
choice of featurization, None = using the default corresponding to model
(string only applicable to deepchem models)
n_features: int, optional(default=0)
depending on featurizers, redefined when using deepchem featurizers,
need to be specified for user-defined featurizers(if using deepchem models)
out_path: string, optional(default='.')
path of result file
K: int, optional(default=4)
K-fold splitting of datasets
hyper_parameters: dict, optional (default=None)
hyper parameters for designated model, None = use preset values
cross_valid: boolean, optional(default=False)
whether to cross validate
"""
for dataset in datasets:
if dataset in ['muv', 'sider', 'tox21']:
mode = 'classification'
if metric == None:
metric = str('auc')
else:
raise ValueError('Dataset not supported')
metric_all = {
'auc': deepchem.metrics.Metric(deepchem.metrics.roc_auc_score, np.mean)
}
if isinstance(metric, str):
metric = metric_all[metric]
if featurizer == None and isinstance(model, str):
# Assigning featurizer if not user defined
pair = (dataset, model)
if pair in CheckFeaturizer:
featurizer = CheckFeaturizer[pair][0]
n_features = CheckFeaturizer[pair][1]
else:
continue
loading_functions = {
'muv': deepchem.molnet.load_muv,
'sider': deepchem.molnet.load_sider,
'tox21': deepchem.molnet.load_tox21
}
assert split == 'task'
print('-------------------------------------')
print('Benchmark on dataset: %s' % dataset)
print('-------------------------------------')
# loading datasets
print('Splitting function: %s' % split)
tasks, all_dataset, transformers = loading_functions[dataset](
featurizer=featurizer, split=split, K=K)
if cross_valid:
num_iter = K # K iterations for cross validation
else:
num_iter = 1
for count_iter in range(num_iter):
# Assembling train and valid datasets
train_folds = all_dataset[:K - count_iter - 1] + all_dataset[K -
count_iter:]
train_dataset = deepchem.splits.merge_fold_datasets(train_folds)
valid_dataset = all_dataset[K - count_iter - 1]
time_start_fitting = time.time()
train_score = {}
valid_score = {}
if isinstance(model, str):
if mode == 'classification':
valid_score = low_data_benchmark_classification(
train_dataset,
valid_dataset,
n_features,
metric,
model=model,
hyper_parameters=hyper_parameters,
seed=seed)
else:
model.fit(train_dataset)
valid_score['user_defined'] = model.evaluate(valid_dataset, metric,
transformers)
time_finish_fitting = time.time()
with open(os.path.join(out_path, 'results.csv'), 'a') as f:
writer = csv.writer(f)
for i in valid_score:
output_line = [dataset, str(split), mode, 'valid', i]
for task in valid_score[i][0]:
output_line.extend(
[task, valid_score[i][0][task], valid_score[i][1][task]])
output_line.extend(
['time_for_running', time_finish_fitting - time_start_fitting])
writer.writerow(output_line)
'''
<file_sep>"""
Utilities for constructing node features or bond features.
Some functions are based on chainer-chemistry or dgl-lifesci.
Repositories:
- https://github.com/chainer/chainer-chemistry
- https://github.com/awslabs/dgl-lifesci
"""
import os
import logging
from typing import List, Union, Tuple
import numpy as np
from deepchem.utils.typing import RDKitAtom, RDKitBond, RDKitMol
logger = logging.getLogger(__name__)
DEFAULT_ATOM_TYPE_SET = [
"C",
"N",
"O",
"F",
"P",
"S",
"Cl",
"Br",
"I",
]
DEFAULT_HYBRIDIZATION_SET = ["SP", "SP2", "SP3"]
DEFAULT_TOTAL_NUM_Hs_SET = [0, 1, 2, 3, 4]
DEFAULT_FORMAL_CHARGE_SET = [-2, -1, 0, 1, 2]
DEFAULT_TOTAL_DEGREE_SET = [0, 1, 2, 3, 4, 5]
DEFAULT_RING_SIZE_SET = [3, 4, 5, 6, 7, 8]
DEFAULT_BOND_TYPE_SET = ["SINGLE", "DOUBLE", "TRIPLE", "AROMATIC"]
DEFAULT_BOND_STEREO_SET = ["STEREONONE", "STEREOANY", "STEREOZ", "STEREOE"]
DEFAULT_GRAPH_DISTANCE_SET = [1, 2, 3, 4, 5, 6, 7]
DEFAULT_ATOM_IMPLICIT_VALENCE_SET = [0, 1, 2, 3, 4, 5, 6]
DEFAULT_ATOM_EXPLICIT_VALENCE_SET = [1, 2, 3, 4, 5, 6]
ALLEN_ELECTRONEGATIVTY = { # Allen scale electronegativity
'H': 2.3,
'He': 4.160,
'Li': 0.912,
'Be': 1.576,
'B': 2.051,
'C': 2.544,
'N': 3.066,
'O': 3.610,
'F': 4.193,
'Ne': 4.787,
'Na': 0.869,
'Mg': 1.293,
'Al': 1.613,
'Si': 1.916,
'P': 2.253,
'S': 2.589,
'Cl': 2.869,
'Ar': 3.242,
'K': 0.734,
'Ca': 1.034,
'Sc': 1.19,
'Ti': 1.38,
'V': 1.53,
'Cr': 1.65,
'Mn': 1.75,
'Fe': 1.80,
'Co': 1.84,
'Ni': 1.88,
'Cu': 1.85,
'Zn': 1.588,
'Ga': 1.756,
'Ge': 1.994,
'As': 2.211,
'Se': 2.424,
'Br': 2.685,
'Kr': 2.966,
'Rb': 0.706,
'Sr': 0.963,
'Y': 1.12,
'Zr': 1.32,
'Nb': 1.41,
'Mo': 1.47,
'Tc': 1.51,
'Ru': 1.54,
'Rh': 1.56,
'Pd': 1.58,
'Ag': 1.87,
'Cd': 1.521,
'In': 1.656,
'Sn': 1.824,
'Sb': 1.984,
'Te': 2.158,
'I': 2.359,
'Xe': 2.582,
'Cs': 0.659,
'Ba': 0.881,
'Lu': 1.09,
'Hf': 1.16,
'Ta': 1.34,
'W': 1.47,
'Re': 1.60,
'Os': 1.65,
'Ir': 1.68,
'Pt': 1.72,
'Au': 1.92,
'Hg': 1.765,
'Tl': 1.789,
'Pb': 1.854,
'Bi': 2.01,
'Po': 2.19,
'At': 2.39,
'Rn': 2.60,
'Fr': 0.67,
'Ra': 0.89
}
class _ChemicalFeaturesFactory:
"""This is a singleton class for RDKit base features."""
_instance = None
@classmethod
def get_instance(cls):
try:
from rdkit import RDConfig
from rdkit.Chem import ChemicalFeatures
except ModuleNotFoundError:
raise ImportError("This class requires RDKit to be installed.")
if not cls._instance:
fdefName = os.path.join(RDConfig.RDDataDir, 'BaseFeatures.fdef')
cls._instance = ChemicalFeatures.BuildFeatureFactory(fdefName)
return cls._instance
def one_hot_encode(val: Union[int, str],
allowable_set: Union[List[str], List[int]],
include_unknown_set: bool = False) -> List[float]:
"""One hot encoder for elements of a provided set.
Examples
--------
>>> one_hot_encode("a", ["a", "b", "c"])
[1.0, 0.0, 0.0]
>>> one_hot_encode(2, [0, 1, 2])
[0.0, 0.0, 1.0]
>>> one_hot_encode(3, [0, 1, 2])
[0.0, 0.0, 0.0]
>>> one_hot_encode(3, [0, 1, 2], True)
[0.0, 0.0, 0.0, 1.0]
Parameters
----------
val: int or str
The value must be present in `allowable_set`.
allowable_set: List[int] or List[str]
List of allowable quantities.
include_unknown_set: bool, default False
If true, the index of all values not in `allowable_set` is `len(allowable_set)`.
Returns
-------
List[float]
A one-hot vector of val.
If `include_unknown_set` is False, the length is `len(allowable_set)`.
If `include_unknown_set` is True, the length is `len(allowable_set) + 1`.
Raises
------
ValueError
If include_unknown_set is False and `val` is not in `allowable_set`.
"""
if include_unknown_set is False:
if val not in allowable_set:
logger.info("input {0} not in allowable set {1}:".format(
val, allowable_set))
# init a one-hot vector
if include_unknown_set is False:
one_hot_length = len(allowable_set)
else:
one_hot_length = len(allowable_set) + 1
one_hot = [0.0 for _ in range(one_hot_length)]
try:
one_hot[allowable_set.index(val)] = 1.0 # type: ignore
except:
if include_unknown_set:
# If include_unknown_set is True, set the last index to 1.0.
one_hot[-1] = 1.0
else:
pass
return one_hot
#################################################################
# atom (node) featurization
#################################################################
def get_atom_type_one_hot(atom: RDKitAtom,
allowable_set: List[str] = DEFAULT_ATOM_TYPE_SET,
include_unknown_set: bool = True) -> List[float]:
"""Get an one-hot feature of an atom type.
Parameters
---------
atom: rdkit.Chem.rdchem.Atom
RDKit atom object
allowable_set: List[str]
The atom types to consider. The default set is
`["C", "N", "O", "F", "P", "S", "Cl", "Br", "I"]`.
include_unknown_set: bool, default True
If true, the index of all atoms not in `allowable_set` is `len(allowable_set)`.
Returns
-------
List[float]
A one-hot vector of atom types.
If `include_unknown_set` is False, the length is `len(allowable_set)`.
If `include_unknown_set` is True, the length is `len(allowable_set) + 1`.
"""
return one_hot_encode(atom.GetSymbol(), allowable_set, include_unknown_set)
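# A minimal usage sketch (not part of the original module; assumes RDKit is
# installed and uses a hypothetical ethanol SMILES purely for illustration):
# featurize every atom of a molecule with the default atom-type set.
def _example_atom_type_one_hot():  # pragma: no cover
    from rdkit import Chem
    mol = Chem.MolFromSmiles("CCO")
    # Each vector has length 10: the 9 default atom types plus one "unknown" slot.
    return [get_atom_type_one_hot(atom) for atom in mol.GetAtoms()]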
def construct_hydrogen_bonding_info(mol: RDKitMol) -> List[Tuple[int, str]]:
"""Construct hydrogen bonding infos about a molecule.
Parameters
---------
mol: rdkit.Chem.rdchem.Mol
RDKit mol object
Returns
-------
List[Tuple[int, str]]
A list of tuple `(atom_index, hydrogen_bonding_type)`.
The `hydrogen_bonding_type` value is "Acceptor" or "Donor".
"""
factory = _ChemicalFeaturesFactory.get_instance()
feats = factory.GetFeaturesForMol(mol)
hydrogen_bonding = []
for f in feats:
hydrogen_bonding.append((f.GetAtomIds()[0], f.GetFamily()))
return hydrogen_bonding
def get_atom_hydrogen_bonding_one_hot(
atom: RDKitAtom, hydrogen_bonding: List[Tuple[int,
str]]) -> List[float]:
"""Get an one-hot feat about whether an atom accepts electrons or donates electrons.
Parameters
---------
atom: rdkit.Chem.rdchem.Atom
RDKit atom object
hydrogen_bonding: List[Tuple[int, str]]
The return value of `construct_hydrogen_bonding_info`.
The value is a list of tuple `(atom_index, hydrogen_bonding)` like (1, "Acceptor").
Returns
-------
List[float]
A one-hot vector of the ring size type. The first element
indicates "Donor", and the second element indicates "Acceptor".
"""
one_hot = [0.0, 0.0]
atom_idx = atom.GetIdx()
for hydrogen_bonding_tuple in hydrogen_bonding:
if hydrogen_bonding_tuple[0] == atom_idx:
if hydrogen_bonding_tuple[1] == "Donor":
one_hot[0] = 1.0
elif hydrogen_bonding_tuple[1] == "Acceptor":
one_hot[1] = 1.0
return one_hot
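# A minimal usage sketch (not part of the original module; assumes RDKit is
# installed): build the hydrogen bonding info once per molecule with
# `construct_hydrogen_bonding_info` and reuse it for every atom.
def _example_hydrogen_bonding_one_hot():  # pragma: no cover
    from rdkit import Chem
    mol = Chem.MolFromSmiles("CCO")
    h_bond_infos = construct_hydrogen_bonding_info(mol)
    return [
        get_atom_hydrogen_bonding_one_hot(atom, h_bond_infos)
        for atom in mol.GetAtoms()
    ]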
def get_atom_is_in_aromatic_one_hot(atom: RDKitAtom) -> List[float]:
"""Get ans one-hot feature about whether an atom is in aromatic system or not.
Parameters
---------
atom: rdkit.Chem.rdchem.Atom
RDKit atom object
Returns
-------
List[float]
A vector of whether an atom is in an aromatic system or not.
"""
return [float(atom.GetIsAromatic())]
def get_atom_hybridization_one_hot(
atom: RDKitAtom,
allowable_set: List[str] = DEFAULT_HYBRIDIZATION_SET,
include_unknown_set: bool = False) -> List[float]:
"""Get an one-hot feature of hybridization type.
Parameters
---------
atom: rdkit.Chem.rdchem.Atom
RDKit atom object
allowable_set: List[str]
The hybridization types to consider. The default set is `["SP", "SP2", "SP3"]`
include_unknown_set: bool, default False
If true, the index of all types not in `allowable_set` is `len(allowable_set)`.
Returns
-------
List[float]
An one-hot vector of the hybridization type.
If `include_unknown_set` is False, the length is `len(allowable_set)`.
If `include_unknown_set` is True, the length is `len(allowable_set) + 1`.
"""
return one_hot_encode(str(atom.GetHybridization()), allowable_set,
include_unknown_set)
def get_atom_total_num_Hs_one_hot(
atom: RDKitAtom,
allowable_set: List[int] = DEFAULT_TOTAL_NUM_Hs_SET,
include_unknown_set: bool = True) -> List[float]:
"""Get an one-hot feature of the number of hydrogens which an atom has.
Parameters
---------
atom: rdkit.Chem.rdchem.Atom
RDKit atom object
allowable_set: List[int]
The number of hydrogens to consider. The default set is `[0, 1, ..., 4]`
include_unknown_set: bool, default True
If true, the index of all types not in `allowable_set` is `len(allowable_set)`.
Returns
-------
List[float]
A one-hot vector of the number of hydrogens which an atom has.
If `include_unknown_set` is False, the length is `len(allowable_set)`.
If `include_unknown_set` is True, the length is `len(allowable_set) + 1`.
"""
return one_hot_encode(atom.GetTotalNumHs(), allowable_set,
include_unknown_set)
def get_atom_chirality_one_hot(atom: RDKitAtom) -> List[float]:
"""Get an one-hot feature about an atom chirality type.
Parameters
---------
atom: rdkit.Chem.rdchem.Atom
RDKit atom object
Returns
-------
List[float]
A one-hot vector of the chirality type. The first element
indicates "R", and the second element indicates "S".
"""
one_hot = [0.0, 0.0]
try:
chiral_type = atom.GetProp('_CIPCode')
if chiral_type == "R":
one_hot[0] = 1.0
elif chiral_type == "S":
one_hot[1] = 1.0
except:
pass
return one_hot
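# A minimal usage sketch (not part of the original module; assumes RDKit is
# installed): the `_CIPCode` property read above is only available after RDKit
# has assigned stereochemistry, so the sketch calls `AssignStereochemistry`
# explicitly on an alanine SMILES with a defined stereocenter.
def _example_chirality_one_hot():  # pragma: no cover
    from rdkit import Chem
    mol = Chem.MolFromSmiles("C[C@@H](N)C(=O)O")
    Chem.AssignStereochemistry(mol, cleanIt=True, force=True)
    return [get_atom_chirality_one_hot(atom) for atom in mol.GetAtoms()]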
def get_atom_formal_charge(atom: RDKitAtom) -> List[float]:
"""Get a formal charge of an atom.
Parameters
---------
atom: rdkit.Chem.rdchem.Atom
RDKit atom object
Returns
-------
List[float]
A vector of the formal charge.
"""
return [float(atom.GetFormalCharge())]
def get_atom_formal_charge_one_hot(
atom: RDKitAtom,
allowable_set: List[int] = DEFAULT_FORMAL_CHARGE_SET,
include_unknown_set: bool = True) -> List[float]:
"""Get one hot encoding of formal charge of an atom.
Parameters
---------
atom: rdkit.Chem.rdchem.Atom
RDKit atom object
allowable_set: List[int]
The degree to consider. The default set is `[-2, -1, ..., 2]`
include_unknown_set: bool, default True
If true, the index of all types not in `allowable_set` is `len(allowable_set)`.
Returns
-------
List[float]
A vector of the formal charge.
"""
return one_hot_encode(atom.GetFormalCharge(), allowable_set,
include_unknown_set)
def get_atom_partial_charge(atom: RDKitAtom) -> List[float]:
"""Get a partial charge of an atom.
Parameters
---------
atom: rdkit.Chem.rdchem.Atom
RDKit atom object
Returns
-------
List[float]
A vector of the partial charge.
Notes
-----
Before using this function, you must calculate `GasteigerCharge`
like `AllChem.ComputeGasteigerCharges(mol)`.
"""
gasteiger_charge = atom.GetProp('_GasteigerCharge')
if gasteiger_charge in ['-nan', 'nan', '-inf', 'inf']:
gasteiger_charge = 0.0
return [float(gasteiger_charge)]
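# A minimal usage sketch (not part of the original module; assumes RDKit is
# installed): as the Notes above state, Gasteiger charges must be computed on
# the molecule before `_GasteigerCharge` can be read from its atoms.
def _example_partial_charge():  # pragma: no cover
    from rdkit import Chem
    from rdkit.Chem import AllChem
    mol = Chem.MolFromSmiles("CCO")
    AllChem.ComputeGasteigerCharges(mol)
    return [get_atom_partial_charge(atom) for atom in mol.GetAtoms()]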
def get_atom_total_degree_one_hot(
atom: RDKitAtom,
allowable_set: List[int] = DEFAULT_TOTAL_DEGREE_SET,
include_unknown_set: bool = True) -> List[float]:
"""Get an one-hot feature of the degree which an atom has.
Parameters
---------
atom: rdkit.Chem.rdchem.Atom
RDKit atom object
allowable_set: List[int]
The degree to consider. The default set is `[0, 1, ..., 5]`
include_unknown_set: bool, default True
If true, the index of all types not in `allowable_set` is `len(allowable_set)`.
Returns
-------
List[float]
A one-hot vector of the degree which an atom has.
If `include_unknown_set` is False, the length is `len(allowable_set)`.
If `include_unknown_set` is True, the length is `len(allowable_set) + 1`.
"""
return one_hot_encode(atom.GetTotalDegree(), allowable_set,
include_unknown_set)
def get_atom_implicit_valence_one_hot(
atom: RDKitAtom,
allowable_set: List[int] = DEFAULT_ATOM_IMPLICIT_VALENCE_SET,
include_unknown_set: bool = True) -> List[float]:
"""Get an one-hot feature of implicit valence of an atom.
Parameters
---------
atom: rdkit.Chem.rdchem.Atom
RDKit atom object
allowable_set: List[int]
Atom implicit valence to consider. The default set is `[0, 1, ..., 6]`
include_unknown_set: bool, default True
If true, the index of all types not in `allowable_set` is `len(allowable_set)`.
Returns
-------
List[float]
A one-hot vector of implicit valence an atom has.
If `include_unknown_set` is False, the length is `len(allowable_set)`.
If `include_unknown_set` is True, the length is `len(allowable_set) + 1`.
"""
return one_hot_encode(atom.GetImplicitValence(), allowable_set,
include_unknown_set)
def get_atom_explicit_valence_one_hot(
atom: RDKitAtom,
allowable_set: List[int] = DEFAULT_ATOM_EXPLICIT_VALENCE_SET,
include_unknown_set: bool = True) -> List[float]:
"""Get an one-hot feature of explicit valence of an atom.
Parameters
---------
atom: rdkit.Chem.rdchem.Atom
RDKit atom object
allowable_set: List[int]
Atom explicit valence to consider. The default set is `[1, ..., 6]`
include_unknown_set: bool, default True
If true, the index of all types not in `allowable_set` is `len(allowable_set)`.
Returns
-------
List[float]
A one-hot vector of explicit valence an atom has.
If `include_unknown_set` is False, the length is `len(allowable_set)`.
If `include_unknown_set` is True, the length is `len(allowable_set) + 1`.
"""
return one_hot_encode(atom.GetExplicitValence(), allowable_set,
include_unknown_set)
#################################################################
# bond (edge) featurization
#################################################################
def get_bond_type_one_hot(bond: RDKitBond,
allowable_set: List[str] = DEFAULT_BOND_TYPE_SET,
include_unknown_set: bool = False) -> List[float]:
"""Get an one-hot feature of bond type.
Parameters
---------
bond: rdkit.Chem.rdchem.Bond
RDKit bond object
allowable_set: List[str]
The bond types to consider. The default set is `["SINGLE", "DOUBLE", "TRIPLE", "AROMATIC"]`.
include_unknown_set: bool, default False
If true, the index of all types not in `allowable_set` is `len(allowable_set)`.
Returns
-------
List[float]
A one-hot vector of the bond type.
If `include_unknown_set` is False, the length is `len(allowable_set)`.
If `include_unknown_set` is True, the length is `len(allowable_set) + 1`.
"""
return one_hot_encode(str(bond.GetBondType()), allowable_set,
include_unknown_set)
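# A minimal usage sketch (not part of the original module; assumes RDKit is
# installed and uses a hypothetical phenol SMILES): featurize every bond with
# the default bond-type set (SINGLE, DOUBLE, TRIPLE, AROMATIC).
def _example_bond_type_one_hot():  # pragma: no cover
    from rdkit import Chem
    mol = Chem.MolFromSmiles("c1ccccc1O")
    return [get_bond_type_one_hot(bond) for bond in mol.GetBonds()]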
def get_bond_is_in_same_ring_one_hot(bond: RDKitBond) -> List[float]:
"""Get an one-hot feature about whether atoms of a bond is in the same ring or not.
Parameters
---------
bond: rdkit.Chem.rdchem.Bond
RDKit bond object
Returns
-------
List[float]
A one-hot vector of whether a bond is in the same ring or not.
"""
return [float(bond.IsInRing())]
def get_bond_is_conjugated_one_hot(bond: RDKitBond) -> List[float]:
"""Get an one-hot feature about whether a bond is conjugated or not.
Parameters
---------
bond: rdkit.Chem.rdchem.Bond
RDKit bond object
Returns
-------
List[float]
A one-hot vector of whether a bond is conjugated or not.
"""
return [float(bond.GetIsConjugated())]
def get_bond_stereo_one_hot(bond: RDKitBond,
allowable_set: List[str] = DEFAULT_BOND_STEREO_SET,
include_unknown_set: bool = True) -> List[float]:
"""Get an one-hot feature of the stereo configuration of a bond.
Parameters
---------
bond: rdkit.Chem.rdchem.Bond
RDKit bond object
allowable_set: List[str]
The stereo configuration types to consider.
The default set is `["STEREONONE", "STEREOANY", "STEREOZ", "STEREOE"]`.
include_unknown_set: bool, default True
If true, the index of all types not in `allowable_set` is `len(allowable_set)`.
Returns
-------
List[float]
A one-hot vector of the stereo configuration of a bond.
If `include_unknown_set` is False, the length is `len(allowable_set)`.
If `include_unknown_set` is True, the length is `len(allowable_set) + 1`.
"""
return one_hot_encode(str(bond.GetStereo()), allowable_set,
include_unknown_set)
def get_bond_graph_distance_one_hot(
bond: RDKitBond,
graph_dist_matrix: np.ndarray,
allowable_set: List[int] = DEFAULT_GRAPH_DISTANCE_SET,
include_unknown_set: bool = True) -> List[float]:
"""Get an one-hot feature of graph distance.
Parameters
---------
bond: rdkit.Chem.rdchem.Bond
RDKit bond object
graph_dist_matrix: np.ndarray
The return value of `Chem.GetDistanceMatrix(mol)`. The shape is `(num_atoms, num_atoms)`.
allowable_set: List[int]
The graph distance types to consider. The default set is `[1, 2, ..., 7]`.
include_unknown_set: bool, default True
If true, the index of all types not in `allowable_set` is `len(allowable_set)`.
Returns
-------
List[float]
A one-hot vector of the graph distance.
If `include_unknown_set` is False, the length is `len(allowable_set)`.
If `include_unknown_set` is True, the length is `len(allowable_set) + 1`.
"""
graph_dist = graph_dist_matrix[bond.GetBeginAtomIdx(), bond.GetEndAtomIdx()]
return one_hot_encode(graph_dist, allowable_set, include_unknown_set)
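# A minimal end-to-end sketch (not part of the original module; assumes RDKit
# is installed and uses a hypothetical acetic acid SMILES): concatenate several
# of the helpers above into per-atom and per-bond feature vectors, the way a
# graph featurizer typically would.
def _example_full_featurization(smiles="CC(=O)O"):  # pragma: no cover
    from rdkit import Chem
    mol = Chem.MolFromSmiles(smiles)
    h_bond_infos = construct_hydrogen_bonding_info(mol)
    dist_matrix = Chem.GetDistanceMatrix(mol)
    atom_features = [
        get_atom_type_one_hot(atom) + get_atom_hybridization_one_hot(atom) +
        get_atom_total_num_Hs_one_hot(atom) + get_atom_formal_charge(atom) +
        get_atom_hydrogen_bonding_one_hot(atom, h_bond_infos) +
        get_atom_is_in_aromatic_one_hot(atom) for atom in mol.GetAtoms()
    ]
    bond_features = [
        get_bond_type_one_hot(bond) + get_bond_is_in_same_ring_one_hot(bond) +
        get_bond_graph_distance_one_hot(bond, dist_matrix)
        for bond in mol.GetBonds()
    ]
    return atom_features, bond_features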
<file_sep>"""
This is a sample implementation for working DGL with DeepChem!
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
from deepchem.models.losses import Loss, L2Loss, SparseSoftmaxCrossEntropy
from deepchem.models.torch_models.torch_model import TorchModel
class CGCNNLayer(nn.Module):
"""The convolutional layer of CGCNN.
This class was implemented using DGLGraph methods.
Please confirm how to use DGLGraph methods from below link.
See: https://docs.dgl.ai/en/0.4.x/tutorials/models/1_gnn/9_gat.html
Examples
--------
>>> import deepchem as dc
>>> from pymatgen.core import Lattice, Structure
>>> lattice = Lattice.cubic(4.2)
>>> structure = Structure(lattice, ["Cs", "Cl"], [[0, 0, 0], [0.5, 0.5, 0.5]])
>>> featurizer = dc.feat.CGCNNFeaturizer()
>>> cgcnn_graph = featurizer.featurize([structure])[0]
>>> cgcnn_graph.num_node_features
92
>>> cgcnn_graph.num_edge_features
41
>>> cgcnn_dgl_graph = cgcnn_graph.to_dgl_graph()
>>> print(type(cgcnn_dgl_graph))
<class 'dgl.heterograph.DGLHeteroGraph'>
>>> layer = CGCNNLayer(hidden_node_dim=92, edge_dim=41)
>>> node_feats = cgcnn_dgl_graph.ndata.pop('x')
>>> edge_feats = cgcnn_dgl_graph.edata.pop('edge_attr')
>>> new_node_feats, new_edge_feats = layer(cgcnn_dgl_graph, node_feats, edge_feats)
Notes
-----
This class requires DGL and PyTorch to be installed.
"""
def __init__(self,
hidden_node_dim: int,
edge_dim: int,
batch_norm: bool = True):
"""
Parameters
----------
hidden_node_dim: int
The length of the hidden node feature vectors.
edge_dim: int
The length of the edge feature vectors.
batch_norm: bool, default True
Whether to apply batch normalization or not.
"""
super(CGCNNLayer, self).__init__()
z_dim = 2 * hidden_node_dim + edge_dim
linear_out_dim = 2 * hidden_node_dim
self.linear = nn.Linear(z_dim, linear_out_dim)
self.batch_norm = nn.BatchNorm1d(linear_out_dim) if batch_norm else None
def message_func(self, edges):
z = torch.cat([edges.src['x'], edges.dst['x'], edges.data['edge_attr']],
dim=1)
z = self.linear(z)
if self.batch_norm is not None:
z = self.batch_norm(z)
gated_z, message_z = z.chunk(2, dim=1)
gated_z = torch.sigmoid(gated_z)
message_z = F.softplus(message_z)
return {'message': gated_z * message_z}
def reduce_func(self, nodes):
nbr_summed = torch.sum(nodes.mailbox['message'], dim=1)
new_x = F.softplus(nodes.data['x'] + nbr_summed)
return {'new_x': new_x}
def forward(self, dgl_graph, node_feats, edge_feats):
"""Update node representations.
Parameters
----------
dgl_graph: DGLGraph
DGLGraph for a batch of graphs.
node_feats: torch.Tensor
The node features. The shape is `(N, hidden_node_dim)`.
edge_feats: torch.Tensor
The edge features. The shape is `(num_edges, edge_dim)`.
Returns
-------
node_feats: torch.Tensor
The updated node features. The shape is `(N, hidden_node_dim)`.
"""
dgl_graph.ndata['x'] = node_feats
dgl_graph.edata['edge_attr'] = edge_feats
dgl_graph.update_all(self.message_func, self.reduce_func)
node_feats = dgl_graph.ndata.pop('new_x')
return node_feats
class CGCNN(nn.Module):
"""Crystal Graph Convolutional Neural Network (CGCNN).
This model takes arbitrary crystal structures as an input and predicts material properties
using the element information and the connections between atoms in the crystal. If you want to
obtain material properties which have a high computational cost, like the band gap in the case
of DFT, this model may be useful. This model is one of the variants of Graph Convolutional
Networks. The main differences from other GCN models are how the graphs are constructed and
how the node representations are updated. This model defines the crystal graph from structures
using distances between atoms. The crystal graph is an undirected multigraph which is defined
by nodes representing atom properties and edges representing connections between atoms
in a crystal. This model then updates the node representations using both neighbor node
and edge representations. Please confirm the detailed algorithm in [1]_.
Examples
--------
>>> import deepchem as dc
>>> from pymatgen.core import Lattice, Structure
>>> lattice = Lattice.cubic(4.2)
>>> structure = Structure(lattice, ["Cs", "Cl"], [[0, 0, 0], [0.5, 0.5, 0.5]])
>>> featurizer = dc.feat.CGCNNFeaturizer()
>>> cgcnn_feat = featurizer.featurize([structure])[0]
>>> print(type(cgcnn_feat))
<class 'deepchem.feat.graph_data.GraphData'>
>>> cgcnn_dgl_feat = cgcnn_feat.to_dgl_graph()
>>> print(type(cgcnn_dgl_feat))
<class 'dgl.heterograph.DGLHeteroGraph'>
>>> model = dc.models.CGCNN(mode='regression', n_tasks=2)
>>> out = model(cgcnn_dgl_feat)
>>> print(type(out))
<class 'torch.Tensor'>
>>> out.shape == (1, 2)
True
References
----------
.. [1] <NAME>, and <NAME>. "Crystal graph convolutional neural networks
for an accurate and interpretable prediction of material properties." Physical review letters
120.14 (2018): 145301.
Notes
-----
This class requires DGL and PyTorch to be installed.
"""
def __init__(
self,
in_node_dim: int = 92,
hidden_node_dim: int = 64,
in_edge_dim: int = 41,
num_conv: int = 3,
predictor_hidden_feats: int = 128,
n_tasks: int = 1,
mode: str = 'regression',
n_classes: int = 2,
):
"""
Parameters
----------
in_node_dim: int, default 92
The length of the initial node feature vectors. The 92 is
based on length of vectors in the atom_init.json.
hidden_node_dim: int, default 64
The length of the hidden node feature vectors.
in_edge_dim: int, default 41
The length of the initial edge feature vectors. The 41 is
based on default setting of CGCNNFeaturizer.
num_conv: int, default 3
The number of convolutional layers.
predictor_hidden_feats: int, default 128
The size for hidden representations in the output MLP predictor.
n_tasks: int, default 1
The number of the output size.
mode: str, default 'regression'
The model type, 'classification' or 'regression'.
n_classes: int, default 2
The number of classes to predict (only used in classification mode).
"""
try:
import dgl
except:
raise ImportError("This class requires DGL to be installed.")
super(CGCNN, self).__init__()
if mode not in ['classification', 'regression']:
raise ValueError(
"mode must be either 'classification' or 'regression'")
self.n_tasks = n_tasks
self.mode = mode
self.n_classes = n_classes
self.embedding = nn.Linear(in_node_dim, hidden_node_dim)
self.conv_layers = nn.ModuleList([
CGCNNLayer(hidden_node_dim=hidden_node_dim,
edge_dim=in_edge_dim,
batch_norm=True) for _ in range(num_conv)
])
self.pooling = dgl.mean_nodes
self.fc = nn.Linear(hidden_node_dim, predictor_hidden_feats)
if self.mode == 'regression':
self.out = nn.Linear(predictor_hidden_feats, n_tasks)
else:
self.out = nn.Linear(predictor_hidden_feats, n_tasks * n_classes)
def forward(self, dgl_graph):
"""Predict labels
Parameters
----------
dgl_graph: DGLGraph
DGLGraph for a batch of graphs. The graph expects that the node features
are stored in `ndata['x']`, and the edge features are stored in `edata['edge_attr']`.
Returns
-------
out: torch.Tensor
The output values of this model.
If mode == 'regression', the shape is `(batch_size, n_tasks)`.
If mode == 'classification', the shape is `(batch_size, n_tasks, n_classes)` (n_tasks > 1)
or `(batch_size, n_classes)` (n_tasks == 1) and the output values are probabilities of each class label.
"""
graph = dgl_graph
# embedding node features
node_feats = graph.ndata.pop('x')
edge_feats = graph.edata.pop('edge_attr')
node_feats = self.embedding(node_feats)
# convolutional layer
for conv in self.conv_layers:
node_feats = conv(graph, node_feats, edge_feats)
# pooling
graph.ndata['updated_x'] = node_feats
graph_feat = F.softplus(self.pooling(graph, 'updated_x'))
graph_feat = F.softplus(self.fc(graph_feat))
out = self.out(graph_feat)
if self.mode == 'regression':
return out
else:
logits = out.view(-1, self.n_tasks, self.n_classes)
# for n_tasks == 1 case
logits = torch.squeeze(logits)
proba = F.softmax(logits, dim=-1)
return proba, logits
class CGCNNModel(TorchModel):
"""Crystal Graph Convolutional Neural Network (CGCNN).
Here is a simple example of code that uses the CGCNNModel with
materials dataset.
Examples
--------
>>> import deepchem as dc
>>> dataset_config = {"reload": False, "featurizer": dc.feat.CGCNNFeaturizer(), "transformers": []}
>>> tasks, datasets, transformers = dc.molnet.load_perovskite(**dataset_config)
>>> train, valid, test = datasets
>>> model = dc.models.CGCNNModel(mode='regression', batch_size=32, learning_rate=0.001)
>>> avg_loss = model.fit(train, nb_epoch=50)
This model takes arbitrary crystal structures as an input and predicts material properties
using the element information and the connections between atoms in the crystal. If you want to
obtain material properties which have a high computational cost, like the band gap in the case
of DFT, this model may be useful. This model is one of the variants of Graph Convolutional
Networks. The main differences from other GCN models are how the graphs are constructed and
how the node representations are updated. This model defines the crystal graph from structures
using distances between atoms. The crystal graph is an undirected multigraph which is defined
by nodes representing atom properties and edges representing connections between atoms
in a crystal. This model then updates the node representations using both neighbor node
and edge representations. Please confirm the detailed algorithm in [1]_.
References
----------
.. [1] <NAME>, and <NAME>. "Crystal graph convolutional neural networks
for an accurate and interpretable prediction of material properties." Physical review letters
120.14 (2018): 145301.
Notes
-----
This class requires DGL and PyTorch to be installed.
"""
def __init__(self,
in_node_dim: int = 92,
hidden_node_dim: int = 64,
in_edge_dim: int = 41,
num_conv: int = 3,
predictor_hidden_feats: int = 128,
n_tasks: int = 1,
mode: str = 'regression',
n_classes: int = 2,
**kwargs):
"""This class accepts all the keyword arguments from TorchModel.
Parameters
----------
in_node_dim: int, default 92
The length of the initial node feature vectors. The 92 is
based on length of vectors in the atom_init.json.
hidden_node_dim: int, default 64
The length of the hidden node feature vectors.
in_edge_dim: int, default 41
The length of the initial edge feature vectors. The 41 is
based on default setting of CGCNNFeaturizer.
num_conv: int, default 3
The number of convolutional layers.
predictor_hidden_feats: int, default 128
The size for hidden representations in the output MLP predictor.
n_tasks: int, default 1
The number of the output size.
mode: str, default 'regression'
The model type, 'classification' or 'regression'.
n_classes: int, default 2
The number of classes to predict (only used in classification mode).
kwargs: Dict
This class accepts all the keyword arguments from TorchModel.
"""
model = CGCNN(in_node_dim, hidden_node_dim, in_edge_dim, num_conv,
predictor_hidden_feats, n_tasks, mode, n_classes)
if mode == "regression":
loss: Loss = L2Loss()
output_types = ['prediction']
else:
loss = SparseSoftmaxCrossEntropy()
output_types = ['prediction', 'loss']
super(CGCNNModel, self).__init__(model,
loss=loss,
output_types=output_types,
**kwargs)
def _prepare_batch(self, batch):
"""Create batch data for CGCNN.
Parameters
----------
batch: Tuple
The tuple are `(inputs, labels, weights)`.
Returns
-------
inputs: DGLGraph
DGLGraph for a batch of graphs.
labels: List[torch.Tensor] or None
The labels converted to torch.Tensor
weights: List[torch.Tensor] or None
The weights for each sample or sample/task pair converted to torch.Tensor
"""
try:
import dgl
except:
raise ImportError("This class requires DGL to be installed.")
inputs, labels, weights = batch
dgl_graphs = [graph.to_dgl_graph() for graph in inputs[0]]
inputs = dgl.batch(dgl_graphs).to(self.device)
_, labels, weights = super(CGCNNModel, self)._prepare_batch(
([], labels, weights))
return inputs, labels, weights
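# A minimal batching sketch (not part of the original module; assumes DGL is
# installed and that `graphs` is a list of GraphData objects, e.g. produced by
# dc.feat.CGCNNFeaturizer): this mirrors what `_prepare_batch` does internally
# before the batched graph is handed to the CGCNN module.
def _example_manual_batch(graphs):  # pragma: no cover
    import dgl
    dgl_graphs = [graph.to_dgl_graph() for graph in graphs]
    batched_graph = dgl.batch(dgl_graphs)
    model = CGCNN(mode='regression', n_tasks=1)
    # In regression mode the output shape is (len(graphs), n_tasks).
    return model(batched_graph)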
<file_sep>"""Monte Carlo tree search algorithm for reinforcement learning."""
from deepchem.models import TensorGraph
from deepchem.models.optimizers import Adam
from deepchem.models.tensorgraph.layers import Feature, Weights, Label, Layer
import numpy as np
import tensorflow as tf
try:
from collections.abc import Sequence as SequenceCollection
except:
from collections import Sequence as SequenceCollection
import copy
import time
class MCTSLoss(Layer):
"""This layer computes the loss function for MCTS."""
def __init__(self, value_weight, **kwargs):
super(MCTSLoss, self).__init__(**kwargs)
self.value_weight = value_weight
def create_tensor(self, **kwargs):
pred_prob, pred_value, search_prob, search_value = [
layer.out_tensor for layer in self.in_layers
]
log_prob = tf.log(pred_prob + np.finfo(np.float32).eps)
probability_loss = -tf.reduce_mean(search_prob * log_prob)
value_loss = tf.reduce_mean(tf.square(pred_value - search_value))
self.out_tensor = probability_loss + self.value_weight * value_loss
self.probability_loss = probability_loss
self.value_loss = value_loss
return self.out_tensor
class MCTS(object):
"""
Implements a Monte Carlo tree search algorithm for reinforcement learning.
This is adapted from Silver et al, "Mastering the game of Go without human
knowledge" (https://www.nature.com/articles/nature24270). The methods
described in that paper rely on features of Go that are not generally true of
all reinforcement learning problems. To transform it into a more generally
useful RL algorithm, it has been necessary to change some aspects of the
method. The overall approach used in this implementation is still the same,
although some of the details differ.
This class requires the policy to output two quantities: a vector giving the
probability of taking each action, and an estimate of the value function for
the current state. At every step of simulating an episode, it performs an
expensive tree search to explore the consequences of many possible actions.
Based on that search, it computes much better estimates for the value function
of the current state and the desired action probabilities. It then tries to
optimize the policy to make its outputs match the result of the tree search.
Optimization proceeds through a series of iterations. Each iteration consists
of two stages:
1. Simulate many episodes. At every step perform a tree search to determine
targets for the probabilities and value function, and store them into a
buffer.
2. Optimize the policy using batches drawn from the buffer generated in step 1.
The tree search involves repeatedly selecting actions starting from the
current state. This is done by using deepcopy() to clone the environment. It
is essential that this produce a deterministic sequence of states: performing
an action on the cloned environment must always lead to the same state as
performing that action on the original environment. For environments whose
state transitions are deterministic, this is not a problem. For ones whose
state transitions are stochastic, it is essential that the random number
generator used to select new states be stored as part of the environment and
be properly cloned by deepcopy().
This class does not support policies that include recurrent layers.
"""
def __init__(self,
env,
policy,
max_search_depth=100,
n_search_episodes=1000,
discount_factor=0.99,
value_weight=1.0,
optimizer=Adam(),
model_dir=None):
"""Create an object for optimizing a policy.
Parameters
----------
env: Environment
the Environment to interact with
policy: Policy
the Policy to optimize. Its create_layers() method must return a dict containing the
keys 'action_prob' and 'value', corresponding to the action probabilities and value estimate
max_search_depth: int
the maximum depth of the tree search, measured in steps
n_search_episodes: int
the number of episodes to simulate (up to max_search_depth, if they do not
terminate first) for each tree search
discount_factor: float
the discount factor to use when computing rewards
value_weight: float
a scale factor for the value loss term in the loss function
optimizer: Optimizer
the optimizer to use
model_dir: str
the directory in which the model will be saved. If None, a temporary directory will be created.
"""
self._env = copy.deepcopy(env)
self._policy = policy
self.max_search_depth = max_search_depth
self.n_search_episodes = n_search_episodes
self.discount_factor = discount_factor
self.value_weight = value_weight
self._state_is_list = isinstance(env.state_shape[0], SequenceCollection)
if optimizer is None:
self._optimizer = Adam(learning_rate=0.001, beta1=0.9, beta2=0.999)
else:
self._optimizer = optimizer
(self._graph, self._features, self._pred_prob, self._pred_value,
self._search_prob, self._search_value) = self._build_graph(
None, 'global', model_dir)
with self._graph._get_tf("Graph").as_default():
with tf.variable_scope('global'):
self._checkpoint = tf.train.Checkpoint()
self._checkpoint.save_counter # Ensure the variable has been created
self._checkpoint.listed = tf.get_collection(
tf.GraphKeys.GLOBAL_VARIABLES, scope='global')
self._graph.session.run(self._checkpoint.save_counter.initializer)
def _build_graph(self, tf_graph, scope, model_dir):
"""Construct a TensorGraph containing the policy and loss calculations."""
state_shape = self._env.state_shape
state_dtype = self._env.state_dtype
if not self._state_is_list:
state_shape = [state_shape]
state_dtype = [state_dtype]
features = []
for s, d in zip(state_shape, state_dtype):
features.append(Feature(shape=[None] + list(s), dtype=tf.as_dtype(d)))
policy_layers = self._policy.create_layers(features)
action_prob = policy_layers['action_prob']
value = policy_layers['value']
search_prob = Label(shape=(None, self._env.n_actions))
search_value = Label(shape=(None,))
loss = MCTSLoss(
self.value_weight,
in_layers=[action_prob, value, search_prob, search_value])
graph = TensorGraph(
batch_size=self.max_search_depth,
use_queue=False,
graph=tf_graph,
model_dir=model_dir)
for f in features:
graph._add_layer(f)
graph.add_output(action_prob)
graph.add_output(value)
graph.set_loss(loss)
graph.set_optimizer(self._optimizer)
with graph._get_tf("Graph").as_default():
with tf.variable_scope(scope):
graph.build()
if len(graph.rnn_initial_states) > 0:
raise ValueError('MCTS does not support policies with recurrent layers')
return graph, features, action_prob, value, search_prob, search_value
def fit(self,
iterations,
steps_per_iteration=10000,
epochs_per_iteration=10,
temperature=0.5,
puct_scale=None,
max_checkpoints_to_keep=5,
checkpoint_interval=600,
restore=False):
"""Train the policy.
Parameters
----------
iterations: int
the total number of iterations (simulation followed by optimization) to perform
steps_per_iteration: int
the total number of steps to simulate in each iteration. Every step consists
of a tree search, followed by selecting an action based on the results of
the search.
epochs_per_iteration: int
the number of epochs of optimization to perform for each iteration. Each
epoch involves randomly ordering all the steps that were just simulated in
the current iteration, splitting them into batches, and looping over the
batches.
temperature: float
the temperature factor to use when selecting a move for each step of
simulation. Larger values produce a broader probability distribution and
hence more exploration. Smaller values produce a stronger preference for
whatever action did best in the tree search.
puct_scale: float
the scale of the PUCT term in the expression for selecting actions during
tree search. This should be roughly similar in magnitude to the rewards
given by the environment, since the PUCT term is added to the mean
discounted reward. This may be None, in which case a value is adaptively
selected that tries to match the mean absolute value of the discounted
reward.
max_checkpoints_to_keep: int
the maximum number of checkpoint files to keep. When this number is reached, older
files are deleted.
checkpoint_interval: float
the time interval at which to save checkpoints, measured in seconds
restore: bool
if True, restore the model from the most recent checkpoint and continue training
from there. If False, retrain the model from scratch.
"""
if puct_scale is None:
self._puct_scale = 1.0
adapt_puct = True
else:
self._puct_scale = puct_scale
adapt_puct = False
with self._graph._get_tf("Graph").as_default():
self._graph.session.run(tf.global_variables_initializer())
if restore:
self.restore()
variables = tf.get_collection(
tf.GraphKeys.GLOBAL_VARIABLES, scope='global')
manager = tf.train.CheckpointManager(
self._checkpoint, self._graph.model_dir, max_checkpoints_to_keep)
self._checkpoint_time = time.time() + checkpoint_interval
# Run the algorithm.
for iteration in range(iterations):
buffer = self._run_episodes(steps_per_iteration, temperature, manager,
adapt_puct)
self._optimize_policy(buffer, epochs_per_iteration)
# Save a file checkpoint.
with self._graph.session.as_default():
manager.save()
def predict(self, state):
"""Compute the policy's output predictions for a state.
Parameters
----------
state: array
the state of the environment for which to generate predictions
Returns
-------
the array of action probabilities, and the estimated value function
"""
if not self._state_is_list:
state = [state]
with self._graph._get_tf("Graph").as_default():
feed_dict = self._create_feed_dict(state)
tensors = [self._pred_prob, self._pred_value]
results = self._graph.session.run(tensors, feed_dict=feed_dict)
return results[:2]
def select_action(self, state, deterministic=False):
"""Select an action to perform based on the environment's state.
Parameters
----------
state: array
the state of the environment for which to select an action
deterministic: bool
if True, always return the best action (that is, the one with highest probability).
If False, randomly select an action based on the computed probabilities.
Returns
-------
the index of the selected action
"""
if not self._state_is_list:
state = [state]
with self._graph._get_tf("Graph").as_default():
feed_dict = self._create_feed_dict(state)
probabilities = self._graph.session.run(
self._pred_prob, feed_dict=feed_dict)
if deterministic:
return probabilities.argmax()
else:
return np.random.choice(
np.arange(self._env.n_actions), p=probabilities[0])
def restore(self):
"""Reload the model parameters from the most recent checkpoint file."""
last_checkpoint = tf.train.latest_checkpoint(self._graph.model_dir)
if last_checkpoint is None:
raise ValueError('No checkpoint found')
with self._graph._get_tf("Graph").as_default():
self._checkpoint.restore(last_checkpoint).run_restore_ops(
self._graph.session)
def _create_feed_dict(self, state):
"""Create a feed dict for use by predict() or select_action()."""
feed_dict = dict((f.out_tensor, np.expand_dims(s, axis=0))
for f, s in zip(self._features, state))
return feed_dict
def _run_episodes(self, steps, temperature, manager, adapt_puct):
"""Simulate the episodes for one iteration."""
buffer = []
self._env.reset()
root = TreeSearchNode(0.0)
for step in range(steps):
prob, reward = self._do_tree_search(root, temperature, adapt_puct)
state = self._env.state
if not self._state_is_list:
state = [state]
buffer.append((state, prob, reward))
action = np.random.choice(np.arange(self._env.n_actions), p=prob)
self._env.step(action)
if self._env.terminated:
self._env.reset()
root = TreeSearchNode(0.0)
else:
root = root.children[action]
if time.time() > self._checkpoint_time:
with self._graph.session.as_default():
manager.save()
self._checkpoint_time = time.time()
return buffer
def _optimize_policy(self, buffer, epochs):
"""Optimize the policy based on the replay buffer from the current iteration."""
batch_size = self._graph.batch_size
n_batches = len(buffer) // batch_size
for epoch in range(epochs):
np.random.shuffle(buffer)
def generate_batches():
for batch in range(n_batches):
indices = list(range(batch * batch_size, (batch + 1) * batch_size))
feed_dict = {}
for i, f in enumerate(self._features):
feed_dict[f] = np.stack([buffer[j][0][i] for j in indices])
feed_dict[self._search_prob] = np.stack([buffer[j][1] for j in indices])
feed_dict[self._search_value] = np.array(
[buffer[j][2] for j in indices])
yield feed_dict
loss = self._graph.fit_generator(
generate_batches(), checkpoint_interval=0)
def _do_tree_search(self, root, temperature, adapt_puct):
"""Perform the tree search for a state."""
# Build the tree.
for i in range(self.n_search_episodes):
env = copy.deepcopy(self._env)
self._create_trace(env, root, 1)
# Compute the final probabilities and expected reward.
prob = np.array([c.count**(1.0 / temperature) for c in root.children])
prob /= np.sum(prob)
reward = np.sum([p * c.mean_reward for p, c in zip(prob, root.children)])
if adapt_puct:
scale = np.sum(
[p * np.abs(c.mean_reward) for p, c in zip(prob, root.children)])
self._puct_scale = 0.99 * self._puct_scale + 0.01 * scale
return prob, reward
def _create_trace(self, env, node, depth):
"""Create one trace as part of the tree search."""
node.count += 1
if env.terminated:
# Mark this node as terminal
node.children = None
node.value = 0.0
return 0.0
if node.children is not None and len(node.children) == 0:
# Expand this node.
prob_pred, value = self.predict(env.state)
node.value = float(value)
node.children = [TreeSearchNode(p) for p in prob_pred[0]]
if depth == self.max_search_depth:
reward = 0.0
future_rewards = node.value
else:
# Select the next action to perform.
total_counts = sum(c.count for c in node.children)
if total_counts == 0:
score = [c.prior_prob for c in node.children]
else:
scale = self._puct_scale * np.sqrt(total_counts)
score = [
c.mean_reward + scale * c.prior_prob / (1 + c.count)
for c in node.children
]
action = np.argmax(score)
next_node = node.children[action]
reward = env.step(action)
# Recursively build the tree.
future_rewards = self._create_trace(env, next_node, depth + 1)
# Update statistics for this node.
future_rewards = reward + self.discount_factor * future_rewards
node.total_reward += future_rewards
node.mean_reward = node.total_reward / node.count
return future_rewards
class TreeSearchNode(object):
"""Represents a node in the Monte Carlo tree search."""
def __init__(self, prior_prob):
self.count = 0
self.reward = 0.0
self.total_reward = 0.0
self.mean_reward = 0.0
self.prior_prob = prior_prob
self.children = []
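# A small illustrative sketch (not part of the original module): it reproduces
# the PUCT-style selection score used in `_create_trace`, where the exploration
# bonus `scale * prior_prob / (1 + count)` is added to the mean discounted
# reward of each child node.
def _example_puct_scores(children, puct_scale=1.0):  # pragma: no cover
    total_counts = sum(c.count for c in children)
    if total_counts == 0:
        return [c.prior_prob for c in children]
    scale = puct_scale * np.sqrt(total_counts)
    return [
        c.mean_reward + scale * c.prior_prob / (1 + c.count) for c in children
    ]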
<file_sep>import logging
import numpy as np
from typing import Tuple
from deepchem.data import Dataset
from deepchem.trans.transformers import Transformer
logger = logging.getLogger(__name__)
class DuplicateBalancingTransformer(Transformer):
"""Balance binary or multiclass datasets by duplicating rarer class samples.
This class balances a dataset by duplicating samples of the rarer class so
that the sum of all example weights from all classes is the same. (Up to
integer rounding of course). This can be useful when you're working on an
imbalanced dataset where there are far fewer examples of some classes than
others.
This class differs from `BalancingTransformer` in that it actually
duplicates rarer class samples rather than just increasing their sample
weights. This may be more friendly for models that are numerically fragile
and can't handle imbalanced example weights.
Examples
--------
Here's an example for a binary dataset.
>>> n_samples = 10
>>> n_features = 3
>>> n_tasks = 1
>>> n_classes = 2
>>> import deepchem as dc
>>> import numpy as np
>>> ids = np.arange(n_samples)
>>> X = np.random.rand(n_samples, n_features)
>>> y = np.random.randint(n_classes, size=(n_samples, n_tasks))
>>> w = np.ones((n_samples, n_tasks))
>>> dataset = dc.data.NumpyDataset(X, y, w, ids)
>>> transformer = dc.trans.DuplicateBalancingTransformer(dataset=dataset)
>>> dataset = transformer.transform(dataset)
And here's a multiclass dataset example.
>>> n_samples = 50
>>> n_features = 3
>>> n_tasks = 1
>>> n_classes = 5
>>> ids = np.arange(n_samples)
>>> X = np.random.rand(n_samples, n_features)
>>> y = np.random.randint(n_classes, size=(n_samples, n_tasks))
>>> w = np.ones((n_samples, n_tasks))
>>> dataset = dc.data.NumpyDataset(X, y, w, ids)
>>> transformer = dc.trans.DuplicateBalancingTransformer(dataset=dataset)
>>> dataset = transformer.transform(dataset)
See Also
--------
deepchem.trans.BalancingTransformer: Balance by changing sample weights.
Note
----
This transformer is only well-defined for singletask datasets. (Since
examples are actually duplicated, there's no meaningful way to duplicate
across multiple tasks in a way that preserves the balance.)
This transformer is only meaningful for classification datasets where `y`
takes on a limited set of values. This class transforms all of `X`, `y`,
`w`, `ids`.
Raises
------
`ValueError` if the provided dataset is multitask.
"""
def __init__(self, dataset: Dataset):
super(DuplicateBalancingTransformer, self).__init__(transform_X=True,
transform_y=True,
transform_w=True,
transform_ids=True,
dataset=dataset)
if len(dataset.get_task_names()) > 1:
raise ValueError(
"This transformation is only defined for singletask datsets.")
# Get the labels/weights
y = dataset.y
w = dataset.w
# Normalize shapes
if len(y.shape) == 1:
y = np.reshape(y, (len(y), 1))
if len(w.shape) == 1:
w = np.reshape(w, (len(w), 1))
if len(y.shape) != 2:
raise ValueError("y must be of shape (N,) or (N, n_tasks)")
if len(w.shape) != 2:
raise ValueError("w must be of shape (N,) or (N, n_tasks)")
self.classes = sorted(np.unique(y))
# Remove labels with zero weights
y = y[w != 0]
class_weights = []
# Note that we may have 0 elements of a given class since we remove those
# labels with zero weight.
for c in self.classes:
# this works because y is 1D
c_weight = np.sum(w[y == c])
class_weights.append(c_weight)
weight_largest = max(class_weights)
# This is the right ratio since int(N/num_c) * num_c \approx N
# for all classes
duplication_ratio = [
int(weight_largest / float(c_weight)) if c_weight > 0 else 0
for c_weight in class_weights
]
self.duplication_ratio = duplication_ratio
def transform_array(
self, X: np.ndarray, y: np.ndarray, w: np.ndarray, ids: np.ndarray
) -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
"""Transform the data in a set of (X, y, w, id) arrays.
Parameters
----------
X: np.ndarray
Array of features
y: np.ndarray
Array of labels
w: np.ndarray
Array of weights.
ids: np.ndarray
Array of identifiers
Returns
-------
Xtrans: np.ndarray
Transformed array of features
ytrans: np.ndarray
Transformed array of labels
wtrans: np.ndarray
Transformed array of weights
idtrans: np.ndarray
Transformed array of identifiers
"""
if not (len(y.shape) == 1 or (len(y.shape) == 2 and y.shape[1] == 1)):
raise ValueError("y must be of shape (N,) or (N, 1)")
if not (len(w.shape) == 1 or (len(w.shape) == 2 and w.shape[1] == 1)):
raise ValueError("w must be of shape (N,) or (N, 1)")
# Flattening is safe because of shape check above
y = y.flatten()
w = w.flatten()
X_dups, y_dups, w_dups, ids_dups = [], [], [], []
for i, c in enumerate(self.classes):
duplication_ratio = self.duplication_ratio[i]
c_inds = (y == c)
X_c = X[c_inds]
y_c = y[c_inds]
w_c = w[c_inds]
ids_c = ids[c_inds]
X_c_dup = np.repeat(X_c, duplication_ratio, axis=0)
y_c_dup = np.repeat(y_c, duplication_ratio, axis=0)
w_c_dup = np.repeat(w_c, duplication_ratio, axis=0)
ids_c_dup = np.repeat(ids_c, duplication_ratio, axis=0)
X_dups.append(X_c_dup)
y_dups.append(y_c_dup)
w_dups.append(w_c_dup)
ids_dups.append(ids_c_dup)
Xtrans = np.concatenate(X_dups, axis=0)
ytrans = np.concatenate(y_dups, axis=0)
wtrans = np.concatenate(w_dups, axis=0)
idstrans = np.concatenate(ids_dups, axis=0)
return (Xtrans, ytrans, wtrans, idstrans)
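# A small illustrative sketch (not part of the original module): it mirrors the
# duplication-ratio computation in `__init__`. For class weights of (8.0, 2.0)
# the largest weight is 8.0, giving ratios [1, 4], so every sample of the rarer
# class is repeated four times by `transform_array`.
def _example_duplication_ratio(class_weights=(8.0, 2.0)):  # pragma: no cover
    weight_largest = max(class_weights)
    return [
        int(weight_largest / float(c_weight)) if c_weight > 0 else 0
        for c_weight in class_weights
    ]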
<file_sep>"""
Blood-Brain Barrier Penetration dataset loader.
"""
import os
import deepchem as dc
from deepchem.molnet.load_function.molnet_loader import TransformerGenerator, _MolnetLoader
from deepchem.data import Dataset
from typing import List, Optional, Tuple, Union
BBBP_URL = "https://deepchemdata.s3-us-west-1.amazonaws.com/datasets/BBBP.csv"
BBBP_TASKS = ["p_np"]
class _BBBPLoader(_MolnetLoader):
def create_dataset(self) -> Dataset:
dataset_file = os.path.join(self.data_dir, "BBBP.csv")
if not os.path.exists(dataset_file):
dc.utils.data_utils.download_url(url=BBBP_URL,
dest_dir=self.data_dir)
loader = dc.data.CSVLoader(tasks=self.tasks,
feature_field="smiles",
featurizer=self.featurizer)
return loader.create_dataset(dataset_file, shard_size=8192)
def load_bbbp(
featurizer: Union[dc.feat.Featurizer, str] = 'ECFP',
splitter: Union[dc.splits.Splitter, str, None] = 'scaffold',
transformers: List[Union[TransformerGenerator, str]] = ['balancing'],
reload: bool = True,
data_dir: Optional[str] = None,
save_dir: Optional[str] = None,
**kwargs
) -> Tuple[List[str], Tuple[Dataset, ...], List[dc.trans.Transformer]]:
"""Load BBBP dataset
The blood-brain barrier penetration (BBBP) dataset is designed for the
modeling and prediction of barrier permeability. As a membrane separating
circulating blood and brain extracellular fluid, the blood-brain barrier
blocks most drugs, hormones and neurotransmitters. Thus penetration of the
barrier forms a long-standing issue in the development of drugs targeting the
central nervous system.
This dataset includes binary labels for over 2000 compounds on their
permeability properties.
Scaffold splitting is recommended for this dataset.
The raw data csv file contains columns below:
- "name" - Name of the compound
- "smiles" - SMILES representation of the molecular structure
- "p_np" - Binary labels for penetration/non-penetration
Parameters
----------
featurizer: Featurizer or str
the featurizer to use for processing the data. Alternatively you can pass
one of the names from dc.molnet.featurizers as a shortcut.
splitter: Splitter or str
the splitter to use for splitting the data into training, validation, and
test sets. Alternatively you can pass one of the names from
dc.molnet.splitters as a shortcut. If this is None, all the data
will be included in a single dataset.
transformers: list of TransformerGenerators or strings
the Transformers to apply to the data. Each one is specified by a
TransformerGenerator or, as a shortcut, one of the names from
dc.molnet.transformers.
reload: bool
if True, the first call for a particular featurizer and splitter will cache
the datasets to disk, and subsequent calls will reload the cached datasets.
data_dir: str
a directory to save the raw data in
save_dir: str
a directory to save the dataset in
References
----------
.. [1] Martins, <NAME>, et al. "A Bayesian approach to in silico
blood-brain barrier penetration modeling." Journal of chemical
information and modeling 52.6 (2012): 1686-1697.
"""
loader = _BBBPLoader(featurizer, splitter, transformers, BBBP_TASKS,
data_dir, save_dir, **kwargs)
return loader.load_dataset('bbbp', reload)
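# A minimal usage sketch (not part of the original module; downloads the BBBP
# csv on first call): load the dataset with ECFP features and the recommended
# scaffold split, then unpack the three Dataset objects.
def _example_load_bbbp():  # pragma: no cover
    tasks, datasets, transformers = load_bbbp(featurizer='ECFP',
                                              splitter='scaffold')
    train, valid, test = datasets
    return tasks, train, valid, test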
<file_sep>from abc import abstractmethod
from typing import Union, List
import torch
from dqc.utils.datastruct import ValGrad, SpinParam
from dqc.api.getxc import get_xc
from dqc.xc.base_xc import BaseXC
from dqc.utils.safeops import safenorm, safepow
class BaseNNXC(BaseXC, torch.nn.Module):
"""
Base class for the Neural Network XC (NNXC) and HybridXC classes.
Density-functional theory (DFT) is a theory used to calculate the
electronic structure of atoms, molecules, and solids. Its objective is
to use the fundamental laws of quantum mechanics to quantitatively
comprehend the properties of materials.
There are serious limitations to the traditional methods used to approximate
solutions to the Schrödinger equation of N interacting electrons moving in
an external potential. Whereas in DFT, instead of the many-body wave
function, the density (n(r)) is a function of three spatial coordinates.
The many-body electronic ground state can be described using single-particle equations and an effective potential thanks to the Kohn-Sham theory. The
exchange-correlation potential, which accounts for many-body effects, the
Hartree potential, which describes the electrostatic electron-electron
interaction, and the ionic potential resulting from the atomic cores make
up the effective potential.
The difference between the total exact energy and the total of the rest
of the energy terms (such as kinetic energy), is known as the
exchange-correlation energy. The exchange-correlation functional is obtained by calculating the functional derivate of the XC energy w.r.t the
electron density function. In this model, we are trying to build a neural
network that can be trained to calculate an exchange-correlation functional
based on a specific set of molecules/atoms/ions.
This base class can be used to build layers such as the NNLDA layer, where
the exchange correlation functional is trained based on the pre-defined LDA class of functionals. The methods in this class take the electron density as
the input and transform it accordingly. For example; The NNLDA layer
requires only the density to build an NNXC whereas a GGA based model would
require the density gradient as well. This method also takes polarization
into account.
References
----------
Encyclopedia of Condensed Matter Physics, 2005.
<NAME>, <NAME>, in Advances In Atomic, Molecular, and
Optical Physics, 2015
Kasim, <NAME>., and <NAME>. "Learning the exchange-correlation
functional from nature with fully differentiable density functional
theory." Physical Review Letters 127.12 (2021): 126403.
"""
@abstractmethod
def get_edensityxc(
self, densinfo: Union[ValGrad, SpinParam[ValGrad]]) -> torch.Tensor:
"""
This method is used to transform the electron density. The output
of this method varies depending on the layer.
Parameters
----------
densinfo: Union[ValGrad, SpinParam[ValGrad]]
Density information calculated using DQC utilities.
"""
pass
def getparamnames(self, methodname: str, prefix: str = "") -> List[str]:
"""
This method is implemented only to avoid errors while passing the
get_edensityxc method values to DQC and Xitorch.
"""
# torch.nn.module prefix has no ending dot, while xt prefix has
nnprefix = prefix if prefix == "" else prefix[:-1]
return [
name for (name, param) in self.named_parameters(prefix=nnprefix)
]
class NNLDA(BaseNNXC):
"""
Neural network xc functional of LDA
Local-density approximations (LDA) are a class of approximations to the
exchange–correlation (XC) energy. The LDA assumes variations of the
density to be gradual, i.e, it is based on the homogeneous electron
gas model. Which is why it is regarded as the simplest approach to the
exchange correlation functional. This class of functionals depend only
upon the value of the electronic density at each point in space.
Hence, in this model, we only input the density and not other components
such as the gradients of the density (which is used in other functionals
such as the GGA class).
Examples
--------
>>> from deepchem.models.dft.nnxc import NNLDA
>>> import torch
>>> import torch.nn as nn
>>> n_input, n_hidden = 2, 1
>>> nnmodel = (nn.Linear(n_input, n_hidden))
>>> output = NNLDA(nnmodel)
References
----------
Density-Functional Theory of the Electronic Structure of Molecules,Robert
<NAME> and <NAME>, Annual Review of Physical Chemistry 1995 46:1,
701-728
<NAME> and <NAME>, Rev. Mod. Phys. 61, 689 (1989)
"""
def __init__(self, nnmodel: torch.nn.Module):
"""
Parameters
----------
nnmodel: torch.nn.Module
Neural network for xc functional
"""
super().__init__()
self.nnmodel = nnmodel
def get_edensityxc(
self, densinfo: Union[ValGrad, SpinParam[ValGrad]]) -> torch.Tensor:
"""
This method transform the local electron density (n) and the spin
density (xi) for polarized and unpolarized cases, to be the input of
the neural network.
Parameters
----------
densinfo: Union[ValGrad, SpinParam[ValGrad]]
Density information calculated using DQC utilities.
Returns
-------
res
Neural network output scaled by the total density. The network input is
built from the total density (n) and the spin density (xi), so it has two
features per grid point. The returned tensor has shape `(*BD, nr)`.
"""
if isinstance(densinfo, ValGrad): # unpolarized case
n = densinfo.value.unsqueeze(-1) # (*BD, nr, 1)
xi = torch.zeros_like(n)
else: # polarized case
nu = densinfo.u.value.unsqueeze(-1)
nd = densinfo.d.value.unsqueeze(-1)
n = nu + nd # (*BD, nr, 1)
xi = (nu - nd) / (n + 1e-18) # avoiding nan
ninp = n
x = torch.cat((ninp, xi), dim=-1) # (*BD, nr, 2)
nnout = self.nnmodel(x) # (*BD, nr, 1)
res = nnout * n # (*BD, nr, 1)
res = res.squeeze(-1)
return res
class NNPBE(BaseNNXC):
# neural network xc functional of GGA (receives the density and grad as inputs)
"""
Neural network xc functional of GGA class of functionals
The Perdew-Burke-Ernzerhof (PBE) functional is a popular non-empirical
functional which falls under the Generalized Gradient Approximation (GGA)
class of functionals. GGA differs from LDA by incorporating nonlocal
corrections involving gradients (or derivatives) of the electron density.
In other words, the XC potential is a complicated function in 3D space
since it depends on the electron density and its gradient.
Hence, in this model we input the electron density as well as the
calculated gradients. We calculate these values using the DQC and Xitorch
libraries.
Examples
--------
>>> from deepchem.models.dft.nnxc import NNPBE
>>> import torch
>>> import torch.nn as nn
>>> n_input, n_hidden = 3, 3
>>> nnmodel = (nn.Linear(n_input, n_hidden))
>>> output = NNPBE(nnmodel)
References
----------
Perdew, <NAME>., <NAME>, and <NAME>. "Generalized gradient approximation made simple." Physical review letters 77.18 (1996): 3865.
<NAME>,
5.09 - Electronics with Molecules,
Editor(s): <NAME>, <NAME>, <NAME>,
Comprehensive Semiconductor Science and Technology,
Elsevier,
2011,
Pages 383-479,
ISBN 9780444531537,
https://doi.org/10.1016/B978-0-44-453153-7.00033-X.
"""
def __init__(self, nnmodel: torch.nn.Module):
"""
Parameters
----------
nnmodel: torch.nn.Module
Neural network for xc functional. Shape: (3, ...). This is because
the nnmodel receives an input with the shape (....,3). This
dimension is for;
(0) total density (n): (n_up + n_dn), and
(1) spin density (xi): (n_up - n_dn) / (n_up + n_dn)
(2) normalized gradients (s): |del(n)| / [2(3*pi^2)^(1/3) * n^(4/3)]
"""
super().__init__()
self.nnmodel = nnmodel
def get_edensityxc(
self, densinfo: Union[ValGrad, SpinParam[ValGrad]]) -> torch.Tensor:
"""
This method transform the local electron density (n), its gradient
and the spin density (xi) for polarized and unpolarized cases, to be
the input of the neural network.
Parameters
----------
densinfo: Union[ValGrad, SpinParam[ValGrad]]
Density information calculated using DQC utilities.
Returns
-------
res
Neural network output scaled by the total density. The network input is
built from the total density (n), the spin density (xi) and the normalized
gradient (s), so it has three features per grid point. The returned tensor
has shape `(*BD, nr)` and represents the XC energy density per unit volume.
"""
# densinfo.value: (*BD, nr)
# densinfo.grad : (*BD, nr, 3)
# collect the total density (n), spin density (xi), and normalized gradients (s)
a = 6.187335452560271 # 2 * (3 * np.pi ** 2) ** (1.0 / 3)
if isinstance(densinfo, ValGrad): # unpolarized case
assert densinfo.grad is not None
n = densinfo.value.unsqueeze(-1) # (*BD, nr, 1)
xi = torch.zeros_like(n)
n_offset = n + 1e-18 # avoiding nan
s = safenorm(densinfo.grad, dim=0).unsqueeze(-1)
else: # polarized case
assert densinfo.u.grad is not None
assert densinfo.d.grad is not None
nu = densinfo.u.value.unsqueeze(-1)
nd = densinfo.d.value.unsqueeze(-1)
n = nu + nd # (*BD, nr, 1)
n_offset = n + 1e-18 # avoiding nan
xi = (nu - nd) / n_offset
s = safenorm(densinfo.u.grad + densinfo.d.grad, dim=0).unsqueeze(-1)
# normalize the gradient
s = s / a * safepow(n, -4.0 / 3)
# decide how to transform the density to be the input of nn
ninp = n
sinp = s
# get the neural network output
x = torch.cat((ninp, xi, sinp), dim=-1) # (*BD, nr, 3)
nnout = self.nnmodel(x) # (*BD, nr, 1)
res = nnout * n
res = res.squeeze(-1)
return res
class HybridXC(BaseNNXC):
"""
The HybridXC module computes XC energy by summing XC energy computed
from libxc(any conventional DFT functional) and the trainable neural
network with tunable weights.
This layer constructs a hybrid functional based on the user's choice
of what model is to be used to train the functional. (Currently, we only
support an LDA based model). This hybrid functional is a combination of
the xc that is trained by a neural network, and a conventional DFT
functional.
Examples
--------
>>> from deepchem.models.dft.nnxc import HybridXC
>>> import torch
>>> import torch.nn as nn
>>> n_input, n_hidden = 2, 1
>>> nnmodel = (nn.Linear(n_input, n_hidden))
>>> output = HybridXC("lda_x", nnmodel, aweight0=0.0)
"""
def __init__(self,
xcstr: str,
nnmodel: torch.nn.Module,
aweight0: float = 0.0,
bweight0: float = 1.0):
"""
Parameters
----------
xcstr: str
The choice of xc to use. Some of the commonly used ones are:
lda_x, lda_c_pw, lda_c_ow, lda_c_pz, lda_xc_lp_a, lda_xc_lp_b.
The rest of the possible values can be found under the
"LDA Functionals" section in the reference given below.
nnmodel: nn.Module
trainable neural network for predicting the xc energy.
aweight0: float
weight of the neural network
bweight0: float
weight of the default xc
References
----------
https://tddft.org/programs/libxc/functionals/
"""
super().__init__()
self.xc = get_xc(xcstr)
family = self.xc.family
if family == 1:
self.nnxc = NNLDA(nnmodel)
else:
self.nnxc = NNPBE(nnmodel)
self.aweight = torch.nn.Parameter(
torch.tensor(aweight0, requires_grad=True))
self.bweight = torch.nn.Parameter(
torch.tensor(bweight0, requires_grad=True))
self.weight_activation = torch.nn.Identity()
@property
def family(self) -> int:
"""
This property returns the family of the chosen conventional functional,
which determines the type of model used to train the neural network
(an LDA-based model for family 1, a PBE-based model otherwise).
Returns
-------
xc.family
"""
return self.xc.family
def get_edensityxc(
self, densinfo: Union[ValGrad, SpinParam[ValGrad]]) -> torch.Tensor:
"""Get electron density from xc
This function reflects eqn. 4 in the `paper <https://arxiv.org/abs/2102.04229>_`.
Parameters
----------
densinfo: Union[ValGrad, SpinParam[ValGrad]]
Density information calculated using DQC utilities.
Returns
-------
The weighted sum of the neural network and conventional XC energy densities.
"""
nnxc_ene = self.nnxc.get_edensityxc(densinfo)
xc_ene = self.xc.get_edensityxc(densinfo)
aweight = self.weight_activation(self.aweight)
bweight = self.weight_activation(self.bweight)
return nnxc_ene * aweight + xc_ene * bweight
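# Rough numerical sketch (assumed values, for illustration only): if at some grid
# point the conventional functional gives xc_ene = -0.5 and the network gives
# nnxc_ene = -0.1, then with aweight = 0.2 and bweight = 1.0 the hybrid energy
# density is 0.2 * (-0.1) + 1.0 * (-0.5) = -0.52.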
<file_sep>try:
from deepchem.data.data_loader import DFTYamlLoader
has_dqc = True
except ModuleNotFoundError:
has_dqc = False
import pytest
@pytest.mark.dqc
def test_dftloader():
inputs = 'deepchem/data/tests/dftdata.yaml'
k = DFTYamlLoader()
data = k.create_dataset(inputs)
assert data.X.dtype == ('O')
assert len(data) == 2
assert ((data.X)[0]).get_weight() == 1340
assert ((data.X)[0]).get_true_val() == 0.09194410469
<file_sep>"""
Testing construction of Vina models.
"""
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
__author__ = "<NAME>"
__copyright__ = "Copyright 2016, Stanford University"
__license__ = "MIT"
import os
import unittest
import tensorflow as tf
import deepchem as dc
import numpy as np
from tensorflow.python.framework import test_util
from deepchem.models.tensorflow_models.vina_model import VinaModel
from deepchem.models.tensorflow_models.vina_model import get_cells
from deepchem.models.tensorflow_models.vina_model import put_atoms_in_cells
from deepchem.models.tensorflow_models.vina_model import compute_neighbor_cells
from deepchem.models.tensorflow_models.vina_model import compute_closest_neighbors
from deepchem.models.tensorflow_models.vina_model import get_cells_for_atoms
from deepchem.models.tensorflow_models.vina_model import compute_neighbor_list
import deepchem.utils.rdkit_util as rdkit_util
from deepchem.utils import pad_array
class TestVinaModel(test_util.TensorFlowTestCase):
"""
Test Container usage.
"""
def setUp(self):
super(TestVinaModel, self).setUp()
self.root = '/tmp'
def test_vina_model(self):
"""Simple test that a vina model can be initialized."""
vina_model = VinaModel()
def test_get_cells(self):
"""Test that tensorflow can compute grid cells."""
N = 10
start = 0
stop = 4
nbr_cutoff = 1
with self.session() as sess:
ndim = 3
cells = get_cells(start, stop, nbr_cutoff, ndim=ndim).eval()
assert len(cells.shape) == 2
assert cells.shape[0] == 4**ndim
ndim = 2
cells = get_cells(start, stop, nbr_cutoff, ndim=ndim).eval()
assert len(cells.shape) == 2
assert cells.shape[0] == 4**ndim
# TODO(rbharath): Check that this operation is differentiable.
def test_compute_neighbor_list(self):
"""Test that neighbor list can be computed with tensorflow"""
N = 10
start = 0
stop = 12
nbr_cutoff = 3
ndim = 3
M = 6
k = 5
# The number of cells which we should theoretically have
n_cells = int(((stop - start) / nbr_cutoff)**ndim)
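# Worked example for the values above: ((12 - 0) / 3) ** 3 = 4 ** 3 = 64 cells.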
with self.session() as sess:
coords = start + np.random.rand(N, ndim) * (stop - start)
coords = tf.stack(coords)
nbr_list = compute_neighbor_list(
coords, nbr_cutoff, N, M, n_cells, ndim=ndim, k=k)
nbr_list = nbr_list.eval()
assert nbr_list.shape == (N, M)
def test_put_atoms_in_cells(self):
"""Test that atoms can be partitioned into spatial cells."""
N = 10
start = 0
stop = 4
nbr_cutoff = 1
ndim = 3
k = 5
# The number of cells which we should theoretically have
n_cells = ((stop - start) / nbr_cutoff)**ndim
with self.session() as sess:
cells = get_cells(start, stop, nbr_cutoff, ndim=ndim)
coords = np.random.rand(N, ndim)
_, atoms_in_cells = put_atoms_in_cells(coords, cells, N, n_cells, ndim, k)
atoms_in_cells = atoms_in_cells.eval()
assert len(atoms_in_cells) == n_cells
# Each atom neighbors tensor should be (k, ndim) shaped.
for atoms in atoms_in_cells:
assert atoms.shape == (k, ndim)
def test_compute_neighbor_cells(self):
"""Test that indices of neighboring cells can be computed."""
N = 10
start = 0
stop = 4
nbr_cutoff = 1
ndim = 3
# The number of cells which we should theoretically have
n_cells = ((stop - start) / nbr_cutoff)**ndim
# TODO(rbharath): The test below only checks that shapes work out.
# Need to do a correctness implementation vs. a simple CPU impl.
with self.session() as sess:
cells = get_cells(start, stop, nbr_cutoff, ndim=ndim)
nbr_cells = compute_neighbor_cells(cells, ndim, n_cells)
nbr_cells = nbr_cells.eval()
assert len(nbr_cells) == n_cells
nbr_cells = [nbr_cell for nbr_cell in nbr_cells]
for nbr_cell in nbr_cells:
assert nbr_cell.shape == (26,)
def test_compute_closest_neighbors(self):
"""Test that closest neighbors can be computed properly"""
N = 10
start = 0
stop = 4
nbr_cutoff = 1
ndim = 3
k = 5
# The number of cells which we should theoretically have
n_cells = ((stop - start) / nbr_cutoff)**ndim
# TODO(rbharath): The test below only checks that shapes work out.
# Need to do a correctness implementation vs. a simple CPU impl.
with self.session() as sess:
cells = get_cells(start, stop, nbr_cutoff, ndim=ndim)
nbr_cells = compute_neighbor_cells(cells, ndim, n_cells)
coords = np.random.rand(N, ndim)
_, atoms_in_cells = put_atoms_in_cells(coords, cells, N, n_cells, ndim, k)
nbrs = compute_closest_neighbors(coords, cells, atoms_in_cells, nbr_cells,
N, n_cells)
def test_get_cells_for_atoms(self):
"""Test that atoms are placed in the correct cells."""
N = 10
start = 0
stop = 4
nbr_cutoff = 1
ndim = 3
k = 5
# The number of cells which we should theoretically have
n_cells = ((stop - start) / nbr_cutoff)**ndim
# TODO(rbharath): The test below only checks that shapes work out.
# Need to do a correctness implementation vs. a simple CPU impl.
with self.session() as sess:
cells = get_cells(start, stop, nbr_cutoff, ndim=ndim)
coords = np.random.rand(N, ndim)
cells_for_atoms = get_cells_for_atoms(coords, cells, N, n_cells, ndim)
cells_for_atoms = cells_for_atoms.eval()
assert cells_for_atoms.shape == (N, 1)
def test_vina_construct_graph(self):
"""Test that vina model graph can be constructed."""
data_dir = os.path.dirname(os.path.realpath(__file__))
protein_file = os.path.join(data_dir, "1jld_protein.pdb")
ligand_file = os.path.join(data_dir, "1jld_ligand.pdb")
vina_model = VinaModel()
# TODO(rbharath): Commenting this out due to weird segfaults
#def test_vina_generate_conformers(self):
# """Test that Vina Model can generate conformers"""
# data_dir = os.path.dirname(os.path.realpath(__file__))
# protein_file = os.path.join(data_dir, "1jld_protein.pdb")
# ligand_file = os.path.join(data_dir, "1jld_ligand.pdb")
# max_protein_atoms = 3500
# max_ligand_atoms = 100
# print("Loading protein file")
# protein_xyz, protein_mol = rdkit_util.load_molecule(protein_file)
# protein_Z = pad_array(
# np.array([atom.GetAtomicNum() for atom in protein_mol.GetAtoms()]),
# max_protein_atoms)
# print("Loading ligand file")
# ligand_xyz, ligand_mol = rdkit_util.load_molecule(ligand_file)
# ligand_Z = pad_array(
# np.array([atom.GetAtomicNum() for atom in ligand_mol.GetAtoms()]),
# max_ligand_atoms)
<file_sep>"""
Script that trains DAG models on delaney dataset.
"""
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
import numpy as np
np.random.seed(123)
import tensorflow as tf
tf.random.set_seed(123)
import deepchem as dc
# Load Delaney dataset
delaney_tasks, delaney_datasets, transformers = dc.molnet.load_delaney(
featurizer='GraphConv', split='index')
train_dataset, valid_dataset, test_dataset = delaney_datasets
# Fit models
metric = dc.metrics.Metric(dc.metrics.pearson_r2_score, np.mean)
n_atom_feat = 75
batch_size = 64
max_atoms_train = max([mol.get_num_atoms() for mol in train_dataset.X])
max_atoms_valid = max([mol.get_num_atoms() for mol in valid_dataset.X])
max_atoms_test = max([mol.get_num_atoms() for mol in test_dataset.X])
max_atoms = max([max_atoms_train, max_atoms_valid, max_atoms_test])
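# The DAGTransformer and DAGModel pad each molecule up to max_atoms, so the value
# is taken over all three splits computed above to cover the largest molecule.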
reshard_size = 512
transformer = dc.trans.DAGTransformer(max_atoms=max_atoms)
train_dataset.reshard(reshard_size)
train_dataset = transformer.transform(train_dataset)
valid_dataset.reshard(reshard_size)
valid_dataset = transformer.transform(valid_dataset)
model = dc.models.DAGModel(
len(delaney_tasks),
max_atoms=max_atoms,
n_atom_feat=n_atom_feat,
batch_size=batch_size,
learning_rate=1e-3,
use_queue=False,
mode='regression')
# Fit trained model
model.fit(train_dataset, nb_epoch=50, checkpoint_interval=100)
print("Evaluating model")
train_scores = model.evaluate(train_dataset, [metric], transformers)
valid_scores = model.evaluate(valid_dataset, [metric], transformers)
print("Train scores")
print(train_scores)
print("Validation scores")
print(valid_scores)
<file_sep>"""
Script that computes correlations of FACTORS tasks.
"""
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
import os
import numpy as np
import tempfile
import shutil
import deepchem as dc
import pandas as pd
import matplotlib
# Force matplotlib to not use any Xwindows backend.
matplotlib.use('Agg')
import matplotlib.mlab as mlab
import matplotlib.pyplot as plt
from FACTORS_datasets import load_factors
###Load data###
shard_size = 2000
print("About to load FACTORS data.")
FACTORS_tasks, datasets, transformers = load_factors(shard_size=shard_size)
train_dataset, valid_dataset, test_dataset = datasets
y_train = train_dataset.y
n_tasks = y_train.shape[1]
all_results = []
for task in range(n_tasks):
y_task = y_train[:, task]
for other_task in range(n_tasks):
if task == other_task:
continue
y_other = y_train[:, other_task]
r2 = dc.metrics.pearson_r2_score(y_task, y_other)
print("r2 for %s-%s is %f" % (task, other_task, r2))
all_results.append(r2)
# the histogram of the data
n, bins, patches = plt.hist(np.array(all_results), 50, density=True, stacked=True,
facecolor='green', alpha=0.75)
plt.xlabel('Cross-task Correlations')
plt.ylabel('Probability Density')
plt.title('Histogram of Factors Intertask Correlations')
plt.grid(True)
plt.savefig("Factors_correlations.png")
<file_sep>import os
import deepchem as dc
import numpy as np
import pytest
def load_unlabelled_data():
current_dir = os.path.dirname(os.path.abspath(__file__))
featurizer = dc.feat.CircularFingerprint(size=1024)
tasks = []
input_file = os.path.join(current_dir, "../../data/tests/no_labels.csv")
loader = dc.data.CSVLoader(tasks=tasks,
feature_field="smiles",
featurizer=featurizer)
return loader.create_dataset(input_file)
def load_solubility_data():
"""Loads solubility dataset"""
current_dir = os.path.dirname(os.path.abspath(__file__))
featurizer = dc.feat.CircularFingerprint(size=1024)
tasks = ["log-solubility"]
input_file = os.path.join(current_dir,
"../../models/tests/assets/example.csv")
loader = dc.data.CSVLoader(tasks=tasks,
feature_field="smiles",
featurizer=featurizer)
return loader.create_dataset(input_file)
def test_transform_unlabelled():
ul_dataset = load_unlabelled_data()
# transforming y should raise an exception
with pytest.raises(ValueError):
dc.trans.transformers.Transformer(
transform_y=True).transform(ul_dataset)
# transforming w should raise an exception
with pytest.raises(ValueError):
dc.trans.transformers.Transformer(
transform_w=True).transform(ul_dataset)
# transforming X should be okay
dc.trans.NormalizationTransformer(transform_X=True,
dataset=ul_dataset).transform(ul_dataset)
def test_y_normalization_transformer():
"""Tests normalization transformer."""
solubility_dataset = load_solubility_data()
normalization_transformer = dc.trans.NormalizationTransformer(
transform_y=True, dataset=solubility_dataset)
X, y, w, ids = (solubility_dataset.X, solubility_dataset.y,
solubility_dataset.w, solubility_dataset.ids)
solubility_dataset = normalization_transformer.transform(solubility_dataset)
X_t, y_t, w_t, ids_t = (solubility_dataset.X, solubility_dataset.y,
solubility_dataset.w, solubility_dataset.ids)
# Check ids are unchanged.
for id_elt, id_t_elt in zip(ids, ids_t):
assert id_elt == id_t_elt
# Check X is unchanged since this is a y transformer
np.testing.assert_allclose(X, X_t)
# Check w is unchanged since this is a y transformer
np.testing.assert_allclose(w, w_t)
# Check that y_t has zero mean, unit std.
assert np.isclose(y_t.mean(), 0.)
assert np.isclose(y_t.std(), 1.)
# Check that untransform does the right thing.
np.testing.assert_allclose(normalization_transformer.untransform(y_t), y)
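# Minimal sketch of the transformation these tests verify (standard z-scoring);
# the array values here are illustrative only:
#
#   import numpy as np
#   y = np.array([1.0, 2.0, 3.0])
#   y_t = (y - y.mean()) / y.std()      # zero mean, unit std
#   y_back = y_t * y.std() + y.mean()   # untransform recovers the original y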
def test_X_normalization_transformer():
"""Tests normalization transformer."""
solubility_dataset = load_solubility_data()
normalization_transformer = dc.trans.NormalizationTransformer(
transform_X=True, dataset=solubility_dataset)
X, y, w, ids = (solubility_dataset.X, solubility_dataset.y,
solubility_dataset.w, solubility_dataset.ids)
solubility_dataset = normalization_transformer.transform(solubility_dataset)
X_t, y_t, w_t, ids_t = (solubility_dataset.X, solubility_dataset.y,
solubility_dataset.w, solubility_dataset.ids)
# Check ids are unchanged.
for id_elt, id_t_elt in zip(ids, ids_t):
assert id_elt == id_t_elt
# Check y is unchanged since this is a X transformer
np.testing.assert_allclose(y, y_t)
# Check w is unchanged since this is an X transformer
np.testing.assert_allclose(w, w_t)
# Check that X_t has zero mean, unit std.
# np.set_printoptions(threshold='nan')
mean = X_t.mean(axis=0)
assert np.amax(np.abs(mean - np.zeros_like(mean))) < 1e-7
orig_std_array = X.std(axis=0)
std_array = X_t.std(axis=0)
# Entries with zero std are not normalized
for orig_std, std in zip(orig_std_array, std_array):
if not np.isclose(orig_std, 0):
assert np.isclose(std, 1)
# Check that untransform does the right thing.
np.testing.assert_allclose(normalization_transformer.untransform(X_t),
X,
atol=1e-7)
<file_sep>"""Custom Keras Layers.
"""
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
__author__ = "<NAME> and <NAME>"
__copyright__ = "Copyright 2016, Stanford University"
__license__ = "MIT"
import warnings
import numpy as np
import tensorflow as tf
from deepchem.nn import activations
from deepchem.nn import initializations
from deepchem.nn import model_ops
def affine(x, W, b):
return tf.matmul(x, W) + b
def tf_affine(x, vm, scope):
W = vm.var(scope, 'W')
b = vm.var(scope, 'b')
return tf.matmul(x, W) + b
def cos(x, y):
denom = (
model_ops.sqrt(model_ops.sum(tf.square(x)) * model_ops.sum(tf.square(y)))
+ model_ops.epsilon())
return model_ops.dot(x, tf.transpose(y)) / denom
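# cos() above computes the cosine similarity cos(x, y) = x . y^T / (||x|| * ||y|| + eps),
# with model_ops.epsilon() added to the denominator to avoid division by zero.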
<file_sep>"""
Test for bond feature vector generator and reactions mapping
"""
from deepchem.feat.molecule_featurizers.dmpnn_featurizer import bond_features, map_reac_to_prod
from rdkit import Chem
import pytest
import numpy as np
@pytest.fixture
def example_smiles_n_b_features():
"""
Sample data for testing
Returns
-------
dictionary
format {'smiles':required feature vector}
"""
feature_vector_C1OC1 = [[
0, True, False, False, False, False, True, 1, 0, 0, 0, 0, 0, 0
], [0, True, False, False, False, False, True, 1, 0, 0, 0, 0, 0,
0], [0, True, False, False, False, False, True, 1, 0, 0, 0, 0, 0, 0]]
feature_vector_NN = [[
0, False, False, True, False, False, False, 1, 0, 0, 0, 0, 0, 0
]]
return {'C1OC1': feature_vector_C1OC1, 'N#N': feature_vector_NN}
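# The 14 slots in each bond feature vector appear to be: [0] null-bond flag,
# [1:5] bond type one-hot (single, double, triple, aromatic), [5] conjugated,
# [6] in-ring, [7:14] stereo configuration one-hot. For example, the epoxide
# C1OC1 has three single ring bonds (slots 1 and 6 set), while N#N has a single
# triple bond (slot 3 set).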
def test_bond_features_none():
"""
Test for bond_features() with 'None' input for bond
"""
f_bond = bond_features(None)
req_f = list(np.zeros((14,), dtype=int))
req_f[0] = 1
assert len(f_bond) == len(req_f)
assert f_bond == req_f
def test_bond_features(example_smiles_n_b_features):
"""
Test for bond_features() function
"""
for smiles in example_smiles_n_b_features.keys():
b_f = []
m = Chem.MolFromSmiles(smiles)
for b in m.GetBonds():
b_f.append(bond_features(b))
print(b_f)
k = np.array(b_f)
req_f = np.array(example_smiles_n_b_features[smiles])
assert k.shape == req_f.shape
assert b_f == example_smiles_n_b_features[smiles]
def test_reaction_mapping():
"""
Test for map_reac_to_prod() function
"""
def mappings(r_smile, p_smile):
"""
Function to return mappings based on given reactant and product smiles
"""
r_mol = Chem.MolFromSmiles(r_smile)
p_mol = Chem.MolFromSmiles(p_smile)
return map_reac_to_prod(r_mol, p_mol)
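# Based on the assertions below, map_reac_to_prod returns a 3-tuple:
# (product-to-reactant atom index mapping, indices of product atoms with no
# reactant counterpart, indices of reactant atoms with no product counterpart).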
# both reactant and product are null
r_smile = ''
p_smile = ''
assert mappings(r_smile, p_smile) == ({}, [], [])
# reactant is null
r_smile = ''
p_smile = '[H:6][CH2:1][CH:2]=[CH:3][CH:4]=[CH2:5]'
assert mappings(r_smile, p_smile) == ({}, [0, 1, 2, 3, 4], [])
# product is null
r_smile = '[CH2:1]=[CH:2][CH:3]=[CH:4][CH2:5][H:6]'
p_smile = ''
assert mappings(r_smile, p_smile) == ({}, [], [0, 1, 2, 3, 4])
# valid reaction: [CH2:1]=[CH:2][CH:3]=[CH:4][CH2:5][H:6]>> [H:6][CH2:1][CH:2]=[CH:3][CH:4]=[CH2:5]
r_smile = '[CH2:1]=[CH:2][CH:3]=[CH:4][CH2:5][H:6]'
p_smile = '[H:6][CH2:1][CH:2]=[CH:3][CH:4]=[CH2:5]'
assert mappings(r_smile, p_smile) == ({
0: 0,
1: 1,
2: 2,
3: 3,
4: 4
}, [], [])
<file_sep>import deepchem as dc
import numpy as np
import os
import unittest
class TestAtomicConformation(unittest.TestCase):
def test_featurize(self):
current_dir = os.path.dirname(os.path.realpath(__file__))
sdf_file = os.path.join(current_dir, 'data', 'water.sdf')
pdb_file = os.path.join(current_dir, 'data', '3zso_ligand_hyd.pdb')
smiles = 'CCC'
featurizer = dc.feat.AtomicConformationFeaturizer()
features = featurizer.featurize([sdf_file, pdb_file, smiles])
assert len(features) == 3
# Check the SDF file.
assert features[0].num_atoms == 60
assert features[0].atomic_number[0] == 8
assert features[0].atomic_number[1] == 1
assert np.all(features[0].formal_charge == 0)
for i in range(60):
assert (features[0].partial_charge[i] < 0) == (i % 3 == 0)
# Check the PDB file.
assert features[1].num_atoms == 47
assert features[1].atomic_number[0] == 6
assert features[1].atomic_number[35] == 7
assert features[1].atomic_number[46] == 1
for i in range(47):
if i == 36:
assert features[1].formal_charge[i] == 1
else:
assert features[1].formal_charge[i] == 0
if features[1].atomic_number[i] in (
7, 8): # N and O should be negative
assert features[1].partial_charge[i] < 0
elif features[1].atomic_number[i] == 1: # H should be positive
assert features[1].partial_charge[i] > 0
# Check the SMILES string.
assert features[2].num_atoms == 11
assert sum(features[2].atomic_number == 6) == 3
assert sum(features[2].atomic_number == 1) == 8
assert np.all(features[2].formal_charge == 0)
for i in range(11):
assert (features[2].partial_charge[i] <
0) == (features[2].atomic_number[i] == 6)
<file_sep>import numpy as np
import torch
try:
import torch.utils.tensorboard
_has_tensorboard = True
except ImportError:
_has_tensorboard = False
import time
import logging
import os
import datetime
from deepchem.data import Dataset, NumpyDataset
from deepchem.metrics import Metric
from deepchem.models.losses import Loss
from deepchem.models.models import Model
from deepchem.models.optimizers import Adam, Optimizer, LearningRateSchedule
from deepchem.trans import Transformer, undo_transforms
from deepchem.utils.evaluate import GeneratorEvaluator
from collections.abc import Sequence as SequenceCollection
from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union
from deepchem.utils.typing import LossFn, OneOrMany
from deepchem.models.wandblogger import WandbLogger
try:
import wandb
wandb.ensure_configured()
if wandb.api.api_key is None:
_has_wandb = False
wandb.termwarn(
"W&B installed but not logged in. Run `wandb login` or set the WANDB_API_KEY env variable."
)
else:
_has_wandb = True
except (ImportError, AttributeError):
_has_wandb = False
logger = logging.getLogger(__name__)
class TorchModel(Model):
"""This is a DeepChem model implemented by a PyTorch model.
Here is a simple example of code that uses TorchModel to train
a PyTorch model on a DeepChem dataset.
>>> import torch
>>> import deepchem as dc
>>> import numpy as np
>>> X, y = np.random.random((10, 100)), np.random.random((10, 1))
>>> dataset = dc.data.NumpyDataset(X=X, y=y)
>>> pytorch_model = torch.nn.Sequential(
... torch.nn.Linear(100, 1000),
... torch.nn.Tanh(),
... torch.nn.Linear(1000, 1))
>>> model = dc.models.TorchModel(pytorch_model, loss=dc.models.losses.L2Loss())
>>> loss = model.fit(dataset, nb_epoch=5)
The loss function for a model can be defined in two different
ways. For models that have only a single output and use a
standard loss function, you can simply provide a
dc.models.losses.Loss object. This defines the loss for each
sample or sample/task pair. The result is automatically
multiplied by the weights and averaged over the batch.
For more complicated cases, you can instead provide a function
that directly computes the total loss. It must be of the form
f(outputs, labels, weights), taking the list of outputs from
the model, the expected values, and any weight matrices. It
should return a scalar equal to the value of the loss function
for the batch. No additional processing is done to the
result; it is up to you to do any weighting, averaging, adding
of penalty terms, etc.
You can optionally provide an output_types argument, which
describes how to interpret the model's outputs. This should
be a list of strings, one for each output. You can use an
arbitrary output_type for an output, but some output_types are
special and will undergo extra processing:
- 'prediction': This is a normal output, and will be returned by predict().
If output types are not specified, all outputs are assumed
to be of this type.
- 'loss': This output will be used in place of the normal
outputs for computing the loss function. For example,
models that output probability distributions usually do it
by computing unbounded numbers (the logits), then passing
them through a softmax function to turn them into
probabilities. When computing the cross entropy, it is more
numerically stable to use the logits directly rather than
the probabilities. You can do this by having the model
produce both probabilities and logits as outputs, then
specifying output_types=['prediction', 'loss']. When
predict() is called, only the first output (the
probabilities) will be returned. But during training, it is
the second output (the logits) that will be passed to the
loss function.
- 'variance': This output is used for estimating the
uncertainty in another output. To create a model that can
estimate uncertainty, there must be the same number of
'prediction' and 'variance' outputs. Each variance output
must have the same shape as the corresponding prediction
output, and each element is an estimate of the variance in
the corresponding prediction. Also be aware that if a model
supports uncertainty, it MUST use dropout on every layer,
and dropout must be enabled during uncertainty prediction.
Otherwise, the uncertainties it computes will be inaccurate.
- other: Arbitrary output_types can be used to extract outputs
produced by the model, but will have no additional
processing performed.
"""
def __init__(self,
model: torch.nn.Module,
loss: Union[Loss, LossFn],
output_types: Optional[List[str]] = None,
batch_size: int = 100,
model_dir: Optional[str] = None,
learning_rate: Union[float, LearningRateSchedule] = 0.001,
optimizer: Optional[Optimizer] = None,
tensorboard: bool = False,
wandb: bool = False,
log_frequency: int = 100,
device: Optional[torch.device] = None,
regularization_loss: Optional[Callable] = None,
wandb_logger: Optional[WandbLogger] = None,
**kwargs) -> None:
"""Create a new TorchModel.
Parameters
----------
model: torch.nn.Module
the PyTorch model implementing the calculation
loss: dc.models.losses.Loss or function
a Loss or function defining how to compute the training loss for each
batch, as described above
output_types: list of strings, optional (default None)
the type of each output from the model, as described above
batch_size: int, optional (default 100)
default batch size for training and evaluating
model_dir: str, optional (default None)
the directory on disk where the model will be stored. If this is None,
a temporary directory is created.
learning_rate: float or LearningRateSchedule, optional (default 0.001)
the learning rate to use for fitting. If optimizer is specified, this is
ignored.
optimizer: Optimizer, optional (default None)
the optimizer to use for fitting. If this is specified, learning_rate is
ignored.
tensorboard: bool, optional (default False)
whether to log progress to TensorBoard during training
wandb: bool, optional (default False)
whether to log progress to Weights & Biases during training
log_frequency: int, optional (default 100)
The frequency at which to log data. Data is logged using
`logging` by default. If `tensorboard` is set, data is also
logged to TensorBoard. If `wandb` is set, data is also logged
to Weights & Biases. Logging happens at global steps. Roughly,
a global step corresponds to one batch of training. If you'd
like a printout every 10 batch steps, you'd set
`log_frequency=10` for example.
device: torch.device, optional (default None)
the device on which to run computations. If None, a device is
chosen automatically.
regularization_loss: Callable, optional
a function that takes no arguments, and returns an extra contribution to add
to the loss function
wandb_logger: WandbLogger
the Weights & Biases logger object used to log data and metrics
"""
super(TorchModel, self).__init__(model=model,
model_dir=model_dir,
**kwargs)
self.loss = loss # not used
self.learning_rate = learning_rate # not used
self.output_types = output_types # not used
if isinstance(loss, Loss):
self._loss_fn: LossFn = _StandardLoss(self, loss)
else:
self._loss_fn = loss
self.batch_size = batch_size
if optimizer is None:
self.optimizer: Optimizer = Adam(learning_rate=learning_rate)
else:
self.optimizer = optimizer
self.tensorboard = tensorboard
self.regularization_loss = regularization_loss
# Select a device.
if device is None:
if torch.cuda.is_available():
device = torch.device('cuda')
elif torch.backends.mps.is_available():
device = torch.device('mps')
else:
device = torch.device('cpu')
self.device = device
self.model = model.to(device)
# W&B logging
if wandb:
logger.warning(
"`wandb` argument is deprecated. Please use `wandb_logger` instead. "
"This argument will be removed in a future release of DeepChem."
)
if wandb and not _has_wandb:
logger.warning(
"You set wandb to True but W&B is not installed. To use wandb logging, "
"run `pip install wandb; wandb login` see https://docs.wandb.com/huggingface."
)
self.wandb = wandb and _has_wandb
self.wandb_logger = wandb_logger
# If `wandb=True` and no logger is provided, initialize default logger
if self.wandb and (self.wandb_logger is None):
self.wandb_logger = WandbLogger()
# Setup and initialize W&B logging
if (self.wandb_logger
is not None) and (not self.wandb_logger.initialized):
self.wandb_logger.setup()
# Update config with TorchModel params
wandb_logger_config = dict(loss=loss,
output_types=output_types,
batch_size=batch_size,
model_dir=model_dir,
learning_rate=learning_rate,
optimizer=optimizer,
tensorboard=tensorboard,
log_frequency=log_frequency,
regularization_loss=regularization_loss)
wandb_logger_config.update(**kwargs)
if self.wandb_logger is not None:
self.wandb_logger.update_config(wandb_logger_config)
self.log_frequency = log_frequency
if self.tensorboard and not _has_tensorboard:
raise ImportError(
"This class requires tensorboard to be installed.")
if self.tensorboard:
self._summary_writer = torch.utils.tensorboard.SummaryWriter(
self.model_dir)
if output_types is None:
self._prediction_outputs = None
self._loss_outputs = None
self._variance_outputs = None
self._other_outputs = None
else:
self._prediction_outputs = []
self._loss_outputs = []
self._variance_outputs = []
self._other_outputs = []
for i, type in enumerate(output_types):
if type == 'prediction':
self._prediction_outputs.append(i)
elif type == 'loss':
self._loss_outputs.append(i)
elif type == 'variance':
self._variance_outputs.append(i)
else:
self._other_outputs.append(i)
if len(self._loss_outputs) == 0:
self._loss_outputs = self._prediction_outputs
self._built = False
self._output_functions: Dict[Any, Any] = {}
self._optimizer_for_vars: Dict[Any, Any] = {}
def _ensure_built(self) -> None:
"""The first time this is called, create internal data structures."""
if self._built:
return
self._built = True
self._global_step = 0
self._pytorch_optimizer = self.optimizer._create_pytorch_optimizer(
self.model.parameters())
if isinstance(self.optimizer.learning_rate, LearningRateSchedule):
self._lr_schedule = self.optimizer.learning_rate._create_pytorch_schedule(
self._pytorch_optimizer)
else:
self._lr_schedule = None
def fit(self,
dataset: Dataset,
nb_epoch: int = 10,
max_checkpoints_to_keep: int = 5,
checkpoint_interval: int = 1000,
deterministic: bool = False,
restore: bool = False,
variables: Optional[List[torch.nn.Parameter]] = None,
loss: Optional[LossFn] = None,
callbacks: Union[Callable, List[Callable]] = [],
all_losses: Optional[List[float]] = None) -> float:
"""Train this model on a dataset.
Parameters
----------
dataset: Dataset
the Dataset to train on
nb_epoch: int
the number of epochs to train for
max_checkpoints_to_keep: int
the maximum number of checkpoints to keep. Older checkpoints are discarded.
checkpoint_interval: int
the frequency at which to write checkpoints, measured in training steps.
Set this to 0 to disable automatic checkpointing.
deterministic: bool
if True, the samples are processed in order. If False, a different random
order is used for each epoch.
restore: bool
if True, restore the model from the most recent checkpoint and continue training
from there. If False, retrain the model from scratch.
variables: list of torch.nn.Parameter
the variables to train. If None (the default), all trainable variables in
the model are used.
loss: function
a function of the form f(outputs, labels, weights) that computes the loss
for each batch. If None (the default), the model's standard loss function
is used.
callbacks: function or list of functions
one or more functions of the form f(model, step) that will be invoked after
every step. This can be used to perform validation, logging, etc.
all_losses: Optional[List[float]], optional (default None)
If specified, all logged losses are appended into this list. Note that
you can call `fit()` repeatedly with the same list and losses will
continue to be appended.
Returns
-------
The average loss over the most recent checkpoint interval
"""
return self.fit_generator(
self.default_generator(dataset,
epochs=nb_epoch,
deterministic=deterministic),
max_checkpoints_to_keep, checkpoint_interval, restore, variables,
loss, callbacks, all_losses)
def fit_generator(self,
generator: Iterable[Tuple[Any, Any, Any]],
max_checkpoints_to_keep: int = 5,
checkpoint_interval: int = 1000,
restore: bool = False,
variables: Optional[List[torch.nn.Parameter]] = None,
loss: Optional[LossFn] = None,
callbacks: Union[Callable, List[Callable]] = [],
all_losses: Optional[List[float]] = None) -> float:
"""Train this model on data from a generator.
Parameters
----------
generator: generator
this should generate batches, each represented as a tuple of the form
(inputs, labels, weights).
max_checkpoints_to_keep: int
the maximum number of checkpoints to keep. Older checkpoints are discarded.
checkpoint_interval: int
the frequency at which to write checkpoints, measured in training steps.
Set this to 0 to disable automatic checkpointing.
restore: bool
if True, restore the model from the most recent checkpoint and continue training
from there. If False, retrain the model from scratch.
variables: list of torch.nn.Parameter
the variables to train. If None (the default), all trainable variables in
the model are used.
loss: function
a function of the form f(outputs, labels, weights) that computes the loss
for each batch. If None (the default), the model's standard loss function
is used.
callbacks: function or list of functions
one or more functions of the form f(model, step) that will be invoked after
every step. This can be used to perform validation, logging, etc.
all_losses: Optional[List[float]], optional (default None)
If specified, all logged losses are appended into this list. Note that
you can call `fit()` repeatedly with the same list and losses will
continue to be appended.
Returns
-------
The average loss over the most recent checkpoint interval
"""
if not isinstance(callbacks, SequenceCollection):
callbacks = [callbacks]
self._ensure_built()
self.model.train()
avg_loss = 0.0
last_avg_loss = 0.0
averaged_batches = 0
if loss is None:
loss = self._loss_fn
if variables is None:
optimizer = self._pytorch_optimizer
lr_schedule = self._lr_schedule
else:
var_key = tuple(variables)
if var_key in self._optimizer_for_vars:
optimizer, lr_schedule = self._optimizer_for_vars[var_key]
else:
optimizer = self.optimizer._create_pytorch_optimizer(variables)
if isinstance(self.optimizer.learning_rate,
LearningRateSchedule):
lr_schedule = self.optimizer.learning_rate._create_pytorch_schedule(
optimizer)
else:
lr_schedule = None
self._optimizer_for_vars[var_key] = (optimizer, lr_schedule)
time1 = time.time()
# Main training loop.
for batch in generator:
if restore:
self.restore()
restore = False
inputs: OneOrMany[torch.Tensor]
inputs, labels, weights = self._prepare_batch(batch)
# Execute the loss function, accumulating the gradients.
if isinstance(inputs, list) and len(inputs) == 1:
inputs = inputs[0]
optimizer.zero_grad()
outputs = self.model(inputs)
if isinstance(outputs, torch.Tensor):
outputs = [outputs]
if self._loss_outputs is not None:
outputs = [outputs[i] for i in self._loss_outputs]
batch_loss = loss(outputs, labels, weights)
batch_loss.backward()
optimizer.step()
if lr_schedule is not None:
lr_schedule.step()
self._global_step += 1
current_step = self._global_step
avg_loss += batch_loss
# Report progress and write checkpoints.
averaged_batches += 1
should_log = (current_step % self.log_frequency == 0)
if should_log:
avg_loss = float(avg_loss) / averaged_batches
logger.info('Ending global_step %d: Average loss %g' %
(current_step, avg_loss))
if all_losses is not None:
all_losses.append(avg_loss)
# Capture the last avg_loss in case of return since we're resetting to 0 now
last_avg_loss = avg_loss
avg_loss = 0.0
averaged_batches = 0
if checkpoint_interval > 0 and current_step % checkpoint_interval == checkpoint_interval - 1:
self.save_checkpoint(max_checkpoints_to_keep)
for c in callbacks:
c(self, current_step)
if self.tensorboard and should_log:
self._log_scalar_to_tensorboard('loss', batch_loss,
current_step)
if (self.wandb_logger is not None) and should_log:
all_data = dict({'train/loss': batch_loss})
self.wandb_logger.log_data(all_data, step=current_step)
# Report final results.
if averaged_batches > 0:
avg_loss = float(avg_loss) / averaged_batches
logger.info('Ending global_step %d: Average loss %g' %
(current_step, avg_loss))
if all_losses is not None:
all_losses.append(avg_loss)
last_avg_loss = avg_loss
if checkpoint_interval > 0:
self.save_checkpoint(max_checkpoints_to_keep)
time2 = time.time()
logger.info("TIMING: model fitting took %0.3f s" % (time2 - time1))
return last_avg_loss
def fit_on_batch(self,
X: Sequence,
y: Sequence,
w: Sequence,
variables: Optional[List[torch.nn.Parameter]] = None,
loss: Optional[LossFn] = None,
callbacks: Union[Callable, List[Callable]] = [],
checkpoint: bool = True,
max_checkpoints_to_keep: int = 5) -> float:
"""Perform a single step of training.
Parameters
----------
X: ndarray
the inputs for the batch
y: ndarray
the labels for the batch
w: ndarray
the weights for the batch
variables: list of torch.nn.Parameter
the variables to train. If None (the default), all trainable variables in
the model are used.
loss: function
a function of the form f(outputs, labels, weights) that computes the loss
for each batch. If None (the default), the model's standard loss function
is used.
callbacks: function or list of functions
one or more functions of the form f(model, step) that will be invoked after
every step. This can be used to perform validation, logging, etc.
checkpoint: bool
if true, save a checkpoint after performing the training step
max_checkpoints_to_keep: int
the maximum number of checkpoints to keep. Older checkpoints are discarded.
Returns
-------
the loss on the batch
"""
self._ensure_built()
dataset = NumpyDataset(X, y, w)
return self.fit(dataset,
nb_epoch=1,
max_checkpoints_to_keep=max_checkpoints_to_keep,
checkpoint_interval=self._global_step +
2 if checkpoint else 0,
variables=variables,
loss=loss,
callbacks=callbacks)
def _predict(self, generator: Iterable[Tuple[Any, Any, Any]],
transformers: List[Transformer], uncertainty: bool,
other_output_types: Optional[OneOrMany[str]]):
"""
Predict outputs for data provided by a generator.
This is the private implementation of prediction. Do not
call it directly. Instead call one of the public prediction
methods.
Parameters
----------
generator: generator
this should generate batches, each represented as a tuple of the form
(inputs, labels, weights).
transformers: list of dc.trans.Transformers
Transformers that the input data has been transformed by. The output
is passed through these transformers to undo the transformations.
uncertainty: bool
specifies whether this is being called as part of estimating uncertainty.
If True, it sets the training flag so that dropout will be enabled, and
returns the values of the uncertainty outputs.
other_output_types: list, optional
Provides a list of other output_types (strings) to predict from model.
Returns
-------
a NumPy array if the model produces a single output, or a list of arrays
if it produces multiple outputs
"""
results: Optional[List[List[np.ndarray]]] = None
variances: Optional[List[List[np.ndarray]]] = None
if uncertainty and (other_output_types is not None):
raise ValueError(
'This model cannot compute uncertainties and other output types simultaneously. Please invoke one at a time.'
)
if uncertainty:
if self._variance_outputs is None or len(
self._variance_outputs) == 0:
raise ValueError('This model cannot compute uncertainties')
if len(self._variance_outputs) != len(self._prediction_outputs):
raise ValueError(
'The number of variances must exactly match the number of outputs'
)
if other_output_types:
if self._other_outputs is None or len(self._other_outputs) == 0:
raise ValueError(
'This model cannot compute other outputs since no other output_types were specified.'
)
self._ensure_built()
self.model.eval()
for batch in generator:
inputs, labels, weights = batch
inputs, _, _ = self._prepare_batch((inputs, None, None))
# Invoke the model.
if isinstance(inputs, list) and len(inputs) == 1:
inputs = inputs[0]
output_values = self.model(inputs)
if isinstance(output_values, torch.Tensor):
output_values = [output_values]
output_values = [t.detach().cpu().numpy() for t in output_values]
# Apply tranformers and record results.
if uncertainty:
var = [output_values[i] for i in self._variance_outputs]
if variances is None:
variances = [var]
else:
for i, t in enumerate(var):
variances[i].append(t)
access_values = []
if other_output_types:
access_values += self._other_outputs
elif self._prediction_outputs is not None:
access_values += self._prediction_outputs
if len(access_values) > 0:
output_values = [output_values[i] for i in access_values]
if len(transformers) > 0:
if len(output_values) > 1:
raise ValueError(
"predict() does not support Transformers for models with multiple outputs."
)
elif len(output_values) == 1:
output_values = [
undo_transforms(output_values[0], transformers)
]
if results is None:
results = [[] for i in range(len(output_values))]
for i, t in enumerate(output_values):
results[i].append(t)
# Concatenate arrays to create the final results.
final_results = []
final_variances = []
if results is not None:
for r in results:
final_results.append(np.concatenate(r, axis=0))
if uncertainty and variances is not None:
for v in variances:
final_variances.append(np.concatenate(v, axis=0))
return zip(final_results, final_variances)
if len(final_results) == 1:
return final_results[0]
else:
return final_results
def predict_on_generator(
self,
generator: Iterable[Tuple[Any, Any, Any]],
transformers: List[Transformer] = [],
output_types: Optional[OneOrMany[str]] = None
) -> OneOrMany[np.ndarray]:
"""
Parameters
----------
generator: generator
this should generate batches, each represented as a tuple of the form
(inputs, labels, weights).
transformers: list of dc.trans.Transformers
Transformers that the input data has been transformed by. The output
is passed through these transformers to undo the transformations.
output_types: String or list of Strings
If specified, all outputs of this type will be retrieved
from the model. If output_types is specified, outputs must
be None.
Returns
-------
a NumPy array if the model produces a single output, or a list of arrays
if it produces multiple outputs
"""
return self._predict(generator, transformers, False, output_types)
def predict_on_batch(
self,
X: np.typing.ArrayLike,
transformers: List[Transformer] = []) -> OneOrMany[np.ndarray]:
"""Generates predictions for input samples, processing samples in a batch.
Parameters
----------
X: ndarray
the input data, as a Numpy array.
transformers: list of dc.trans.Transformers
Transformers that the input data has been transformed by. The output
is passed through these transformers to undo the transformations.
Returns
-------
a NumPy array if the model produces a single output, or a list of arrays
if it produces multiple outputs
"""
dataset = NumpyDataset(X=X, y=None)
return self.predict(dataset, transformers)
def predict_uncertainty_on_batch(
self,
X: Sequence,
masks: int = 50) -> OneOrMany[Tuple[np.ndarray, np.ndarray]]:
"""
Predict the model's outputs, along with the uncertainty in each one.
The uncertainty is computed as described in https://arxiv.org/abs/1703.04977.
It involves repeating the prediction many times with different dropout masks.
The prediction is computed as the average over all the predictions. The
uncertainty includes both the variation among the predicted values (epistemic
uncertainty) and the model's own estimates for how well it fits the data
(aleatoric uncertainty). Not all models support uncertainty prediction.
Parameters
----------
X: ndarray
the input data, as a Numpy array.
masks: int
the number of dropout masks to average over
Returns
-------
for each output, a tuple (y_pred, y_std) where y_pred is the predicted
value of the output, and each element of y_std estimates the standard
deviation of the corresponding element of y_pred
"""
dataset = NumpyDataset(X=X, y=None)
return self.predict_uncertainty(dataset, masks)
def predict(
self,
dataset: Dataset,
transformers: List[Transformer] = [],
output_types: Optional[List[str]] = None) -> OneOrMany[np.ndarray]:
"""
Uses self to make predictions on provided Dataset object.
Parameters
----------
dataset: dc.data.Dataset
Dataset to make prediction on
transformers: list of dc.trans.Transformers
Transformers that the input data has been transformed by. The output
is passed through these transformers to undo the transformations.
output_types: String or list of Strings
If specified, all outputs of this type will be retrieved
from the model. If output_types is specified, outputs must
be None.
Returns
-------
a NumPy array if the model produces a single output, or a list of arrays
if it produces multiple outputs
"""
generator = self.default_generator(dataset,
mode='predict',
pad_batches=False)
return self.predict_on_generator(generator,
transformers=transformers,
output_types=output_types)
def predict_embedding(self, dataset: Dataset) -> OneOrMany[np.ndarray]:
"""
Predicts embeddings created by underlying model if any exist.
An embedding must be specified to have `output_type` of
`'embedding'` in the model definition.
Parameters
----------
dataset: dc.data.Dataset
Dataset to make prediction on
Returns
-------
a NumPy array of the embeddings model produces, or a list
of arrays if it produces multiple embeddings
"""
generator = self.default_generator(dataset,
mode='predict',
pad_batches=False)
return self._predict(generator, [], False, ['embedding'])
def predict_uncertainty(
self,
dataset: Dataset,
masks: int = 50) -> OneOrMany[Tuple[np.ndarray, np.ndarray]]:
"""
Predict the model's outputs, along with the uncertainty in each one.
The uncertainty is computed as described in https://arxiv.org/abs/1703.04977.
It involves repeating the prediction many times with different dropout masks.
The prediction is computed as the average over all the predictions. The
uncertainty includes both the variation among the predicted values (epistemic
uncertainty) and the model's own estimates for how well it fits the data
(aleatoric uncertainty). Not all models support uncertainty prediction.
Parameters
----------
dataset: dc.data.Dataset
Dataset to make prediction on
masks: int
the number of dropout masks to average over
Returns
-------
for each output, a tuple (y_pred, y_std) where y_pred is the predicted
value of the output, and each element of y_std estimates the standard
deviation of the corresponding element of y_pred
"""
sum_pred: List[np.ndarray] = []
sum_sq_pred: List[np.ndarray] = []
sum_var: List[np.ndarray] = []
for i in range(masks):
generator = self.default_generator(dataset,
mode='uncertainty',
pad_batches=False)
results = self._predict(generator, [], True, None)
if len(sum_pred) == 0:
for p, v in results:
sum_pred.append(p)
sum_sq_pred.append(p * p)
sum_var.append(v)
else:
for j, (p, v) in enumerate(results):
sum_pred[j] += p
sum_sq_pred[j] += p * p
sum_var[j] += v
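# The accumulated sums implement the usual decomposition of predictive
# uncertainty: (sum_sq_pred / masks - mean**2) is the epistemic variance across
# dropout masks, and (sum_var / masks) is the average aleatoric variance
# predicted by the model; their sum is the total variance reported below.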
output = []
std = []
for i in range(len(sum_pred)):
p = sum_pred[i] / masks
output.append(p)
std.append(
np.sqrt(sum_sq_pred[i] / masks - p * p + sum_var[i] / masks))
if len(output) == 1:
return (output[0], std[0])
else:
return list(zip(output, std))
def evaluate_generator(self,
generator: Iterable[Tuple[Any, Any, Any]],
metrics: List[Metric],
transformers: List[Transformer] = [],
per_task_metrics: bool = False):
"""Evaluate the performance of this model on the data produced by a generator.
Parameters
----------
generator: generator
this should generate batches, each represented as a tuple of the form
(inputs, labels, weights).
metric: list of deepchem.metrics.Metric
Evaluation metric
transformers: list of dc.trans.Transformers
Transformers that the input data has been transformed by. The output
is passed through these transformers to undo the transformations.
per_task_metrics: bool
If True, return per-task scores.
Returns
-------
dict
Maps tasks to scores under metric.
"""
evaluator = GeneratorEvaluator(self, generator, transformers)
return evaluator.compute_model_performance(metrics, per_task_metrics)
def compute_saliency(self, X: np.ndarray) -> OneOrMany[np.ndarray]:
"""Compute the saliency map for an input sample.
This computes the Jacobian matrix with the derivative of each output element
with respect to each input element. More precisely,
- If this model has a single output, it returns a matrix of shape
(output_shape, input_shape) with the derivatives.
- If this model has multiple outputs, it returns a list of matrices, one
for each output.
This method cannot be used on models that take multiple inputs.
Parameters
----------
X: ndarray
the input data for a single sample
Returns
-------
the Jacobian matrix, or a list of matrices
"""
input_shape = X.shape
X = np.reshape(X, [1] + list(X.shape))
self._ensure_built()
X_batch, _, _ = self._prepare_batch(([X], None, None))
# Compute the gradients.
X_tensor = X_batch[0]
X_tensor.requires_grad_(True)
outputs = self.model(X_tensor)
if isinstance(outputs, torch.Tensor):
outputs = [outputs]
final_result = []
for output in outputs:
output_shape = tuple(output.shape[1:])
output = output.reshape([-1])
result = []
grad_output = torch.zeros(output.shape[0], device=self.device)
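# Each backward pass below uses a one-hot grad_output, i.e. a vector-Jacobian
# product that extracts one row of the Jacobian (the gradient of a single
# output element with respect to every input element).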
for i in range(output.shape[0]):
grad_output.zero_()
grad_output[i] = 1
output.backward(grad_output, retain_graph=True)
result.append(X_tensor.grad.clone())
X_tensor.grad.zero_()
final_result.append(
torch.reshape(torch.stack(result),
output_shape + input_shape).cpu().numpy())
if len(final_result) == 1:
return final_result[0]
return final_result
def _prepare_batch(
self, batch: Tuple[Any, Any, Any]
) -> Tuple[List[torch.Tensor], List[torch.Tensor], List[torch.Tensor]]:
inputs, labels, weights = batch
inputs = [
x.astype(np.float32) if x.dtype == np.float64 else x for x in inputs
]
input_tensors = [torch.as_tensor(x, device=self.device) for x in inputs]
if labels is not None:
labels = [
x.astype(np.float32) if x.dtype == np.float64 else x
for x in labels
]
label_tensors = [
torch.as_tensor(x, device=self.device) for x in labels
]
else:
label_tensors = []
if weights is not None:
weights = [
x.astype(np.float32) if x.dtype == np.float64 else x
for x in weights
]
weight_tensors = [
torch.as_tensor(x, device=self.device) for x in weights
]
else:
weight_tensors = []
return (input_tensors, label_tensors, weight_tensors)
def default_generator(
self,
dataset: Dataset,
epochs: int = 1,
mode: str = 'fit',
deterministic: bool = True,
pad_batches: bool = True) -> Iterable[Tuple[List, List, List]]:
"""Create a generator that iterates batches for a dataset.
Subclasses may override this method to customize how model inputs are
generated from the data.
Parameters
----------
dataset: Dataset
the data to iterate
epochs: int
the number of times to iterate over the full dataset
mode: str
allowed values are 'fit' (called during training), 'predict' (called
during prediction), and 'uncertainty' (called during uncertainty
prediction)
deterministic: bool
whether to iterate over the dataset in order, or randomly shuffle the
data for each epoch
pad_batches: bool
whether to pad each batch up to this model's preferred batch size
Returns
-------
a generator that iterates batches, each represented as a tuple of lists:
([inputs], [outputs], [weights])
"""
for epoch in range(epochs):
logger.info("Starting training for epoch %d at %s" %
(epoch, datetime.datetime.now().ctime()))
for (X_b, y_b, w_b,
ids_b) in dataset.iterbatches(batch_size=self.batch_size,
deterministic=deterministic,
pad_batches=pad_batches):
yield ([X_b], [y_b], [w_b])
def save_checkpoint(self,
max_checkpoints_to_keep: int = 5,
model_dir: Optional[str] = None) -> None:
"""Save a checkpoint to disk.
Usually you do not need to call this method, since fit() saves checkpoints
automatically. If you have disabled automatic checkpointing during fitting,
this can be called to manually write checkpoints.
Parameters
----------
max_checkpoints_to_keep: int
the maximum number of checkpoints to keep. Older checkpoints are discarded.
model_dir: str, default None
Model directory to save checkpoint to. If None, revert to self.model_dir
"""
self._ensure_built()
if model_dir is None:
model_dir = self.model_dir
if not os.path.exists(model_dir):
os.makedirs(model_dir)
# Save the checkpoint to a file.
data = {
'model_state_dict': self.model.state_dict(),
'optimizer_state_dict': self._pytorch_optimizer.state_dict(),
'global_step': self._global_step
}
temp_file = os.path.join(model_dir, 'temp_checkpoint.pt')
torch.save(data, temp_file)
# Rename and delete older files.
paths = [
os.path.join(model_dir, 'checkpoint%d.pt' % (i + 1))
for i in range(max_checkpoints_to_keep)
]
if os.path.exists(paths[-1]):
os.remove(paths[-1])
for i in reversed(range(max_checkpoints_to_keep - 1)):
if os.path.exists(paths[i]):
os.rename(paths[i], paths[i + 1])
os.rename(temp_file, paths[0])
def get_checkpoints(self, model_dir: Optional[str] = None):
"""Get a list of all available checkpoint files.
Parameters
----------
model_dir: str, default None
Directory to get list of checkpoints from. Reverts to self.model_dir if None
"""
if model_dir is None:
model_dir = self.model_dir
files = sorted(os.listdir(model_dir))
files = [
f for f in files if f.startswith('checkpoint') and f.endswith('.pt')
]
return [os.path.join(model_dir, f) for f in files]
def restore(self,
checkpoint: Optional[str] = None,
model_dir: Optional[str] = None) -> None:
"""Reload the values of all variables from a checkpoint file.
Parameters
----------
checkpoint: str
the path to the checkpoint file to load. If this is None, the most recent
checkpoint will be chosen automatically. Call get_checkpoints() to get a
list of all available checkpoints.
model_dir: str, default None
Directory to restore checkpoint from. If None, use self.model_dir. If
checkpoint is not None, this is ignored.
"""
logger.info('Restoring model')
self._ensure_built()
if checkpoint is None:
checkpoints = sorted(self.get_checkpoints(model_dir))
if len(checkpoints) == 0:
raise ValueError('No checkpoint found')
checkpoint = checkpoints[0]
data = torch.load(checkpoint, map_location=self.device)
self.model.load_state_dict(data['model_state_dict'])
self._pytorch_optimizer.load_state_dict(data['optimizer_state_dict'])
self._global_step = data['global_step']
def get_global_step(self) -> int:
"""Get the number of steps of fitting that have been performed."""
return self._global_step
def _log_scalar_to_tensorboard(self, name: str, value: Any, step: int):
"""Log a scalar value to Tensorboard."""
self._summary_writer.add_scalar(name, value, step)
def _create_assignment_map(self,
source_model: "TorchModel",
include_top: bool = True,
**kwargs) -> Dict[Any, Any]:
"""
Creates a default assignment map between parameters of source and current model.
This is used only when a custom assignment map is missing. This assumes the
model is made of different layers followed by a dense layer for mapping to
output tasks. include_top is used to control whether or not the final dense
layer is used. The default assignment map is useful in cases where the type
of task is different (classification vs regression) and/or number of tasks.
Parameters
----------
source_model: dc.models.TorchModel
Source model to copy parameter values from.
include_top: bool, default True
if true, copies the last dense layer
"""
assignment_map: Dict[Any, Any] = {}
source_vars = list(source_model.model.parameters())
dest_vars = list(self.model.parameters())
if not include_top:
source_vars = source_vars[:-2]
dest_vars = dest_vars[:-2]
for source_var, dest_var in zip(source_vars, dest_vars):
assignment_map[source_var] = dest_var
return assignment_map
def _create_value_map(self, source_model: "TorchModel",
**kwargs) -> Dict[Any, Any]:
"""
Creates a value map between parameters in the source model and their
current values. This is used only when a custom value map is missing, and
assumes the restore method has been called.
Parameters
----------
source_model: dc.models.TorchModel
Source model to create value map from
"""
value_map: Dict[Any, Any] = {}
source_vars = list(source_model.model.parameters())
for source_var in source_vars:
value_map[source_var] = source_var.detach().cpu().numpy()
return value_map
def load_from_pretrained(self,
source_model: "TorchModel",
assignment_map: Optional[Dict[Any, Any]] = None,
value_map: Optional[Dict[Any, Any]] = None,
checkpoint: Optional[str] = None,
model_dir: Optional[str] = None,
include_top: bool = True,
inputs: Optional[Sequence[Any]] = None,
**kwargs) -> None:
"""Copies parameter values from a pretrained model. `source_model` can either
be a pretrained model or a model with the same architecture. `value_map`
is a parameter-value dictionary. If no `value_map` is provided, the parameter
values are restored to the `source_model` from a checkpoint and a default
`value_map` is created. `assignment_map` is a dictionary mapping parameters
from the `source_model` to the current model. If no `assignment_map` is
provided, one is made from scratch and assumes the model is composed of
several different layers, with the final one being a dense layer. include_top
is used to control whether or not the final dense layer is used. The default
assignment map is useful in cases where the type of task is different
(classification vs regression) and/or number of tasks in the setting.
Parameters
----------
source_model: dc.TorchModel, required
source_model can either be the pretrained model or a dc.TorchModel with
the same architecture as the pretrained model. It is used to restore from
a checkpoint, if value_map is None and to create a default assignment map
if assignment_map is None
assignment_map: Dict, default None
Dictionary mapping the source_model parameters and current model parameters
value_map: Dict, default None
Dictionary containing source_model trainable parameters mapped to numpy
arrays. If value_map is None, the values are restored and a default
parameter map is created using the restored values
checkpoint: str, default None
the path to the checkpoint file to load. If this is None, the most recent
checkpoint will be chosen automatically. Call get_checkpoints() to get a
list of all available checkpoints
model_dir: str, default None
Restore source model from custom model directory if needed
include_top: bool, default True
if True, copies the weights and bias associated with the final dense
layer. Used only when assignment map is None
inputs: List, optional (default None)
Input tensors for the models. If not None, the weights of both the source
model and the current model are built by calling them on these inputs.
"""
if inputs is not None:
# Ensure weights for both models are built.
source_model.model(inputs)
self.model(inputs)
self._ensure_built()
if value_map is None:
logger.info(
"No value map provided. Creating default value map from restored model."
)
source_model.restore(model_dir=model_dir, checkpoint=checkpoint)
value_map = self._create_value_map(source_model=source_model)
if assignment_map is None:
logger.info(
"No assignment map provided. Creating custom assignment map.")
assignment_map = self._create_assignment_map(
source_model=source_model, include_top=include_top)
for source_var, dest_var in assignment_map.items():
assert source_var.shape == dest_var.shape
dest_var.data = torch.as_tensor(value_map[source_var],
device=self.device)
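# Illustrative transfer-learning sketch for `load_from_pretrained` (a minimal
# example; the model class, constructor arguments and directory below are
# assumptions, not part of this module):
#
#     pretrained = SomeTorchModel(n_tasks=10, model_dir='pretrained_dir')
#     finetune = SomeTorchModel(n_tasks=1)   # same body, different output head
#     finetune.load_from_pretrained(source_model=pretrained,
#                                   checkpoint=None,     # use the most recent checkpoint
#                                   include_top=False)   # skip the final dense layer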
class _StandardLoss(object):
"""The implements the loss function for models that use a dc.models.losses.Loss."""
def __init__(self, model: TorchModel, loss: Loss) -> None:
self.model = model
self.loss = loss # not used
self.criterion = loss._create_pytorch_loss()
def __call__(self, outputs: List, labels: List, weights: List) -> float:
if len(outputs) != 1 or len(labels) != 1 or len(weights) != 1:
raise ValueError(
"Loss functions expects exactly one each of outputs, labels, and weights"
)
losses = self.criterion(outputs[0], labels[0])
w = weights[0]
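# If the weights have fewer dimensions than the per-element losses, append
# singleton axes below so they broadcast correctly when multiplied with `losses`.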
if len(w.shape) < len(losses.shape):
if isinstance(w, torch.Tensor):
shape = tuple(w.shape)
else:
shape = w.shape
shape = tuple(-1 if x is None else x for x in shape)
w = w.reshape(shape + (1,) * (len(losses.shape) - len(w.shape)))
loss = losses * w
loss = loss.mean()
if self.model.regularization_loss is not None:
loss += self.model.regularization_loss()
return loss
<file_sep>import numpy as np
from typing import DefaultDict, Optional
from deepchem.utils.typing import PymatgenComposition
from deepchem.feat import MaterialCompositionFeaturizer
elements_tl = [
'H', 'Li', 'Be', 'B', 'C', 'N', 'O', 'F', 'Na', 'Mg', 'Al', 'Si', 'P', 'S',
'Cl', 'K', 'Ca', 'Sc', 'Ti', 'V', 'Cr', 'Mn', 'Fe', 'Co', 'Ni', 'Cu', 'Zn',
'Ga', 'Ge', 'As', 'Se', 'Br', 'Kr', 'Rb', 'Sr', 'Y', 'Zr', 'Nb', 'Mo', 'Tc',
'Ru', 'Rh', 'Pd', 'Ag', 'Cd', 'In', 'Sn', 'Sb', 'Te', 'I', 'Xe', 'Cs', 'Ba',
'La', 'Ce', 'Pr', 'Nd', 'Pm', 'Sm', 'Eu', 'Gd', 'Tb', 'Dy', 'Ho', 'Er',
'Tm', 'Yb', 'Lu', 'Hf', 'Ta', 'W', 'Re', 'Os', 'Ir', 'Pt', 'Au', 'Hg', 'Tl',
'Pb', 'Bi', 'Ac', 'Th', 'Pa', 'U', 'Np', 'Pu'
]
class ElemNetFeaturizer(MaterialCompositionFeaturizer):
"""
Fixed size vector of length 86 containing raw fractional elemental
compositions in the compound. The 86 chosen elements are based on the
original implementation at https://github.com/NU-CUCIS/ElemNet.
Returns a vector containing fractional compositions of each element
in the compound.
References
----------
.. [1] Jha, D., Ward, L., Paul, A. et al. Sci Rep 8, 17593 (2018).
https://doi.org/10.1038/s41598-018-35934-y
Examples
--------
>>> import deepchem as dc
>>> comp = "Fe2O3"
>>> featurizer = dc.feat.ElemNetFeaturizer()
>>> features = featurizer.featurize([comp])
>>> type(features[0])
<class 'numpy.ndarray'>
>>> features[0].shape
(86,)
>>> round(sum(features[0]))
1
Note
----
This class requires Pymatgen to be installed.
"""
def get_vector(self, comp: DefaultDict) -> Optional[np.ndarray]:
"""
Converts a dictionary containing element names and corresponding
compositional fractions into a vector of fractions.
Parameters
----------
comp: collections.defaultdict object
Dictionary mapping element names to fractional compositions.
Returns
-------
fractions: np.ndarray
Vector of fractional compositions of each element.
"""
if all(e in elements_tl for e in comp):
return np.array([comp[e] if e in comp else 0 for e in elements_tl],
np.float32)
else:
return None
def _featurize(self, datapoint: PymatgenComposition,
**kwargs) -> Optional[np.ndarray]:
"""
Calculate 86 dimensional vector containing fractional compositions of
each element in the compound.
Parameters
----------
datapoint: pymatgen.core.Composition object
Composition object.
Returns
-------
feats: np.ndarray
86 dimensional vector containing fractional compositions of elements.
"""
if 'composition' in kwargs and datapoint is None:
datapoint = kwargs.get("composition")
raise DeprecationWarning(
'Composition is being phased out as a parameter, please pass "datapoint" instead.'
)
fractions = datapoint.fractional_composition.get_el_amt_dict()
return self.get_vector(fractions)
<file_sep>"""
Various utilities around hash functions.
"""
from typing import Callable, Dict, Optional, Tuple, Any, List
import numpy as np
import hashlib
def hash_ecfp(ecfp: str, size: int = 1024) -> int:
"""
Returns an int < size representing given ECFP fragment.
Input must be a string. This utility function is used for various
ECFP based fingerprints.
Parameters
----------
ecfp: str
String to hash. Usually an ECFP fragment.
size: int, optional (default 1024)
Hash to an int in range [0, size)
Returns
-------
ecfp_hash: int
An int < size representing given ECFP fragment
"""
bytes_ecfp = ecfp.encode('utf-8')
md5 = hashlib.md5()
md5.update(bytes_ecfp)
digest = md5.hexdigest()
ecfp_hash = int(digest, 16) % (size)
return ecfp_hash
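# Illustrative usage (a sketch; the exact index depends on the MD5 digest):
#   idx = hash_ecfp("C1CCCCC1", size=1024)
#   assert 0 <= idx < 1024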
def hash_sybyl(sybyl, sybyl_types):
return (sybyl_types.index(sybyl))
def hash_ecfp_pair(ecfp_pair: Tuple[str, str], size: int = 1024) -> int:
"""Returns an int < size representing that ECFP pair.
Input must be a tuple of strings. This utility is primarily used for
spatial contact featurizers. For example, if a protein and ligand
have close contact region, the first string could be the protein's
fragment and the second the ligand's fragment. The pair could be
hashed together to achieve one hash value for this contact region.
Parameters
----------
ecfp_pair: Tuple[str, str]
Pair of ECFP fragment strings
size: int, optional (default 1024)
Hash to an int in range [0, size)
Returns
-------
ecfp_hash: int
An int < size representing given ECFP pair.
"""
ecfp = "%s,%s" % (ecfp_pair[0], ecfp_pair[1])
bytes_ecfp = ecfp.encode('utf-8')
md5 = hashlib.md5()
md5.update(bytes_ecfp)
digest = md5.hexdigest()
ecfp_hash = int(digest, 16) % (size)
return ecfp_hash
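# Illustrative usage (a sketch; the exact index depends on the MD5 digest):
#   idx = hash_ecfp_pair(("C:C:N", "C1CCCCC1"), size=1024)
#   assert 0 <= idx < 1024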
def vectorize(hash_function: Callable[[Any, int], int],
feature_dict: Optional[Dict[int, str]] = None,
size: int = 1024,
feature_list: Optional[List] = None) -> np.ndarray:
"""Helper function to vectorize a spatial description from a hash.
Hash functions are used to perform spatial featurizations in
DeepChem. However, it's necessary to convert backwards from
the hash function to feature vectors. This function aids in
this conversion procedure. It creates a vector of zeros of length
`size`. It then loops through `feature_dict`, uses `hash_function`
to hash the stored value to an integer in range [0, size) and increments
the entry at that index.
Parameters
----------
hash_function: Function, Callable[[str, int], int]
Should accept two arguments, `feature`, and `size` and
return a hashed integer. Here `feature` is the item to
hash, and `size` is an int. For example, if `size=1024`,
then hashed values must fall in range `[0, 1024)`.
feature_dict: Dict, optional (default None)
Maps unique keys to features computed.
size: int (default 1024)
Length of generated bit vector
feature_list: List, optional (default None)
List of features.
Returns
-------
feature_vector: np.ndarray
A numpy array of shape `(size,)`
"""
feature_vector = np.zeros(size)
if feature_dict is not None:
on_channels = [
hash_function(feature, size)
for key, feature in feature_dict.items()
]
feature_vector[on_channels] += 1
elif feature_list is not None:
feature_vector[0] += len(feature_list)
return feature_vector
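# Illustrative sketch of vectorizing hashed fragments. The `feature_dict`
# below is a hypothetical mapping from atom indices to ECFP fragment strings:
#   feature_dict = {0: "C1CCCCC1", 1: "O=C"}
#   vec = vectorize(hash_ecfp, feature_dict=feature_dict, size=1024)
#   vec.shape  # (1024,), with at most two entries set to 1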
<file_sep>import os
import pytest
import numpy as np
import deepchem as dc
try:
import torch
except ModuleNotFoundError:
pass
def test_atom_vocab_random_mask():
from deepchem.models.torch_models.grover import GroverModel
from deepchem.feat.vocabulary_builders import GroverAtomVocabularyBuilder
smiles = np.array(['CC', 'CCC'])
dataset = dc.data.NumpyDataset(X=smiles)
atom_vocab = GroverAtomVocabularyBuilder()
atom_vocab.build(dataset)
vocab_labels = GroverModel.atom_vocab_random_mask(atom_vocab, smiles)
assert len(vocab_labels) == 5 # 5 atoms
def test_bond_vocab_random_mask():
from deepchem.models.torch_models.grover import GroverModel
from deepchem.feat.vocabulary_builders import GroverBondVocabularyBuilder
smiles = np.array(['CC', 'CCC'])
dataset = dc.data.NumpyDataset(X=smiles)
bond_vocab = GroverBondVocabularyBuilder()
bond_vocab.build(dataset)
vocab_labels = GroverModel.bond_vocab_random_mask(bond_vocab, smiles)
assert len(vocab_labels) == 3 # 3 bonds
@pytest.mark.torch
def testGroverPretrain(grover_graph_attributes):
from deepchem.models.torch_models.grover import GroverPretrain
from deepchem.models.torch_models.grover_layers import GroverEmbedding, GroverAtomVocabPredictor, GroverBondVocabPredictor, GroverFunctionalGroupPredictor
f_atoms, f_bonds, a2b, b2a, b2revb, a2a, a_scope, b_scope, _, _ = grover_graph_attributes
components = {}
components['embedding'] = GroverEmbedding(node_fdim=f_atoms.shape[1],
edge_fdim=f_bonds.shape[1])
components['atom_vocab_task_atom'] = GroverAtomVocabPredictor(
vocab_size=10, in_features=128)
components['atom_vocab_task_bond'] = GroverAtomVocabPredictor(
vocab_size=10, in_features=128)
components['bond_vocab_task_atom'] = GroverBondVocabPredictor(
vocab_size=10, in_features=128)
components['bond_vocab_task_bond'] = GroverBondVocabPredictor(
vocab_size=10, in_features=128)
components['functional_group_predictor'] = GroverFunctionalGroupPredictor(
functional_group_size=10)
model = GroverPretrain(**components)
inputs = f_atoms, f_bonds, a2b, b2a, b2revb, a_scope, b_scope, a2a
output = model(inputs)
assert len(output) == 8
# 9: number of atoms
assert output[0].shape == (9, 10)
assert output[1].shape == (9, 10)
# 6: number of bonds
assert output[2].shape == (6, 10)
assert output[3].shape == (6, 10)
# 3: number of molecules
assert output[4].shape == (3, 10)
assert output[5].shape == (3, 10)
assert output[6].shape == (3, 10)
assert output[7].shape == (3, 10)
@pytest.mark.torch
def test_grover_finetune_regression(grover_graph_attributes):
import torch.nn as nn
from deepchem.models.torch_models.grover_layers import GroverEmbedding
from deepchem.models.torch_models.readout import GroverReadout
from deepchem.models.torch_models.grover import GroverFinetune
f_atoms, f_bonds, a2b, b2a, b2revb, a2a, a_scope, b_scope, fg_labels, additional_features = grover_graph_attributes
inputs = f_atoms, f_bonds, a2b, b2a, b2revb, a_scope, b_scope, a2a
components = {}
components['embedding'] = GroverEmbedding(node_fdim=f_atoms.shape[1],
edge_fdim=f_bonds.shape[1])
components['readout'] = GroverReadout(rtype="mean", in_features=128)
components['mol_atom_from_atom_ffn'] = nn.Linear(
in_features=additional_features.shape[1] + 128, out_features=128)
components['mol_atom_from_bond_ffn'] = nn.Linear(
in_features=additional_features.shape[1] + 128, out_features=128)
model = GroverFinetune(**components, mode='regression', hidden_size=128)
model.training = False
output = model((inputs, additional_features))
assert output.shape == (3, 1)
@pytest.mark.torch
def test_grover_finetune_classification(grover_graph_attributes):
import torch.nn as nn
from deepchem.models.torch_models.grover_layers import GroverEmbedding
from deepchem.models.torch_models.readout import GroverReadout
from deepchem.models.torch_models.grover import GroverFinetune
f_atoms, f_bonds, a2b, b2a, b2revb, a2a, a_scope, b_scope, fg_labels, additional_features = grover_graph_attributes
inputs = f_atoms, f_bonds, a2b, b2a, b2revb, a_scope, b_scope, a2a
components = {}
components['embedding'] = GroverEmbedding(node_fdim=f_atoms.shape[1],
edge_fdim=f_bonds.shape[1])
components['readout'] = GroverReadout(rtype="mean", in_features=128)
components['mol_atom_from_atom_ffn'] = nn.Linear(
in_features=additional_features.shape[1] + 128, out_features=128)
components['mol_atom_from_bond_ffn'] = nn.Linear(
in_features=additional_features.shape[1] + 128, out_features=128)
n_classes = 2
model = GroverFinetune(**components,
mode='classification',
n_classes=n_classes,
hidden_size=128)
model.training = False
output = model((inputs, additional_features))
assert len(output) == n_classes
# logits for class 1
assert output[0].shape == (3, 2)
# logits for class 2
assert output[1].shape == (3, 2)
@pytest.mark.torch
def test_grover_pretraining_task_overfit(tmpdir):
import deepchem as dc
from deepchem.feat.vocabulary_builders import (GroverAtomVocabularyBuilder,
GroverBondVocabularyBuilder)
from deepchem.models.torch_models.grover import GroverModel
import pandas as pd
df = pd.DataFrame({'smiles': ['CC'], 'preds': [0]})
filepath = os.path.join(tmpdir, 'example.csv')
df.to_csv(filepath, index=False)
dataset_path = os.path.join(filepath)
loader = dc.data.CSVLoader(tasks=['preds'],
featurizer=dc.feat.DummyFeaturizer(),
feature_field=['smiles'])
dataset = loader.create_dataset(dataset_path)
av = GroverAtomVocabularyBuilder()
av.build(dataset)
bv = GroverBondVocabularyBuilder()
bv.build(dataset)
fg = dc.feat.CircularFingerprint()
loader2 = dc.data.CSVLoader(
tasks=['preds'],
featurizer=dc.feat.GroverFeaturizer(features_generator=fg),
feature_field='smiles')
graph_data = loader2.create_dataset(dataset_path)
model = GroverModel(node_fdim=151,
edge_fdim=165,
atom_vocab=av,
bond_vocab=bv,
features_dim=2048,
hidden_size=128,
functional_group_size=85,
task='pretraining',
device=torch.device('cpu'))
# Since pretraining is a self-supervised task where labels are generated while
# preparing the batch, we mock _prepare_batch_for_pretraining to set all labels to 0.
# The test here checks whether the model predicts 0's after overfitting.
def _prepare_batch_for_pretraining(batch):
from deepchem.feat.graph_data import BatchGraphData
from deepchem.utils.grover import extract_grover_attributes
X, y, w = batch
batchgraph = BatchGraphData(X[0])
fgroup_label = getattr(batchgraph, 'fg_labels')
f_atoms, f_bonds, a2b, b2a, b2revb, a2a, a_scope, b_scope, _, _ = extract_grover_attributes(
batchgraph)
# preparing for test by setting 0 labels
atom_vocab_label = torch.zeros(f_atoms.shape[0]).long()
bond_vocab_label = torch.zeros(f_bonds.shape[0] // 2).long()
fg_task = torch.zeros(fgroup_label.shape)
labels = {
"av_task": atom_vocab_label,
"bv_task": bond_vocab_label,
"fg_task": fg_task
}
inputs = (f_atoms, f_bonds, a2b, b2a, b2revb, a_scope, b_scope, a2a)
return inputs, labels, w
model._prepare_batch_for_pretraining = _prepare_batch_for_pretraining
loss = model.fit(graph_data, nb_epoch=200)
assert loss < 0.1
@pytest.mark.torch
def test_grover_model_overfit_finetune(tmpdir):
from deepchem.models.torch_models.grover import GroverModel
from deepchem.feat.vocabulary_builders import (GroverAtomVocabularyBuilder,
GroverBondVocabularyBuilder)
# arranging test - preparing dataset
import pandas as pd
df = pd.DataFrame({'smiles': ['CC', 'CCC'], 'preds': [0, 0]})
filepath = os.path.join(tmpdir, 'example.csv')
df.to_csv(filepath, index=False)
dataset_path = os.path.join(filepath)
loader = dc.data.CSVLoader(tasks=['preds'],
featurizer=dc.feat.DummyFeaturizer(),
feature_field=['smiles'])
dataset = loader.create_dataset(dataset_path)
av = GroverAtomVocabularyBuilder()
av.build(dataset)
bv = GroverBondVocabularyBuilder()
bv.build(dataset)
fg = dc.feat.CircularFingerprint()
loader2 = dc.data.CSVLoader(
tasks=['preds'],
featurizer=dc.feat.GroverFeaturizer(features_generator=fg),
feature_field='smiles')
graph_data = loader2.create_dataset(dataset_path)
# acting - tests
model = GroverModel(node_fdim=151,
edge_fdim=165,
atom_vocab=av,
bond_vocab=bv,
features_dim=2048,
hidden_size=128,
functional_group_size=85,
mode='regression',
task='finetuning',
model_dir='gm_ft',
device=torch.device('cpu'))
loss = model.fit(graph_data, nb_epoch=200)
scores = model.evaluate(
graph_data,
metrics=[dc.metrics.Metric(dc.metrics.mean_squared_error, np.mean)])
# asserting
assert loss < 0.01
assert scores['mean-mean_squared_error'] < 0.01
@pytest.mark.torch
@pytest.mark.parametrize('task', ['pretraining', 'finetuning'])
def test_grover_model_save_restore(tmpdir, task):
# arranging for tests
from deepchem.models.torch_models.grover import GroverModel
from deepchem.feat.vocabulary_builders import (GroverAtomVocabularyBuilder,
GroverBondVocabularyBuilder)
atom_vocabulary = GroverAtomVocabularyBuilder(max_size=100)
bond_vocabulary = GroverBondVocabularyBuilder(max_size=100)
model_config = {
'node_fdim': 151,
'edge_fdim': 165,
'atom_vocab': atom_vocabulary,
'bond_vocab': bond_vocabulary,
'features_dim': 2048,
'hidden_size': 128,
'functional_group_size': 85,
'mode': 'regression',
'model_dir': tmpdir,
'task': task
}
old_model = GroverModel(**model_config, device=torch.device('cpu'))
old_model._ensure_built()
old_model.save_checkpoint()
new_model = GroverModel(**model_config, device=torch.device('cpu'))
new_model._ensure_built()
# checking weights don't match before restore
old_state = old_model.model.state_dict()
new_state = new_model.model.state_dict()
for key in new_state.keys():
# norm layers and cached zero vectors have constant weights
if 'norm' not in key and 'zero' not in key:
assert not torch.allclose(old_state[key], new_state[key])
# restoring model
new_model.restore()
# checking matching of weights after restore
old_state = old_model.model.state_dict()
new_state = new_model.model.state_dict()
for key in new_state.keys():
assert torch.allclose(old_state[key], new_state[key])
@pytest.mark.torch
def test_load_from_pretrained_embeddings(tmpdir):
from deepchem.models.torch_models.grover import GroverModel
from deepchem.feat.vocabulary_builders import (GroverAtomVocabularyBuilder,
GroverBondVocabularyBuilder)
atom_vocabulary = GroverAtomVocabularyBuilder(max_size=100)
bond_vocabulary = GroverBondVocabularyBuilder(max_size=100)
pretrain_dir = os.path.join(tmpdir, 'pretrain_model')
model_config = {
'node_fdim': 151,
'edge_fdim': 165,
'atom_vocab': atom_vocabulary,
'bond_vocab': bond_vocabulary,
'features_dim': 2048,
'hidden_size': 128,
'functional_group_size': 85,
'mode': 'regression',
'model_dir': pretrain_dir,
}
model_config['task'] = 'pretraining'
pretrain_model = GroverModel(**model_config, device=torch.device('cpu'))
pretrain_model._ensure_built()
pretrain_model.save_checkpoint()
model_config['task'] = 'finetuning'
model_config['model_dir'] = os.path.join(tmpdir, 'finetune_model')
finetune_model = GroverModel(**model_config, device=torch.device('cpu'))
finetune_model._ensure_built()
pm_e_sdict = pretrain_model.model.embedding.state_dict()
fm_e_sdict = finetune_model.model.embedding.state_dict()
# asserting that weights are not same before reloading
for key in pm_e_sdict.keys():
# norm and bias layers have constant weights, hence they are not checked
if 'norm' not in key and 'bias' not in key:
assert not torch.allclose(pm_e_sdict[key], fm_e_sdict[key])
# acting - loading pretrained weights
finetune_model.load_from_pretrained(source_model=pretrain_model,
components=['embedding'])
fm_pretrained_e_sdict = finetune_model.model.embedding.state_dict()
# asserting that weight matches after loading
for key in pm_e_sdict.keys():
assert torch.allclose(pm_e_sdict[key], fm_pretrained_e_sdict[key])
<file_sep># Kaggle Dataset Examples
The Kaggle dataset is an in-house dataset from Merck that was first introduced in the following paper:
Ma, Junshui, et al. "Deep neural nets as a method for quantitative structure–activity relationships." Journal of chemical information and modeling 55.2 (2015): 263-274.
It contains 100,000 unique Merck in-house compounds that were
measured on 15 enzyme inhibition and ADME/TOX datasets.
Unlike most of the other datasets featured in MoleculeNet,
the Kaggle collection does not have structures for the
compounds tested since they were proprietary Merck compounds.
However, the collection does feature pre-computed descriptors
for these compounds.
Note that the original train/valid/test split from the source
data was preserved here, so this function doesn't allow for
alternate modes of splitting. Similarly, since the source data
came pre-featurized, it is not possible to apply alternative
featurizations.
This folder contains examples training models on the Kaggle dataset.
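As a minimal sketch (assuming your DeepChem build ships the `dc.molnet.load_kaggle` loader and can reach the hosted descriptor files), the fixed splits can be loaded as follows:

```python
import deepchem as dc

# Loads the fixed train/valid/test split with pre-computed descriptors.
tasks, (train, valid, test), transformers = dc.molnet.load_kaggle()
print(len(tasks), train.X.shape)
```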
<file_sep>"""
Train low-data siamese models on Tox21. Test last fold only.
"""
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
import numpy as np
import deepchem as dc
import tensorflow as tf
from datasets import load_tox21_convmol
# Number of folds for split
K = 4
# num positive/negative ligands
n_pos = 10
n_neg = 10
# Set batch sizes for network
test_batch_size = 128
support_batch_size = n_pos + n_neg
nb_epochs = 1
n_train_trials = 2000
n_eval_trials = 20
n_steps_per_trial = 1
learning_rate = 1e-4
log_every_n_samples = 50
# Number of features on conv-mols
n_feat = 75
tox21_tasks, dataset, transformers = load_tox21_convmol()
# Define metric
metric = dc.metrics.Metric(dc.metrics.roc_auc_score, mode="classification")
task_splitter = dc.splits.TaskSplitter()
fold_datasets = task_splitter.k_fold_split(dataset, K)
train_folds = fold_datasets[:-1]
train_dataset = dc.splits.merge_fold_datasets(train_folds)
test_dataset = fold_datasets[-1]
# Train support model on train
support_model = dc.nn.SequentialSupportGraph(n_feat)
# Add layers
support_model.add(dc.nn.GraphConv(64, n_feat, activation='relu'))
support_model.add(dc.nn.GraphPool())
support_model.add(dc.nn.GraphConv(128, 64, activation='relu'))
support_model.add(dc.nn.GraphPool())
support_model.add(dc.nn.GraphConv(64, 128, activation='relu'))
support_model.add(dc.nn.GraphPool())
support_model.add(dc.nn.Dense(128, 64, activation='tanh'))
support_model.add_test(dc.nn.GraphGather(test_batch_size, activation='tanh'))
support_model.add_support(
dc.nn.GraphGather(support_batch_size, activation='tanh'))
model = dc.models.SupportGraphClassifier(
support_model,
test_batch_size=test_batch_size,
support_batch_size=support_batch_size,
learning_rate=learning_rate)
model.fit(
train_dataset,
nb_epochs=nb_epochs,
n_episodes_per_epoch=n_train_trials,
n_pos=n_pos,
n_neg=n_neg,
log_every_n_samples=log_every_n_samples)
mean_scores, std_scores = model.evaluate(
test_dataset, metric, n_pos, n_neg, n_trials=n_eval_trials)
print("Mean Scores on evaluation dataset")
print(mean_scores)
print("Standard Deviations on evaluation dataset")
print(std_scores)
print("Median of Mean Scores")
print(np.median(np.array(list(mean_scores.values()))))
<file_sep>"""
Script that trains Sklearn multitask models on nci dataset.
"""
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
import os
import numpy as np
import shutil
from deepchem.molnet import load_nci
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestRegressor
from deepchem.data import Dataset
from deepchem.models.multitask import SingletaskToMultitask
from deepchem import metrics
from deepchem.metrics import Metric
from deepchem.models.sklearn_models import SklearnModel
from deepchem.utils.evaluate import Evaluator
np.random.seed(123)
# Set some global variables up top
verbosity = "high"
nci_tasks, nci_dataset, transformers = load_nci()
(train_dataset, valid_dataset, test_dataset) = nci_dataset
classification_metric = Metric(
metrics.roc_auc_score, np.mean, mode="classification")
def model_builder(model_dir):
sklearn_model = RandomForestRegressor(n_estimators=500)
return SklearnModel(sklearn_model, model_dir)
model = SingletaskToMultitask(nci_tasks, model_builder)
# Fit trained model
model.fit(train_dataset)
train_evaluator = Evaluator(
model, train_dataset, transformers, verbosity=verbosity)
train_scores = train_evaluator.compute_model_performance(
[classification_metric])
print("Train scores")
print(train_scores)
valid_evaluator = Evaluator(
model, valid_dataset, transformers, verbosity=verbosity)
valid_scores = valid_evaluator.compute_model_performance(
[classification_metric])
print("Validation scores")
print(valid_scores)
<file_sep>"""
Script that trains TF multitask models on MUV dataset.
"""
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
import os
import numpy as np
import shutil
import deepchem as dc
from deepchem.molnet import load_muv
np.random.seed(123)
# Load MUV data
muv_tasks, muv_datasets, transformers = load_muv(splitter='stratified')
train_dataset, valid_dataset, test_dataset = muv_datasets
# Build model
metric = dc.metrics.Metric(
dc.metrics.roc_auc_score, np.mean, mode="classification")
rate = dc.models.optimizers.ExponentialDecay(0.001, 0.8, 1000)
model = dc.models.MultitaskClassifier(
len(muv_tasks),
n_features=1024,
dropouts=[.25],
learning_rate=rate,
weight_init_stddevs=[.1],
batch_size=64,
verbosity="high")
# Fit trained model
model.fit(train_dataset)
# Evaluate train/test scores
train_scores = model.evaluate(train_dataset, [metric], transformers)
valid_scores = model.evaluate(valid_dataset, [metric], transformers)
print("Train scores")
print(train_scores)
print("Validation scores")
print(valid_scores)
<file_sep>import numpy as np
import unittest
from deepchem.feat import MordredDescriptors
class TestMordredDescriptors(unittest.TestCase):
"""
Test MordredDescriptors.
"""
def setUp(self):
"""
Set up tests.
"""
from rdkit import Chem
smiles = 'CC(=O)OC1=CC=CC=C1C(=O)O'
self.mol = Chem.MolFromSmiles(smiles)
def test_mordred_descriptors(self):
"""
Test simple descriptors.
"""
featurizer = MordredDescriptors()
descriptors = featurizer([self.mol])
assert descriptors.shape == (1, 1613)
assert np.allclose(descriptors[0][0:3],
np.array([9.54906713, 9.03919229, 1.0]))
def test_mordred_descriptors_with_3D_info(self):
"""
Test simple descriptors with 3D info
"""
from rdkit import Chem
from rdkit.Chem import AllChem
featurizer = MordredDescriptors(ignore_3D=False)
descriptors = featurizer([self.mol])
assert descriptors.shape == (1, 1826)
assert np.allclose(descriptors[0][780:784],
np.array([0.0, 0.0, 0.0, 0.0]))
# calculate coordinates
mol = self.mol
mol_with_conf = Chem.AddHs(mol)
AllChem.EmbedMolecule(mol_with_conf, AllChem.ETKDG())
descriptors = featurizer([mol_with_conf])
assert descriptors.shape == (1, 1826)
# not zero values
assert not np.allclose(descriptors[0][780:784],
np.array([0.0, 0.0, 0.0, 0.0]))
<file_sep>"""
Script that trains graph-conv models on Tox21 dataset.
"""
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
import numpy as np
import sys
from deepchem.models.tensorgraph import TensorGraph
from deepchem.metrics import to_one_hot
from deepchem.feat.mol_graphs import ConvMol
from deepchem.models.tensorgraph.layers import Input, GraphConv, BatchNorm, GraphPool, Dense, GraphGather, \
SoftMax, SoftMaxCrossEntropy, Concat, WeightedError, Label, Constant, Weights, Feature, AlphaShare, SluiceLoss, Add
np.random.seed(123)
import tensorflow as tf
tf.random.set_seed(123)
import deepchem as dc
from deepchem.molnet import load_tox21
def sluice_model(batch_size, tasks):
model = TensorGraph(
model_dir=model_dir,
batch_size=batch_size,
use_queue=False,
tensorboard=True)
atom_features = Feature(shape=(None, 75))
degree_slice = Feature(shape=(None, 2), dtype=tf.int32)
membership = Feature(shape=(None,), dtype=tf.int32)
sluice_loss = []
deg_adjs = []
for i in range(0, 10 + 1):
deg_adj = Feature(shape=(None, i + 1), dtype=tf.int32)
deg_adjs.append(deg_adj)
gc1 = GraphConv(
64,
activation_fn=tf.nn.relu,
in_layers=[atom_features, degree_slice, membership] + deg_adjs)
as1 = AlphaShare(in_layers=[gc1, gc1])
sluice_loss.append(gc1)
batch_norm1a = BatchNorm(in_layers=[as1[0]])
batch_norm1b = BatchNorm(in_layers=[as1[1]])
gp1a = GraphPool(in_layers=[batch_norm1a, degree_slice, membership] +
deg_adjs)
gp1b = GraphPool(in_layers=[batch_norm1b, degree_slice, membership] +
deg_adjs)
gc2a = GraphConv(
64,
activation_fn=tf.nn.relu,
in_layers=[gp1a, degree_slice, membership] + deg_adjs)
gc2b = GraphConv(
64,
activation_fn=tf.nn.relu,
in_layers=[gp1b, degree_slice, membership] + deg_adjs)
as2 = AlphaShare(in_layers=[gc2a, gc2b])
sluice_loss.append(gc2a)
sluice_loss.append(gc2b)
batch_norm2a = BatchNorm(in_layers=[as2[0]])
batch_norm2b = BatchNorm(in_layers=[as2[1]])
gp2a = GraphPool(in_layers=[batch_norm2a, degree_slice, membership] +
deg_adjs)
gp2b = GraphPool(in_layers=[batch_norm2b, degree_slice, membership] +
deg_adjs)
densea = Dense(out_channels=128, activation_fn=None, in_layers=[gp2a])
denseb = Dense(out_channels=128, activation_fn=None, in_layers=[gp2b])
batch_norm3a = BatchNorm(in_layers=[densea])
batch_norm3b = BatchNorm(in_layers=[denseb])
as3 = AlphaShare(in_layers=[batch_norm3a, batch_norm3b])
sluice_loss.append(batch_norm3a)
sluice_loss.append(batch_norm3b)
gg1a = GraphGather(
batch_size=batch_size,
activation_fn=tf.nn.tanh,
in_layers=[as3[0], degree_slice, membership] + deg_adjs)
gg1b = GraphGather(
batch_size=batch_size,
activation_fn=tf.nn.tanh,
in_layers=[as3[1], degree_slice, membership] + deg_adjs)
costs = []
labels = []
count = 0
for task in tasks:
if count < len(tasks) / 2:
classification = Dense(
out_channels=2, activation_fn=None, in_layers=[gg1a])
print("first half:")
print(task)
else:
classification = Dense(
out_channels=2, activation_fn=None, in_layers=[gg1b])
print('second half')
print(task)
count += 1
softmax = SoftMax(in_layers=[classification])
model.add_output(softmax)
label = Label(shape=(None, 2))
labels.append(label)
cost = SoftMaxCrossEntropy(in_layers=[label, classification])
costs.append(cost)
entropy = Concat(in_layers=costs)
task_weights = Weights(shape=(None, len(tasks)))
task_loss = WeightedError(in_layers=[entropy, task_weights])
s_cost = SluiceLoss(in_layers=sluice_loss)
total_loss = Add(in_layers=[task_loss, s_cost])
model.set_loss(total_loss)
def feed_dict_generator(dataset, batch_size, epochs=1):
for epoch in range(epochs):
for ind, (X_b, y_b, w_b, ids_b) in enumerate(
dataset.iterbatches(batch_size, pad_batches=True)):
d = {}
for index, label in enumerate(labels):
d[label] = to_one_hot(y_b[:, index])
d[task_weights] = w_b
multiConvMol = ConvMol.agglomerate_mols(X_b)
d[atom_features] = multiConvMol.get_atom_features()
d[degree_slice] = multiConvMol.deg_slice
d[membership] = multiConvMol.membership
for i in range(1, len(multiConvMol.get_deg_adjacency_lists())):
d[deg_adjs[i - 1]] = multiConvMol.get_deg_adjacency_lists()[i]
yield d
return model, feed_dict_generator, labels, task_weights
model_dir = "tmp/graphconv"
# Load Tox21 dataset
tox21_tasks, tox21_datasets, transformers = load_tox21(featurizer='GraphConv')
train_dataset, valid_dataset, test_dataset = tox21_datasets
print(train_dataset.data_dir)
print(valid_dataset.data_dir)
# Fit models
metric = dc.metrics.Metric(
dc.metrics.roc_auc_score, np.mean, mode="classification")
# Batch size of models
batch_size = 100
num_epochs = 10
model, generator, labels, task_weights = sluice_model(batch_size, tox21_tasks)
model.fit_generator(
generator(train_dataset, batch_size, epochs=num_epochs),
checkpoint_interval=1000)
print("Evaluating model")
train_scores = model.evaluate_generator(
generator(train_dataset, batch_size), [metric],
transformers,
labels,
weights=[task_weights],
per_task_metrics=True)
valid_scores = model.evaluate_generator(
generator(valid_dataset, batch_size), [metric],
transformers,
labels,
weights=[task_weights],
per_task_metrics=True)
print("Train scores")
print(train_scores)
print("Validation scores")
print(valid_scores)
<file_sep>import os
import numpy as np
import deepchem as dc
def load_solubility_data():
"""Loads solubility dataset"""
current_dir = os.path.dirname(os.path.abspath(__file__))
featurizer = dc.feat.CircularFingerprint(size=1024)
tasks = ["log-solubility"]
input_file = os.path.join(current_dir,
"../../models/tests/assets/example.csv")
loader = dc.data.CSVLoader(tasks=tasks,
feature_field="smiles",
featurizer=featurizer)
return loader.create_dataset(input_file)
def test_y_minmax_transformer():
"""Tests MinMax transformer."""
solubility_dataset = load_solubility_data()
minmax_transformer = dc.trans.MinMaxTransformer(transform_y=True,
dataset=solubility_dataset)
X, y, w, ids = (solubility_dataset.X, solubility_dataset.y,
solubility_dataset.w, solubility_dataset.ids)
solubility_dataset = minmax_transformer.transform(solubility_dataset)
X_t, y_t, w_t, ids_t = (solubility_dataset.X, solubility_dataset.y,
solubility_dataset.w, solubility_dataset.ids)
# Check ids are unchanged before and after transformation
for id_elt, id_t_elt in zip(ids, ids_t):
assert id_elt == id_t_elt
# Check X is unchanged since transform_y is true
np.testing.assert_allclose(X, X_t)
# Check w is unchanged since transform_y is true
np.testing.assert_allclose(w, w_t)
# Check minimum and maximum values of transformed y are 0 and 1
np.testing.assert_allclose(y_t.min(), 0.)
np.testing.assert_allclose(y_t.max(), 1.)
# Check untransform works correctly
y_restored = minmax_transformer.untransform(y_t)
assert np.max(y_restored - y) < 1e-5
def test_y_minmax_random():
"""Test on random example"""
n_samples = 100
n_features = 10
n_tasks = 10
X = np.random.randn(n_samples, n_features)
y = np.random.randn(n_samples, n_tasks)
dataset = dc.data.NumpyDataset(X, y)
minmax_transformer = dc.trans.MinMaxTransformer(transform_y=True,
dataset=dataset)
w, ids = dataset.w, dataset.ids
dataset = minmax_transformer.transform(dataset)
X_t, y_t, w_t, ids_t = (dataset.X, dataset.y, dataset.w, dataset.ids)
# Check ids are unchanged before and after transformation
for id_elt, id_t_elt in zip(ids, ids_t):
assert id_elt == id_t_elt
# Check X is unchanged since transform_y is true
np.testing.assert_allclose(X, X_t)
# Check w is unchanged since transform_y is true
np.testing.assert_allclose(w, w_t)
# Check minimum and maximum values of transformed y are 0 and 1
np.testing.assert_allclose(y_t.min(), 0.)
np.testing.assert_allclose(y_t.max(), 1.)
# Test if dimensionality expansion is handled correctly by untransform
y_t = np.expand_dims(y_t, axis=-1)
y_restored = minmax_transformer.untransform(y_t)
assert y_restored.shape == y.shape + (1,)
np.testing.assert_allclose(np.squeeze(y_restored, axis=-1), y)
def test_X_minmax_transformer():
solubility_dataset = load_solubility_data()
minmax_transformer = dc.trans.MinMaxTransformer(transform_X=True,
dataset=solubility_dataset)
X, y, w, ids = (solubility_dataset.X, solubility_dataset.y,
solubility_dataset.w, solubility_dataset.ids)
solubility_dataset = minmax_transformer.transform(solubility_dataset)
X_t, y_t, w_t, ids_t = (solubility_dataset.X, solubility_dataset.y,
solubility_dataset.w, solubility_dataset.ids)
# Check ids are unchanged before and after transformation
for id_elt, id_t_elt in zip(ids, ids_t):
assert id_elt == id_t_elt
# Check y is unchanged since transform_X is true
np.testing.assert_allclose(y, y_t)
# Check w is unchanged since transform_X is true
np.testing.assert_allclose(w, w_t)
# Check minimum and maximum values of transformed X are 0 and 1
np.testing.assert_allclose(X_t.min(), 0.)
np.testing.assert_allclose(X_t.max(), 1.)
# Check untransform works correctly
np.testing.assert_allclose(minmax_transformer.untransform(X_t), X)
<file_sep>from typing import Callable, List, Optional
import numpy as np
from deepchem.utils.typing import RDKitMol
from deepchem.feat.base_classes import MolecularFeaturizer
class MordredDescriptors(MolecularFeaturizer):
"""Mordred descriptors.
This class computes a list of chemical descriptors using Mordred.
Please see the details about all descriptors from [1]_, [2]_.
Attributes
----------
descriptors: List[str]
List of Mordred descriptor names used in this class.
References
----------
.. [1] Moriwaki, Hirotomo, et al. "Mordred: a molecular descriptor calculator."
Journal of cheminformatics 10.1 (2018): 4.
.. [2] http://mordred-descriptor.github.io/documentation/master/descriptors.html
Note
----
This class requires Mordred to be installed.
Examples
--------
>>> import deepchem as dc
>>> smiles = ['CC(=O)OC1=CC=CC=C1C(=O)O']
>>> featurizer = dc.feat.MordredDescriptors(ignore_3D=True)
>>> features = featurizer.featurize(smiles)
>>> type(features[0])
<class 'numpy.ndarray'>
>>> features[0].shape
(1613,)
"""
def __init__(self, ignore_3D: bool = True):
"""
Parameters
----------
ignore_3D: bool, optional (default True)
Whether to use 3D information or not.
"""
self.ignore_3D = ignore_3D
self.calc: Optional[Callable] = None
self.descriptors: Optional[List] = None
def _featurize(self, datapoint: RDKitMol, **kwargs) -> np.ndarray:
"""
Calculate Mordred descriptors.
Parameters
----------
datapoint: rdkit.Chem.rdchem.Mol
RDKit Mol object
Returns
-------
np.ndarray
1D array of Mordred descriptors for `mol`.
If ignore_3D is True, the length is 1613.
If ignore_3D is False, the length is 1826.
"""
if 'mol' in kwargs:
datapoint = kwargs.get("mol")
raise DeprecationWarning(
'Mol is being phased out as a parameter, please pass "datapoint" instead.'
)
if self.calc is None:
try:
from mordred import Calculator, descriptors, is_missing
self.is_missing = is_missing
self.calc = Calculator(descriptors, ignore_3D=self.ignore_3D)
self.descriptors = list(descriptors.__all__)
except ModuleNotFoundError:
raise ImportError(
"This class requires Mordred to be installed.")
feature = self.calc(datapoint)
# convert errors to zero
feature = [
0.0 if self.is_missing(val) or isinstance(val, str) else val
for val in feature
]
return np.asarray(feature)
<file_sep>import logging
from typing import Any, Callable, Dict, List, Optional, Tuple
from deepchem.data import Dataset
from deepchem.trans import Transformer
from deepchem.models import Model
from deepchem.metrics import Metric
logger = logging.getLogger(__name__)
def _convert_hyperparam_dict_to_filename(hyper_params: Dict[str, Any]) -> str:
"""Helper function that converts a dictionary of hyperparameters to a string that can be a filename.
Parameters
----------
hyper_params: Dict
Maps string of hyperparameter name to int/float/string/list etc.
Returns
-------
filename: str
A filename of form "_key1_value1_value2_..._key2..."
"""
filename = ""
keys = sorted(hyper_params.keys())
for key in keys:
filename += "_%s" % str(key)
value = hyper_params[key]
if isinstance(value, int):
filename += "_%s" % str(value)
elif isinstance(value, float):
filename += "_%f" % value
else:
filename += "%s" % str(value)
return filename
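# Illustrative example of the generated suffix (hypothetical hyperparameters):
#   _convert_hyperparam_dict_to_filename({'learning_rate': 0.001, 'n_layers': 2})
#   returns '_learning_rate_0.001000_n_layers_2'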
class HyperparamOpt(object):
"""Abstract superclass for hyperparameter search classes.
This class is an abstract base class for hyperparameter search
classes in DeepChem. Hyperparameter search is performed on
`dc.models.Model` classes. Each hyperparameter object accepts a
`dc.models.Model` class upon construction. When `hyperparam_search`
is invoked, this class is used to construct many different
concrete models which are trained on the specified training set and
evaluated on a given validation set.
Different subclasses of `HyperparamOpt` differ in the choice of
strategy for searching the hyperparameter evaluation space. This
class itself is an abstract superclass and should never be directly
instantiated.
"""
def __init__(self, model_builder: Callable[..., Model]):
"""Initialize Hyperparameter Optimizer.
Note this is an abstract constructor which should only be used by
subclasses.
Parameters
----------
model_builder: constructor function.
This parameter must be constructor function which returns an
object which is an instance of `dc.models.Model`. This function
must accept two arguments, `model_params` of type `dict` and
`model_dir`, a string specifying a path to a model directory.
See the example.
"""
if self.__class__.__name__ == "HyperparamOpt":
raise ValueError(
"HyperparamOpt is an abstract superclass and cannot be directly instantiated. \
You probably want to instantiate a concrete subclass instead.")
self.model_builder = model_builder
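# A minimal sketch of the `model_builder` callable expected here (illustrative
# only; the model class and hyperparameters shown are assumptions):
#
#     def model_builder(model_params, model_dir):
#         return dc.models.MultitaskClassifier(n_tasks=1,
#                                              n_features=1024,
#                                              model_dir=model_dir,
#                                              **model_params)
#
# Concrete subclasses call this builder once per hyperparameter combination.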
def hyperparam_search(
self,
params_dict: Dict,
train_dataset: Dataset,
valid_dataset: Dataset,
metric: Metric,
output_transformers: List[Transformer] = [],
nb_epoch: int = 10,
use_max: bool = True,
logfile: str = 'results.txt',
logdir: Optional[str] = None,
**kwargs) -> Tuple[Model, Dict[str, Any], Dict[str, Any]]:
"""Conduct Hyperparameter search.
This method defines the common API shared by all hyperparameter
optimization subclasses. Different classes will implement
different search methods but they must all follow this common API.
Parameters
----------
params_dict: Dict
Dictionary mapping strings to values. Note that the
precise semantics of `params_dict` will change depending on the
optimizer that you're using. Depending on the type of
hyperparameter optimization, these values can be
ints/floats/strings/lists/etc. Read the documentation for the
concrete hyperparameter optimization subclass you're using to
learn more about what's expected.
train_dataset: Dataset
dataset used for training
valid_dataset: Dataset
dataset used for validation (optimization on validation scores)
metric: Metric
metric used for evaluation
output_transformers: list[Transformer]
Transformers for evaluation. This argument is needed since
`train_dataset` and `valid_dataset` may have been transformed
for learning and need the transform to be inverted before
the metric can be evaluated on a model.
nb_epoch: int, (default 10)
Specifies the number of training epochs during each iteration of optimization.
use_max: bool, optional
If True, return the model with the highest score. Otherwise, return
the model with the lowest score.
logdir: str, optional
The directory in which to store created models. If not set, will
use a temporary directory.
logfile: str, optional (default `results.txt`)
Name of logfile to write results to. If specified, this must
be a valid file name. If not specified, results of hyperparameter
search will be written to `logdir/results.txt`.
Returns
-------
Tuple[`best_model`, `best_hyperparams`, `all_scores`]
`(best_model, best_hyperparams, all_scores)` where `best_model` is
an instance of `dc.models.Model`, `best_hyperparams` is a
dictionary of parameters, and `all_scores` is a dictionary mapping
string representations of hyperparameter sets to validation
scores.
"""
raise NotImplementedError
<file_sep>"""
Tests for ImageLoader.
"""
import os
import unittest
import tempfile
from scipy import misc
import deepchem as dc
import zipfile
import numpy as np
class TestImageLoader(unittest.TestCase):
"""
Test ImageLoader
"""
def setUp(self):
super(TestImageLoader, self).setUp()
from PIL import Image
self.current_dir = os.path.dirname(os.path.abspath(__file__))
self.tif_image_path = os.path.join(self.current_dir, "a_image.tif")
# Create image file
self.data_dir = tempfile.mkdtemp()
self.face = misc.face()
self.face_path = os.path.join(self.data_dir, "face.png")
Image.fromarray(self.face).save(self.face_path)
self.face_copy_path = os.path.join(self.data_dir, "face_copy.png")
Image.fromarray(self.face).save(self.face_copy_path)
# Create zip of image file
self.zip_path = os.path.join(self.data_dir, "face.zip")
zipf = zipfile.ZipFile(self.zip_path, "w", zipfile.ZIP_DEFLATED)
zipf.write(self.face_path)
zipf.close()
# Create zip of multiple image files
self.multi_zip_path = os.path.join(self.data_dir, "multi_face.zip")
zipf = zipfile.ZipFile(self.multi_zip_path, "w", zipfile.ZIP_DEFLATED)
zipf.write(self.face_path)
zipf.write(self.face_copy_path)
zipf.close()
# Create zip of multiple image files, multiple_types
self.multitype_zip_path = os.path.join(self.data_dir,
"multitype_face.zip")
zipf = zipfile.ZipFile(self.multitype_zip_path, "w",
zipfile.ZIP_DEFLATED)
zipf.write(self.face_path)
zipf.write(self.tif_image_path)
zipf.close()
# Create image directory
self.image_dir = tempfile.mkdtemp()
face_path = os.path.join(self.image_dir, "face.png")
Image.fromarray(self.face).save(face_path)
face_copy_path = os.path.join(self.image_dir, "face_copy.png")
Image.fromarray(self.face).save(face_copy_path)
def test_png_simple_load(self):
loader = dc.data.ImageLoader()
dataset = loader.create_dataset(self.face_path)
# These are the known dimensions of face.png
assert dataset.X.shape == (1, 768, 1024, 3)
def test_png_simple_load_with_labels(self):
loader = dc.data.ImageLoader()
dataset = loader.create_dataset((self.face_path, np.array(1)))
# These are the known dimensions of face.png
assert dataset.X.shape == (1, 768, 1024, 3)
assert (dataset.y == np.ones((1,))).all()
def test_tif_simple_load(self):
loader = dc.data.ImageLoader()
dataset = loader.create_dataset(self.tif_image_path)
# TODO(rbharath): Where are the color channels?
assert dataset.X.shape == (1, 44, 330)
def test_png_multi_load(self):
loader = dc.data.ImageLoader()
dataset = loader.create_dataset([self.face_path, self.face_copy_path])
assert dataset.X.shape == (2, 768, 1024, 3)
def test_png_zip_load(self):
loader = dc.data.ImageLoader()
dataset = loader.create_dataset(self.zip_path)
assert dataset.X.shape == (1, 768, 1024, 3)
def test_png_multi_zip_load(self):
loader = dc.data.ImageLoader()
dataset = loader.create_dataset(self.multi_zip_path)
assert dataset.X.shape == (2, 768, 1024, 3)
def test_multitype_zip_load(self):
loader = dc.data.ImageLoader()
dataset = loader.create_dataset(self.multitype_zip_path)
# Since the different files have different shapes, makes an object array
assert dataset.X.shape == (2,)
def test_directory_load(self):
loader = dc.data.ImageLoader()
dataset = loader.create_dataset(self.image_dir)
assert dataset.X.shape == (2, 768, 1024, 3)
<file_sep>import warnings
from keras import backend as K
from keras import objectives
from keras.layers import Input, Lambda
from keras.layers.convolutional import Convolution1D
from keras.layers.core import Dense, Flatten, RepeatVector
from keras.layers.recurrent import GRU
from keras.layers.wrappers import TimeDistributed
from keras.models import Model
class MoleculeVAE():
autoencoder = None
def __init__(self):
warnings.warn("Deprecated. Will be removed in DeepChem 1.4.",
DeprecationWarning)
def create(self,
charset_length,
max_length=120,
latent_rep_size=292,
weights_file=None):
x = Input(shape=(max_length, charset_length))
_, z = self._buildEncoder(x, latent_rep_size, max_length)
self.encoder = Model(x, z)
encoded_input = Input(shape=(latent_rep_size,))
self.decoder = Model(encoded_input,
self._buildDecoder(encoded_input, latent_rep_size,
max_length, charset_length))
x1 = Input(shape=(max_length, charset_length))
vae_loss, z1 = self._buildEncoder(x1, latent_rep_size, max_length)
self.autoencoder = Model(x1,
self._buildDecoder(z1, latent_rep_size, max_length,
charset_length))
if weights_file:
self.autoencoder.load_weights(weights_file)
self.encoder.load_weights(weights_file, by_name=True)
self.decoder.load_weights(weights_file, by_name=True)
self.autoencoder.compile(
optimizer='Adam', loss=vae_loss, metrics=['accuracy'])
def _buildEncoder(self, x, latent_rep_size, max_length, epsilon_std=0.01):
h = Convolution1D(9, 9, activation='relu', name='conv_1')(x)
h = Convolution1D(9, 9, activation='relu', name='conv_2')(h)
h = Convolution1D(10, 11, activation='relu', name='conv_3')(h)
h = Flatten(name='flatten_1')(h)
h = Dense(435, activation='relu', name='dense_1')(h)
def sampling(args):
z_mean_, z_log_var_ = args
batch_size = K.shape(z_mean_)[0]
epsilon = K.random_normal(
shape=(batch_size, latent_rep_size), mean=0., std=epsilon_std)
return z_mean_ + K.exp(z_log_var_ / 2) * epsilon
z_mean = Dense(latent_rep_size, name='z_mean', activation='linear')(h)
z_log_var = Dense(latent_rep_size, name='z_log_var', activation='linear')(h)
def vae_loss(x, x_decoded_mean):
x = K.flatten(x)
x_decoded_mean = K.flatten(x_decoded_mean)
xent_loss = max_length * objectives.binary_crossentropy(x, x_decoded_mean)
kl_loss = -0.5 * K.mean(
1 + z_log_var - K.square(z_mean) - K.exp(z_log_var), axis=-1)
return xent_loss + kl_loss
return (vae_loss, Lambda(
sampling, output_shape=(latent_rep_size,),
name='lambda')([z_mean, z_log_var]))
def _buildDecoder(self, z, latent_rep_size, max_length, charset_length):
h = Dense(latent_rep_size, name='latent_input', activation='relu')(z)
h = RepeatVector(max_length, name='repeat_vector')(h)
h = GRU(501, return_sequences=True, name='gru_1')(h)
h = GRU(501, return_sequences=True, name='gru_2')(h)
h = GRU(501, return_sequences=True, name='gru_3')(h)
return TimeDistributed(
Dense(charset_length, activation='softmax'), name='decoded_mean')(h)
def save(self, filename):
self.autoencoder.save_weights(filename)
def load(self, charset_length, weights_file, latent_rep_size=292):
self.create(
charset_length,
weights_file=weights_file,
latent_rep_size=latent_rep_size)
<file_sep>"""
Computes putative binding pockets on protein.
"""
import logging
import numpy as np
from typing import Any, List, Optional, Tuple
from deepchem.models import Model
from deepchem.utils.rdkit_utils import load_molecule
from deepchem.utils.coordinate_box_utils import CoordinateBox, get_face_boxes, merge_overlapping_boxes
from deepchem.utils.fragment_utils import get_contact_atom_indices
logger = logging.getLogger(__name__)
def extract_active_site(
protein_file: str,
ligand_file: str,
cutoff: float = 4.0) -> Tuple[CoordinateBox, np.ndarray]:
"""Extracts a box for the active site.
Parameters
----------
protein_file: str
Location of protein PDB
ligand_file: str
Location of ligand input file
cutoff: float, optional (default 4.0)
The distance in angstroms from the protein pocket to
consider for featurization.
Returns
-------
Tuple[CoordinateBox, np.ndarray]
A tuple of `(CoordinateBox, np.ndarray)` where the second entry is
of shape `(N, 3)` with `N` the number of atoms in the active site.
"""
protein = load_molecule(protein_file, add_hydrogens=False)
ligand = load_molecule(ligand_file, add_hydrogens=True, calc_charges=True)
protein_contacts, ligand_contacts = get_contact_atom_indices(
[protein, ligand], cutoff=cutoff)
protein_coords = protein[0]
pocket_coords = protein_coords[protein_contacts]
x_min = int(np.floor(np.amin(pocket_coords[:, 0])))
x_max = int(np.ceil(np.amax(pocket_coords[:, 0])))
y_min = int(np.floor(np.amin(pocket_coords[:, 1])))
y_max = int(np.ceil(np.amax(pocket_coords[:, 1])))
z_min = int(np.floor(np.amin(pocket_coords[:, 2])))
z_max = int(np.ceil(np.amax(pocket_coords[:, 2])))
box = CoordinateBox((x_min, x_max), (y_min, y_max), (z_min, z_max))
return box, pocket_coords
class BindingPocketFinder(object):
"""Abstract superclass for binding pocket detectors
Many times when working with a new protein or other macromolecule,
it's not clear what zones of the macromolecule may be good targets
for potential ligands or other molecules to interact with. This
abstract class provides a template for child classes that
algorithmically locate potential binding pockets that are good
potential interaction sites.
Note that potential interaction sites can be found by many
different methods, and that this abstract class doesn't specify the
technique to be used.
"""
def find_pockets(self, molecule: Any):
"""Finds potential binding pockets in proteins.
Parameters
----------
molecule: object
Some representation of a molecule.
"""
raise NotImplementedError
class ConvexHullPocketFinder(BindingPocketFinder):
"""Implementation that uses convex hull of protein to find pockets.
Based on https://www.ncbi.nlm.nih.gov/pmc/articles/PMC4112621/pdf/1472-6807-14-18.pdf
"""
def __init__(self, scoring_model: Optional[Model] = None, pad: float = 5.0):
"""Initialize the pocket finder.
Parameters
----------
scoring_model: Model, optional (default None)
If specified, use this model to prune pockets.
pad: float, optional (default 5.0)
The number of angstroms to pad around a binding pocket's atoms
to get a binding pocket box.
"""
self.scoring_model = scoring_model
self.pad = pad
def find_all_pockets(self, protein_file: str) -> List[CoordinateBox]:
"""Find list of binding pockets on protein.
Parameters
----------
protein_file: str
Protein to load in.
Returns
-------
List[CoordinateBox]
List of binding pockets on protein. Each pocket is a `CoordinateBox`
"""
coords, _ = load_molecule(protein_file)
return get_face_boxes(coords, self.pad)
def find_pockets(self, macromolecule_file: str) -> List[CoordinateBox]:
"""Find list of suitable binding pockets on protein.
This function computes putative binding pockets on this protein.
This class uses the `ConvexHull` to compute binding pockets. Each
face of the hull is converted into a coordinate box used for
binding.
Parameters
----------
macromolecule_file: str
Location of the macromolecule file to load
Returns
-------
List[CoordinateBox]
List of pockets. Each pocket is a `CoordinateBox`
"""
coords, _ = load_molecule(macromolecule_file,
add_hydrogens=False,
calc_charges=False)
boxes = get_face_boxes(coords, self.pad)
boxes = merge_overlapping_boxes(boxes)
return boxes
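# Illustrative usage sketch ("protein.pdb" and "ligand.sdf" are hypothetical files):
#   box, pocket_coords = extract_active_site("protein.pdb", "ligand.sdf")
#   finder = ConvexHullPocketFinder(pad=5.0)
#   pockets = finder.find_pockets("protein.pdb")  # list of CoordinateBox objects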
<file_sep>import numpy as np
import os
import tensorflow as tf
import deepchem as dc
import pyanitools as pya
import app
def convert_species_to_atomic_nums(s):
PERIODIC_TABLE = {"H": 1, "C": 6, "N": 7, "O": 8}
res = []
for k in s:
res.append(PERIODIC_TABLE[k])
return np.array(res, dtype=np.float32)
# replace with your own scratch directory
data_dir = "/media/yutong/datablob/datasets"
model_dir = "/media/yutong/datablob/models"
all_dir = os.path.join(data_dir, "all")
test_dir = os.path.join(data_dir, "test")
fold_dir = os.path.join(data_dir, "fold")
train_dir = os.path.join(fold_dir, "train")
valid_dir = os.path.join(fold_dir, "valid")
def load_roiterberg_ANI(mode="atomization"):
"""
Load the ANI dataset.
Parameters
----------
mode: str
Accepted modes are "relative", "atomization", or "absolute". These settings are used
to adjust the dynamic range of the model, with absolute having the greatest and relative
having the lowest. Note that for atomization we approximate the single atom energy
using a different level of theory
Returns
-------
tuple
Elements returned are a 3-tuple (a, b, c) where a and b are the train and test datasets, respectively,
and c is an array of indices denoting the group of each sample.
"""
if "ROITBERG_ANI" not in os.environ:
raise ValueError(
"Please set environment variable ROITBERG_ANI to where the ani_dgb_s0x.h5 files are."
)
base_dir = os.environ["ROITBERG_ANI"]
# Number of conformations in each file increases exponentially.
# Start with a smaller dataset before continuing. Use all of them
# for production
hdf5files = [
'ani_gdb_s01.h5',
'ani_gdb_s02.h5',
# 'ani_gdb_s03.h5',
# 'ani_gdb_s04.h5',
# 'ani_gdb_s05.h5',
# 'ani_gdb_s06.h5',
# 'ani_gdb_s07.h5',
# 'ani_gdb_s08.h5'
]
hdf5files = [os.path.join(base_dir, f) for f in hdf5files]
groups = []
def shard_generator():
shard_size = 4096 * 64
row_idx = 0
group_idx = 0
X_cache = []
y_cache = []
w_cache = []
ids_cache = []
for hdf5file in hdf5files:
adl = pya.anidataloader(hdf5file)
for data in adl:
# Extract the data
P = data['path']
R = data['coordinates']
E = data['energies']
S = data['species']
smi = data['smiles']
if len(S) > 23:
print("skipping:", smi, "due to atom count.")
continue
# Print the data
print("Processing: ", P)
print(" Smiles: ", "".join(smi))
print(" Symbols: ", S)
print(" Coordinates: ", R.shape)
print(" Energies: ", E.shape)
Z_padded = np.zeros((23,), dtype=np.float32)
nonpadded = convert_species_to_atomic_nums(S)
Z_padded[:nonpadded.shape[0]] = nonpadded
if mode == "relative":
offset = np.amin(E)
elif mode == "atomization":
# self-interaction energies taken from
# https://github.com/isayev/ANI1_dataset README
atomizationEnergies = {
0: 0,
1: -0.500607632585,
6: -37.8302333826,
7: -54.5680045287,
8: -75.0362229210
}
offset = 0
for z in nonpadded:
offset -= atomizationEnergies[z]
elif mode == "absolute":
offset = 0
else:
raise Exception("Unsupported mode: ", mode)
for k in range(len(E)):
R_padded = np.zeros((23, 3), dtype=np.float32)
R_padded[:R[k].shape[0], :R[k].shape[1]] = R[k]
X = np.concatenate([np.expand_dims(Z_padded, 1), R_padded], axis=1)
y = E[k] - offset
# Append the current conformation, then flush the cache once it reaches
# the shard size so that no conformations are dropped.
X_cache.append(X)
y_cache.append(np.array(y).reshape((1,)))
w_cache.append(np.array(1).reshape((1,)))
ids_cache.append(row_idx)
row_idx += 1
if len(X_cache) == shard_size:
yield np.array(X_cache), np.array(y_cache), np.array(
w_cache), np.array(ids_cache)
X_cache = []
y_cache = []
w_cache = []
ids_cache = []
groups.append(group_idx)
group_idx += 1
# flush once more at the end
if len(X_cache) > 0:
yield np.array(X_cache), np.array(y_cache), np.array(w_cache), np.array(
ids_cache)
tasks = ["ani"]
dataset = dc.data.DiskDataset.create_dataset(
shard_generator(), tasks=tasks, data_dir=all_dir)
print("Number of groups", np.amax(groups))
splitter = dc.splits.RandomGroupSplitter(groups)
train_dataset, test_dataset = splitter.train_test_split(
dataset, train_dir=fold_dir, test_dir=test_dir, frac_train=.8)
return train_dataset, test_dataset, groups
def broadcast(dataset, metadata):
new_metadata = []
for (_, _, _, ids) in dataset.itershards():
for idx in ids:
new_metadata.append(metadata[idx])
return new_metadata
if __name__ == "__main__":
max_atoms = 23
batch_size = 64 # CHANGED FROM 16
layer_structures = [128, 128, 64]
atom_number_cases = [1, 6, 7, 8]
metric = [
dc.metrics.Metric(dc.metrics.mean_absolute_error, mode="regression"),
dc.metrics.Metric(dc.metrics.pearson_r2_score, mode="regression")
]
print("Fitting new model...")
train_valid_dataset, test_dataset, all_groups = load_roiterberg_ANI(
mode="atomization")
splitter = dc.splits.RandomGroupSplitter(
broadcast(train_valid_dataset, all_groups))
print("Performing 1-fold split...")
train_dataset, valid_dataset = splitter.train_test_split(
train_valid_dataset, train_dir=train_dir, test_dir=valid_dir)
transformers = [
dc.trans.NormalizationTransformer(
transform_y=True, dataset=train_dataset)
]
print("Total training set shape: ", train_dataset.get_shape())
for transformer in transformers:
train_dataset = transformer.transform(train_dataset)
valid_dataset = transformer.transform(valid_dataset)
test_dataset = transformer.transform(test_dataset)
model = dc.models.ANIRegression(
1,
max_atoms,
layer_structures=layer_structures,
atom_number_cases=atom_number_cases,
batch_size=batch_size,
learning_rate=0.001,
use_queue=True,
model_dir=model_dir,
mode="regression")
# # For production, set nb_epoch to 100+
for i in range(10):
model.fit(train_dataset, nb_epoch=1, checkpoint_interval=100)
print("Evaluating model")
train_scores = model.evaluate(train_dataset, metric, transformers)
valid_scores = model.evaluate(valid_dataset, metric, transformers)
test_scores = model.evaluate(test_dataset, metric, transformers)
# print("Train scores")
# print(train_scores)
print("Validation scores")
print(valid_scores)
print("Test scores")
print(test_scores)
coords = np.array([
[0.3, 0.4, 0.5],
[0.8, 0.2, 0.3],
[0.1, 0.3, 0.8],
])
atomic_nums = np.array([1, 8, 1])
print("Prediction of a single test set structure:")
print(model.pred_one(coords, atomic_nums))
print("Gradient of a single test set structure:")
print(model.grad_one(coords, atomic_nums))
# print("Minimization of a single test set structure:")
# print(model.minimize_structure(coords, atomic_nums))
app.webapp.model = model
app.webapp.run(host='0.0.0.0', debug=False)
<file_sep>import os
import logging
import unittest
import deepchem as dc
logger = logging.getLogger(__name__)
class TestDrop(unittest.TestCase):
"""
Test how loading of malformed compounds is handled.
Called TestDrop since these compounds were silently and erroneously dropped.
"""
def test_drop(self):
"""Test on dataset where RDKit fails on some strings."""
current_dir = os.path.dirname(os.path.realpath(__file__))
logger.info("About to load emols dataset.")
dataset_file = os.path.join(current_dir, "mini_emols.csv")
# Featurize emols dataset
logger.info("About to featurize datasets.")
featurizer = dc.feat.CircularFingerprint(size=1024)
emols_tasks = ['activity']
loader = dc.data.CSVLoader(tasks=emols_tasks,
feature_field="smiles",
featurizer=featurizer)
dataset = loader.create_dataset(dataset_file)
X, y, w, ids = (dataset.X, dataset.y, dataset.w, dataset.ids)
assert len(X) == len(y) == len(w) == len(ids)
<file_sep>from __future__ import absolute_import, division, print_function
import numpy as np
import sys
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
from scipy.signal import correlate2d
from simdna.simulations import loaded_motifs
def get_motif_scores(encoded_sequences,
motif_names,
max_scores=None,
return_positions=False,
GC_fraction=0.4):
"""
Computes pwm log odds.
Parameters
----------
encoded_sequences : 4darray
motif_names : list of strings
max_scores : int, optional
return_positions : boolean, optional
GC_fraction : float, optional
Returns
-------
(num_samples, num_motifs, seq_length) complete score array by default.
If max_scores, (num_samples, num_motifs*max_scores) max score array.
If max_scores and return_positions, (num_samples, 2*num_motifs*max_scores)
array with max scores and their positions.
"""
num_samples, _, _, seq_length = encoded_sequences.shape
scores = np.ones((num_samples, len(motif_names), seq_length))
for j, motif_name in enumerate(motif_names):
pwm = loaded_motifs.getPwm(motif_name).getRows().T
log_pwm = np.log(pwm)
gc_pwm = 0.5 * np.array(
[[1 - GC_fraction, GC_fraction, GC_fraction, 1 - GC_fraction]] * len(
pwm[0])).T
gc_log_pwm = np.log(gc_pwm)
scores[:, j, :] = get_pssm_scores(encoded_sequences,
log_pwm) - get_pssm_scores(
encoded_sequences, gc_log_pwm)
if max_scores is not None:
sorted_scores = np.sort(scores)[:, :, ::-1][:, :, :max_scores]
if return_positions:
sorted_positions = scores.argsort()[:, :, ::-1][:, :, :max_scores]
return np.concatenate(
(sorted_scores.reshape((num_samples, len(motif_names) * max_scores)),
sorted_positions.reshape(
(num_samples, len(motif_names) * max_scores))),
axis=1)
else:
return sorted_scores.reshape((num_samples, len(motif_names) * max_scores))
else:
return scores
def get_pssm_scores(encoded_sequences, pssm):
"""
Convolves pssm and its reverse complement with encoded sequences
and returns the maximum score at each position of each sequence.
Parameters
----------
encoded_sequences: 4darray
(num_examples, 1, 4, seq_length) array
pssm: 2darray
(4, pssm_length) array
Returns
-------
scores: 2darray
(num_examples, seq_length) array
"""
encoded_sequences = encoded_sequences.squeeze(axis=1)
# initialize fwd and reverse scores to -infinity
fwd_scores = np.full_like(encoded_sequences, -np.inf, float)
rc_scores = np.full_like(encoded_sequences, -np.inf, float)
# cross-correlate separately for each base,
# for both the PSSM and its reverse complement
for base_indx in range(encoded_sequences.shape[1]):
base_pssm = pssm[base_indx][None]
base_pssm_rc = base_pssm[:, ::-1]
fwd_scores[:, base_indx, :] = correlate2d(
encoded_sequences[:, base_indx, :], base_pssm, mode='same')
rc_scores[:, base_indx, :] = correlate2d(
encoded_sequences[:, -(base_indx + 1), :], base_pssm_rc, mode='same')
# sum over the bases
fwd_scores = fwd_scores.sum(axis=1)
rc_scores = rc_scores.sum(axis=1)
# take max of fwd and reverse scores at each position
scores = np.maximum(fwd_scores, rc_scores)
return scores
def one_hot_encode(sequences):
sequence_length = len(sequences[0])
integer_type = np.int8 if sys.version_info[
0] == 2 else np.int32 # depends on Python version
integer_array = LabelEncoder().fit(
np.array(('ACGTN',)).view(integer_type)).transform(
sequences.view(integer_type)).reshape(
len(sequences), sequence_length)
one_hot_encoding = OneHotEncoder(
sparse=False, n_values=5, dtype=integer_type).fit_transform(integer_array)
return one_hot_encoding.reshape(len(sequences), 1, sequence_length,
5).swapaxes(2, 3)[:, :, [0, 1, 2, 4], :]
def reverse_complement(encoded_seqs):
return encoded_seqs[..., ::-1, ::-1]
def get_sequence_strings(encoded_sequences):
"""
Converts encoded sequences into an array with sequence strings
"""
num_samples, _, _, seq_length = np.shape(encoded_sequences)
sequence_characters = np.chararray((num_samples, seq_length))
sequence_characters[:] = 'N'
for i, letter in enumerate(['A', 'C', 'G', 'T']):
letter_indxs = (encoded_sequences[:, :, i, :] == 1).squeeze()
sequence_characters[letter_indxs] = letter
# return 1D view of sequence characters
return sequence_characters.view('S%s' % (seq_length)).ravel()
def encode_fasta_sequences(fname):
"""
One hot encodes sequences in fasta file
"""
name, seq_chars = None, []
sequences = []
with open(fname) as fp:
for line in fp:
line = line.rstrip()
if line.startswith(">"):
if name:
sequences.append(''.join(seq_chars).upper())
name, seq_chars = line, []
else:
seq_chars.append(line)
if name is not None:
sequences.append(''.join(seq_chars).upper())
return one_hot_encode(np.array(sequences))
<file_sep>import unittest
import pytest
try:
from tensorflow import keras
from tensorflow.keras.layers import Input
from tensorflow.keras import activations
from deepchem.models.layers import MolGANConvolutionLayer, MolGANMultiConvolutionLayer, MolGANAggregationLayer, MolGANEncoderLayer
has_tensorflow = True
except:
has_tensorflow = False
class test_molgan_layers(unittest.TestCase):
"""
Unit testing for MolGAN basic layers
"""
@pytest.mark.tensorflow
def test_graph_convolution_layer(self):
vertices = 9
nodes = 5
edges = 5
units = 128
layer = MolGANConvolutionLayer(units=units, edges=edges)
adjacency_tensor = Input(shape=(vertices, vertices, edges))
node_tensor = Input(shape=(vertices, nodes))
output = layer([adjacency_tensor, node_tensor])
model = keras.Model(inputs=[adjacency_tensor, node_tensor],
outputs=[output])
assert model.output_shape == [((None, vertices, vertices, edges),
(None, vertices, nodes), (None, vertices,
units))]
assert layer.units == units
assert layer.activation == activations.tanh
assert layer.edges == 5
assert layer.dropout_rate == 0.0
@pytest.mark.tensorflow
def test_aggregation_layer(self):
vertices = 9
units = 128
layer = MolGANAggregationLayer(units=units)
hidden_tensor = Input(shape=(vertices, units))
output = layer(hidden_tensor)
model = keras.Model(inputs=[hidden_tensor], outputs=[output])
assert model.output_shape == (None, units)
assert layer.units == units
assert layer.activation == activations.tanh
assert layer.dropout_rate == 0.0
@pytest.mark.tensorflow
def test_multigraph_convolution_layer(self):
vertices = 9
nodes = 5
edges = 5
first_convolution_unit = 128
second_convolution_unit = 64
units = [first_convolution_unit, second_convolution_unit]
layer = MolGANMultiConvolutionLayer(units=units, edges=edges)
adjacency_tensor = Input(shape=(vertices, vertices, edges))
node_tensor = Input(shape=(vertices, nodes))
hidden_tensor = layer([adjacency_tensor, node_tensor])
model = keras.Model(inputs=[adjacency_tensor, node_tensor],
outputs=[hidden_tensor])
assert model.output_shape == (None, vertices, second_convolution_unit)
assert layer.units == units
assert layer.activation == activations.tanh
assert layer.edges == 5
assert layer.dropout_rate == 0.0
@pytest.mark.tensorflow
def test_graph_encoder_layer(self):
vertices = 9
nodes = 5
edges = 5
first_convolution_unit = 128
second_convolution_unit = 64
aggregation_unit = 128
units = [(first_convolution_unit, second_convolution_unit),
aggregation_unit]
layer = MolGANEncoderLayer(units=units, edges=edges)
adjacency_tensor = Input(shape=(vertices, vertices, edges))
node_tensor = Input(shape=(vertices, nodes))
output = layer([adjacency_tensor, node_tensor])
model = keras.Model(inputs=[adjacency_tensor, node_tensor],
outputs=[output])
assert model.output_shape == (None, aggregation_unit)
assert layer.graph_convolution_units == (first_convolution_unit,
second_convolution_unit)
assert layer.auxiliary_units == aggregation_unit
assert layer.activation == activations.tanh
assert layer.edges == 5
assert layer.dropout_rate == 0.0
if __name__ == '__main__':
unittest.main()
<file_sep>"""
Contains an abstract base class that supports chemically aware data splits.
"""
import numpy as np
from deepchem.data import NumpyDataset
from deepchem.splits import Splitter
def merge_fold_datasets(fold_datasets):
"""Merges fold datasets together.
Assumes that fold_datasets were outputted from k_fold_split. Specifically,
assumes that each dataset contains the same datapoints, listed in the same
ordering.
"""
if not len(fold_datasets):
return None
# All datasets share features and identifiers by assumption.
X = fold_datasets[0].X
ids = fold_datasets[0].ids
ys, ws = [], []
for fold_dataset in fold_datasets:
ys.append(fold_dataset.y)
ws.append(fold_dataset.w)
y = np.concatenate(ys, axis=1)
w = np.concatenate(ws, axis=1)
return NumpyDataset(X, y, w, ids)
class TaskSplitter(Splitter):
"""Provides a simple interface for splitting datasets task-wise.
For some learning problems, the training and test datasets should
have different tasks entirely. This is a different paradigm from the
usual Splitter, which ensures that split datasets have different
datapoints, not different tasks.
"""
def __init__(self):
"Creates Task Splitter object."
pass
def train_valid_test_split(self,
dataset,
frac_train=.8,
frac_valid=.1,
frac_test=.1):
"""Performs a train/valid/test split of the tasks for dataset.
If split is uneven, spillover goes to test.
Parameters
----------
dataset: dc.data.Dataset
Dataset to be split
frac_train: float, optional
Proportion of tasks to be put into train. Rounded to nearest int.
frac_valid: float, optional
Proportion of tasks to be put into valid. Rounded to nearest int.
frac_test: float, optional
Proportion of tasks to be put into test. Rounded to nearest int.
"""
np.testing.assert_almost_equal(frac_train + frac_valid + frac_test, 1)
n_tasks = len(dataset.get_task_names())
n_train = int(np.round(frac_train * n_tasks))
n_valid = int(np.round(frac_valid * n_tasks))
X, y, w, ids = dataset.X, dataset.y, dataset.w, dataset.ids
train_dataset = NumpyDataset(X, y[:, :n_train], w[:, :n_train], ids)
valid_dataset = NumpyDataset(X, y[:, n_train:n_train + n_valid],
w[:, n_train:n_train + n_valid], ids)
test_dataset = NumpyDataset(X, y[:, n_train + n_valid:],
w[:, n_train + n_valid:], ids)
return train_dataset, valid_dataset, test_dataset
def k_fold_split(self, dataset, K):
"""Performs a K-fold split of the tasks for dataset.
If split is uneven, spillover goes to last fold.
Parameters
----------
dataset: dc.data.Dataset
Dataset to be split
K: int
Number of splits to be made
"""
n_tasks = len(dataset.get_task_names())
n_per_fold = int(np.round(n_tasks / float(K)))
if K * n_per_fold != n_tasks:
print("Assigning extra tasks to last fold due to uneven split")
X, y, w, ids = dataset.X, dataset.y, dataset.w, dataset.ids
fold_datasets = []
for fold in range(K):
if fold != K - 1:
fold_tasks = range(fold * n_per_fold, (fold + 1) * n_per_fold)
else:
fold_tasks = range(fold * n_per_fold, n_tasks)
if len(w.shape) == 1:
w_tasks = w
elif w.shape[1] == 1:
w_tasks = w[:, 0]
else:
w_tasks = w[:, fold_tasks]
fold_datasets.append(NumpyDataset(X, y[:, fold_tasks], w_tasks,
ids))
return fold_datasets
<file_sep>from __future__ import absolute_import, division, print_function
import matplotlib
import numpy as np
import os
import subprocess
import sys
import tempfile
matplotlib.use('pdf')
import matplotlib.pyplot as plt
from dragonn.metrics import ClassificationResult
from keras.models import Sequential
from keras.layers.core import (Activation, Dense, Dropout, Flatten, Permute,
                               Reshape, TimeDistributedDense)
from keras.layers.convolutional import Convolution2D, MaxPooling2D
from keras.layers.recurrent import GRU
from keras.regularizers import l1
from keras.callbacks import EarlyStopping
#class SequenceDNN(Model):
# """
# Sequence DNN models.
#
# Parameters
# ----------
# seq_length : int, optional
# length of input sequence.
# keras_model : instance of keras.models.Sequential, optional
# seq_length or keras_model must be specified.
# num_tasks : int, optional
# number of tasks. Default: 1.
# num_filters : list[int] | tuple[int]
# number of convolutional filters in each layer. Default: (15,).
# conv_width : list[int] | tuple[int]
# width of each layer's convolutional filters. Default: (15,).
# pool_width : int
# width of max pooling after the last layer. Default: 35.
# L1 : float
# strength of L1 penalty.
# dropout : float
# dropout probability in every convolutional layer. Default: 0.
# verbose: int
# Verbosity level during training. Valid values: 0, 1, 2.
#
# Returns
# -------
# Compiled DNN model.
# """
#
# def __init__(self,
# seq_length=None,
# keras_model=None,
# use_RNN=False,
# num_tasks=1,
# num_filters=(15, 15, 15),
# conv_width=(15, 15, 15),
# pool_width=35,
# GRU_size=35,
# TDD_size=15,
# L1=0,
# dropout=0.0,
# num_epochs=100,
# verbose=1):
# self.num_tasks = num_tasks
# self.num_epochs = num_epochs
# self.verbose = verbose
# self.train_metrics = []
# self.valid_metrics = []
# if keras_model is not None and seq_length is None:
# self.model = keras_model
# self.num_tasks = keras_model.layers[-1].output_shape[-1]
# elif seq_length is not None and keras_model is None:
# self.model = Sequential()
# assert len(num_filters) == len(conv_width)
# for i, (nb_filter, nb_col) in enumerate(zip(num_filters, conv_width)):
# conv_height = 4 if i == 0 else 1
# self.model.add(
# Convolution2D(
# nb_filter=nb_filter,
# nb_row=conv_height,
# nb_col=nb_col,
# activation='linear',
# init='he_normal',
# input_shape=(1, 4, seq_length),
# W_regularizer=l1(L1),
# b_regularizer=l1(L1)))
# self.model.add(Activation('relu'))
# self.model.add(Dropout(dropout))
# self.model.add(MaxPooling2D(pool_size=(1, pool_width)))
# if use_RNN:
# num_max_pool_outputs = self.model.layers[-1].output_shape[-1]
# self.model.add(Reshape((num_filters[-1], num_max_pool_outputs)))
# self.model.add(Permute((2, 1)))
# self.model.add(GRU(GRU_size, return_sequences=True))
# self.model.add(TimeDistributedDense(TDD_size, activation='relu'))
# self.model.add(Flatten())
# self.model.add(Dense(output_dim=self.num_tasks))
# self.model.add(Activation('sigmoid'))
# self.model.compile(optimizer='adam', loss='binary_crossentropy')
# else:
# raise ValueError(
# "Exactly one of seq_length or keras_model must be specified!")
#
# def train(self,
# X,
# y,
# validation_data,
# early_stopping_metric='Loss',
# early_stopping_patience=5,
# save_best_model_to_prefix=None):
# if y.dtype != bool:
# assert set(np.unique(y)) == {0, 1}
# y = y.astype(bool)
# multitask = y.shape[1] > 1
# if not multitask:
# num_positives = y.sum()
# num_sequences = len(y)
# num_negatives = num_sequences - num_positives
# if self.verbose >= 1:
# print('Training model (* indicates new best result)...')
# X_valid, y_valid = validation_data
# early_stopping_wait = 0
# best_metric = np.inf if early_stopping_metric == 'Loss' else -np.inf
# for epoch in range(1, self.num_epochs + 1):
# self.model.fit(
# X,
# y,
# batch_size=128,
# nb_epoch=1,
# class_weight={
# True: num_sequences / num_positives,
# False: num_sequences / num_negatives
# } if not multitask else None,
# verbose=self.verbose >= 2)
# epoch_train_metrics = self.test(X, y)
# epoch_valid_metrics = self.test(X_valid, y_valid)
# self.train_metrics.append(epoch_train_metrics)
# self.valid_metrics.append(epoch_valid_metrics)
# if self.verbose >= 1:
# print('Epoch {}:'.format(epoch))
# print('Train {}'.format(epoch_train_metrics))
# print('Valid {}'.format(epoch_valid_metrics), end='')
# current_metric = epoch_valid_metrics[early_stopping_metric].mean()
# if (early_stopping_metric == 'Loss') == (current_metric <= best_metric):
# if self.verbose >= 1:
# print(' *')
# best_metric = current_metric
# best_epoch = epoch
# early_stopping_wait = 0
# if save_best_model_to_prefix is not None:
# self.save(save_best_model_to_prefix)
# else:
# if self.verbose >= 1:
# print()
# if early_stopping_wait >= early_stopping_patience:
# break
# early_stopping_wait += 1
# if self.verbose >= 1:
# print('Finished training after {} epochs.'.format(epoch))
# if save_best_model_to_prefix is not None:
# print("The best model's architecture and weights (from epoch {0}) "
# 'were saved to {1}.arch.json and {1}.weights.h5'.format(
# best_epoch, save_best_model_to_prefix))
#
# def predict(self, X):
# return self.model.predict(X, batch_size=128, verbose=False)
#
# def get_sequence_filters(self):
# """
# Returns 3D array of 2D sequence filters.
# """
# return self.model.layers[0].get_weights()[0].squeeze(axis=1)
#
# def deeplift(self, X, batch_size=200):
# """
# Returns (num_task, num_samples, 1, num_bases, sequence_length) deeplift score array.
# """
# assert len(np.shape(X)) == 4 and np.shape(X)[1] == 1
# from deeplift.conversion import keras_conversion as kc
#
# # convert to deeplift model and get scoring function
# deeplift_model = kc.convert_sequential_model(self.model, verbose=False)
# score_func = deeplift_model.get_target_contribs_func(
# find_scores_layer_idx=0)
# # use a 40% GC reference
# input_references = [np.array([0.3, 0.2, 0.2, 0.3])[None, None, :, None]]
# # get deeplift scores
# deeplift_scores = np.zeros((self.num_tasks,) + X.shape)
# for i in range(self.num_tasks):
# deeplift_scores[i] = score_func(
# task_idx=i,
# input_data_list=[X],
# batch_size=batch_size,
# progress_update=None,
# input_references_list=input_references)
# return deeplift_scores
#
# def in_silico_mutagenesis(self, X):
# """
# Returns (num_task, num_samples, 1, num_bases, sequence_length) ISM score array.
# """
# mutagenesis_scores = np.empty(X.shape + (self.num_tasks,), dtype=np.float32)
# wild_type_predictions = self.predict(X)
# wild_type_predictions = wild_type_predictions[:, np.newaxis, np.newaxis,
# np.newaxis]
# for sequence_index, (sequence, wild_type_prediction) in enumerate(
# zip(X, wild_type_predictions)):
# mutated_sequences = np.repeat(
# sequence[np.newaxis], np.prod(sequence.shape), axis=0)
# # remove wild-type
# arange = np.arange(len(mutated_sequences))
# horizontal_cycle = np.tile(
# np.arange(sequence.shape[-1]), sequence.shape[-2])
# mutated_sequences[arange, :, :, horizontal_cycle] = 0
# # add mutant
# vertical_repeat = np.repeat(
# np.arange(sequence.shape[-2]), sequence.shape[-1])
# mutated_sequences[arange, :, vertical_repeat, horizontal_cycle] = 1
# # make mutant predictions
# mutated_predictions = self.predict(mutated_sequences)
# mutated_predictions = mutated_predictions.reshape(sequence.shape +
# (self.num_tasks,))
# mutagenesis_scores[
# sequence_index] = wild_type_prediction - mutated_predictions
# return np.rollaxis(mutagenesis_scores, -1)
#
# @staticmethod
# def _plot_scores(X, output_directory, peak_width, score_func, score_name):
# from dragonn.plot import plot_bases_on_ax
# scores = score_func(X).squeeze(
# axis=2) # (num_task, num_samples, num_bases, sequence_length)
# try:
# os.makedirs(output_directory)
# except OSError:
# pass
# num_tasks = len(scores)
# for task_index, task_scores in enumerate(scores):
# for sequence_index, sequence_scores in enumerate(task_scores):
# # sequence_scores is num_bases x sequence_length
# basewise_max_sequence_scores = sequence_scores.max(axis=0)
# plt.clf()
# figure, (top_axis, bottom_axis) = plt.subplots(2)
# top_axis.plot(
# range(1,
# len(basewise_max_sequence_scores) + 1),
# basewise_max_sequence_scores)
# top_axis.set_title('{} scores (motif highlighted)'.format(score_name))
# peak_position = basewise_max_sequence_scores.argmax()
# top_axis.axvspan(
# peak_position - peak_width,
# peak_position + peak_width,
# color='grey',
# alpha=0.1)
# peak_sequence_scores = sequence_scores[:, peak_position - peak_width:
# peak_position + peak_width].T
# # Set non-max letter_heights to zero
# letter_heights = np.zeros_like(peak_sequence_scores)
# letter_heights[np.arange(len(letter_heights)),
# peak_sequence_scores.argmax(axis=1)] = \
# basewise_max_sequence_scores[peak_position - peak_width :
# peak_position + peak_width]
# plot_bases_on_ax(letter_heights, bottom_axis)
# bottom_axis.set_xticklabels(
# tuple(
# map(str,
# np.arange(peak_position - peak_width,
# peak_position + peak_width + 1))))
# bottom_axis.tick_params(axis='x', labelsize='small')
# plt.xlabel('Position')
# plt.ylabel('Score')
# plt.savefig(
# os.path.join(output_directory, 'sequence_{}{}'.format(
# sequence_index, '_task_{}'.format(task_index)
# if num_tasks > 1 else '')))
# plt.close()
#
# def plot_deeplift(self, X, output_directory, peak_width=10):
# self._plot_scores(
# X,
# output_directory,
# peak_width,
# score_func=self.deeplift,
# score_name='DeepLift')
#
# def plot_in_silico_mutagenesis(self, X, output_directory, peak_width=10):
# self._plot_scores(
# X,
# output_directory,
# peak_width,
# score_func=self.in_silico_mutagenesis,
# score_name='ISM')
#
# def plot_architecture(self, output_file):
# from dragonn.visualize_util import plot as plot_keras_model
# plot_keras_model(self.model, output_file, show_shape=True)
#
# def save(self, save_best_model_to_prefix):
# arch_fname = save_best_model_to_prefix + '.arch.json'
# weights_fname = save_best_model_to_prefix + '.weights.h5'
# open(arch_fname, 'w').write(self.model.to_json())
# self.model.save_weights(weights_fname, overwrite=True)
#
# @staticmethod
# def load(arch_fname, weights_fname=None):
# model_json_string = open(arch_fname).read()
# sequence_dnn = SequenceDNN(keras_model=model_from_json(model_json_string))
# if weights_fname is not None:
# sequence_dnn.model.load_weights(weights_fname)
# return sequence_dnn
class MotifScoreRNN(Model):
def __init__(self, input_shape, gru_size=10, tdd_size=4):
self.model = Sequential()
self.model.add(
GRU(gru_size, return_sequences=True, input_shape=input_shape))
if tdd_size is not None:
self.model.add(TimeDistributedDense(tdd_size))
self.model.add(Flatten())
self.model.add(Dense(1))
self.model.add(Activation('sigmoid'))
print('Compiling model...')
self.model.compile(optimizer='adam', loss='binary_crossentropy')
def train(self, X, y, validation_data):
print('Training model...')
multitask = y.shape[1] > 1
if not multitask:
num_positives = y.sum()
num_sequences = len(y)
num_negatives = num_sequences - num_positives
self.model.fit(
X,
y,
batch_size=128,
nb_epoch=100,
validation_data=validation_data,
class_weight={
True: num_sequences / num_positives,
False: num_sequences / num_negatives
} if not multitask else None,
callbacks=[EarlyStopping(monitor='val_loss', patience=10)],
verbose=True)
def predict(self, X):
return self.model.predict(X, batch_size=128, verbose=False)
class gkmSVM(Model):
def __init__(self,
prefix='./gkmSVM',
word_length=11,
mismatches=3,
C=1,
threads=1,
cache_memory=100,
verbosity=4):
self.word_length = word_length
self.mismatches = mismatches
self.C = C
self.threads = threads
self.prefix = '_'.join(map(str, (prefix, word_length, mismatches, C)))
options_list = zip(
['-l', '-d', '-c', '-T', '-m', '-v'],
map(str,
(word_length, mismatches, C, threads, cache_memory, verbosity)))
self.options = ' '.join([' '.join(option) for option in options_list])
@property
def model_file(self):
model_fname = '{}.model.txt'.format(self.prefix)
return model_fname if os.path.isfile(model_fname) else None
@staticmethod
def encode_sequence_into_fasta_file(sequence_iterator, ofname):
"""writes sequences into fasta file
"""
with open(ofname, "w") as wf:
for i, seq in enumerate(sequence_iterator):
print('>{}'.format(i), file=wf)
print(seq, file=wf)
def train(self, X, y, validation_data=None):
"""
Trains gkm-svm, saves model file.
"""
y = y.squeeze()
pos_sequence = X[y]
neg_sequence = X[~y]
pos_fname = "%s.pos_seq.fa" % self.prefix
neg_fname = "%s.neg_seq.fa" % self.prefix
# create temporary fasta files
self.encode_sequence_into_fasta_file(pos_sequence, pos_fname)
self.encode_sequence_into_fasta_file(neg_sequence, neg_fname)
# run command
command = ' '.join(('gkmtrain', self.options, pos_fname, neg_fname,
self.prefix))
process = subprocess.Popen(command, stdout=subprocess.PIPE, shell=True)
process.wait() # wait for it to finish
# remove fasta files
os.system("rm %s" % pos_fname)
os.system("rm %s" % neg_fname)
def predict(self, X):
if self.model_file is None:
raise RuntimeError("GkmSvm hasn't been trained!")
# write test fasta file
test_fname = "%s.test.fa" % self.prefix
self.encode_sequence_into_fasta_file(X, test_fname)
# test gkmsvm
temp_ofp = tempfile.NamedTemporaryFile()
threads_option = '-T %s' % (str(self.threads))
command = ' '.join([
'gkmpredict', test_fname, self.model_file, temp_ofp.name, threads_option
])
process = subprocess.Popen(command, shell=True)
process.wait() # wait for it to finish
os.system("rm %s" % test_fname) # remove fasta file
# get classification results
temp_ofp.seek(0)
y = np.array([line.split()[-1] for line in temp_ofp], dtype=float)
temp_ofp.close()
return np.expand_dims(y, 1)
<file_sep>"""
Tests for Pose Generation
"""
import os
import platform
import tempfile
import unittest
import logging
import numpy as np
import deepchem as dc
import pytest
IS_WINDOWS = platform.system() == 'Windows'
IS_LINUX = platform.system() == 'Linux'
class TestPoseGeneration(unittest.TestCase):
"""Does sanity checks on pose generation."""
def test_vina_initialization(self):
"""Test that VinaPoseGenerator can be initialized."""
dc.dock.VinaPoseGenerator()
@unittest.skipIf(not IS_LINUX, 'Skip the test on Windows and Mac.')
def test_gnina_initialization(self):
"""Test that GninaPoseGenerator can be initialized."""
dc.dock.GninaPoseGenerator()
def test_pocket_vina_initialization(self):
"""Test that VinaPoseGenerator can be initialized."""
pocket_finder = dc.dock.ConvexHullPocketFinder()
dc.dock.VinaPoseGenerator(pocket_finder=pocket_finder)
@unittest.skipIf(IS_WINDOWS, "vina is not supported in windows")
@pytest.mark.slow
def test_vina_poses_and_scores(self):
"""Test that VinaPoseGenerator generates poses and scores
This test takes some time to run, about a minute and a half on
a development laptop.
"""
# Let's turn on logging since this test will run for a while
logging.basicConfig(level=logging.INFO)
current_dir = os.path.dirname(os.path.realpath(__file__))
protein_file = os.path.join(current_dir, "1jld_protein.pdb")
ligand_file = os.path.join(current_dir, "1jld_ligand.sdf")
vpg = dc.dock.VinaPoseGenerator(pocket_finder=None)
with tempfile.TemporaryDirectory() as tmp:
poses, scores = vpg.generate_poses((protein_file, ligand_file),
exhaustiveness=1,
num_modes=1,
out_dir=tmp,
generate_scores=True)
assert len(poses) == 1
assert len(scores) == 1
protein, ligand = poses[0]
from rdkit import Chem
assert isinstance(protein, Chem.Mol)
assert isinstance(ligand, Chem.Mol)
@pytest.mark.slow
@unittest.skipIf(not IS_LINUX, 'Skip the test on Windows and Mac.')
def test_gnina_poses_and_scores(self):
"""Test that GninaPoseGenerator generates poses and scores
This test takes some time to run, about 3 minutes on
a development laptop.
"""
# Let's turn on logging since this test will run for a while
logging.basicConfig(level=logging.INFO)
current_dir = os.path.dirname(os.path.realpath(__file__))
protein_file = os.path.join(current_dir, "1jld_protein.pdb")
ligand_file = os.path.join(current_dir, "1jld_ligand.sdf")
gpg = dc.dock.GninaPoseGenerator()
with tempfile.TemporaryDirectory() as tmp:
poses, scores = gpg.generate_poses((protein_file, ligand_file),
exhaustiveness=1,
num_modes=1,
out_dir=tmp)
assert len(poses) == 1
assert len(scores) == 1
protein, ligand = poses[0]
from rdkit import Chem
assert isinstance(protein, Chem.Mol)
assert isinstance(ligand, Chem.Mol)
@unittest.skipIf(IS_WINDOWS, "vina is not supported in windows")
@pytest.mark.slow
def test_vina_poses_no_scores(self):
"""Test that VinaPoseGenerator generates poses.
This test takes some time to run, about a minute and a half on
a development laptop.
"""
# Let's turn on logging since this test will run for a while
logging.basicConfig(level=logging.INFO)
current_dir = os.path.dirname(os.path.realpath(__file__))
protein_file = os.path.join(current_dir, "1jld_protein.pdb")
ligand_file = os.path.join(current_dir, "1jld_ligand.sdf")
vpg = dc.dock.VinaPoseGenerator(pocket_finder=None)
with tempfile.TemporaryDirectory() as tmp:
poses = vpg.generate_poses((protein_file, ligand_file),
exhaustiveness=1,
num_modes=1,
out_dir=tmp,
generate_scores=False)
assert len(poses) == 1
protein, ligand = poses[0]
from rdkit import Chem
assert isinstance(protein, Chem.Mol)
assert isinstance(ligand, Chem.Mol)
@unittest.skipIf(IS_WINDOWS, "vina is not supported in windows")
@pytest.mark.slow
def test_vina_pose_specified_centroid(self):
"""Test that VinaPoseGenerator creates pose files with specified centroid/box dims.
This test takes some time to run, about a minute and a half on
a development laptop.
"""
# Let's turn on logging since this test will run for a while
logging.basicConfig(level=logging.INFO)
current_dir = os.path.dirname(os.path.realpath(__file__))
protein_file = os.path.join(current_dir, "1jld_protein.pdb")
ligand_file = os.path.join(current_dir, "1jld_ligand.sdf")
centroid = np.array([56.21891368, 25.95862964, 3.58950065])
box_dims = np.array([51.354, 51.243, 55.608])
vpg = dc.dock.VinaPoseGenerator(pocket_finder=None)
with tempfile.TemporaryDirectory() as tmp:
poses, scores = vpg.generate_poses((protein_file, ligand_file),
centroid=centroid,
box_dims=box_dims,
exhaustiveness=1,
num_modes=1,
out_dir=tmp,
generate_scores=True)
assert len(poses) == 1
assert len(scores) == 1
protein, ligand = poses[0]
from rdkit import Chem
assert isinstance(protein, Chem.Mol)
assert isinstance(ligand, Chem.Mol)
@unittest.skipIf(IS_WINDOWS, "vina is not supported in windows")
@pytest.mark.slow
def test_pocket_vina_poses(self):
"""Test that VinaPoseGenerator creates pose files.
This test is quite slow and takes about 5 minutes to run on a
development laptop.
"""
# Let's turn on logging since this test will run for a while
logging.basicConfig(level=logging.INFO)
current_dir = os.path.dirname(os.path.realpath(__file__))
protein_file = os.path.join(current_dir, "1jld_protein.pdb")
ligand_file = os.path.join(current_dir, "1jld_ligand.sdf")
# Note this may download autodock Vina...
convex_finder = dc.dock.ConvexHullPocketFinder()
vpg = dc.dock.VinaPoseGenerator(pocket_finder=convex_finder)
with tempfile.TemporaryDirectory() as tmp:
poses, scores = vpg.generate_poses((protein_file, ligand_file),
exhaustiveness=1,
num_modes=1,
num_pockets=2,
out_dir=tmp,
generate_scores=True)
assert len(poses) == 2
assert len(scores) == 2
from rdkit import Chem
for pose in poses:
protein, ligand = pose
assert isinstance(protein, Chem.Mol)
assert isinstance(ligand, Chem.Mol)
<file_sep>import pytest
@pytest.mark.torch
def test_smiles_call():
"""Test __call__ method for the featurizer, which is inherited from HuggingFace's RobertaTokenizerFast"""
from deepchem.feat.roberta_tokenizer import RobertaFeaturizer
smiles = ["Cn1c(=O)c2c(ncn2C)n(C)c1=O", "CC(=O)N1CN(C(C)=O)C(O)C1O"]
long_molecule_smiles = [
"CCCCCCCCCCCCCCCCCCCC(=O)OCCCNC(=O)c1ccccc1SSc1ccccc1C(=O)NCCCOC(=O)CCCCCCCCCCCCCCCCCCC"
]
featurizer = RobertaFeaturizer.from_pretrained(
"seyonec/SMILES_tokenized_PubChem_shard00_160k")
embedding = featurizer(smiles, add_special_tokens=True, truncation=True)
embedding_long = featurizer(long_molecule_smiles * 2,
add_special_tokens=True,
truncation=True)
for emb in [embedding, embedding_long]:
assert 'input_ids' in emb.keys() and 'attention_mask' in emb.keys()
assert len(emb['input_ids']) == 2 and len(emb['attention_mask']) == 2
@pytest.mark.torch
def test_smiles_featurize():
"""Test the .featurize method, which will convert the dictionary output to an array
Checks that all SMILES are featurized and that each featurization
contains input_ids and attention_mask
"""
from deepchem.feat.roberta_tokenizer import RobertaFeaturizer
smiles = ["Cn1c(=O)c2c(ncn2C)n(C)c1=O", "CC(=O)N1CN(C(C)=O)C(O)C1O"]
long_molecule_smiles = [
"CCCCCCCCCCCCCCCCCCCC(=O)OCCCNC(=O)c1ccccc1SSc1ccccc1C(=O)NCCCOC(=O)CCCCCCCCCCCCCCCCCCC"
]
featurizer = RobertaFeaturizer.from_pretrained(
"seyonec/SMILES_tokenized_PubChem_shard00_160k")
feats = featurizer.featurize(smiles,
add_special_tokens=True,
truncation=True)
assert (len(feats) == 2)
assert (all([len(f) == 2 for f in feats]))
long_feat = featurizer.featurize(long_molecule_smiles,
add_special_tokens=True,
truncation=True)
assert (len(long_feat) == 1)
assert (len(long_feat[0]) == 2)
<file_sep>Featurizers
===========
DeepChem contains an extensive collection of featurizers. If you
haven't run into this terminology before, a "featurizer" is a chunk of
code which transforms raw input data into a processed form suitable
for machine learning. Machine learning methods often need data to be
pre-chewed for them to process. Think of this like a mama penguin
chewing up food so the baby penguin can digest it easily.
Now if you've watched a few introductory deep learning lectures, you
might ask, why do we need something like a featurizer? Isn't part of
the promise of deep learning that we can learn patterns directly from
raw data?
Unfortunately it turns out that deep learning techniques need
featurizers just like normal machine learning methods do. Arguably,
they are less dependent on sophisticated featurizers and more capable
of learning sophisticated patterns from simpler data. But
nevertheless, deep learning systems can't simply chew up raw files.
For this reason, :code:`deepchem` provides an extensive collection of
featurization methods which we will review on this page.
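As a quick, concrete illustration (a minimal sketch; the SMILES string and
fingerprint size below are arbitrary choices for this example)::

    import deepchem as dc

    featurizer = dc.feat.CircularFingerprint(size=2048)
    # Turn a raw SMILES string into a fixed-length numeric feature vector.
    features = featurizer.featurize(["CC(=O)OC1=CC=CC=C1C(=O)O"])
    print(features.shape)  # (1, 2048)
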
.. contents:: Contents
:local:
Molecule Featurizers
---------------------
These featurizers work with datasets of molecules.
Graph Convolution Featurizers
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
In a future version of DeepChem we plan to unify our graph convolution models
around a joint data representation (:code:`GraphData`), so for now we provide
several featurizers.
:code:`ConvMolFeaturizer` and :code:`WeaveFeaturizer` are used with graph
convolution models that inherit from :code:`KerasModel`:
:code:`ConvMolFeaturizer` is used with all of these models except
:code:`WeaveModel`, while :code:`WeaveFeaturizer` is only used with
:code:`WeaveModel`.
On the other hand, :code:`MolGraphConvFeaturizer` is used with graph
convolution models that inherit from :code:`TorchModel`.
:code:`MolGanFeaturizer` is used with the MolGAN model, a GAN model for
generating small molecules.
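For example, a minimal sketch of how these featurizers are invoked (the SMILES
string below is arbitrary; each call returns per-molecule graph objects rather
than plain arrays)::

    import deepchem as dc

    smiles = ["CCO"]
    # For Keras-based graph models (e.g. GraphConvModel).
    conv_mols = dc.feat.ConvMolFeaturizer().featurize(smiles)
    # For PyTorch-based graph models (e.g. GCNModel or GATModel).
    graphs = dc.feat.MolGraphConvFeaturizer().featurize(smiles)
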
ConvMolFeaturizer
*****************
.. autoclass:: deepchem.feat.ConvMolFeaturizer
:members:
:inherited-members:
WeaveFeaturizer
***************
.. autoclass:: deepchem.feat.WeaveFeaturizer
:members:
:inherited-members:
MolGanFeaturizer
**********************
.. autoclass:: deepchem.feat.MolGanFeaturizer
:members:
:inherited-members:
MolGraphConvFeaturizer
**********************
.. autoclass:: deepchem.feat.MolGraphConvFeaturizer
:members:
:inherited-members:
PagtnMolGraphFeaturizer
*************************
.. autoclass:: deepchem.feat.PagtnMolGraphFeaturizer
:members:
:inherited-members:
DMPNNFeaturizer
**********************
.. autoclass:: deepchem.feat.DMPNNFeaturizer
:members:
:inherited-members:
GroverFeaturizer
****************
.. autoclass:: deepchem.feat.GroverFeaturizer
:members:
RDKitConformerFeaturizer
*************************
.. autoclass:: deepchem.feat.RDKitConformerFeaturizer
:members:
MXMNetFeaturizer
**********************
.. autoclass:: deepchem.feat.MXMNetFeaturizer
:members:
:inherited-members:
Utilities
*********
Here are some constants that are used by the graph convolutional featurizers for molecules.
.. autoclass:: deepchem.feat.graph_features.GraphConvConstants
:members:
:undoc-members:
There are a number of helper methods used by the graph convolutional classes which we document here.
.. autofunction:: deepchem.feat.graph_features.one_of_k_encoding
.. autofunction:: deepchem.feat.graph_features.one_of_k_encoding_unk
.. autofunction:: deepchem.feat.graph_features.get_intervals
.. autofunction:: deepchem.feat.graph_features.safe_index
.. autofunction:: deepchem.feat.graph_features.get_feature_list
.. autofunction:: deepchem.feat.graph_features.features_to_id
.. autofunction:: deepchem.feat.graph_features.id_to_features
.. autofunction:: deepchem.feat.graph_features.atom_to_id
This function helps compute distances between atoms from a given base atom.
.. autofunction:: deepchem.feat.graph_features.find_distance
This function is important and computes per-atom feature vectors used by
graph convolutional featurizers.
.. autofunction:: deepchem.feat.graph_features.atom_features
This function computes the bond features used by graph convolutional
featurizers.
.. autofunction:: deepchem.feat.graph_features.bond_features
This function computes atom-atom features (for atom pairs which may not have bonds between them.)
.. autofunction:: deepchem.feat.graph_features.pair_features
MACCSKeysFingerprint
^^^^^^^^^^^^^^^^^^^^
.. autoclass:: deepchem.feat.MACCSKeysFingerprint
:members:
MATFeaturizer
^^^^^^^^^^^^^^^^^^^^
.. autoclass:: deepchem.feat.MATFeaturizer
:members:
:inherited-members:
CircularFingerprint
^^^^^^^^^^^^^^^^^^^
.. autoclass:: deepchem.feat.CircularFingerprint
:members:
:inherited-members:
PubChemFingerprint
^^^^^^^^^^^^^^^^^^
.. autoclass:: deepchem.feat.PubChemFingerprint
:members:
Mol2VecFingerprint
^^^^^^^^^^^^^^^^^^
.. autoclass:: deepchem.feat.Mol2VecFingerprint
:members:
:inherited-members:
RDKitDescriptors
^^^^^^^^^^^^^^^^
.. autoclass:: deepchem.feat.RDKitDescriptors
:members:
:inherited-members:
MordredDescriptors
^^^^^^^^^^^^^^^^^^
.. autoclass:: deepchem.feat.MordredDescriptors
:members:
:inherited-members:
CoulombMatrix
^^^^^^^^^^^^^
.. autoclass:: deepchem.feat.CoulombMatrix
:members:
:inherited-members:
CoulombMatrixEig
^^^^^^^^^^^^^^^^
.. autoclass:: deepchem.feat.CoulombMatrixEig
:members:
:inherited-members:
AtomCoordinates
^^^^^^^^^^^^^^^
.. autoclass:: deepchem.feat.AtomicCoordinates
:members:
:inherited-members:
BPSymmetryFunctionInput
^^^^^^^^^^^^^^^^^^^^^^^
.. autoclass:: deepchem.feat.BPSymmetryFunctionInput
:members:
:inherited-members:
SmilesToSeq
^^^^^^^^^^^
.. autoclass:: deepchem.feat.SmilesToSeq
:members:
:inherited-members:
SmilesToImage
^^^^^^^^^^^^^
.. autoclass:: deepchem.feat.SmilesToImage
:members:
:inherited-members:
OneHotFeaturizer
^^^^^^^^^^^^^^^^
.. autoclass:: deepchem.feat.OneHotFeaturizer
:members:
:inherited-members:
SparseMatrixOneHotFeaturizer
^^^^^^^^^^^^^^^^^^^^^^^^^^^^
.. autoclass:: deepchem.feat.SparseMatrixOneHotFeaturizer
:members:
:inherited-members:
RawFeaturizer
^^^^^^^^^^^^^
.. autoclass:: deepchem.feat.RawFeaturizer
:members:
:inherited-members:
SNAPFeaturizer
^^^^^^^^^^^^^^^
.. autoclass:: deepchem.feat.SNAPFeaturizer
:members:
:inherited-members:
Molecular Complex Featurizers
-------------------------------
These featurizers work with three dimensional molecular complexes.
RdkitGridFeaturizer
^^^^^^^^^^^^^^^^^^^
.. autoclass:: deepchem.feat.RdkitGridFeaturizer
:members:
:inherited-members:
AtomicConvFeaturizer
^^^^^^^^^^^^^^^^^^^^
.. autoclass:: deepchem.feat.AtomicConvFeaturizer
:members:
:inherited-members:
Inorganic Crystal Featurizers
------------------------------
These featurizers work with datasets of inorganic crystals.
MaterialCompositionFeaturizer
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Material Composition Featurizers are those that work with datasets of crystal
compositions with periodic boundary conditions.
For inorganic crystal structures, these featurizers operate on chemical
compositions (e.g. "MoS2"). They should be applied on systems that have
periodic boundary conditions. Composition featurizers are not designed
to work with molecules.
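For example, a composition featurizer can be applied directly to a chemical
formula string. A minimal sketch (the formula is arbitrary, and the optional
matminer/pymatgen dependencies must be installed)::

    import deepchem as dc

    featurizer = dc.feat.ElementPropertyFingerprint()
    # Featurize a composition given as a formula string.
    features = featurizer.featurize(["Fe2O3"])
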
ElementPropertyFingerprint
**************************
.. autoclass:: deepchem.feat.ElementPropertyFingerprint
:members:
:inherited-members:
ElemNetFeaturizer
*****************
.. autoclass:: deepchem.feat.ElemNetFeaturizer
:members:
MaterialStructureFeaturizer
^^^^^^^^^^^^^^^^^^^^^^^^^^^
Material Structure Featurizers are those that work with datasets of crystals with
periodic boundary conditions. For inorganic crystal structures, these
featurizers operate on pymatgen.Structure objects, which include a
lattice and 3D coordinates that specify a periodic crystal structure.
They should be applied on systems that have periodic boundary conditions.
Structure featurizers are not designed to work with molecules.
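For example, a structure featurizer operates on :code:`pymatgen.Structure`
objects. A minimal sketch (the cubic CsCl cell below is a toy structure, and
pymatgen must be installed)::

    import pymatgen.core as mg
    import deepchem as dc

    lattice = mg.Lattice.cubic(4.2)
    structure = mg.Structure(lattice, ["Cs", "Cl"], [[0, 0, 0], [0.5, 0.5, 0.5]])
    featurizer = dc.feat.SineCoulombMatrix(max_atoms=2)
    features = featurizer.featurize([structure])
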
SineCoulombMatrix
*****************
.. autoclass:: deepchem.feat.SineCoulombMatrix
:members:
:inherited-members:
CGCNNFeaturizer
***************
.. autoclass:: deepchem.feat.CGCNNFeaturizer
:members:
:inherited-members:
LCNNFeaturizer
^^^^^^^^^^^^^^
.. autoclass:: deepchem.feat.LCNNFeaturizer
:members:
:inherited-members:
Molecule Tokenizers
-------------------
A tokenizer is in charge of preparing the inputs for a natural language processing model.
For many scientific applications, it is possible to treat inputs as "words"/"sentences" and
use NLP methods to make meaningful predictions. For example, SMILES strings or DNA sequences
have grammatical structure and can be usefully modeled with NLP techniques. DeepChem provides
some scientifically relevant tokenizers for use in different applications. These tokenizers are
based on those from the Huggingface transformers library (which DeepChem tokenizers inherit from).
The base classes PreTrainedTokenizer and PreTrainedTokenizerFast implement the common methods
for encoding string inputs into model inputs and for instantiating/saving Python tokenizers,
either from a local file or directory or from a pretrained tokenizer provided by the library
(downloaded from HuggingFace’s AWS S3 repository).
PreTrainedTokenizer `(transformers.PreTrainedTokenizer)`_ thus implements
the main methods for using all the tokenizers:
- Tokenizing (splitting strings into sub-word token strings), converting token strings to ids and back, and encoding/decoding (i.e. tokenizing + converting to integers)
- Adding new tokens to the vocabulary in a way that is independent of the underlying structure (BPE, SentencePiece…)
- Managing special tokens like mask, beginning-of-sentence, etc tokens (adding them, assigning them to attributes in the tokenizer for easy access and making sure they are not split during tokenization)
BatchEncoding holds the output of the tokenizer’s encoding methods
(__call__, encode_plus and batch_encode_plus) and is derived from a Python dictionary.
When the tokenizer is a pure Python tokenizer, this class behaves just like a standard Python dictionary
and holds the various model inputs computed by these methods (input_ids, attention_mask, …).
For more details on the base tokenizers which the DeepChem tokenizers inherit from,
please refer to the following: `HuggingFace tokenizers docs`_
Tokenization methods on string-based corpuses in the life sciences are
becoming increasingly popular for NLP-based applications to chemistry and biology.
One such example is ChemBERTa, a transformer for molecular property prediction.
DeepChem offers a tutorial for utilizing ChemBERTa using an alternate tokenizer,
a Byte-Piece Encoder, which can be found `here.`_
.. _`(transformers.PreTrainedTokenizer)`: https://huggingface.co/transformers/main_classes/tokenizer.html#transformers.PreTrainedTokenizer
.. _`HuggingFace tokenizers docs`: https://huggingface.co/transformers/main_classes/tokenizer.html
.. _`here.`: https://github.com/deepchem/deepchem/blob/master/examples/tutorials/22_Transfer_Learning_With_HuggingFace_tox21.ipynb
SmilesTokenizer
^^^^^^^^^^^^^^^
The :code:`dc.feat.SmilesTokenizer` module inherits from the BertTokenizer class in transformers.
It runs a WordPiece tokenization algorithm over SMILES strings using the SMILES tokenization regex developed by Schwaller et al.
The SmilesTokenizer employs an atom-wise tokenization strategy using the following Regex expression: ::
SMI_REGEX_PATTERN = "(\[[^\]]+]|Br?|Cl?|N|O|S|P|F|I|b|c|n|o|s|p|\(|\)|\.|=|#|-|\+|\\\\\/|:|~|@|\?|>|\*|\$|\%[0-9]{2}|[0-9])"
To use, please install the transformers package using the following pip command: ::
pip install transformers
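Once transformers is installed, usage looks roughly like the sketch below.
Note that the vocabulary file path is a placeholder you must supply yourself
(for example, a SMILES vocabulary file shipped with a pretrained ChemBERTa
model)::

    from deepchem.feat import SmilesTokenizer

    # "vocab.txt" is a placeholder path to a SMILES vocabulary file.
    tokenizer = SmilesTokenizer("vocab.txt")
    tokens = tokenizer.tokenize("CC(=O)OC1=CC=CC=C1C(=O)O")
    token_ids = tokenizer.encode("CC(=O)OC1=CC=CC=C1C(=O)O")
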
References:
- `RXN Mapper: Unsupervised Attention-Guided Atom-Mapping`_
- `Molecular Transformer: Unsupervised Attention-Guided Atom-Mapping`_
.. autoclass:: deepchem.feat.SmilesTokenizer
:members:
BasicSmilesTokenizer
^^^^^^^^^^^^^^^^^^^^
The :code:`dc.feat.BasicSmilesTokenizer` module uses a regex tokenization pattern to tokenize SMILES strings.
The regex was developed by Schwaller et al. This tokenizer is useful in cases
where the user does not wish to rely on the transformers API.
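A minimal sketch of its use (the SMILES string is arbitrary)::

    from deepchem.feat import BasicSmilesTokenizer

    tokenizer = BasicSmilesTokenizer()
    # Returns a list of string tokens such as ['C', 'C', '(', '=', 'O', ')', ...]
    tokens = tokenizer.tokenize("CC(=O)OC1=CC=CC=C1C(=O)O")
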
References:
- `Molecular Transformer: Unsupervised Attention-Guided Atom-Mapping`_
.. autoclass:: deepchem.feat.BasicSmilesTokenizer
:members:
.. _`RXN Mapper: Unsupervised Attention-Guided Atom-Mapping`: https://chemrxiv.org/articles/Unsupervised_Attention-Guided_Atom-Mapping/12298559
.. _`Molecular Transformer: Unsupervised Attention-Guided Atom-Mapping`: https://pubs.acs.org/doi/10.1021/acscentsci.9b00576
HuggingFaceFeaturizer
^^^^^^^^^^^^^^^^^^^^^
.. autoclass:: deepchem.feat.huggingface_featurizer.HuggingFaceFeaturizer
:members:
:inherited-members:
GroverAtomVocabTokenizer
^^^^^^^^^^^^^^^^^^^^^^^^
.. autoclass:: deepchem.feat.vocabulary_builders.grover_vocab.GroverAtomVocabTokenizer
:members:
GroverBondVocabTokenizer
^^^^^^^^^^^^^^^^^^^^^^^^
.. autoclass:: deepchem.feat.vocabulary_builders.grover_vocab.GroverBondVocabTokenizer
:members:
Vocabulary Builders
-------------------
Tokenizers use a vocabulary to tokenize datapoints. To build a vocabulary, an algorithm which generates a vocabulary from a corpus is required. A corpus is usually a collection of molecules, DNA sequences, etc. DeepChem provides the following algorithms to build a vocabulary from a corpus. A vocabulary builder is not a featurizer; it is a utility which helps the tokenizers featurize datapoints. A toy illustration of the idea follows the class listings below.
.. autoclass:: deepchem.feat.vocabulary_builders.grover_vocab.GroverAtomVocabularyBuilder
:members:
.. autoclass:: deepchem.feat.vocabulary_builders.grover_vocab.GroverBondVocabularyBuilder
:members:
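To make the idea concrete, the toy sketch below builds a character-level
vocabulary from a small corpus of SMILES strings. It is a plain-Python
illustration of what a vocabulary builder does and is *not* the DeepChem API::

    corpus = ["CCO", "CC(=O)O"]

    # Collect the unique characters seen in the corpus and assign each an id.
    vocabulary = {}
    for smiles in corpus:
        for token in smiles:
            if token not in vocabulary:
                vocabulary[token] = len(vocabulary)

    # A tokenizer would then map each datapoint onto these ids.
    encoded = [[vocabulary[token] for token in smiles] for smiles in corpus]
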
Sequence Featurizers
--------------------
PFMFeaturizer
^^^^^^^^^^^^^
The :code:`dc.feat.PFMFeaturizer` module implements a featurizer for position frequency matrices.
It takes in a list of multiple sequence alignments and returns a list of position frequency matrices.
.. autoclass:: deepchem.feat.sequence_featurizers.PFMFeaturizer
:members:
Other Featurizers
-----------------
BertFeaturizer
^^^^^^^^^^^^^^
.. autoclass:: deepchem.feat.BertFeaturizer
:members:
:inherited-members:
RobertaFeaturizer
^^^^^^^^^^^^^^^^^
.. autoclass:: deepchem.feat.RobertaFeaturizer
:members:
:inherited-members:
RxnFeaturizer
^^^^^^^^^^^^^
.. autoclass:: deepchem.feat.RxnFeaturizer
:members:
:inherited-members:
BindingPocketFeaturizer
^^^^^^^^^^^^^^^^^^^^^^^
.. autoclass:: deepchem.feat.BindingPocketFeaturizer
:members:
:inherited-members:
UserDefinedFeaturizer
^^^^^^^^^^^^^^^^^^^^^
.. autoclass:: deepchem.feat.UserDefinedFeaturizer
:members:
:inherited-members:
DummyFeaturizer
^^^^^^^^^^^^^^^
.. autoclass:: deepchem.feat.DummyFeaturizer
:members:
:inherited-members:
Base Featurizers (for develop)
------------------------------
Featurizer
^^^^^^^^^^
The :code:`dc.feat.Featurizer` class is the abstract parent class for all featurizers.
.. autoclass:: deepchem.feat.Featurizer
:members:
MolecularFeaturizer
^^^^^^^^^^^^^^^^^^^
If you're creating a new featurizer that featurizes molecules,
you will want to inherit from the abstract :code:`MolecularFeaturizer` base class.
This featurizer can take RDKit mol objects or SMILES as inputs.
.. autoclass:: deepchem.feat.MolecularFeaturizer
:members:
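As a minimal sketch of what a custom subclass looks like (the feature computed
here, the heavy-atom count, is just an illustrative choice and not an existing
DeepChem featurizer)::

    import numpy as np
    from deepchem.feat import MolecularFeaturizer

    class HeavyAtomCountFeaturizer(MolecularFeaturizer):
        """Toy featurizer returning the number of heavy atoms in a molecule."""

        def _featurize(self, datapoint, **kwargs):
            # By the time _featurize is called, datapoint is an RDKit Mol.
            return np.asarray([datapoint.GetNumHeavyAtoms()])
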
MaterialCompositionFeaturizer
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
If you're creating a new featurizer that featurizes compositional formulas,
you will want to inherit from the abstract :code:`MaterialCompositionFeaturizer` base class.
.. autoclass:: deepchem.feat.MaterialCompositionFeaturizer
:members:
MaterialStructureFeaturizer
^^^^^^^^^^^^^^^^^^^^^^^^^^^
If you're creating a new featurizer that featurizes inorganic crystal structures,
you will want to inherit from the abstract :code:`MaterialStructureFeaturizer` base class.
This featurizer can take pymatgen structure objects or dictionaries as inputs.
.. autoclass:: deepchem.feat.MaterialStructureFeaturizer
:members:
ComplexFeaturizer
^^^^^^^^^^^^^^^^^
If you're creating a new featurizer that featurizes a pair of ligand molecules and proteins,
you will want to inherit from the abstract :code:`ComplexFeaturizer` base class.
This featurizer can take a pair of PDB or SDF files which contain ligand molecules and proteins.
.. autoclass:: deepchem.feat.ComplexFeaturizer
:members:
VocabularyBuilder
^^^^^^^^^^^^^^^^^
If you're creating a vocabulary builder for generating a vocabulary from a corpus or input data,
the vocabulary builder must inherit from the :code:`VocabularyBuilder` base class.
.. autoclass:: deepchem.feat.vocabulary_builders.VocabularyBuilder
:members:
HuggingFaceVocabularyBuilder
^^^^^^^^^^^^^^^^^^^^^^^^^^^^
A wrapper class for building vocabulary from algorithms implemented in `tokenizers <https://huggingface.co/docs/tokenizers/index>`_ library.
.. autoclass:: deepchem.feat.vocabulary_builders.hf_vocab
:members:
<file_sep>"""
Featurizer implementations used in Smiles2Vec models.
SmilesToSeq featurizer for Smiles2Vec models taken from https://arxiv.org/abs/1712.02734
"""
from typing import Dict, List
import numpy as np
import pandas as pd
from deepchem.utils.typing import RDKitMol
from deepchem.feat.base_classes import MolecularFeaturizer
PAD_TOKEN = "<pad>"
OUT_OF_VOCAB_TOKEN = "<unk>"
def create_char_to_idx(filename: str,
max_len: int = 250,
smiles_field: str = "smiles") -> Dict[str, int]:
"""Creates a dictionary with character to index mapping.
Parameters
----------
filename: str
Name of the file containing the SMILES strings
max_len: int, default 250
Maximum allowed length of the SMILES string
smiles_field: str, default "smiles"
Field indicating the SMILES strings in the file.
Returns
-------
Dict[str, int]
A dictionary mapping characters to their integer indexes.
"""
smiles_df = pd.read_csv(filename)
char_set = set()
for smile in smiles_df[smiles_field]:
if len(smile) <= max_len:
char_set.update(set(smile))
unique_char_list = list(char_set)
unique_char_list += [PAD_TOKEN, OUT_OF_VOCAB_TOKEN]
char_to_idx = {letter: idx for idx, letter in enumerate(unique_char_list)}
return char_to_idx
class SmilesToSeq(MolecularFeaturizer):
"""
SmilesToSeq Featurizer takes a SMILES string, and turns it into a sequence.
Details taken from [1]_.
SMILES strings smaller than a specified max length (max_len) are padded using
the PAD token while those larger than the max length are not considered. Based
on the paper, there is also the option to add extra padding (pad_len) on both
sides of the string after length normalization. Using a character to index (char_to_idx)
mapping, the SMILES characters are turned into indices and the
resulting sequence of indices serves as the input for an embedding layer.
References
----------
.. [1] Goh, <NAME>., et al. "Using rule-based labels for weak supervised
learning: a ChemNet for transferable chemical property prediction."
Proceedings of the 24th ACM SIGKDD International Conference on Knowledge
Discovery & Data Mining. 2018.
Note
----
This class requires RDKit to be installed.
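Examples
--------
A minimal usage sketch; the character-to-index mapping below is a toy
vocabulary constructed only for this example.

>>> import deepchem as dc
>>> char_to_idx = {'C': 0, 'O': 1, '=': 2, '(': 3, ')': 4, '<pad>': 5, '<unk>': 6}
>>> featurizer = dc.feat.SmilesToSeq(char_to_idx=char_to_idx, max_len=20, pad_len=2)
>>> seqs = featurizer.featurize(['CC(=O)O'])
>>> seqs[0].shape
(24,)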
"""
def __init__(self,
char_to_idx: Dict[str, int],
max_len: int = 250,
pad_len: int = 10):
"""Initialize this class.
Parameters
----------
char_to_idx: Dict
Dictionary containing character to index mappings for unique characters
max_len: int, default 250
Maximum allowed length of the SMILES string.
pad_len: int, default 10
Amount of padding to add on either side of the SMILES seq
"""
self.max_len = max_len
self.char_to_idx = char_to_idx
self.idx_to_char = {
idx: letter for letter, idx in self.char_to_idx.items()
}
self.pad_len = pad_len
def to_seq(self, smile: List[str]) -> np.ndarray:
"""Turns list of smiles characters into array of indices"""
out_of_vocab_idx = self.char_to_idx[OUT_OF_VOCAB_TOKEN]
seq = [
self.char_to_idx.get(character, out_of_vocab_idx)
for character in smile
]
return np.array(seq)
def remove_pad(self, characters: List[str]) -> List[str]:
"""Removes PAD_TOKEN from the character list."""
characters = characters[self.pad_len:len(characters) - self.pad_len]
chars = list()
for char in characters:
if char != PAD_TOKEN:
chars.append(char)
return chars
def smiles_from_seq(self, seq: List[int]) -> str:
"""Reconstructs SMILES string from sequence."""
characters = [self.idx_to_char[i] for i in seq]
characters = self.remove_pad(characters)
smile = "".join([letter for letter in characters])
return smile
def _featurize(self, datapoint: RDKitMol, **kwargs) -> np.ndarray:
"""Featurizes a SMILES sequence.
Parameters
----------
datapoints: rdkit.Chem.rdchem.Mol
RDKit Mol object
Returns
-------
np.ndarray
A 1D array of a SMILES sequence.
If the length of SMILES is longer than `max_len`, this value is an empty array.
"""
try:
from rdkit import Chem
except ModuleNotFoundError:
raise ImportError("This class requires RDKit to be installed.")
if 'mol' in kwargs:
datapoint = kwargs.get("mol")
raise DeprecationWarning(
'Mol is being phased out as a parameter, please pass "datapoint" instead.'
)
smile = Chem.MolToSmiles(datapoint)
if len(smile) > self.max_len:
return np.array([])
smile_list = list(smile)
# Extend shorter strings with padding
if len(smile) < self.max_len:
smile_list.extend([PAD_TOKEN] * (self.max_len - len(smile)))
# Padding before and after
smile_list += [PAD_TOKEN] * self.pad_len
smile_list = [PAD_TOKEN] * self.pad_len + smile_list
smile_seq = self.to_seq(smile_list)
return smile_seq
<file_sep>from deepchem.feat import Featurizer
from typing import Dict, List
try:
from transformers import RobertaTokenizerFast
except ModuleNotFoundError:
raise ImportError(
'Transformers must be installed for RobertaFeaturizer to be used!')
pass
class RobertaFeaturizer(RobertaTokenizerFast, Featurizer):
"""Roberta Featurizer.
  The Roberta Featurizer is a wrapper class around RobertaTokenizerFast, which
  HuggingFace's transformers library uses to tokenize large corpora for Roberta
  models. See [1]_ for details.
Please see https://github.com/huggingface/transformers
and https://github.com/seyonechithrananda/bert-loves-chemistry for more details.
Examples
--------
>>> from deepchem.feat import RobertaFeaturizer
>>> smiles = ["Cn1c(=O)c2c(ncn2C)n(C)c1=O", "CC(=O)N1CN(C(C)=O)C(O)C1O"]
>>> featurizer = RobertaFeaturizer.from_pretrained("seyonec/SMILES_tokenized_PubChem_shard00_160k")
>>> out = featurizer.featurize(smiles, add_special_tokens=True, truncation=True)
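  Each featurized entry holds two lists taken from the tokenizer's output
  dictionary, the `input_ids` followed by the `attention_mask`; a small sketch
  of unpacking the first datapoint:
  >>> input_ids, attention_mask = out[0][0], out[0][1]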
References
----------
.. [1] <NAME>, and <NAME> (2020): "Chemberta: Large-scale self-supervised
pretraining for molecular property prediction." arXiv. preprint. arXiv:2010.09885.
Note
-----
This class requires transformers to be installed.
RobertaFeaturizer uses dual inheritance with RobertaTokenizerFast in Huggingface for rapid tokenization,
  as well as DeepChem's Featurizer class.
"""
def __init__(self, **kwargs):
super().__init__(**kwargs)
return
def _featurize(self, datapoint: str, **kwargs) -> List[List[int]]:
"""Calculate encoding using HuggingFace's RobertaTokenizerFast
Parameters
----------
    datapoint: str
Arbitrary string sequence to be tokenized.
Returns
-------
    encoding: List
List containing two lists; the `input_ids` and the `attention_mask`
"""
# the encoding is natively a dictionary with keys 'input_ids' and 'attention_mask'
encoding = list(self(datapoint, **kwargs).values())
return encoding
def __call__(self, *args, **kwargs) -> Dict[str, List[int]]:
return super().__call__(*args, **kwargs)
<file_sep>"""
Basic molecular features.
"""
from typing import List, Sequence, Union, Callable, Dict
import numpy as np
import scipy.stats as st
from scipy.stats import rv_continuous
import logging
from deepchem.utils.typing import RDKitMol
from deepchem.feat.base_classes import MolecularFeaturizer
from deepchem.utils.rdkit_utils import DescriptorsNormalizationParameters as DNP
logger = logging.getLogger(__name__)
class RDKitDescriptors(MolecularFeaturizer):
"""
RDKit descriptors.
This class computes a list of chemical descriptors like
molecular weight, number of valence electrons, maximum and
minimum partial charge, etc using RDKit.
This class can also compute normalized descriptors, if required.
(The implementation for normalization is based on `RDKit2DNormalized()` method
in 'descriptastorus' library.)
When the `is_normalized` option is set as True, descriptor values are normalized across the sample
by fitting a cumulative density function. CDFs were used as opposed to simpler scaling algorithms
mainly because CDFs have the useful property that 'each value has the same meaning: the percentage
of the population observed below the raw feature value.'
Warning: Currently, the normalizing cdf parameters are not available for BCUT2D descriptors.
(BCUT2D_MWHI, BCUT2D_MWLOW, BCUT2D_CHGHI, BCUT2D_CHGLO, BCUT2D_LOGPHI, BCUT2D_LOGPLOW, BCUT2D_MRHI, BCUT2D_MRLOW)
Note
----
This class requires RDKit to be installed.
Examples
--------
>>> import deepchem as dc
>>> smiles = ['CC(=O)OC1=CC=CC=C1C(=O)O']
>>> featurizer = dc.feat.RDKitDescriptors()
>>> features = featurizer.featurize(smiles)
>>> type(features[0])
<class 'numpy.ndarray'>
>>> features[0].shape
(208,)
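  A sketch of two further options: computing only a named subset of descriptors,
  and requesting normalized (CDF-scaled) values, in which case the BCUT2D
  descriptors are dropped because no normalization parameters exist for them:
  >>> featurizer = dc.feat.RDKitDescriptors(descriptors=['MolWt', 'MolLogP'])
  >>> featurizer = dc.feat.RDKitDescriptors(is_normalized=True)
  >>> features = featurizer.featurize(smiles)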
"""
def __init__(self,
descriptors: List[str] = [],
is_normalized: bool = False,
use_fragment: bool = True,
ipc_avg: bool = True,
use_bcut2d: bool = True,
labels_only: bool = False):
"""Initialize this featurizer.
Parameters
----------
    descriptors: List[str] (default [])
      List of RDKit descriptors to compute. When empty, values are computed for
      the descriptors chosen based on the options set in the other arguments.
use_fragment: bool, optional (default True)
If True, the return value includes the fragment binary descriptors like 'fr_XXX'.
ipc_avg: bool, optional (default True)
If True, the IPC descriptor calculates with avg=True option.
Please see this issue: https://github.com/rdkit/rdkit/issues/1527.
is_normalized: bool, optional (default False)
If True, the return value contains normalized features.
use_bcut2d: bool, optional (default True)
If True, the return value includes the descriptors like 'BCUT2D_XXX'.
labels_only: bool, optional (default False)
Returns only the presence or absence of a group.
Notes
-----
* If both `labels_only` and `is_normalized` are True, then `is_normalized` takes
      precedence and `labels_only` will not be applied.
"""
try:
from rdkit.Chem import Descriptors
except ModuleNotFoundError:
raise ImportError("This class requires RDKit to be installed.")
self.use_fragment: bool = use_fragment
self.use_bcut2d: bool = use_bcut2d
self.is_normalized: bool = is_normalized
self.ipc_avg: bool = ipc_avg
self.labels_only = labels_only
self.reqd_properties = {}
self.normalized_desc: Dict[str, Callable] = {}
all_descriptors = {name: func for name, func in Descriptors.descList}
if not descriptors:
# user has not specified a descriptor list
for desc_name, function in all_descriptors.items():
if self.use_fragment is False and desc_name.startswith('fr_'):
continue
if self.use_bcut2d is False and desc_name.startswith('BCUT2D_'):
continue
self.reqd_properties[desc_name] = function
else:
for desc_name in descriptors:
if desc_name in all_descriptors:
self.reqd_properties[desc_name] = all_descriptors[desc_name]
else:
logging.error("Unable to find specified property %s" %
desc_name)
# creates normalized functions dictionary if normalized features are required
if is_normalized:
self.normalized_desc = self._make_normalised_func_dict()
desc_names = list(self.reqd_properties.keys())
for desc_name in desc_names:
if desc_name not in self.normalized_desc:
logger.warning("No normalization for %s. Feature removed!",
desc_name)
self.reqd_properties.pop(desc_name)
self.reqd_properties = dict(sorted(self.reqd_properties.items()))
def _featurize(self, datapoint: RDKitMol, **kwargs) -> np.ndarray:
"""
Calculate RDKit descriptors.
Parameters
----------
datapoint: RDKitMol
RDKit Mol object
Returns
-------
np.ndarray
1D array of RDKit descriptors for `mol`.
The length is `len(self.descriptors)`.
"""
features = []
for desc_name, function in self.reqd_properties.items():
if desc_name == 'Ipc' and self.ipc_avg:
feature = function(datapoint, avg=True)
else:
feature = function(datapoint)
if self.is_normalized:
# get cdf(feature) for that descriptor
feature = self.normalized_desc[desc_name](feature)
features.append(feature)
np_features = np.asarray(features)
if self.labels_only:
np.putmask(np_features, np_features != 0, 1)
return np_features
def _make_normalised_func_dict(self):
"""
Helper function to create dictionary of RDkit descriptors and
associated cumulative density functions (CDFs) to generate normalized features.
-------------------------------------------------------------------------------
-------------------------------------------------------------------------------
Copyright (c) 2018-2021, Novartis Institutes for BioMedical Research Inc.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials provided
with the distribution.
* Neither the name of Novartis Institutes for BioMedical Research Inc.
nor the names of its contributors may be used to endorse or promote
products derived from this software without specific prior written
permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-------------------------------------------------------------------------------
-------------------------------------------------------------------------------
"""
normalized_desc = {}
# get sequence of descriptor names and normalization parameters from DescriptorsNormalizationParameters class
parameters = DNP.desc_norm_params.items()
for desc_name, (distribution_name, params, minV, maxV, avg,
std) in parameters:
arg = params[:-2]
loc = params[-2]
scale = params[-1]
# get required distribution_ from `scipy.stats` module.
cont_distribution = getattr(st, distribution_name)
# cdf => cumulative density functions
# make the cdf with the parameters.
def norm_cdf(v: Union[int, float],
distribution_: rv_continuous = cont_distribution,
arg: Sequence[float] = arg,
loc: float = loc,
scale: float = scale,
minV: float = minV,
maxV: float = maxV) -> np.ndarray:
v = distribution_.cdf(np.clip(v, minV, maxV),
loc=loc,
scale=scale,
*arg)
return np.clip(v, 0., 1.)
normalized_desc[desc_name] = norm_cdf
return normalized_desc
<file_sep>from flaky import flaky
import deepchem as dc
from deepchem.models.tensorgraph.layers import Reshape, Variable, SoftMax, GRU, Dense
from deepchem.models.optimizers import Adam, PolynomialDecay
import numpy as np
import tensorflow as tf
import unittest
from nose.plugins.attrib import attr
class TestMCTS(unittest.TestCase):
@flaky
def test_roulette(self):
"""Test training a policy for the roulette environment."""
# This is modeled after the Roulette-v0 environment from OpenAI Gym.
# The player can bet on any number from 0 to 36, or walk away (which ends the
# game). The average reward for any bet is slightly negative, so the best
# strategy is to walk away.
class RouletteEnvironment(dc.rl.Environment):
def __init__(self):
super(RouletteEnvironment, self).__init__([(1,)], 38)
self._state = [np.array([0])]
def step(self, action):
if action == 37:
self._terminated = True # Walk away.
return 0.0
wheel = np.random.randint(37)
if wheel == 0:
if action == 0:
return 35.0
return -1.0
if action != 0 and wheel % 2 == action % 2:
return 1.0
return -1.0
def reset(self):
self._terminated = False
env = RouletteEnvironment()
# This policy just learns a constant probability for each action, and a constant for the value.
class TestPolicy(dc.rl.Policy):
def create_layers(self, state, **kwargs):
action = Variable(np.ones(env.n_actions))
output = SoftMax(
in_layers=[Reshape(in_layers=[action], shape=(-1, env.n_actions))])
value = Variable([0.0])
return {'action_prob': output, 'value': value}
# Optimize it.
mcts = dc.rl.MCTS(
env,
TestPolicy(),
max_search_depth=5,
n_search_episodes=200,
optimizer=Adam(learning_rate=0.005))
mcts.fit(10, steps_per_iteration=50, epochs_per_iteration=50)
# It should have learned that the expected value is very close to zero, and that the best
# action is to walk away.
action_prob, value = mcts.predict([[0]])
assert -0.5 < value[0] < 0.5
assert action_prob.argmax() == 37
assert mcts.select_action([[0]], deterministic=True) == 37
# Verify that we can create a new MCTS object, reload the parameters from the first one, and
# get the same result.
new_mcts = dc.rl.MCTS(env, TestPolicy(), model_dir=mcts._graph.model_dir)
new_mcts.restore()
action_prob2, value2 = new_mcts.predict([[0]])
assert value2 == value
# Do the same thing, only using the "restore" argument to fit().
new_mcts = dc.rl.MCTS(env, TestPolicy(), model_dir=mcts._graph.model_dir)
new_mcts.fit(0, restore=True)
action_prob2, value2 = new_mcts.predict([[0]])
assert value2 == value
<file_sep>from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
import warnings
import time
import numpy as np
import tensorflow as tf
from deepchem.utils.save import log
from deepchem.metrics import to_one_hot
from deepchem.metrics import from_one_hot
from deepchem.nn import model_ops
# NOTE: TensorflowGraph and TensorflowMultiTaskRegressor are referenced below but
# were not imported in this file; the legacy import paths used here are assumed.
from deepchem.models.tensorflow_models import TensorflowGraph
from deepchem.models.tensorflow_models.fcnet import TensorflowMultiTaskRegressor
class ProgressiveMultitaskRegressor(TensorflowMultiTaskRegressor):
"""Implements a progressive multitask neural network.
Progressive Networks: https://arxiv.org/pdf/1606.04671v3.pdf
Progressive networks allow for multitask learning where each task
gets a new column of weights. As a result, there is no exponential
forgetting where previous tasks are ignored.
TODO(rbharath): This class is unnecessarily complicated. Can we simplify the
structure of the code here?
"""
def __init__(self, n_tasks, n_features, alpha_init_stddevs=[.02], **kwargs):
"""Creates a progressive network.
Only listing parameters specific to progressive networks here.
Parameters
----------
n_tasks: int
Number of tasks
n_features: int
Number of input features
alpha_init_stddevs: list
List of standard-deviations for alpha in adapter layers.
"""
warnings.warn(
"ProgressiveMultitaskRegressor is deprecated. "
"Will be removed in DeepChem 1.4.", DeprecationWarning)
self.alpha_init_stddevs = alpha_init_stddevs
super(ProgressiveMultitaskRegressor, self).__init__(n_tasks, n_features,
**kwargs)
# Consistency check
lengths_set = {
len(self.layer_sizes),
len(self.weight_init_stddevs),
len(self.alpha_init_stddevs),
len(self.bias_init_consts),
len(self.dropouts),
}
assert len(lengths_set) == 1, "All layer params must have same length."
def construct_graph(self, training, seed):
"""Returns a TensorflowGraph object."""
graph = tf.Graph()
# Lazily created by _get_shared_session().
shared_session = None
# Cache of TensorFlow scopes, to prevent '_1' appended scope names
# when subclass-overridden methods use the same scopes.
name_scopes = {}
# Setup graph
with graph.as_default():
if seed is not None:
tf.set_random_seed(seed)
features, labels, weights = self.add_placeholders(graph, name_scopes)
outputs = self.add_progressive_lattice(graph, name_scopes, training)
if training:
loss = self.add_task_training_costs(graph, name_scopes, outputs, labels,
weights)
else:
loss = None
return TensorflowGraph(
graph=graph,
session=shared_session,
name_scopes=name_scopes,
output=outputs,
labels=labels,
weights=weights,
loss=loss)
def add_placeholders(self, graph, name_scopes):
"""Adds all placeholders for this model."""
# Create placeholders
placeholder_scope = TensorflowGraph.get_placeholder_scope(
graph, name_scopes)
labels, weights = [], []
n_features = self.n_features
with placeholder_scope:
self.mol_features = tf.placeholder(
tf.float32, shape=[None, n_features], name='mol_features')
for task in range(self.n_tasks):
weights.append(
tf.identity(
tf.placeholder(
tf.float32, shape=[
None,
], name='weights_%d' % task)))
labels.append(
tf.identity(
tf.placeholder(
tf.float32, shape=[
None,
], name='labels_%d' % task)))
return self.mol_features, labels, weights
def add_progressive_lattice(self, graph, name_scopes, training):
"""Constructs the graph architecture as specified in its config.
This method creates the following Placeholders:
mol_features: Molecule descriptor (e.g. fingerprint) tensor with shape
batch_size x n_features.
"""
n_features = self.n_features
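    # Lattice layout sketch: all_layers[(i, task)] holds the layer-i activation of
    # the column belonging to `task`. Every column gets its own W/b per layer, and
    # for task > 0 a lateral adapter over the previous columns' layer-(i-1)
    # activations (see add_adapter) is added to the pre-activation, so later tasks
    # can reuse, but never overwrite, features learned for earlier tasks.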
placeholder_scope = TensorflowGraph.get_placeholder_scope(
graph, name_scopes)
with graph.as_default():
layer_sizes = self.layer_sizes
weight_init_stddevs = self.weight_init_stddevs
bias_init_consts = self.bias_init_consts
dropouts = self.dropouts
lengths_set = {
len(layer_sizes),
len(weight_init_stddevs),
len(bias_init_consts),
len(dropouts),
}
assert len(lengths_set) == 1, 'All layer params must have same length.'
n_layers = lengths_set.pop()
assert n_layers > 0, 'Must have some layers defined.'
prev_layer = self.mol_features
prev_layer_size = n_features
all_layers = {}
for i in range(n_layers):
for task in range(self.n_tasks):
task_scope = TensorflowGraph.shared_name_scope(
"task%d_ops" % task, graph, name_scopes)
print("Adding weights for task %d, layer %d" % (task, i))
with task_scope as scope:
if i == 0:
prev_layer = self.mol_features
prev_layer_size = self.n_features
else:
prev_layer = all_layers[(i - 1, task)]
prev_layer_size = layer_sizes[i - 1]
if task > 0:
lateral_contrib = self.add_adapter(all_layers, task, i)
print("Creating W_layer_%d_task%d of shape %s" %
(i, task, str([prev_layer_size, layer_sizes[i]])))
W = tf.Variable(
tf.truncated_normal(
shape=[prev_layer_size, layer_sizes[i]],
stddev=self.weight_init_stddevs[i]),
name='W_layer_%d_task%d' % (i, task),
dtype=tf.float32)
print("Creating b_layer_%d_task%d of shape %s" %
(i, task, str([layer_sizes[i]])))
b = tf.Variable(
tf.constant(
value=self.bias_init_consts[i], shape=[layer_sizes[i]]),
name='b_layer_%d_task%d' % (i, task),
dtype=tf.float32)
layer = tf.matmul(prev_layer, W) + b
if i > 0 and task > 0:
layer = layer + lateral_contrib
layer = tf.nn.relu(layer)
layer = model_ops.dropout(layer, dropouts[i], training)
all_layers[(i, task)] = layer
output = []
for task in range(self.n_tasks):
prev_layer = all_layers[(i, task)]
prev_layer_size = layer_sizes[i]
task_scope = TensorflowGraph.shared_name_scope("task%d" % task, graph,
name_scopes)
with task_scope as scope:
if task > 0:
lateral_contrib = tf.squeeze(
self.add_adapter(all_layers, task, i + 1))
weight_init = tf.truncated_normal(
shape=[prev_layer_size, 1], stddev=weight_init_stddevs[i])
bias_init = tf.constant(value=bias_init_consts[i], shape=[1])
print("Creating W_output_task%d of shape %s" %
(task, str([prev_layer_size, 1])))
w = tf.Variable(
weight_init, name='W_output_task%d' % task, dtype=tf.float32)
print("Creating b_output_task%d of shape %s" % (task, str([1])))
b = tf.Variable(
bias_init, name='b_output_task%d' % task, dtype=tf.float32)
layer = tf.squeeze(tf.matmul(prev_layer, w) + b)
if i > 0 and task > 0:
layer = layer + lateral_contrib
output.append(layer)
return output
def add_adapter(self, all_layers, task, layer_num):
"""Add an adapter connection for given task/layer combo"""
i = layer_num
prev_layers = []
# Handle output layer
if i < len(self.layer_sizes):
layer_sizes = self.layer_sizes
alpha_init_stddev = self.alpha_init_stddevs[i]
weight_init_stddev = self.weight_init_stddevs[i]
bias_init_const = self.bias_init_consts[i]
elif i == len(self.layer_sizes):
layer_sizes = self.layer_sizes + [1]
alpha_init_stddev = self.alpha_init_stddevs[-1]
weight_init_stddev = self.weight_init_stddevs[-1]
bias_init_const = self.bias_init_consts[-1]
else:
raise ValueError("layer_num too large for add_adapter.")
# Iterate over all previous tasks.
for prev_task in range(task):
prev_layers.append(all_layers[(i - 1, prev_task)])
# prev_layers is a list with elements of size
# (batch_size, layer_sizes[i-1])
prev_layer = tf.concat(axis=1, values=prev_layers)
alpha = tf.Variable(
tf.truncated_normal([
1,
], stddev=alpha_init_stddev),
name="alpha_layer_%d_task%d" % (i, task))
prev_layer = tf.multiply(alpha, prev_layer)
prev_layer_size = task * layer_sizes[i - 1]
print("Creating V_layer_%d_task%d of shape %s" %
(i, task, str([prev_layer_size, layer_sizes[i - 1]])))
V = tf.Variable(
tf.truncated_normal(
shape=[prev_layer_size, layer_sizes[i - 1]],
stddev=weight_init_stddev),
name="V_layer_%d_task%d" % (i, task),
dtype=tf.float32)
print("Creating b_lat_layer_%d_task%d of shape %s" %
(i, task, str([layer_sizes[i - 1]])))
b_lat = tf.Variable(
tf.constant(value=bias_init_const, shape=[layer_sizes[i - 1]]),
name='b_lat_layer_%d_task%d' % (i, task),
dtype=tf.float32)
prev_layer = tf.matmul(prev_layer, V) + b_lat
print("Creating U_layer_%d_task%d of shape %s" %
(i, task, str([layer_sizes[i - 1], layer_sizes[i]])))
U = tf.Variable(
tf.truncated_normal(
shape=[layer_sizes[i - 1], layer_sizes[i]],
stddev=weight_init_stddev),
name="U_layer_%d_task%d" % (i, task),
dtype=tf.float32)
return tf.matmul(prev_layer, U)
def get_training_op(self, graph, loss):
"""Get training op for applying gradients to variables.
Subclasses that need to do anything fancy with gradients should override
this method.
Returns:
A training op.
"""
with graph.as_default():
opt = model_ops.optimizer(self.optimizer, self.learning_rate,
self.momentum)
return opt.minimize(loss, name='train')
def add_training_costs(self, graph, name_scopes, output, labels, weights):
with graph.as_default():
epsilon = 1e-3 # small float to avoid dividing by zero
weighted_costs = [] # weighted costs for each example
gradient_costs = [] # costs used for gradient calculation
with TensorflowGraph.shared_name_scope('costs', graph, name_scopes):
for task in range(self.n_tasks):
task_str = str(task).zfill(len(str(self.n_tasks)))
with TensorflowGraph.shared_name_scope('cost_{}'.format(task_str),
graph, name_scopes):
with tf.name_scope('weighted'):
weighted_cost = self.cost(output[task], labels[task],
weights[task])
weighted_costs.append(weighted_cost)
with tf.name_scope('gradient'):
# Note that we divide by the batch size and not the number of
# non-zero weight examples in the batch. Also, instead of using
# tf.reduce_mean (which can put ops on the CPU) we explicitly
# calculate with div/sum so it stays on the GPU.
gradient_cost = tf.math.divide(
tf.reduce_sum(weighted_cost), self.batch_size)
gradient_costs.append(gradient_cost)
# aggregated costs
with TensorflowGraph.shared_name_scope('aggregated', graph,
name_scopes):
with tf.name_scope('gradient'):
loss = tf.add_n(gradient_costs)
# weight decay
if self.penalty != 0.0:
penalty = model_ops.weight_decay(self.penalty_type, self.penalty)
loss += penalty
return loss
def construct_feed_dict(self, X_b, y_b=None, w_b=None, ids_b=None):
"""Construct a feed dictionary from minibatch data.
TODO(rbharath): ids_b is not used here. Can we remove it?
Args:
X_b: np.ndarray of shape (batch_size, n_features)
y_b: np.ndarray of shape (batch_size, n_tasks)
w_b: np.ndarray of shape (batch_size, n_tasks)
ids_b: List of length (batch_size) with datapoint identifiers.
"""
orig_dict = {}
orig_dict["mol_features"] = X_b
for task in range(self.n_tasks):
if y_b is not None:
orig_dict["labels_%d" % task] = y_b[:, task]
else:
# Dummy placeholders
orig_dict["labels_%d" % task] = np.squeeze(np.zeros((self.batch_size,)))
if w_b is not None:
orig_dict["weights_%d" % task] = w_b[:, task]
else:
# Dummy placeholders
orig_dict["weights_%d" % task] = np.ones((self.batch_size,))
return TensorflowGraph.get_feed_dict(orig_dict)
def predict_on_batch(self, X, pad_batch=False):
"""Return model output for the provided input.
Restore(checkpoint) must have previously been called on this object.
Args:
dataset: dc.data.Dataset object.
Returns:
Tuple of three numpy arrays with shape n_examples x n_tasks (x ...):
output: Model outputs.
labels: True labels.
weights: Example weights.
Note that the output and labels arrays may be more than 2D, e.g. for
classifier models that return class probabilities.
Raises:
AssertionError: If model is not in evaluation mode.
ValueError: If output and labels are not both 3D or both 2D.
"""
if not self._restored_model:
self.restore()
with self.eval_graph.graph.as_default():
# run eval data through the model
n_tasks = self.n_tasks
with self._get_shared_session(train=False).as_default():
n_samples = len(X)
feed_dict = self.construct_feed_dict(X)
data = self._get_shared_session(train=False).run(
self.eval_graph.output, feed_dict=feed_dict)
      # Shape (n_tasks, n_samples)
batch_outputs = np.asarray(data[:n_tasks], dtype=float)
# reshape to batch_size x n_tasks x ...
if batch_outputs.ndim == 3:
batch_outputs = batch_outputs.transpose((1, 0, 2))
elif batch_outputs.ndim == 2:
batch_outputs = batch_outputs.transpose((1, 0))
# Handle edge case when batch-size is 1.
elif batch_outputs.ndim == 1:
n_samples = len(X)
batch_outputs = batch_outputs.reshape((n_samples, n_tasks))
else:
raise ValueError('Unrecognized rank combination for output: %s' %
(batch_outputs.shape))
outputs = np.squeeze(batch_outputs)
return outputs
def fit(self,
dataset,
tasks=None,
close_session=True,
max_checkpoints_to_keep=5,
**kwargs):
"""Fit the model.
    Progressive networks are fit by training one task at a time, with the
    weights belonging to all other tasks held frozen.
Parameters
----------
dataset: dc.data.Dataset
Dataset object holding training data
Raises
------
AssertionError
If model is not in training mode.
"""
if tasks is None:
tasks = range(self.n_tasks)
with self.train_graph.graph.as_default():
task_train_ops = {}
for task in tasks:
task_train_ops[task] = self.get_task_training_op(
self.train_graph.graph, self.train_graph.loss, task)
sess = self._get_shared_session(train=True)
#with self._get_shared_session(train=True) as sess:
sess.run(tf.global_variables_initializer())
# Save an initial checkpoint.
saver = tf.train.Saver(max_to_keep=max_checkpoints_to_keep)
saver.save(sess, self._save_path, global_step=0)
for task in tasks:
print("Fitting on task %d" % task)
self.fit_task(sess, dataset, task, task_train_ops[task], **kwargs)
saver.save(sess, self._save_path, global_step=task)
# Always save a final checkpoint when complete.
saver.save(sess, self._save_path, global_step=self.n_tasks)
if close_session:
sess.close()
def get_task_training_op(self, graph, losses, task):
"""Get training op for applying gradients to variables.
Subclasses that need to do anything fancy with gradients should override
this method.
Parameters
----------
graph: tf.Graph
Graph for this op
losses: dict
Dictionary mapping task to losses
Returns
-------
A training op.
"""
with graph.as_default():
task_loss = losses[task]
task_root = "task%d_ops" % task
task_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, task_root)
opt = model_ops.optimizer(self.optimizer, self.learning_rate,
self.momentum)
return opt.minimize(task_loss, name='train', var_list=task_vars)
def add_task_training_costs(self, graph, name_scopes, outputs, labels,
weights):
"""Adds the training costs for each task.
    Since each task is trained separately, each task is optimized with respect
    to its own loss.
TODO(rbharath): Figure out how to support weight decay for this model.
Since each task is trained separately, weight decay should only be used
on weights in column for that task.
Parameters
----------
graph: tf.Graph
Graph for the model.
name_scopes: dict
Contains all the scopes for model
outputs: list
List of output tensors from model.
weights: list
List of weight placeholders for model.
"""
task_costs = {}
with TensorflowGraph.shared_name_scope('costs', graph, name_scopes):
for task in range(self.n_tasks):
with TensorflowGraph.shared_name_scope('cost_%d' % task, graph,
name_scopes):
weighted_cost = self.cost(outputs[task], labels[task], weights[task])
# Note that we divide by the batch size and not the number of
# non-zero weight examples in the batch. Also, instead of using
# tf.reduce_mean (which can put ops on the CPU) we explicitly
# calculate with div/sum so it stays on the GPU.
task_cost = tf.math.divide(
tf.reduce_sum(weighted_cost), self.batch_size)
task_costs[task] = task_cost
return task_costs
def construct_task_feed_dict(self,
this_task,
X_b,
y_b=None,
w_b=None,
ids_b=None):
"""Construct a feed dictionary from minibatch data.
TODO(rbharath): ids_b is not used here. Can we remove it?
Args:
X_b: np.ndarray of shape (batch_size, n_features)
y_b: np.ndarray of shape (batch_size, n_tasks)
w_b: np.ndarray of shape (batch_size, n_tasks)
ids_b: List of length (batch_size) with datapoint identifiers.
"""
orig_dict = {}
orig_dict["mol_features"] = X_b
n_samples = len(X_b)
for task in range(self.n_tasks):
if (this_task == task) and y_b is not None:
#orig_dict["labels_%d" % task] = np.reshape(y_b[:, task], (n_samples, 1))
orig_dict["labels_%d" % task] = np.reshape(y_b[:, task], (n_samples,))
else:
# Dummy placeholders
#orig_dict["labels_%d" % task] = np.zeros((n_samples, 1))
orig_dict["labels_%d" % task] = np.zeros((n_samples,))
if (this_task == task) and w_b is not None:
#orig_dict["weights_%d" % task] = np.reshape(w_b[:, task], (n_samples, 1))
orig_dict["weights_%d" % task] = np.reshape(w_b[:, task], (n_samples,))
else:
# Dummy placeholders
#orig_dict["weights_%d" % task] = np.zeros((n_samples, 1))
orig_dict["weights_%d" % task] = np.zeros((n_samples,))
return TensorflowGraph.get_feed_dict(orig_dict)
def _get_shared_session(self, train):
# allow_soft_placement=True allows ops without a GPU implementation
# to run on the CPU instead.
if train:
if not self.train_graph.session:
config = tf.ConfigProto(allow_soft_placement=True)
self.train_graph.session = tf.Session(config=config)
return self.train_graph.session
else:
if not self.eval_graph.session:
config = tf.ConfigProto(allow_soft_placement=True)
self.eval_graph.session = tf.Session(config=config)
return self.eval_graph.session
def fit_task(self,
sess,
dataset,
task,
task_train_op,
nb_epoch=10,
log_every_N_batches=50,
checkpoint_interval=10):
"""Fit the model.
Fit one task.
TODO(rbharath): Figure out if the logging will work correctly with the
global_step set as it is.
Parameters
----------
dataset: dc.data.Dataset
Dataset object holding training data
task: int
The index of the task to train on.
    nb_epoch: int, default 10
Number of training epochs.
max_checkpoints_to_keep: int
Maximum number of checkpoints to keep; older checkpoints will be deleted.
log_every_N_batches: int
Report every N batches. Useful for training on very large datasets,
      where epochs can take a long time to finish.
checkpoint_interval: int
Frequency at which to write checkpoints, measured in epochs
Raises
------
AssertionError
If model is not in training mode.
"""
############################################################## TIMING
time1 = time.time()
############################################################## TIMING
log("Training task %d for %d epochs" % (task, nb_epoch), self.verbose)
for epoch in range(nb_epoch):
avg_loss, n_batches = 0., 0
for ind, (X_b, y_b, w_b, ids_b) in enumerate(
# Turns out there are valid cases where we don't want pad-batches
# on by default.
#dataset.iterbatches(batch_size, pad_batches=True)):
dataset.iterbatches(self.batch_size, pad_batches=self.pad_batches)):
if ind % log_every_N_batches == 0:
log("On batch %d" % ind, self.verbose)
feed_dict = self.construct_task_feed_dict(task, X_b, y_b, w_b, ids_b)
fetches = self.train_graph.output + [
task_train_op, self.train_graph.loss[task]
]
fetched_values = sess.run(fetches, feed_dict=feed_dict)
output = fetched_values[:len(self.train_graph.output)]
loss = fetched_values[-1]
avg_loss += loss
y_pred = np.squeeze(np.array(output))
y_b = y_b.flatten()
n_batches += 1
#if epoch%checkpoint_interval == checkpoint_interval-1:
# saver.save(sess, self._save_path, global_step=epoch)
avg_loss = float(avg_loss) / n_batches
log('Ending epoch %d: Average loss %g' % (epoch, avg_loss), self.verbose)
############################################################## TIMING
time2 = time.time()
print("TIMING: model fitting took %0.3f s" % (time2 - time1), self.verbose)
############################################################## TIMING
<file_sep>"""
Test for atom feature vector generator and its helper functions
"""
from deepchem.feat.molecule_featurizers.dmpnn_featurizer import get_atomic_num_one_hot, get_atom_chiral_tag_one_hot, get_atom_mass, atom_features, GraphConvConstants
from rdkit import Chem
import pytest
import numpy as np
@pytest.fixture
def example_smiles_n_features():
"""
Sample data for testing
Returns
-------
dictionary
format {'smiles':required feature vector : List}
"""
feature_vector_C = [[
0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0,
0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0.12011
]]
feature_vector_NN = [[
0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 1,
0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0.14007
],
[
0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0,
1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0.14007
]]
return {'C': feature_vector_C, 'N#N': feature_vector_NN}
def test_helper_functions():
"""
Test for get_atomic_num_one_hot(), get_atom_chiral_tag_one_hot() and get_atom_mass() helper functions
"""
smiles = 'C'
m = Chem.MolFromSmiles(smiles)
atom = m.GetAtoms()[0]
f_atomic = get_atomic_num_one_hot(
atom, GraphConvConstants.ATOM_FEATURES['atomic_num'])
req_f = list(np.zeros((101,), dtype=float))
req_f[5] = 1.0
assert len(f_atomic) == len(req_f)
assert f_atomic == req_f
f_chiral_tag = get_atom_chiral_tag_one_hot(
atom, GraphConvConstants.ATOM_FEATURES['chiral_tag'])
ref_f = [1.0, 0.0, 0.0, 0.0, 0.0]
assert len(f_chiral_tag) == len(ref_f)
assert f_chiral_tag == ref_f
f_mass = get_atom_mass(atom)
ref_f = [0.12011]
assert len(f_mass) == len(ref_f)
assert f_mass == ref_f
def test_atom_features_none():
"""
Test for atom_features() with 'None' input for Atom value
"""
f_atom = atom_features(None)
req_f = list(np.zeros((133,), dtype=int))
assert len(f_atom) == len(req_f)
assert f_atom == req_f
def test_atom_features_only_atom_num():
"""
Test for atom_features() when only_atom_num is True
"""
smiles = 'C'
m = Chem.MolFromSmiles(smiles)
atom = m.GetAtoms()[0]
features = atom_features(atom, only_atom_num=True)
req_f = list(np.zeros((133,), dtype=int))
req_f[5] = 1
assert len(features) == len(req_f)
assert features == req_f
def test_atom_features(example_smiles_n_features):
"""
Test for atom_features() function
"""
for smiles in example_smiles_n_features.keys():
m = Chem.MolFromSmiles(smiles)
f = []
for atom in m.GetAtoms():
features = atom_features(atom)
f.append(features)
k = np.array(f)
req_f = np.array(example_smiles_n_features[smiles])
assert k.shape == req_f.shape
assert f == example_smiles_n_features[smiles]
<file_sep>import unittest
import os
import deepchem as dc
class TestSplifFingerprints(unittest.TestCase):
"""Test Splif Fingerprint and Voxelizer."""
def setUp(self):
# TODO test more formats for ligand
current_dir = os.path.dirname(os.path.realpath(__file__))
self.protein_file = os.path.join(current_dir, 'data',
'3ws9_protein_fixer_rdkit.pdb')
self.ligand_file = os.path.join(current_dir, 'data', '3ws9_ligand.sdf')
self.complex_files = [(self.ligand_file, self.protein_file)]
def test_splif_shape(self):
size = 8
featurizer = dc.feat.SplifFingerprint(size=size)
features = featurizer.featurize(self.complex_files)
assert features.shape == (1, 3 * size)
def test_splif_voxels_shape(self):
box_width = 48
voxel_width = 2
voxels_per_edge = int(box_width / voxel_width)
size = 8
voxelizer = dc.feat.SplifVoxelizer(box_width=box_width,
voxel_width=voxel_width,
size=size)
features = voxelizer.featurize(self.complex_files)
assert features.shape == (1, voxels_per_edge, voxels_per_edge,
voxels_per_edge, size * 3)
<file_sep>"""
Script that trains multitask (PyTorch) models on the Delaney dataset.
"""
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
import sys
sys.path.append('../../contrib/torch')
from torch_multitask_regression import TorchMultitaskRegression
import numpy as np
import deepchem as dc
# Only for debug!
np.random.seed(123)
# Load Delaney dataset
n_features = 1024
delaney_tasks, delaney_datasets, transformers = dc.molnet.load_delaney()
train_dataset, valid_dataset, test_dataset = delaney_datasets
# Fit models
metric = dc.metrics.Metric(dc.metrics.pearson_r2_score, np.mean)
model = TorchMultitaskRegression(
len(delaney_tasks),
n_features,
layer_sizes=[1000],
dropouts=[.25],
learning_rate=0.001,
batch_size=50,
verbosity="high")
# Fit trained model
model.fit(train_dataset, nb_epoch=10)
print("Evaluating model")
train_scores = model.evaluate(train_dataset, [metric], transformers)
valid_scores = model.evaluate(valid_dataset, [metric], transformers)
print("Train scores")
print(train_scores)
print("Validation scores")
print(valid_scores)
<file_sep>"""
Test for fake_data_generator.py
"""
import numpy as np
from deepchem.utils.fake_data_generator import FakeGraphGenerator, generate_edge_index, remove_self_loops
def test_fake_graph_dataset():
n_graphs = 10
n_node_features = 5
n_edge_features = 3
n_classes = 2
z_shape = 5
# graph-level labels
fgg = FakeGraphGenerator(min_nodes=3,
max_nodes=10,
n_node_features=n_node_features,
avg_degree=4,
n_edge_features=n_edge_features,
n_classes=n_classes,
task='graph',
z=z_shape)
graphs = fgg.sample(n_graphs=n_graphs)
assert len(graphs) == n_graphs
assert np.unique(graphs.y).shape == (n_classes,)
graph = graphs.X[0]
assert graph.node_features.shape[1] == n_node_features
assert graph.edge_features.shape[1] == n_edge_features
assert graph.z.shape == (1, z_shape)
# node-level labels
fgg = FakeGraphGenerator(min_nodes=3,
max_nodes=10,
n_node_features=n_node_features,
avg_degree=4,
n_edge_features=n_edge_features,
n_classes=n_classes,
task='node',
z=z_shape)
graphs = fgg.sample(n_graphs=n_graphs)
assert len(graphs) == n_graphs
graph = graphs.X[0]
# graph.y contains node-labels and graph.node_features.shape[0]
# holds number of nodes in that graph
assert graph.y.shape[0] == graph.node_features.shape[0]
assert graph.node_features.shape[1] == n_node_features
assert graph.edge_features.shape[1] == n_edge_features
assert graph.z.shape == (1, z_shape)
def test_generate_edge_index():
n_nodes, avg_degree = 5, 3
edge_indices = generate_edge_index(n_nodes, avg_degree, remove_loops=False)
assert edge_indices.shape[0] == 2
assert edge_indices.shape[1] == n_nodes * avg_degree
def test_remove_self_loops():
edge_indices = np.array([[1, 2, 3], [1, 2, 4]])
edge_indices = remove_self_loops(edge_indices)
assert edge_indices.shape[0] == 2
assert edge_indices.shape[1] == 1
edge_indices = np.ones((2, 3))
edge_indices = remove_self_loops(edge_indices)
assert edge_indices.shape[0] == 2
assert edge_indices.shape[1] == 0
<file_sep>from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
__author__ = "<NAME>"
__copyright__ = "Copyright 2017, Stanford University"
__license__ = "MIT"
import os
import sys
import deepchem as dc
import numpy as np
import tensorflow as tf
sys.path.append("../../models")
from atomicnet_ops import create_symmetry_parameters
from atomicnet import TensorflowFragmentRegressor
# Set random seeds
seed = 123
np.random.seed(seed)
tf.set_random_seed(seed)
# Setup directories
base_dir = os.getcwd()
data_dir = os.path.join(base_dir, "datasets")
train_dir = os.path.join(data_dir, "random_train")
test_dir = os.path.join(data_dir, "random_test")
model_dir = os.path.join(base_dir, "random_model")
# Model constants
frag1_num_atoms = 153
frag2_num_atoms = 1119
complex_num_atoms = 1254
max_num_neighbors = 12
neighbor_cutoff = 12.0
# Load and transform datasets
pdbbind_tasks = ["-logKd/Ki"]
train_dataset = dc.data.DiskDataset(train_dir)
test_dataset = dc.data.DiskDataset(test_dir)
transformers = []
# convert -logKi to dG = +RT*logKi [kcal/mol] (RT = 2.479 kJ/mol at 298 K; dividing by 4.184 converts kJ to kcal)
y_train = train_dataset.y
y_train *= -1 * 2.479 / 4.184
train_dataset = dc.data.DiskDataset.from_numpy(
train_dataset.X,
y_train,
train_dataset.w,
train_dataset.ids,
tasks=pdbbind_tasks)
y_test = test_dataset.y
y_test *= -1 * 2.479 / 4.184
test_dataset = dc.data.DiskDataset.from_numpy(
test_dataset.X,
y_test,
test_dataset.w,
test_dataset.ids,
tasks=pdbbind_tasks)
# Atomic convolution variables
# at = atomic numbers (atom types)
# radial basis function parameters [cutoff, mean, width]
at = [
1., 6., 7., 8., 9., 11., 12., 15., 16., 17., 19., 20., 25., 26., 27., 28.,
29., 30., 34., 35., 38., 48., 53., 55., 80.
]
radial = [[1.5, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0],
[0.0], [0.4]]
rp = create_symmetry_parameters(radial)
# Model hyperparameters
layer_sizes = [32, 32, 16]
weight_init_stddevs = [1 / np.sqrt(x) for x in layer_sizes]
bias_init_consts = [0. for x in layer_sizes]
penalty_type = "l2"
penalty = 0.
dropouts = [0.25, 0.25, 0.]
learning_rate = 0.001
momentum = 0.8
batch_size = 24
# Initialize model
model = TensorflowFragmentRegressor(
len(pdbbind_tasks),
rp,
at,
frag1_num_atoms,
frag2_num_atoms,
complex_num_atoms,
max_num_neighbors,
logdir=model_dir,
layer_sizes=layer_sizes,
weight_init_stddevs=weight_init_stddevs,
bias_init_consts=bias_init_consts,
penalty=penalty,
penalty_type=penalty_type,
dropouts=dropouts,
learning_rate=learning_rate,
momentum=momentum,
optimizer="adam",
batch_size=batch_size,
conv_layers=1,
boxsize=None,
verbose=True,
seed=seed)
# Fit model
model.fit(train_dataset, nb_epoch=10)
# Evaluate model
metric = [
dc.metrics.Metric(dc.metrics.pearson_r2_score, mode="regression"),
dc.metrics.Metric(dc.metrics.mean_absolute_error, mode="regression")
]
train_evaluator = dc.utils.evaluate.Evaluator(model, train_dataset,
transformers)
train_scores = train_evaluator.compute_model_performance(metric)
print("Train scores")
print(train_scores)
test_evaluator = dc.utils.evaluate.Evaluator(model, test_dataset, transformers)
test_scores = test_evaluator.compute_model_performance(metric)
print("Test scores")
print(test_scores)
<file_sep>"""
Tests for Normalizing Flows.
"""
import pytest
import unittest
from deepchem.data import NumpyDataset
try:
import tensorflow as tf
import tensorflow_probability as tfp
from deepchem.models.normalizing_flows import NormalizingFlow, NormalizingFlowModel
tfd = tfp.distributions
tfb = tfp.bijectors
    has_tensorflow_probability = True
except:
    has_tensorflow_probability = False
@unittest.skipIf(not has_tensorflow_probability,
'tensorflow_probability not installed')
@pytest.mark.tensorflow
def test_normalizing_flow():
flow_layers = [
tfb.RealNVP(num_masked=1,
shift_and_log_scale_fn=tfb.real_nvp_default_template(
hidden_layers=[8, 8]))
]
    # 2D Multivariate Gaussian base distribution
nf = NormalizingFlow(
base_distribution=tfd.MultivariateNormalDiag(loc=[0., 0.]),
flow_layers=flow_layers)
nfm = NormalizingFlowModel(nf)
# Must be float32 for RealNVP
target_distribution = tfd.MultivariateNormalDiag(loc=[1., 0.])
dataset = NumpyDataset(X=target_distribution.sample(96))
# Tests a simple flow of one RealNVP layer.
X = nfm.flow.sample()
x1 = tf.zeros([2])
x2 = dataset.X[0]
# log likelihoods should be negative
assert nfm.flow.log_prob(X).numpy() < 0
assert nfm.flow.log_prob(x1).numpy() < 0
assert nfm.flow.log_prob(x2).numpy() < 0
# # Fit model
final = nfm.fit(dataset, nb_epoch=5)
print(final)
assert final > 0
<file_sep>import os
import numpy as np
import tempfile
import pytest
from flaky import flaky
import deepchem as dc
from deepchem.feat import create_char_to_idx, SmilesToSeq, SmilesToImage
from deepchem.molnet.load_function.chembl25_datasets import CHEMBL25_TASKS
try:
from deepchem.models import Smiles2Vec, ChemCeption
has_tensorflow = True
except:
has_tensorflow = False
@pytest.mark.tensorflow
def get_dataset(mode="classification",
featurizer="smiles2seq",
max_seq_len=20,
data_points=10,
n_tasks=5):
dataset_file = os.path.join(os.path.dirname(__file__), "assets",
"chembl_25_small.csv")
if featurizer == "smiles2seq":
max_len = 250
pad_len = 10
char_to_idx = create_char_to_idx(dataset_file,
max_len=max_len,
smiles_field="smiles")
feat = SmilesToSeq(char_to_idx=char_to_idx,
max_len=max_len,
pad_len=pad_len)
elif featurizer == "smiles2img":
img_size = 80
img_spec = "engd"
res = 0.5
feat = SmilesToImage(img_size=img_size, img_spec=img_spec, res=res)
loader = dc.data.CSVLoader(tasks=CHEMBL25_TASKS,
smiles_field='smiles',
featurizer=feat)
dataset = loader.create_dataset(inputs=[dataset_file],
shard_size=10000,
data_dir=tempfile.mkdtemp())
w = np.ones(shape=(data_points, n_tasks))
if mode == 'classification':
y = np.random.randint(0, 2, size=(data_points, n_tasks))
metric = dc.metrics.Metric(dc.metrics.roc_auc_score,
np.mean,
mode="classification")
else:
y = np.random.normal(size=(data_points, n_tasks))
metric = dc.metrics.Metric(dc.metrics.mean_absolute_error,
mode="regression")
if featurizer == "smiles2seq":
dataset = dc.data.NumpyDataset(dataset.X[:data_points, :max_seq_len], y,
w, dataset.ids[:data_points])
else:
dataset = dc.data.NumpyDataset(dataset.X[:data_points], y, w,
dataset.ids[:data_points])
if featurizer == "smiles2seq":
return dataset, metric, char_to_idx
else:
return dataset, metric
@pytest.mark.slow
@pytest.mark.tensorflow
def test_chemception_regression():
n_tasks = 5
dataset, metric = get_dataset(mode="regression",
featurizer="smiles2img",
n_tasks=n_tasks)
model = ChemCeption(n_tasks=n_tasks,
img_spec="engd",
model_dir=None,
mode="regression")
model.fit(dataset, nb_epoch=300)
scores = model.evaluate(dataset, [metric], [])
assert scores['mean_absolute_error'] < 0.1
@pytest.mark.slow
@pytest.mark.tensorflow
def test_chemception_classification():
n_tasks = 5
dataset, metric = get_dataset(mode="classification",
featurizer="smiles2img",
n_tasks=n_tasks)
model = ChemCeption(n_tasks=n_tasks,
img_spec="engd",
model_dir=None,
mode="classification")
model.fit(dataset, nb_epoch=300)
scores = model.evaluate(dataset, [metric], [])
assert scores['mean-roc_auc_score'] >= 0.9
@pytest.mark.slow
@pytest.mark.tensorflow
def test_smiles_to_vec_regression():
n_tasks = 5
max_seq_len = 20
dataset, metric, char_to_idx = get_dataset(mode="regression",
featurizer="smiles2seq",
n_tasks=n_tasks,
max_seq_len=max_seq_len)
model = Smiles2Vec(char_to_idx=char_to_idx,
max_seq_len=max_seq_len,
use_conv=True,
n_tasks=n_tasks,
model_dir=None,
mode="regression")
model.fit(dataset, nb_epoch=500)
scores = model.evaluate(dataset, [metric], [])
assert scores['mean_absolute_error'] < 0.1
@pytest.mark.slow
@pytest.mark.tensorflow
def test_smiles_to_vec_classification():
n_tasks = 5
max_seq_len = 20
dataset, metric, char_to_idx, = get_dataset(mode="classification",
featurizer="smiles2seq",
n_tasks=n_tasks,
max_seq_len=max_seq_len)
model = Smiles2Vec(char_to_idx=char_to_idx,
max_seq_len=max_seq_len,
use_conv=True,
n_tasks=n_tasks,
model_dir=None,
mode="classification")
model.fit(dataset, nb_epoch=500)
scores = model.evaluate(dataset, [metric], [])
assert scores['mean-roc_auc_score'] >= 0.9
@flaky
@pytest.mark.slow
@pytest.mark.tensorflow
def test_chemception_fit_with_augmentation():
n_tasks = 5
dataset, metric = get_dataset(mode="classification",
featurizer="smiles2img",
n_tasks=n_tasks)
model = ChemCeption(n_tasks=n_tasks,
img_spec="engd",
model_dir=None,
augment=True,
mode="classification")
model.fit(dataset, nb_epoch=300)
scores = model.evaluate(dataset, [metric], [])
assert scores['mean-roc_auc_score'] >= 0.9
<file_sep>import deepchem as dc
import numpy as np
def test_clipping_X_transformer():
"""Test clipping transformer on X of singletask dataset."""
n_samples = 10
n_features = 3
n_tasks = 1
ids = np.arange(n_samples)
X = np.ones((n_samples, n_features))
target = 5. * X
X *= 6.
y = np.zeros((n_samples, n_tasks))
w = np.ones((n_samples, n_tasks))
dataset = dc.data.NumpyDataset(X, y, w, ids)
transformer = dc.trans.ClippingTransformer(transform_X=True, x_max=5.)
clipped_dataset = transformer.transform(dataset)
X_t, y_t, w_t, ids_t = (clipped_dataset.X, clipped_dataset.y,
clipped_dataset.w, clipped_dataset.ids)
# Check ids are unchanged.
for id_elt, id_t_elt in zip(ids, ids_t):
assert id_elt == id_t_elt
# Check y is unchanged since this is an X transformer
np.testing.assert_allclose(y, y_t)
# Check w is unchanged since this is an X transformer
np.testing.assert_allclose(w, w_t)
# Check X is now holding the proper values when sorted.
np.testing.assert_allclose(X_t, target)
def test_clipping_y_transformer():
"""Test clipping transformer on y of singletask dataset."""
n_samples = 10
n_features = 3
n_tasks = 1
ids = np.arange(n_samples)
X = np.zeros((n_samples, n_features))
y = np.ones((n_samples, n_tasks))
target = 5. * y
y *= 6.
w = np.ones((n_samples, n_tasks))
dataset = dc.data.NumpyDataset(X, y, w, ids)
transformer = dc.trans.ClippingTransformer(transform_y=True, y_max=5.)
clipped_dataset = transformer.transform(dataset)
X_t, y_t, w_t, ids_t = (clipped_dataset.X, clipped_dataset.y,
clipped_dataset.w, clipped_dataset.ids)
# Check ids are unchanged.
for id_elt, id_t_elt in zip(ids, ids_t):
assert id_elt == id_t_elt
# Check X is unchanged since this is a y transformer
np.testing.assert_allclose(X, X_t)
# Check w is unchanged since this is a y transformer
np.testing.assert_allclose(w, w_t)
# Check y is now holding the proper values when sorted.
np.testing.assert_allclose(y_t, target)
<file_sep>"""
Callback functions that can be invoked while fitting a KerasModel.
"""
import sys
class ValidationCallback(object):
"""Performs validation while training a KerasModel.
This is a callback that can be passed to fit(). It periodically computes a
set of metrics over a validation set, writes them to a file, and keeps track
of the best score. In addition, it can save the best model parameters found
so far to a directory on disk, updating them every time it finds a new best
validation score.
If Tensorboard logging is enabled on the KerasModel, the metrics are also
logged to Tensorboard. This only happens when validation coincides with a
step on which the model writes to the log. You should therefore make sure
that this callback's reporting interval is an even fraction or multiple of
the model's logging interval.
"""
def __init__(self,
dataset,
interval,
metrics,
output_file=sys.stdout,
save_dir=None,
save_metric=0,
save_on_minimum=True,
transformers=[]):
"""Create a ValidationCallback.
Parameters
----------
dataset: dc.data.Dataset
the validation set on which to compute the metrics
interval: int
the interval (in training steps) at which to perform validation
metrics: list of dc.metrics.Metric
metrics to compute on the validation set
output_file: file
      the file to which results should be written
save_dir: str
if not None, the model parameters that produce the best validation score
will be written to this directory
save_metric: int
the index of the metric to use when deciding whether to write a new set
of parameters to disk
save_on_minimum: bool
if True, the best model is considered to be the one that minimizes the
validation metric. If False, the best model is considered to be the one
that maximizes it.
transformers: List[Transformer]
List of `dc.trans.Transformer` objects. These transformations
must have been applied to `dataset` previously. The dataset will
be untransformed for metric evaluation.
"""
self.dataset = dataset
self.interval = interval
self.metrics = metrics
self.output_file = output_file
self.save_dir = save_dir
self.save_metric = save_metric
self.save_on_minimum = save_on_minimum
self._best_score = None
self.transformers = transformers
def __call__(self, model, step):
"""This is invoked by the KerasModel after every step of fitting.
Parameters
----------
model: KerasModel
the model that is being trained
step: int
the index of the training step that has just completed
"""
if step % self.interval != 0:
return
scores = model.evaluate(self.dataset, self.metrics, self.transformers)
message = 'Step %d validation:' % step
for key in scores:
message += ' %s=%g' % (key, scores[key])
print(message, file=self.output_file)
if model.tensorboard:
for key in scores:
model._log_scalar_to_tensorboard(key, scores[key],
model.get_global_step())
score = scores[self.metrics[self.save_metric].name]
if not self.save_on_minimum:
score = -score
if self._best_score is None or score < self._best_score:
self._best_score = score
if self.save_dir is not None:
model.save_checkpoint(model_dir=self.save_dir)
if model.wandb_logger is not None:
# Log data to Wandb
data = {'eval/' + k: v for k, v in scores.items()}
model.wandb_logger.log_data(data, step, dataset_id=id(self.dataset))
def get_best_score(self):
"""This getter returns the best score evaluated on the validation set.
Returns
-------
float
The best score.
"""
if self.save_on_minimum:
return self._best_score
else:
return -self._best_score
<file_sep>"""
DGL-based AttentiveFP for graph property prediction.
"""
import torch.nn as nn
import torch.nn.functional as F
from deepchem.models.losses import Loss, L2Loss, SparseSoftmaxCrossEntropy
from deepchem.models.torch_models.torch_model import TorchModel
class AttentiveFP(nn.Module):
"""Model for Graph Property Prediction.
This model proceeds as follows:
* Combine node features and edge features for initializing node representations,
which involves a round of message passing
* Update node representations with multiple rounds of message passing
* For each graph, compute its representation by combining the representations
of all nodes in it, which involves a gated recurrent unit (GRU).
* Perform the final prediction using a linear layer
Examples
--------
>>> import deepchem as dc
>>> import dgl
>>> from deepchem.models import AttentiveFP
>>> smiles = ["C1CCC1", "C1=CC=CN=C1"]
>>> featurizer = dc.feat.MolGraphConvFeaturizer(use_edges=True)
>>> graphs = featurizer.featurize(smiles)
>>> print(type(graphs[0]))
<class 'deepchem.feat.graph_data.GraphData'>
>>> dgl_graphs = [graphs[i].to_dgl_graph(self_loop=True) for i in range(len(graphs))]
>>> # Batch two graphs into a graph of two connected components
>>> batch_dgl_graph = dgl.batch(dgl_graphs)
>>> model = AttentiveFP(n_tasks=1, mode='regression')
>>> preds = model(batch_dgl_graph)
>>> print(type(preds))
<class 'torch.Tensor'>
>>> preds.shape == (2, 1)
True
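    In 'classification' mode the forward pass instead returns a tuple of class
    probabilities and pre-softmax logits (see ``forward`` below):
    >>> clf = AttentiveFP(n_tasks=1, mode='classification')
    >>> proba, logits = clf(batch_dgl_graph)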
References
----------
.. [1] <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>,
<NAME>, <NAME>, <NAME>, <NAME>, and <NAME>. "Pushing
the Boundaries of Molecular Representation for Drug Discovery with the Graph Attention
Mechanism." Journal of Medicinal Chemistry. 2020, 63, 16, 8749–8760.
Notes
-----
This class requires DGL (https://github.com/dmlc/dgl) and DGL-LifeSci
(https://github.com/awslabs/dgl-lifesci) to be installed.
"""
def __init__(self,
n_tasks: int,
num_layers: int = 2,
num_timesteps: int = 2,
graph_feat_size: int = 200,
dropout: float = 0.,
mode: str = 'regression',
number_atom_features: int = 30,
number_bond_features: int = 11,
n_classes: int = 2,
nfeat_name: str = 'x',
efeat_name: str = 'edge_attr'):
"""
Parameters
----------
n_tasks: int
Number of tasks.
num_layers: int
Number of graph neural network layers, i.e. number of rounds of message passing.
Default to 2.
num_timesteps: int
Number of time steps for updating graph representations with a GRU. Default to 2.
graph_feat_size: int
Size for graph representations. Default to 200.
dropout: float
Dropout probability. Default to 0.
mode: str
The model type, 'classification' or 'regression'. Default to 'regression'.
number_atom_features: int
The length of the initial atom feature vectors. Default to 30.
number_bond_features: int
The length of the initial bond feature vectors. Default to 11.
n_classes: int
The number of classes to predict per task
(only used when ``mode`` is 'classification'). Default to 2.
nfeat_name: str
For an input graph ``g``, the model assumes that it stores node features in
``g.ndata[nfeat_name]`` and will retrieve input node features from that.
Default to 'x'.
efeat_name: str
For an input graph ``g``, the model assumes that it stores edge features in
``g.edata[efeat_name]`` and will retrieve input edge features from that.
Default to 'edge_attr'.
"""
try:
import dgl # noqa: F401
except:
raise ImportError('This class requires dgl.')
try:
import dgllife # noqa: F401
except:
raise ImportError('This class requires dgllife.')
if mode not in ['classification', 'regression']:
raise ValueError(
"mode must be either 'classification' or 'regression'")
super(AttentiveFP, self).__init__()
self.n_tasks = n_tasks
self.mode = mode
self.n_classes = n_classes
self.nfeat_name = nfeat_name
self.efeat_name = efeat_name
if mode == 'classification':
out_size = n_tasks * n_classes
else:
out_size = n_tasks
from dgllife.model import AttentiveFPPredictor as DGLAttentiveFPPredictor
self.model = DGLAttentiveFPPredictor(
node_feat_size=number_atom_features,
edge_feat_size=number_bond_features,
num_layers=num_layers,
num_timesteps=num_timesteps,
graph_feat_size=graph_feat_size,
n_tasks=out_size,
dropout=dropout)
def forward(self, g):
"""Predict graph labels
Parameters
----------
g: DGLGraph
A DGLGraph for a batch of graphs. It stores the node features in
``dgl_graph.ndata[self.nfeat_name]`` and edge features in
``dgl_graph.edata[self.efeat_name]``.
Returns
-------
torch.Tensor
The model output.
* When self.mode = 'regression',
its shape will be ``(dgl_graph.batch_size, self.n_tasks)``.
* When self.mode = 'classification', the output consists of probabilities
for classes. Its shape will be
``(dgl_graph.batch_size, self.n_tasks, self.n_classes)`` if self.n_tasks > 1;
its shape will be ``(dgl_graph.batch_size, self.n_classes)`` if self.n_tasks is 1.
torch.Tensor, optional
This is only returned when self.mode = 'classification', the output consists of the
logits for classes before softmax.
"""
node_feats = g.ndata[self.nfeat_name]
edge_feats = g.edata[self.efeat_name]
out = self.model(g, node_feats, edge_feats)
if self.mode == 'classification':
if self.n_tasks == 1:
logits = out.view(-1, self.n_classes)
softmax_dim = 1
else:
logits = out.view(-1, self.n_tasks, self.n_classes)
softmax_dim = 2
proba = F.softmax(logits, dim=softmax_dim)
return proba, logits
else:
return out
class AttentiveFPModel(TorchModel):
"""Model for Graph Property Prediction.
This model proceeds as follows:
* Combine node features and edge features for initializing node representations,
which involves a round of message passing
* Update node representations with multiple rounds of message passing
* For each graph, compute its representation by combining the representations
of all nodes in it, which involves a gated recurrent unit (GRU).
* Perform the final prediction using a linear layer
Examples
--------
>>> import deepchem as dc
>>> from deepchem.models import AttentiveFPModel
>>> # preparing dataset
>>> smiles = ["C1CCC1", "C1=CC=CN=C1"]
>>> labels = [0., 1.]
>>> featurizer = dc.feat.MolGraphConvFeaturizer(use_edges=True)
>>> X = featurizer.featurize(smiles)
>>> dataset = dc.data.NumpyDataset(X=X, y=labels)
>>> # training model
>>> model = AttentiveFPModel(mode='classification', n_tasks=1,
... batch_size=16, learning_rate=0.001)
>>> loss = model.fit(dataset, nb_epoch=5)
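After fitting, predictions follow the usual ``TorchModel`` interface. A
minimal sketch (illustrative only; the exact output shape depends on
``mode``, ``n_tasks`` and ``n_classes``):
.. code-block:: python
    preds = model.predict(dataset)
    # for this setup: class probabilities for the two molecules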
References
----------
.. [1] <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>,
<NAME>, <NAME>, <NAME>, <NAME>, and <NAME>. "Pushing
the Boundaries of Molecular Representation for Drug Discovery with the Graph
Attention Mechanism." Journal of Medicinal Chemistry. 2020, 63, 16, 8749–8760.
Notes
-----
This class requires DGL (https://github.com/dmlc/dgl) and DGL-LifeSci
(https://github.com/awslabs/dgl-lifesci) to be installed.
"""
def __init__(self,
n_tasks: int,
num_layers: int = 2,
num_timesteps: int = 2,
graph_feat_size: int = 200,
dropout: float = 0.,
mode: str = 'regression',
number_atom_features: int = 30,
number_bond_features: int = 11,
n_classes: int = 2,
self_loop: bool = True,
**kwargs):
"""
Parameters
----------
n_tasks: int
Number of tasks.
num_layers: int
Number of graph neural network layers, i.e. number of rounds of message passing.
Default to 2.
num_timesteps: int
Number of time steps for updating graph representations with a GRU. Default to 2.
graph_feat_size: int
Size for graph representations. Default to 200.
dropout: float
Dropout probability. Default to 0.
mode: str
The model type, 'classification' or 'regression'. Default to 'regression'.
number_atom_features: int
The length of the initial atom feature vectors. Default to 30.
number_bond_features: int
The length of the initial bond feature vectors. Default to 11.
n_classes: int
The number of classes to predict per task
(only used when ``mode`` is 'classification'). Default to 2.
self_loop: bool
Whether to add self loops for the nodes, i.e. edges from nodes to themselves.
When input graphs have isolated nodes, self loops allow preserving the original feature
of them in message passing. Default to True.
kwargs
This can include any keyword argument of TorchModel.
"""
model = AttentiveFP(n_tasks=n_tasks,
num_layers=num_layers,
num_timesteps=num_timesteps,
graph_feat_size=graph_feat_size,
dropout=dropout,
mode=mode,
number_atom_features=number_atom_features,
number_bond_features=number_bond_features,
n_classes=n_classes)
if mode == 'regression':
loss: Loss = L2Loss()
output_types = ['prediction']
else:
loss = SparseSoftmaxCrossEntropy()
output_types = ['prediction', 'loss']
super(AttentiveFPModel, self).__init__(model,
loss=loss,
output_types=output_types,
**kwargs)
self._self_loop = self_loop
def _prepare_batch(self, batch):
"""Create batch data for AttentiveFP.
Parameters
----------
batch: tuple
The tuple is ``(inputs, labels, weights)``.
Returns
-------
inputs: DGLGraph
DGLGraph for a batch of graphs.
labels: list of torch.Tensor or None
The graph labels.
weights: list of torch.Tensor or None
The weights for each sample or sample/task pair converted to torch.Tensor.
"""
try:
import dgl
except ModuleNotFoundError:
raise ImportError('This class requires dgl.')
inputs, labels, weights = batch
dgl_graphs = [
graph.to_dgl_graph(self_loop=self._self_loop) for graph in inputs[0]
]
inputs = dgl.batch(dgl_graphs).to(self.device)
_, labels, weights = super(AttentiveFPModel, self)._prepare_batch(
([], labels, weights))
return inputs, labels, weights
<file_sep>import math
from math import pi as PI
import numpy as np
from typing import Any, Tuple, Optional, Sequence, List, Union, Callable, Dict, TypedDict
from collections.abc import Sequence as SequenceCollection
try:
import torch
from torch import Tensor
import torch.nn as nn
import torch.nn.functional as F
except ModuleNotFoundError:
raise ImportError('These classes require PyTorch to be installed.')
try:
from torch_geometric.utils import scatter
except ModuleNotFoundError:
pass
from deepchem.utils.typing import OneOrMany, ActivationFn, ArrayLike
from deepchem.utils.pytorch_utils import get_activation, segment_sum
from torch.nn import init as initializers
from torch_geometric.nn import MessagePassing
from torch_geometric.utils import add_self_loops
class MultilayerPerceptron(nn.Module):
"""A simple fully connected feed-forward network, otherwise known as a multilayer perceptron (MLP).
Examples
--------
>>> model = MultilayerPerceptron(d_input=10, d_hidden=(2,3), d_output=2, dropout=0.0, activation_fn='relu')
>>> x = torch.ones(2, 10)
>>> out = model(x)
>>> print(out.shape)
torch.Size([2, 2])
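When ``skip_connection=True`` the input is added to the output, either
directly (``weighted_skip=False``, which requires ``d_input == d_output``)
or through a learned linear projection. A minimal sketch (illustrative only):
.. code-block:: python
    model = MultilayerPerceptron(d_input=10, d_output=10, skip_connection=True)
    out = model(torch.ones(2, 10))  # MLP output plus a learned projection of the input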
"""
def __init__(self,
d_input: int,
d_output: int,
d_hidden: Optional[tuple] = None,
dropout: float = 0.0,
batch_norm: bool = False,
batch_norm_momentum: float = 0.1,
activation_fn: Union[Callable, str] = 'relu',
skip_connection: bool = False,
weighted_skip: bool = True):
"""Initialize the model.
Parameters
----------
d_input: int
the dimension of the input layer
d_output: int
the dimension of the output layer
d_hidden: tuple
the dimensions of the hidden layers
dropout: float
the dropout probability
batch_norm: bool
whether to use batch normalization
batch_norm_momentum: float
the momentum for batch normalization
activation_fn: str
the activation function to use in the hidden layers
skip_connection: bool
whether to add a skip connection from the input to the output
weighted_skip: bool
whether to add a weighted skip connection from the input to the output
"""
super(MultilayerPerceptron, self).__init__()
self.d_input = d_input
self.d_hidden = d_hidden
self.d_output = d_output
self.dropout = nn.Dropout(dropout)
self.batch_norm = batch_norm
self.batch_norm_momentum = batch_norm_momentum
self.activation_fn = get_activation(activation_fn)
self.model = nn.Sequential(*self.build_layers())
self.skip = nn.Linear(d_input, d_output) if skip_connection else None
self.weighted_skip = weighted_skip
def build_layers(self):
"""
Build the layers of the model, iterating through the hidden dimensions to produce a list of layers.
"""
layer_list = []
layer_dim = self.d_input
if self.d_hidden is not None:
for d in self.d_hidden:
layer_list.append(nn.Linear(layer_dim, d))
layer_list.append(self.dropout)
if self.batch_norm:
layer_list.append(
nn.BatchNorm1d(d, momentum=self.batch_norm_momentum))
layer_dim = d
layer_list.append(nn.Linear(layer_dim, self.d_output))
return layer_list
def forward(self, x: Tensor) -> Tensor:
"""Forward pass of the model."""
input = x
for layer in self.model:
x = layer(x)
if isinstance(layer, nn.Linear):
x = self.activation_fn(
x
)  # get_activation returns a functional (e.g. F.relu), so it is applied explicitly after each Linear layer
if self.skip is not None:
if not self.weighted_skip:
return x + input
else:
return x + self.skip(input)
else:
return x
class CNNModule(nn.Module):
"""A 1, 2, or 3 dimensional convolutional network for either regression or classification.
The network consists of the following sequence of layers:
- A configurable number of convolutional layers
- A global pooling layer (either max pool or average pool)
- A final fully connected layer to compute the output
It optionally can compose the model from pre-activation residual blocks, as
described in https://arxiv.org/abs/1603.05027, rather than a simple stack of
convolution layers. This often leads to easier training, especially when using a
large number of layers. Note that residual blocks can only be used when
successive layers have the same output shape. Wherever the output shape changes, a
simple convolution layer will be used even if residual=True.
Examples
--------
>>> model = CNNModule(n_tasks=5, n_features=8, dims=2, layer_filters=[3,8,8,16], kernel_size=3, n_classes = 7, mode='classification', uncertainty=False, padding='same')
>>> x = torch.ones(2, 224, 224, 8)
>>> x = model(x)
>>> for tensor in x:
... print(tensor.shape)
torch.Size([2, 5, 7])
torch.Size([2, 5, 7])
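In regression mode the output is a list with a single tensor of shape
``(batch_size, n_tasks)`` (plus variance terms when ``uncertainty=True``).
A minimal sketch (illustrative only):
.. code-block:: python
    model = CNNModule(n_tasks=5, n_features=8, dims=2, layer_filters=[3, 8],
                      kernel_size=3, mode='regression', padding='same')
    out = model(torch.ones(2, 224, 224, 8))  # [tensor of shape (2, 5)]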
"""
def __init__(self,
n_tasks: int,
n_features: int,
dims: int,
layer_filters: List[int] = [100],
kernel_size: OneOrMany[int] = 5,
strides: OneOrMany[int] = 1,
weight_init_stddevs: OneOrMany[float] = 0.02,
bias_init_consts: OneOrMany[float] = 1.0,
dropouts: OneOrMany[float] = 0.5,
activation_fns: OneOrMany[ActivationFn] = 'relu',
pool_type: str = 'max',
mode: str = 'classification',
n_classes: int = 2,
uncertainty: bool = False,
residual: bool = False,
padding: Union[int, str] = 'valid') -> None:
"""Create a CNN.
Parameters
----------
n_tasks: int
number of tasks
n_features: int
number of features
dims: int
the number of dimensions to apply convolutions over (1, 2, or 3)
layer_filters: list
the number of output filters for each convolutional layer in the network.
The length of this list determines the number of layers.
kernel_size: int, tuple, or list
a list giving the shape of the convolutional kernel for each layer. Each
element may be either an int (use the same kernel width for every dimension)
or a tuple (the kernel width along each dimension). Alternatively this may
be a single int or tuple instead of a list, in which case the same kernel
shape is used for every layer.
strides: int, tuple, or list
a list giving the stride between applications of the kernel for each layer.
Each element may be either an int (use the same stride for every dimension)
or a tuple (the stride along each dimension). Alternatively this may be a
single int or tuple instead of a list, in which case the same stride is
used for every layer.
weight_init_stddevs: list or float
the standard deviation of the distribution to use for weight initialization
of each layer. The length of this list should equal len(layer_filters)+1,
where the final element corresponds to the dense layer. Alternatively this
may be a single value instead of a list, in which case the same value is used
for every layer.
bias_init_consts: list or float
the value to initialize the biases in each layer to. The length of this
list should equal len(layer_filters)+1, where the final element corresponds
to the dense layer. Alternatively this may be a single value instead of a
list, in which case the same value is used for every layer.
dropouts: list or float
the dropout probability to use for each layer. The length of this list should equal len(layer_filters).
Alternatively this may be a single value instead of a list, in which case the same value is used for every layer
activation_fns: str or list
the torch activation function to apply to each layer. The length of this list should equal
len(layer_filters). Alternatively this may be a single value instead of a list, in which case the
same value is used for every layer, 'relu' by default
pool_type: str
the type of pooling layer to use, either 'max' or 'average'
mode: str
Either 'classification' or 'regression'
n_classes: int
the number of classes to predict (only used in classification mode)
uncertainty: bool
if True, include extra outputs and loss terms to enable the uncertainty
in outputs to be predicted
residual: bool
if True, the model will be composed of pre-activation residual blocks instead
of a simple stack of convolutional layers.
padding: str, int or tuple
the padding to use for convolutional layers, either 'valid' or 'same'
"""
super(CNNModule, self).__init__()
if dims not in (1, 2, 3):
raise ValueError('Number of dimensions must be 1, 2 or 3')
if mode not in ['classification', 'regression']:
raise ValueError(
"mode must be either 'classification' or 'regression'")
self.n_tasks = n_tasks
self.n_features = n_features
self.dims = dims
self.mode = mode
self.n_classes = n_classes
self.uncertainty = uncertainty
self.mode = mode
self.layer_filters = layer_filters
self.residual = residual
n_layers = len(layer_filters)
# PyTorch layers require input and output channels as parameter
# if there is only one layer, duplicate layer_filters so the model-creating loop below works
if len(layer_filters) == 1:
layer_filters = layer_filters * 2
if not isinstance(kernel_size, SequenceCollection):
kernel_size = [kernel_size] * n_layers
if not isinstance(strides, SequenceCollection):
strides = [strides] * n_layers
if not isinstance(dropouts, SequenceCollection):
dropouts = [dropouts] * n_layers
if isinstance(
activation_fns,
str) or not isinstance(activation_fns, SequenceCollection):
activation_fns = [activation_fns] * n_layers
if not isinstance(weight_init_stddevs, SequenceCollection):
weight_init_stddevs = [weight_init_stddevs] * n_layers
if not isinstance(bias_init_consts, SequenceCollection):
bias_init_consts = [bias_init_consts] * n_layers
self.activation_fns = [get_activation(f) for f in activation_fns]
self.dropouts = dropouts
if uncertainty:
if mode != 'regression':
raise ValueError(
"Uncertainty is only supported in regression mode")
if any(d == 0.0 for d in dropouts):
raise ValueError(
'Dropout must be included in every layer to predict uncertainty'
)
# dims selects the convolution dimensionality (1, 2 or 3); subtract 1 for 0-based tuple indexing
ConvLayer = (nn.Conv1d, nn.Conv2d, nn.Conv3d)[self.dims - 1]
if pool_type == 'average':
PoolLayer = (F.avg_pool1d, F.avg_pool2d,
F.avg_pool3d)[self.dims - 1]
elif pool_type == 'max':
PoolLayer = (F.max_pool1d, F.max_pool2d,
F.max_pool3d)[self.dims - 1]
else:
raise ValueError("pool_type must be either 'average' or 'max'")
self.PoolLayer = PoolLayer
self.layers = nn.ModuleList()
in_shape = n_features
for out_shape, size, stride, weight_stddev, bias_const in zip(
layer_filters, kernel_size, strides, weight_init_stddevs,
bias_init_consts):
layer = ConvLayer(in_channels=in_shape,
out_channels=out_shape,
kernel_size=size,
stride=stride,
padding=padding,
dilation=1,
groups=1,
bias=True)
nn.init.normal_(layer.weight, 0, weight_stddev)
# initializing layer bias with nn.init gives mypy typecheck error
# using the following workaround
if layer.bias is not None:
layer.bias = nn.Parameter(
torch.full(layer.bias.shape, bias_const))
self.layers.append(layer)
in_shape = out_shape
self.classifier_ffn = nn.LazyLinear(self.n_tasks * self.n_classes)
self.output_layer = nn.LazyLinear(self.n_tasks)
self.uncertainty_layer = nn.LazyLinear(self.n_tasks)
def forward(self, inputs: OneOrMany[torch.Tensor]) -> List[Any]:
"""
Parameters
----------
x: torch.Tensor
Input Tensor
Returns
-------
torch.Tensor
Output as per use case : regression/classification
"""
if isinstance(inputs, torch.Tensor):
x, dropout_switch = inputs, None
else:
x, dropout_switch = inputs
x = torch.transpose(x, 1, -1) # n h w c -> n c h w
prev_layer = x
for layer, activation_fn, dropout in zip(self.layers,
self.activation_fns,
self.dropouts):
x = layer(x)
if dropout > 0. and dropout_switch:
x = F.dropout(x, dropout)
# residual blocks can only be used when successive layers have the same output shape
if self.residual and x.shape[1] == prev_layer.shape[1]:
x = x + prev_layer
if activation_fn is not None:
x = activation_fn(x)
prev_layer = x
x = self.PoolLayer(x, kernel_size=x.size()[2:])
outputs = []
batch_size = x.shape[0]
x = torch.reshape(x, (batch_size, -1))
if self.mode == "classification":
logits = self.classifier_ffn(x)
logits = logits.view(batch_size, self.n_tasks, self.n_classes)
output = F.softmax(logits, dim=2)
outputs = [output, logits]
else:
output = self.output_layer(x)
output = output.view(batch_size, self.n_tasks)
if self.uncertainty:
log_var = self.uncertainty_layer(x)
log_var = log_var.view(batch_size, self.n_tasks, 1)
var = torch.exp(log_var)
outputs = [output, var, output, log_var]
else:
outputs = [output]
return outputs
class ScaleNorm(nn.Module):
"""Apply Scale Normalization to input.
The ScaleNorm layer first computes the square root of the scale, then computes the matrix/vector norm of the input tensor.
The norm value is calculated as `sqrt(scale) / matrix norm`.
Finally, the result is returned as `input_tensor * norm value`.
This layer can be used instead of LayerNorm when a scaled version of the norm is required.
Instead of performing the scaling operation (`scale / norm`) in a lambda-like layer, we are defining it within this layer to make prototyping more efficient.
References
----------
.. [1] <NAME> et al. "Molecule Attention Transformer" Graph Representation Learning workshop and Machine Learning and the Physical Sciences workshop at NeurIPS 2019. 2020. https://arxiv.org/abs/2002.08264
Examples
--------
>>> from deepchem.models.torch_models.layers import ScaleNorm
>>> scale = 0.35
>>> layer = ScaleNorm(scale)
>>> input_tensor = torch.tensor([[1.269, 39.36], [0.00918, -9.12]])
>>> output_tensor = layer(input_tensor)
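The same result can be reproduced by hand, which makes the formula explicit.
A minimal sketch (illustrative only):
.. code-block:: python
    import math
    norm = input_tensor.norm(dim=-1, keepdim=True).clamp(min=1e-5)
    manual = input_tensor * (math.sqrt(scale) / norm)  # matches output_tensor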
"""
def __init__(self, scale: float, eps: float = 1e-5):
"""Initialize a ScaleNorm layer.
Parameters
----------
scale: float
Scale magnitude.
eps: float
Epsilon value. Default = 1e-5.
"""
super(ScaleNorm, self).__init__()
self.scale = nn.Parameter(torch.tensor(math.sqrt(scale)))
self.eps = eps
def forward(self, x: torch.Tensor) -> torch.Tensor:
norm = self.scale / torch.norm(x, dim=-1,
keepdim=True).clamp(min=self.eps)
return x * norm
class MultiHeadedMATAttention(nn.Module):
"""First constructs an attention layer tailored to the Molecular Attention Transformer [1]_ and then converts it into Multi-Headed Attention.
In Multi-Headed attention the attention mechanism is applied multiple times in parallel through the multiple attention heads.
Thus, different subsequences of a given sequence can be processed differently.
The query, key and value parameters are split multiple ways and each split is passed separately through a different attention head.
References
----------
.. [1] <NAME> et al. "Molecule Attention Transformer" Graph Representation Learning workshop and Machine Learning and the Physical Sciences workshop at NeurIPS 2019. 2020. https://arxiv.org/abs/2002.08264
Examples
--------
>>> from deepchem.models.torch_models.layers import MultiHeadedMATAttention, MATEmbedding
>>> import deepchem as dc
>>> import torch
>>> input_smile = "CC"
>>> feat = dc.feat.MATFeaturizer()
>>> input_smile = "CC"
>>> out = feat.featurize(input_smile)
>>> node = torch.tensor(out[0].node_features).float().unsqueeze(0)
>>> adj = torch.tensor(out[0].adjacency_matrix).float().unsqueeze(0)
>>> dist = torch.tensor(out[0].distance_matrix).float().unsqueeze(0)
>>> mask = torch.sum(torch.abs(node), dim=-1) != 0
>>> layer = MultiHeadedMATAttention(
... dist_kernel='softmax',
... lambda_attention=0.33,
... lambda_distance=0.33,
... h=16,
... hsize=1024,
... dropout_p=0.0)
>>> op = MATEmbedding()(node)
>>> output = layer(op, op, op, mask, adj, dist)
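Internally, the attention weights combine three terms: the scaled dot-product
attention, the distance matrix and the adjacency matrix. A minimal sketch of
the weighting used inside the layer (illustrative only; the ``p_*`` tensors
are intermediate results computed in ``_single_attention``):
.. code-block:: python
    lambda_adjacency = 1.0 - lambda_attention - lambda_distance
    p_weighted = (lambda_attention * p_attn + lambda_distance * p_dist
                  + lambda_adjacency * p_adj)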
"""
def __init__(self,
dist_kernel: str = 'softmax',
lambda_attention: float = 0.33,
lambda_distance: float = 0.33,
h: int = 16,
hsize: int = 1024,
dropout_p: float = 0.0,
output_bias: bool = True):
"""Initialize a multi-headed attention layer.
Parameters
----------
dist_kernel: str
Kernel activation to be used. Can be either 'softmax' for softmax or 'exp' for exponential.
lambda_attention: float
Constant to be multiplied with the attention matrix.
lambda_distance: float
Constant to be multiplied with the distance matrix.
h: int
Number of attention heads.
hsize: int
Size of dense layer.
dropout_p: float
Dropout probability.
output_bias: bool
If True, dense layers will use bias vectors.
"""
super().__init__()
if dist_kernel == "softmax":
self.dist_kernel = lambda x: torch.softmax(-x, dim=-1)
elif dist_kernel == "exp":
self.dist_kernel = lambda x: torch.exp(-x)
self.lambda_attention = lambda_attention
self.lambda_distance = lambda_distance
self.lambda_adjacency = 1.0 - self.lambda_attention - self.lambda_distance
self.d_k = hsize // h
self.h = h
linear_layer = nn.Linear(hsize, hsize)
self.linear_layers = nn.ModuleList([linear_layer for _ in range(3)])
self.dropout_p = nn.Dropout(dropout_p)
self.output_linear = nn.Linear(hsize, hsize, output_bias)
def _single_attention(
self,
query: torch.Tensor,
key: torch.Tensor,
value: torch.Tensor,
mask: torch.Tensor,
adj_matrix: torch.Tensor,
distance_matrix: torch.Tensor,
dropout_p: float = 0.0,
eps: float = 1e-6,
inf: float = 1e12) -> Tuple[torch.Tensor, torch.Tensor]:
"""Defining and computing output for a single MAT attention layer.
Parameters
----------
query: torch.Tensor
Standard query parameter for attention.
key: torch.Tensor
Standard key parameter for attention.
value: torch.Tensor
Standard value parameter for attention.
mask: torch.Tensor
Masks out padding values so that they are not taken into account when computing the attention score.
adj_matrix: torch.Tensor
Adjacency matrix of the input molecule, returned from dc.feat.MATFeaturizer()
distance_matrix: torch.Tensor
Distance matrix of the input molecule, returned from dc.feat.MATFeaturizer()
dropout_p: float
Dropout probability.
eps: float
Epsilon value
inf: float
Value of infinity to be used.
"""
d_k = query.size(-1)
scores = torch.matmul(query, key.transpose(-2, -1)) / math.sqrt(d_k)
if mask is not None:
scores = scores.masked_fill(
mask.unsqueeze(1).repeat(1, query.shape[1], query.shape[2],
1) == 0, -inf)
p_attn = F.softmax(scores, dim=-1)
adj_matrix = adj_matrix / (
torch.sum(torch.tensor(adj_matrix), dim=-1).unsqueeze(2) + eps)
if len(adj_matrix.shape) <= 3:
p_adj = adj_matrix.unsqueeze(1).repeat(1, query.shape[1], 1, 1)
else:
p_adj = adj_matrix.repeat(1, query.shape[1], 1, 1)
distance_matrix = torch.tensor(distance_matrix).squeeze().masked_fill(
mask.repeat(1, mask.shape[-1], 1) == 0, np.inf)
distance_matrix = self.dist_kernel(distance_matrix)
p_dist = distance_matrix.unsqueeze(1).repeat(1, query.shape[1], 1, 1)
p_weighted = self.lambda_attention * p_attn + self.lambda_distance * p_dist + self.lambda_adjacency * p_adj
p_weighted = self.dropout_p(p_weighted)
return torch.matmul(p_weighted.float(), value.float()), p_attn
def forward(self,
query: torch.Tensor,
key: torch.Tensor,
value: torch.Tensor,
mask: torch.Tensor,
adj_matrix: torch.Tensor,
distance_matrix: torch.Tensor,
dropout_p: float = 0.0,
eps: float = 1e-6,
inf: float = 1e12) -> torch.Tensor:
"""Output computation for the MultiHeadedAttention layer.
Parameters
----------
query: torch.Tensor
Standard query parameter for attention.
key: torch.Tensor
Standard key parameter for attention.
value: torch.Tensor
Standard value parameter for attention.
mask: torch.Tensor
Masks out padding values so that they are not taken into account when computing the attention score.
adj_matrix: torch.Tensor
Adjacency matrix of the input molecule, returned from dc.feat.MATFeaturizer()
distance_matrix: torch.Tensor
Distance matrix of the input molecule, returned from dc.feat.MATFeaturizer()
dropout_p: float
Dropout probability.
eps: float
Epsilon value
inf: float
Value of infinity to be used.
"""
if mask is not None and len(mask.shape) <= 2:
mask = mask.unsqueeze(1)
batch_size = query.size(0)
query, key, value = [
layer(x).view(batch_size, -1, self.h, self.d_k).transpose(1, 2)
for layer, x in zip(self.linear_layers, (query, key, value))
]
x, _ = self._single_attention(query, key, value, mask, adj_matrix,
distance_matrix, dropout_p, eps, inf)
x = x.transpose(1, 2).contiguous().view(batch_size, -1,
self.h * self.d_k)
return self.output_linear(x)
class MATEncoderLayer(nn.Module):
"""Encoder layer for use in the Molecular Attention Transformer [1]_.
The MATEncoder layer primarily consists of a self-attention layer (MultiHeadedMATAttention) and a feed-forward layer (PositionwiseFeedForward).
This layer can be stacked multiple times to form an encoder.
References
----------
.. [1] <NAME> et al. "Molecule Attention Transformer" Graph Representation Learning workshop and Machine Learning and the Physical Sciences workshop at NeurIPS 2019. 2020. https://arxiv.org/abs/2002.08264
Examples
--------
>>> from rdkit import Chem
>>> import torch
>>> import deepchem
>>> from deepchem.models.torch_models.layers import MATEmbedding, MATEncoderLayer
>>> input_smile = "CC"
>>> feat = deepchem.feat.MATFeaturizer()
>>> out = feat.featurize(input_smile)
>>> node = torch.tensor(out[0].node_features).float().unsqueeze(0)
>>> adj = torch.tensor(out[0].adjacency_matrix).float().unsqueeze(0)
>>> dist = torch.tensor(out[0].distance_matrix).float().unsqueeze(0)
>>> mask = torch.sum(torch.abs(node), dim=-1) != 0
>>> layer = MATEncoderLayer()
>>> op = MATEmbedding()(node)
>>> output = layer(op, mask, adj, dist)
"""
def __init__(self,
dist_kernel: str = 'softmax',
lambda_attention: float = 0.33,
lambda_distance: float = 0.33,
h: int = 16,
sa_hsize: int = 1024,
sa_dropout_p: float = 0.0,
output_bias: bool = True,
d_input: int = 1024,
d_hidden: int = 1024,
d_output: int = 1024,
activation: str = 'leakyrelu',
n_layers: int = 1,
ff_dropout_p: float = 0.0,
encoder_hsize: int = 1024,
encoder_dropout_p: float = 0.0):
"""Initialize a MATEncoder layer.
Parameters
----------
dist_kernel: str
Kernel activation to be used. Can be either 'softmax' for softmax or 'exp' for exponential, for the self-attention layer.
lambda_attention: float
Constant to be multiplied with the attention matrix in the self-attention layer.
lambda_distance: float
Constant to be multiplied with the distance matrix in the self-attention layer.
h: int
Number of attention heads for the self-attention layer.
sa_hsize: int
Size of dense layer in the self-attention layer.
sa_dropout_p: float
Dropout probability for the self-attention layer.
output_bias: bool
If True, dense layers will use bias vectors in the self-attention layer.
d_input: int
Size of input layer in the feed-forward layer.
d_hidden: int
Size of hidden layer in the feed-forward layer.
d_output: int
Size of output layer in the feed-forward layer.
activation: str
Activation function to be used in the feed-forward layer.
Can choose between 'relu' for ReLU, 'leakyrelu' for LeakyReLU, 'prelu' for PReLU,
'tanh' for TanH, 'selu' for SELU, 'elu' for ELU and 'linear' for linear activation.
n_layers: int
Number of layers in the feed-forward layer.
ff_dropout_p: float
Dropout probability in the feed-forward layer.
encoder_hsize: int
Size of Dense layer for the encoder itself.
encoder_dropout_p: float
Dropout probability for connections in the encoder layer.
"""
super(MATEncoderLayer, self).__init__()
self.self_attn = MultiHeadedMATAttention(dist_kernel, lambda_attention,
lambda_distance, h, sa_hsize,
sa_dropout_p, output_bias)
self.feed_forward = PositionwiseFeedForward(d_input, d_hidden, d_output,
activation, n_layers,
ff_dropout_p)
layer = SublayerConnection(size=encoder_hsize,
dropout_p=encoder_dropout_p)
self.sublayer = nn.ModuleList([layer for _ in range(2)])
self.size = encoder_hsize
def forward(self,
x: torch.Tensor,
mask: torch.Tensor,
adj_matrix: torch.Tensor,
distance_matrix: torch.Tensor,
sa_dropout_p: float = 0.0) -> torch.Tensor:
"""Output computation for the MATEncoder layer.
In the MATEncoderLayer initialization, self.sublayer is defined as an nn.ModuleList of 2 layers. We will be passing our computation through these layers sequentially.
nn.ModuleList is subscriptable and thus we can access it as self.sublayer[0], for example.
Parameters
----------
x: torch.Tensor
Input tensor.
mask: torch.Tensor
Masks out padding values so that they are not taken into account when computing the attention score.
adj_matrix: torch.Tensor
Adjacency matrix of a molecule.
distance_matrix: torch.Tensor
Distance matrix of a molecule.
sa_dropout_p: float
Dropout probability for the self-attention layer (MultiHeadedMATAttention).
"""
x = self.sublayer[0](x,
self.self_attn(x,
x,
x,
mask=mask,
dropout_p=sa_dropout_p,
adj_matrix=adj_matrix,
distance_matrix=distance_matrix))
return self.sublayer[1](x, self.feed_forward(x))
class SublayerConnection(nn.Module):
"""SublayerConnection layer based on the paper `Attention Is All You Need <https://arxiv.org/abs/1706.03762>`_.
The SublayerConnection normalizes and adds dropout to the output tensor of an arbitrary layer.
It further adds a residual connection between the input of the arbitrary layer and the dropout-adjusted layer output.
Examples
--------
>>> from deepchem.models.torch_models.layers import SublayerConnection
>>> scale = 0.35
>>> layer = SublayerConnection(2, 0.)
>>> input_ar = torch.tensor([[1., 2.], [5., 6.]])
>>> output = layer(input_ar, input_ar)
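With ``dropout_p=0`` the computation reduces to ``x + LayerNorm(output)``.
A minimal sketch (illustrative only; a freshly initialized ``nn.LayerNorm``
has the same default affine parameters as ``layer.norm``):
.. code-block:: python
    manual = input_ar + torch.nn.LayerNorm(2)(input_ar)  # matches `output`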
"""
def __init__(self, size: int, dropout_p: float = 0.0):
"""Initialize a SublayerConnection Layer.
Parameters
----------
size: int
Size of layer.
dropout_p: float
Dropout probability.
"""
super(SublayerConnection, self).__init__()
self.norm = nn.LayerNorm(size)
self.dropout_p = nn.Dropout(dropout_p)
def forward(self, x: torch.Tensor, output: torch.Tensor) -> torch.Tensor:
"""Output computation for the SublayerConnection layer.
Takes an input tensor x, then adds the dropout-adjusted sublayer output for normalized x to it.
This is done to add a residual connection followed by LayerNorm.
Parameters
----------
x: torch.Tensor
Input tensor.
output: torch.Tensor
Layer whose normalized output will be added to x.
"""
if x is None:
return self.dropout_p(self.norm(output))
return x + self.dropout_p(self.norm(output))
class PositionwiseFeedForward(nn.Module):
"""PositionwiseFeedForward is a layer used to define the position-wise feed-forward (FFN) algorithm for the Molecular Attention Transformer [1]_
Each layer in the MAT encoder contains a fully connected feed-forward network which applies two linear transformations and the given activation function.
This is done in addition to the SublayerConnection module.
Note: This modified version of the `PositionwiseFeedForward` class contains a `dropout_at_input_no_act` condition to facilitate its use in defining
the feed-forward (FFN) algorithm for the Directed Message Passing Neural Network (D-MPNN) [2]_
References
----------
.. [1] <NAME> et al. "Molecule Attention Transformer" Graph Representation Learning workshop and Machine Learning and the Physical Sciences workshop at NeurIPS 2019. 2020. https://arxiv.org/abs/2002.08264
.. [2] Analyzing Learned Molecular Representations for Property Prediction https://arxiv.org/pdf/1904.01561.pdf
Examples
--------
>>> from deepchem.models.torch_models.layers import PositionwiseFeedForward
>>> feed_fwd_layer = PositionwiseFeedForward(d_input = 2, d_hidden = 2, d_output = 2, activation = 'relu', n_layers = 1, dropout_p = 0.1)
>>> input_tensor = torch.tensor([[1., 2.], [5., 6.]])
>>> output_tensor = feed_fwd_layer(input_tensor)
"""
def __init__(self,
d_input: int = 1024,
d_hidden: int = 1024,
d_output: int = 1024,
activation: str = 'leakyrelu',
n_layers: int = 1,
dropout_p: float = 0.0,
dropout_at_input_no_act: bool = False):
"""Initialize a PositionwiseFeedForward layer.
Parameters
----------
d_input: int
Size of input layer.
d_hidden: int (same as d_input if d_hidden = 0)
Size of hidden layer.
d_output: int (same as d_input if d_output = 0)
Size of output layer.
activation: str
Activation function to be used. Can choose between 'relu' for ReLU, 'leakyrelu' for LeakyReLU, 'prelu' for PReLU,
'tanh' for TanH, 'selu' for SELU, 'elu' for ELU and 'linear' for linear activation.
n_layers: int
Number of layers.
dropout_p: float
Dropout probability.
dropout_at_input_no_act: bool
If True, dropout is applied to the input tensor. For a single layer, the result is not passed through an activation function.
"""
super(PositionwiseFeedForward, self).__init__()
self.dropout_at_input_no_act: bool = dropout_at_input_no_act
if activation == 'relu':
self.activation: Any = nn.ReLU()
elif activation == 'leakyrelu':
self.activation = nn.LeakyReLU(0.1)
elif activation == 'prelu':
self.activation = nn.PReLU()
elif activation == 'tanh':
self.activation = nn.Tanh()
elif activation == 'selu':
self.activation = nn.SELU()
elif activation == 'elu':
self.activation = nn.ELU()
elif activation == "linear":
self.activation = lambda x: x
self.n_layers: int = n_layers
d_output = d_output if d_output != 0 else d_input
d_hidden = d_hidden if d_hidden != 0 else d_input
if n_layers == 1:
self.linears: Any = [nn.Linear(d_input, d_output)]
else:
self.linears = [nn.Linear(d_input, d_hidden)] + \
[nn.Linear(d_hidden, d_hidden) for _ in range(n_layers - 2)] + \
[nn.Linear(d_hidden, d_output)]
self.linears = nn.ModuleList(self.linears)
dropout_layer = nn.Dropout(dropout_p)
self.dropout_p = nn.ModuleList([dropout_layer for _ in range(n_layers)])
def forward(self, x: torch.Tensor) -> torch.Tensor:
"""Output Computation for the PositionwiseFeedForward layer.
Parameters
----------
x: torch.Tensor
Input tensor.
"""
if not self.n_layers:
return x
if self.n_layers == 1:
if self.dropout_at_input_no_act:
return self.linears[0](self.dropout_p[0](x))
else:
return self.dropout_p[0](self.activation(self.linears[0](x)))
else:
if self.dropout_at_input_no_act:
x = self.dropout_p[0](x)
for i in range(self.n_layers - 1):
x = self.dropout_p[i](self.activation(self.linears[i](x)))
return self.linears[-1](x)
class MATEmbedding(nn.Module):
"""Embedding layer to create embedding for inputs.
In an embedding layer, input is taken and converted to a vector representation for each input.
In the MATEmbedding layer, an input tensor is processed through a dropout-adjusted linear layer and the resultant vector is returned.
References
----------
.. [1] <NAME> et al. "Molecule Attention Transformer" Graph Representation Learning workshop and Machine Learning and the Physical Sciences workshop at NeurIPS 2019. 2020. https://arxiv.org/abs/2002.08264
Examples
--------
>>> from deepchem.models.torch_models.layers import MATEmbedding
>>> layer = MATEmbedding(d_input = 3, d_output = 3, dropout_p = 0.2)
>>> input_tensor = torch.tensor([1., 2., 3.])
>>> output = layer(input_tensor)
"""
def __init__(self,
d_input: int = 36,
d_output: int = 1024,
dropout_p: float = 0.0):
"""Initialize a MATEmbedding layer.
Parameters
----------
d_input: int
Size of input layer.
d_output: int
Size of output layer.
dropout_p: float
Dropout probability for layer.
"""
super(MATEmbedding, self).__init__()
self.linear_unit = nn.Linear(d_input, d_output)
self.dropout = nn.Dropout(dropout_p)
def forward(self, x: torch.Tensor) -> torch.Tensor:
"""Computation for the MATEmbedding layer.
Parameters
----------
x: torch.Tensor
Input tensor to be converted into a vector.
"""
return self.dropout(self.linear_unit(x))
class MATGenerator(nn.Module):
"""MATGenerator defines the linear and softmax generator step for the Molecular Attention Transformer [1]_.
In the MATGenerator, a Generator is defined which performs the Linear + Softmax generation step.
Depending on the type of aggregation selected, the attention output layer performs different operations.
References
----------
.. [1] <NAME> et al. "Molecule Attention Transformer" Graph Representation Learning workshop and Machine Learning and the Physical Sciences workshop at NeurIPS 2019. 2020. https://arxiv.org/abs/2002.08264
Examples
--------
>>> from deepchem.models.torch_models.layers import MATGenerator
>>> layer = MATGenerator(hsize = 3, aggregation_type = 'mean', d_output = 1, n_layers = 1, dropout_p = 0.3, attn_hidden = 128, attn_out = 4)
>>> input_tensor = torch.tensor([1., 2., 3.])
>>> mask = torch.tensor([1., 1., 1.])
>>> output = layer(input_tensor, mask)
"""
def __init__(self,
hsize: int = 1024,
aggregation_type: str = 'mean',
d_output: int = 1,
n_layers: int = 1,
dropout_p: float = 0.0,
attn_hidden: int = 128,
attn_out: int = 4):
"""Initialize a MATGenerator.
Parameters
----------
hsize: int
Size of input layer.
aggregation_type: str
Type of aggregation to be used. Can be 'grover', 'mean' or 'contextual'.
d_output: int
Size of output layer.
n_layers: int
Number of layers in MATGenerator.
dropout_p: float
Dropout probability for layer.
attn_hidden: int
Size of hidden attention layer.
attn_out: int
Size of output attention layer.
"""
super(MATGenerator, self).__init__()
if aggregation_type == 'grover':
self.att_net = nn.Sequential(
nn.Linear(hsize, attn_hidden, bias=False),
nn.Tanh(),
nn.Linear(attn_hidden, attn_out, bias=False),
)
hsize *= attn_out
if n_layers == 1:
self.proj: Any = nn.Linear(hsize, d_output)
else:
self.proj = []
for i in range(n_layers - 1):
self.proj.append(nn.Linear(hsize, attn_hidden))
self.proj.append(nn.LeakyReLU(negative_slope=0.1))
self.proj.append(nn.LayerNorm(attn_hidden))
self.proj.append(nn.Dropout(dropout_p))
self.proj.append(nn.Linear(attn_hidden, d_output))
self.proj = torch.nn.Sequential(*self.proj)
self.aggregation_type = aggregation_type
def forward(self, x: torch.Tensor, mask: torch.Tensor) -> torch.Tensor:
"""Computation for the MATGenerator layer.
Parameters
----------
x: torch.Tensor
Input tensor.
mask: torch.Tensor
Mask for padding so that padded values do not get included in attention score calculation.
"""
mask = mask.unsqueeze(-1).float()
out_masked = x * mask
if self.aggregation_type == 'mean':
out_sum = out_masked.sum(dim=1)
mask_sum = mask.sum(dim=(1))
out_avg_pooling = out_sum / mask_sum
elif self.aggregation_type == 'grover':
out_attn = self.att_net(out_masked)
out_attn = out_attn.masked_fill(mask == 0, -1e9)
out_attn = F.softmax(out_attn, dim=1)
out_avg_pooling = torch.matmul(torch.transpose(out_attn, -1, -2),
out_masked)
out_avg_pooling = out_avg_pooling.view(out_avg_pooling.size(0), -1)
elif self.aggregation_type == 'contextual':
out_avg_pooling = x
return self.proj(out_avg_pooling)
class GraphNetwork(torch.nn.Module):
"""Graph Networks
A Graph Network [1]_ takes a graph as input and returns an updated graph
as output. The output graph has same structure as input graph but it
has updated node features, edge features and global state features.
Parameters
----------
n_node_features: int
Number of features in a node
n_edge_features: int
Number of features in a edge
n_global_features: int
Number of global features
is_undirected: bool, optional (default True)
Directed or undirected graph
residual_connection: bool, optional (default True)
If True, the layer uses a residual connection during training
Example
-------
>>> import torch
>>> from deepchem.models.torch_models.layers import GraphNetwork as GN
>>> n_nodes, n_node_features = 5, 10
>>> n_edges, n_edge_features = 5, 2
>>> n_global_features = 4
>>> node_features = torch.randn(n_nodes, n_node_features)
>>> edge_features = torch.randn(n_edges, n_edge_features)
>>> edge_index = torch.tensor([[0, 1, 2, 3, 4], [1, 2, 3, 4, 0]]).long()
>>> global_features = torch.randn(1, n_global_features)
>>> gn = GN(n_node_features=n_node_features, n_edge_features=n_edge_features, n_global_features=n_global_features)
>>> node_features, edge_features, global_features = gn(node_features, edge_index, edge_features, global_features)
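The returned tensors keep the shapes of the corresponding inputs, since the
layer only updates features and leaves the graph structure unchanged. A
minimal sketch (illustrative only):
.. code-block:: python
    assert node_features.shape == (n_nodes, n_node_features)
    assert edge_features.shape == (n_edges, n_edge_features)
    assert global_features.shape == (1, n_global_features)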
References
----------
.. [1] Battaglia et al, Relational inductive biases, deep learning, and graph networks. https://arxiv.org/abs/1806.01261 (2018)
"""
def __init__(self,
n_node_features: int = 32,
n_edge_features: int = 32,
n_global_features: int = 32,
is_undirected: bool = True,
residual_connection: bool = True):
super().__init__()
self.n_node_features = n_node_features
self.n_edge_features = n_edge_features
self.n_global_features = n_global_features
self.is_undirected = is_undirected
self.residual_connection = residual_connection
self.edge_models, self.node_models, self.global_models = torch.nn.ModuleList(
), torch.nn.ModuleList(), torch.nn.ModuleList()
self.edge_models.append(
nn.Linear(in_features=n_node_features * 2 + n_edge_features +
n_global_features,
out_features=32))
self.node_models.append(
nn.Linear(in_features=n_node_features + n_edge_features +
n_global_features,
out_features=32))
self.global_models.append(
nn.Linear(in_features=n_node_features + n_edge_features +
n_global_features,
out_features=32))
# Dense layers for projecting the updated features back to their original dimensions
self.edge_dense = nn.Linear(in_features=32,
out_features=n_edge_features)
self.node_dense = nn.Linear(in_features=32,
out_features=n_node_features)
self.global_dense = nn.Linear(in_features=32,
out_features=n_global_features)
def reset_parameters(self) -> None:
self.edge_dense.reset_parameters()
self.node_dense.reset_parameters()
self.global_dense.reset_parameters()
for i in range(0, len(self.edge_models)):
self.edge_models[i].reset_parameters()
for i in range(0, len(self.node_models)):
self.node_models[i].reset_parameters()
for i in range(0, len(self.global_models)):
self.global_models[i].reset_parameters()
def _update_edge_features(self, node_features, edge_index, edge_features,
global_features, batch):
src_index, dst_index = edge_index
out = torch.cat((node_features[src_index], node_features[dst_index],
edge_features, global_features[batch]),
dim=1)
assert out.shape[
1] == self.n_node_features * 2 + self.n_edge_features + self.n_global_features
for model in self.edge_models:
out = model(out)
return self.edge_dense(out)
def _update_node_features(self, node_features, edge_index, edge_features,
global_features, batch):
src_index, dst_index = edge_index
# Compute mean edge features for each node by dst_index (each node
# receives information from edges which have that node as its destination,
# hence the computation uses dst_index to aggregate information)
edge_features_mean_by_node = scatter(src=edge_features,
index=dst_index,
dim=0,
reduce='mean')
out = torch.cat(
(node_features, edge_features_mean_by_node, global_features[batch]),
dim=1)
for model in self.node_models:
out = model(out)
return self.node_dense(out)
def _update_global_features(self, node_features, edge_features,
global_features, node_batch_map,
edge_batch_map):
edge_features_mean = scatter(src=edge_features,
index=edge_batch_map,
dim=0,
reduce='mean')
node_features_mean = scatter(src=node_features,
index=node_batch_map,
dim=0,
reduce='mean')
out = torch.cat(
(edge_features_mean, node_features_mean, global_features), dim=1)
for model in self.global_models:
out = model(out)
return self.global_dense(out)
def forward(
self,
node_features: Tensor,
edge_index: Tensor,
edge_features: Tensor,
global_features: Tensor,
batch: Optional[Tensor] = None) -> Tuple[Tensor, Tensor, Tensor]:
"""Output computation for a GraphNetwork
Parameters
----------
node_features: torch.Tensor
Input node features of shape :math:`(|\mathcal{V}|, F_n)`
edge_index: torch.Tensor
Edge indexes of shape :math:`(2, |\mathcal{E}|)`
edge_features: torch.Tensor
Edge features of the graph, shape: :math:`(|\mathcal{E}|, F_e)`
global_features: torch.Tensor
Global features of the graph, shape: :math:`(1, F_g)`, where :math:`|\mathcal{V}|` and :math:`|\mathcal{E}|` denote the number of nodes and edges in the graph, and
:math:`F_n`, :math:`F_e`, :math:`F_g` denote the number of node features, edge features and global state features respectively.
batch: torch.LongTensor (optional, default: None)
A vector that maps each node to its respective graph identifier. The attribute is used only when multiple graphs are batched together during a single forward pass.
"""
if batch is None:
batch = node_features.new_zeros(node_features.size(0),
dtype=torch.int64)
node_features_copy, edge_features_copy, global_features_copy = node_features, edge_features, global_features
if self.is_undirected is True:
# holding bi-directional edges in case of undirected graphs
edge_index = torch.cat((edge_index, edge_index.flip([0])), dim=1)
edge_features_len = edge_features.shape[0]
edge_features = torch.cat((edge_features, edge_features), dim=0)
edge_batch_map = batch[edge_index[0]]
edge_features = self._update_edge_features(node_features, edge_index,
edge_features,
global_features,
edge_batch_map)
node_features = self._update_node_features(node_features, edge_index,
edge_features,
global_features, batch)
global_features = self._update_global_features(node_features,
edge_features,
global_features, batch,
edge_batch_map)
if self.is_undirected is True:
# converting edge features back to their original shape
split = torch.split(edge_features,
[edge_features_len, edge_features_len])
edge_features = (split[0] + split[1]) / 2
if self.residual_connection:
edge_features += edge_features_copy
node_features += node_features_copy
global_features += global_features_copy
return node_features, edge_features, global_features
def __repr__(self) -> str:
return (
f'{self.__class__.__name__}(n_node_features={self.n_node_features}, n_edge_features={self.n_edge_features}, n_global_features={self.n_global_features}, is_undirected={self.is_undirected}, residual_connection={self.residual_connection})'
)
class Affine(nn.Module):
"""Class which performs the Affine transformation.
This transformation is based on the affinity of the base distribution with
the target distribution. A geometric transformation is applied where
the parameters performs changes on the scale and shift of a function
(inputs).
Normalizing Flow transformations must be bijective in order to compute
the logarithm of jacobian's determinant. For this reason, transformations
must perform a forward and inverse pass.
Example
--------
>>> import deepchem as dc
>>> from deepchem.models.torch_models.layers import Affine
>>> import torch
>>> from torch.distributions import MultivariateNormal
>>> # initialize the transformation layer's parameters
>>> dim = 2
>>> samples = 96
>>> transforms = Affine(dim)
>>> # forward pass based on a given distribution
>>> distribution = MultivariateNormal(torch.zeros(dim), torch.eye(dim))
>>> input = distribution.sample(torch.Size((samples, dim)))
>>> len(transforms.forward(input))
2
>>> # inverse pass based on a distribution
>>> len(transforms.inverse(input))
2
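A forward pass followed by an inverse pass recovers the input up to floating
point error. A minimal sketch (illustrative only):
.. code-block:: python
    y, _ = transforms.forward(input)
    x_reconstructed, _ = transforms.inverse(y)
    assert torch.allclose(x_reconstructed, input)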
"""
def __init__(self, dim: int) -> None:
"""Create a Affine transform layer.
Parameters
----------
dim: int
Value of the Nth dimension of the dataset.
"""
super().__init__()
self.dim = dim
self.scale = nn.Parameter(torch.zeros(self.dim))
self.shift = nn.Parameter(torch.zeros(self.dim))
def forward(self, x: Sequence) -> Tuple[torch.Tensor, torch.Tensor]:
"""Performs a transformation between two different distributions. This
particular transformation represents the following function:
y = x * exp(a) + b, where a is the scale parameter and b performs a shift.
This class also returns the logarithm of the Jacobian's determinant,
which is useful when inverting a transformation and computing the
probability of the transformation.
Parameters
----------
x : Sequence
Tensor sample with the initial distribution data which will pass into
the normalizing flow algorithm.
Returns
-------
y : torch.Tensor
Transformed tensor according to Affine layer with the shape of 'x'.
log_det_jacobian : torch.Tensor
Tensor which represents the info about the deviation of the initial
and target distribution.
"""
y = torch.exp(self.scale) * x + self.shift
det_jacobian = torch.exp(self.scale.sum())
log_det_jacobian = torch.ones(y.shape[0]) * torch.log(det_jacobian)
return y, log_det_jacobian
def inverse(self, y: Sequence) -> Tuple[torch.Tensor, torch.Tensor]:
"""Performs a transformation between two different distributions.
This transformation represents the bacward pass of the function
mention before. Its mathematical representation is x = (y - b) / exp(a)
, where "a" is scale parameter and "b" performs a shift. This class
also returns the logarithm of the jacobians determinant which is
useful when invert a transformation and compute the probability of
the transformation.
Parameters
----------
y : Sequence
Tensor sample with transformed distribution data which will be used in
the normalizing algorithm inverse pass.
Returns
-------
x : torch.Tensor
Transformed tensor according to Affine layer with the shape of 'y'.
inverse_log_det_jacobian : torch.Tensor
Tensor which represents the information of the deviation of the initial
and target distribution.
"""
x = (y - self.shift) / torch.exp(self.scale)
det_jacobian = 1 / torch.exp(self.scale.sum())
inverse_log_det_jacobian = torch.ones(
x.shape[0]) * torch.log(det_jacobian)
return x, inverse_log_det_jacobian
class DMPNNEncoderLayer(nn.Module):
"""
Encoder layer for use in the Directed Message Passing Neural Network (D-MPNN) [1]_.
The role of the DMPNNEncoderLayer class is to generate molecule encodings in the following steps:
- Message passing phase
- Get new atom hidden states and readout phase
- Concatenate the global features
Let the diagram given below represent a molecule containing 5 atoms (nodes) and 4 bonds (edges):-
| 1 --- 5
| |
| 2 --- 4
| |
| 3
Let the bonds from atoms 1->2 (**B[12]**) and 2->1 (**B[21]**) be considered as 2 different bonds.
Hence, by considering the same for all atoms, the total number of bonds = 8.
Let:
- **atom features** : ``a1, a2, a3, a4, a5``
- **hidden states of atoms** : ``h1, h2, h3, h4, h5``
- **bond features bonds** : ``b12, b21, b23, b32, b24, b42, b15, b51``
- **initial hidden states of bonds** : ``(0)h12, (0)h21, (0)h23, (0)h32, (0)h24, (0)h42, (0)h15, (0)h51``
The hidden state of every bond is a function of the concatenated feature vector which contains
concatenation of the **features of initial atom of the bond** and **bond features**.
Example: ``(0)h21 = func1(concat(a2, b21))``
.. note::
Here func1 is ``self.W_i``
**The Message passing phase**
The goal of the message-passing phase is to generate **hidden states of all the atoms in the molecule**.
The hidden state of an atom is **a function of concatenation of atom features and messages (at T depth)**.
A message is a sum of **hidden states of bonds coming to the atom (at T depth)**.
.. note::
Depth refers to the number of iterations in the message passing phase (here, T iterations). After each iteration, the hidden states of the bonds are updated.
Example:
``h1 = func3(concat(a1, m1))``
.. note::
Here func3 is ``self.W_o``.
`m1` refers to the message coming to the atom.
``m1 = (T-1)h21 + (T-1)h51``
(hidden state of bond 2->1 + hidden state of bond 5->1) (at T depth)
for, depth T = 2:
- the hidden states of the bonds @ 1st iteration will be => (0)h21, (0)h51
- the hidden states of the bonds @ 2nd iteration will be => (1)h21, (1)h51
The hidden states of the bonds in the 1st iteration are already known.
For hidden states of the bonds in 2nd iteration, we follow the criterion that:
- hidden state of the bond is a function of **initial hidden state of bond**
and **messages coming to that bond in that iteration**
Example:
``(1)h21 = func2( (0)h21 , (1)m21 )``
.. note::
Here func2 is ``self.W_h``.
`(1)m21` refers to the messages coming to that bond 2->1 in that 2nd iteration.
The messages coming to a bond in an iteration are **a sum of hidden states of bonds (from the previous iteration) coming to this bond**.
Example:
``(1)m21 = (0)h32 + (0)h42``
| 2 <--- 3
| ^
| |
| 4
**Computing the messages**
.. code-block:: python
B0 B1 B2 B3 B4 B5 B6 B7 B8
f_ini_atoms_bonds = [(0)h12, (0)h21, (0)h23, (0)h32, (0)h24, (0)h42, (0)h15, (0)h51, h(-1)]
.. note::
h(-1) is an empty array of the same size as other hidden states of bond states.
.. code-block:: python
B0 B1 B2 B3 B4 B5 B6 B7 B8
mapping = [ [-1,B7] [B3,B5] [B0,B5] [-1,-1] [B0,B3] [-1,-1] [B1,-1] [-1,-1] [-1,-1] ]
Later, the encoder maps the concatenated features from ``f_ini_atoms_bonds``
through ``mapping`` in each iteration, up to the Tth iteration.
Next, the encoder sums up the concatenated features within the same bond index.
.. code-block:: python
(1)m12 (1)m21 (1)m23 (1)m32 (1)m24 (1)m42 (1)m15 (1)m51 m(-1)
message = [ [h(-1) + (0)h51] [(0)h32 + (0)h42] [(0)h12 + (0)h42] [h(-1) + h(-1)] [(0)h12 + (0)h32] [h(-1) + h(-1)] [(0)h21 + h(-1)] [h(-1) + h(-1)] [h(-1) + h(-1)] ]
Hence, this is how the encoder gets the messages for the message-passing steps.
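In code, this gather-and-sum step is a single indexing operation followed by
a sum over the incoming-bond axis. A minimal sketch (illustrative only;
``h_bonds`` and ``mapping`` stand for the padded bond hidden states and the
mapping array shown above):
.. code-block:: python
    # h_bonds: (num_bonds + 1) x hidden_size, last row is the zero padding h(-1)
    # mapping: num_bonds x max_incoming_bonds, padded with -1 (points at the zero row)
    messages = h_bonds[mapping].sum(1)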
**Get new atom hidden states and readout phase**
Hence now for h1:
.. code-block:: python
h1 = func3(
concat(
a1,
[
func2( (0)h21 , (0)h32 + (0)h42 ) +
func2( (0)h51 , 0 )
]
)
)
Similarly, h2, h3, h4 and h5 are calculated.
Next, all atom hidden states are concatenated to make a feature vector of the molecule:
``mol_encodings = [[h1, h2, h3, h4, h5]]``
**Concatenate the global features**
Let,
``global_features = [[gf1, gf2, gf3]]``
This array contains molecule level features. In case of this example, it contains 3 global features.
Hence after concatenation,
``mol_encodings = [[h1, h2, h3, h4, h5, gf1, gf2, gf3]]``
(Final output of the encoder)
References
----------
.. [1] Analyzing Learned Molecular Representations for Property Prediction https://arxiv.org/pdf/1904.01561.pdf
Examples
--------
>>> from rdkit import Chem
>>> import torch
>>> import deepchem as dc
>>> input_smile = "CC"
>>> feat = dc.feat.DMPNNFeaturizer(features_generators=['morgan'])
>>> graph = feat.featurize(input_smile)
>>> from deepchem.models.torch_models.dmpnn import _MapperDMPNN
>>> mapper = _MapperDMPNN(graph[0])
>>> atom_features, f_ini_atoms_bonds, atom_to_incoming_bonds, mapping, global_features = mapper.values
>>> atom_features = torch.from_numpy(atom_features).float()
>>> f_ini_atoms_bonds = torch.from_numpy(f_ini_atoms_bonds).float()
>>> atom_to_incoming_bonds = torch.from_numpy(atom_to_incoming_bonds)
>>> mapping = torch.from_numpy(mapping)
>>> global_features = torch.from_numpy(global_features).float()
>>> molecules_unbatch_key = len(atom_features)
>>> layer = DMPNNEncoderLayer(d_hidden=2)
>>> output = layer(atom_features, f_ini_atoms_bonds, atom_to_incoming_bonds, mapping, global_features, molecules_unbatch_key)
"""
def __init__(self,
use_default_fdim: bool = True,
atom_fdim: int = 133,
bond_fdim: int = 14,
d_hidden: int = 300,
depth: int = 3,
bias: bool = False,
activation: str = 'relu',
dropout_p: float = 0.0,
aggregation: str = 'mean',
aggregation_norm: Union[int, float] = 100):
"""Initialize a DMPNNEncoderLayer layer.
Parameters
----------
use_default_fdim: bool
If ``True``, ``self.atom_fdim`` and ``self.bond_fdim`` are initialized using values from the GraphConvConstants class. If ``False``, ``self.atom_fdim`` and ``self.bond_fdim`` are initialized from the values provided.
atom_fdim: int
Dimension of atom feature vector.
bond_fdim: int
Dimension of bond feature vector.
d_hidden: int
Size of hidden layer in the encoder layer.
depth: int
Number of message passing steps.
bias: bool
If ``True``, dense layers will use bias vectors.
activation: str
Activation function to be used in the encoder layer.
Can choose between 'relu' for ReLU, 'leakyrelu' for LeakyReLU, 'prelu' for PReLU,
'tanh' for TanH, 'selu' for SELU, and 'elu' for ELU.
dropout_p: float
Dropout probability for the encoder layer.
aggregation: str
Aggregation type to be used in the encoder layer.
Can choose between 'mean', 'sum', and 'norm'.
aggregation_norm: Union[int, float]
Value required if `aggregation` type is 'norm'.
"""
super(DMPNNEncoderLayer, self).__init__()
if use_default_fdim:
from deepchem.feat.molecule_featurizers.dmpnn_featurizer import GraphConvConstants
self.atom_fdim: int = GraphConvConstants.ATOM_FDIM
self.concat_fdim: int = GraphConvConstants.ATOM_FDIM + GraphConvConstants.BOND_FDIM
else:
self.atom_fdim = atom_fdim
self.concat_fdim = atom_fdim + bond_fdim
self.depth: int = depth
self.aggregation: str = aggregation
self.aggregation_norm: Union[int, float] = aggregation_norm
if activation == 'relu':
self.activation: nn.modules.activation.Module = nn.ReLU()
elif activation == 'leakyrelu':
self.activation = nn.LeakyReLU(0.1)
elif activation == 'prelu':
self.activation = nn.PReLU()
elif activation == 'tanh':
self.activation = nn.Tanh()
elif activation == 'selu':
self.activation = nn.SELU()
elif activation == 'elu':
self.activation = nn.ELU()
self.dropout: nn.modules.dropout.Module = nn.Dropout(dropout_p)
# Input
self.W_i: nn.Linear = nn.Linear(self.concat_fdim, d_hidden, bias=bias)
# Shared weight matrix across depths (default):
# For messages hidden states
self.W_h: nn.Linear = nn.Linear(d_hidden, d_hidden, bias=bias)
# For atom hidden states
self.W_o: nn.Linear = nn.Linear(self.atom_fdim + d_hidden, d_hidden)
def _get_updated_atoms_hidden_state(
self, atom_features: torch.Tensor, h_message: torch.Tensor,
atom_to_incoming_bonds: torch.Tensor) -> torch.Tensor:
"""
Method to compute atom hidden states.
Parameters
----------
atom_features: torch.Tensor
Tensor containing atoms features.
h_message: torch.Tensor
Tensor containing hidden states of messages.
atom_to_incoming_bonds: torch.Tensor
Tensor containing mapping from atom index to list of indices of incoming bonds.
Returns
-------
atoms_hidden_states: torch.Tensor
Tensor containing atom hidden states.
"""
messages_to_atoms: torch.Tensor = h_message[atom_to_incoming_bonds].sum(
1) # num_atoms x hidden_size
atoms_hidden_states: torch.Tensor = self.W_o(
torch.cat((atom_features, messages_to_atoms),
1)) # num_atoms x hidden_size
atoms_hidden_states = self.activation(
atoms_hidden_states) # num_atoms x hidden_size
atoms_hidden_states = self.dropout(
atoms_hidden_states) # num_atoms x hidden_size
return atoms_hidden_states # num_atoms x hidden_size
def _readout(self, atoms_hidden_states: torch.Tensor,
molecules_unbatch_key: List) -> torch.Tensor:
"""
Method to execute the readout phase. (compute molecules encodings from atom hidden states)
Parameters
----------
atoms_hidden_states: torch.Tensor
Tensor containing atom hidden states.
molecules_unbatch_key: List
List containing number of atoms in various molecules of a batch
Returns
-------
molecule_hidden_state: torch.Tensor
Tensor containing molecule encodings.
"""
mol_vecs: List = []
atoms_hidden_states_split: Sequence[Tensor] = torch.split(
atoms_hidden_states, molecules_unbatch_key)
mol_vec: torch.Tensor
for mol_vec in atoms_hidden_states_split:
if self.aggregation == 'mean':
mol_vec = mol_vec.sum(dim=0) / len(mol_vec)
elif self.aggregation == 'sum':
mol_vec = mol_vec.sum(dim=0)
elif self.aggregation == 'norm':
mol_vec = mol_vec.sum(dim=0) / self.aggregation_norm
else:
raise Exception("Invalid aggregation")
mol_vecs.append(mol_vec)
molecule_hidden_state: torch.Tensor = torch.stack(mol_vecs, dim=0)
return molecule_hidden_state # num_molecules x hidden_size
def forward(self, atom_features: torch.Tensor,
f_ini_atoms_bonds: torch.Tensor,
atom_to_incoming_bonds: torch.Tensor, mapping: torch.Tensor,
global_features: torch.Tensor,
molecules_unbatch_key: List) -> torch.Tensor:
"""
Output computation for the DMPNNEncoderLayer.
Steps:
- Get original bond hidden states from concatenation of initial atom and bond features. (``input``)
- Get initial messages hidden states. (``message``)
- Execute message passing step for ``self.depth - 1`` iterations.
- Get atom hidden states using atom features and message hidden states.
- Get molecule encodings.
- Concatenate global molecular features and molecule encodings.
Parameters
----------
atom_features: torch.Tensor
Tensor containing atoms features.
f_ini_atoms_bonds: torch.Tensor
Tensor containing concatenated feature vector which contains concatenation of initial atom and bond features.
atom_to_incoming_bonds: torch.Tensor
Tensor containing mapping from atom index to list of indices of incoming bonds.
mapping: torch.Tensor
Tensor containing the mapping that maps bond index to 'array of indices of the bonds'
incoming at the initial atom of the bond (excluding the reverse bonds).
global_features: torch.Tensor
Tensor containing molecule features.
molecules_unbatch_key: List
List containing number of atoms in various molecules of a batch
Returns
-------
output: torch.Tensor
Tensor containing the encodings of the molecules.
"""
input: torch.Tensor = self.W_i(
f_ini_atoms_bonds) # num_bonds x hidden_size
message: torch.Tensor = self.activation(
input) # num_bonds x hidden_size
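# Message passing phase: bond hidden states are updated for `self.depth - 1`
# steps by aggregating messages from incoming bonds (reverse bonds are excluded
# via `mapping`), with a skip connection from the initial `input`.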
for _ in range(1, self.depth):
message = message[mapping].sum(1) # num_bonds x hidden_size
h_message: torch.Tensor = input + self.W_h(
message) # num_bonds x hidden_size
h_message = self.activation(h_message) # num_bonds x hidden_size
h_message = self.dropout(h_message) # num_bonds x hidden_size
# num_atoms x hidden_size
atoms_hidden_states: torch.Tensor = self._get_updated_atoms_hidden_state(
atom_features, h_message, atom_to_incoming_bonds)
# num_molecules x hidden_size
output: torch.Tensor = self._readout(atoms_hidden_states,
molecules_unbatch_key)
# concat global features
if global_features.size()[0] != 0:
if len(global_features.shape) == 1:
global_features = global_features.view(len(output), -1)
output = torch.cat([output, global_features], dim=1)
return output # num_molecules x hidden_size
class InteratomicL2Distances(nn.Module):
"""Compute (squared) L2 Distances between atoms given neighbors.
This class is the pytorch implementation of the original InteratomicL2Distances layer implemented in Keras.
Pairwise distance (L2) is calculated between input atoms, given the number of neighbors to consider, along with the number of descriptors for every atom.
Examples
--------
>>> atoms = 5
>>> neighbors = 2
>>> coords = np.random.rand(atoms, 3)
>>> neighbor_list = np.random.randint(0, atoms, size=(atoms, neighbors))
>>> layer = InteratomicL2Distances(atoms, neighbors, 3)
>>> result = np.array(layer([coords, neighbor_list]))
>>> result.shape
(5, 2)
"""
def __init__(self, N_atoms: int, M_nbrs: int, ndim: int, **kwargs):
"""Constructor for this layer.
Parameters
----------
N_atoms: int
Number of atoms in the system total.
M_nbrs: int
Number of neighbors to consider when computing distances.
ndim: int
Number of descriptors for each atom.
"""
super(InteratomicL2Distances, self).__init__(**kwargs)
self.N_atoms = N_atoms
self.M_nbrs = M_nbrs
self.ndim = ndim
def __repr__(self) -> str:
return (
f'{self.__class__.__name__}(N_atoms={self.N_atoms}, M_nbrs={self.M_nbrs}, ndim={self.ndim})'
)
def forward(
self, inputs: List[Union[torch.Tensor,
List[Union[int, float]]]]) -> torch.Tensor:
"""Invokes this layer.
Parameters
----------
inputs: list
Should be of form `inputs=[coords, nbr_list]` where `coords` is a
tensor of shape `(None, N, 3)` and `nbr_list` is a list.
Returns
-------
Tensor of shape `(N_atoms, M_nbrs)` with interatomic distances.
"""
if len(inputs) != 2:
raise ValueError("InteratomicDistances requires coords,nbr_list")
coords, nbr_list = (torch.tensor(inputs[0]), torch.tensor(inputs[1]))
N_atoms, M_nbrs, ndim = self.N_atoms, self.M_nbrs, self.ndim
# Shape (N_atoms, M_nbrs, ndim)
nbr_coords = coords[nbr_list.long()]
# Shape (N_atoms, M_nbrs, ndim)
tiled_coords = torch.tile(torch.reshape(coords, (N_atoms, 1, ndim)),
(1, M_nbrs, 1))
# Shape (N_atoms, M_nbrs)
return torch.sum((tiled_coords - nbr_coords)**2, dim=2)
class RealNVPLayer(nn.Module):
"""Real NVP Transformation Layer
This class is a transformation layer used in a NormalizingFlow model. Real NVP
(real-valued non-volume preserving) is a type of normalizing flow layer whose
main advantage is that the inverse pass is easy to compute [1]_, which makes it
well suited to learning a target distribution.
Example
-------
>>> import torch
>>> import torch.nn as nn
>>> from torch.distributions import MultivariateNormal
>>> from deepchem.models.torch_models.layers import RealNVPLayer
>>> dim = 2
>>> samples = 96
>>> data = MultivariateNormal(torch.zeros(dim), torch.eye(dim))
>>> tensor = data.sample(torch.Size((samples, dim)))
>>> layers = 4
>>> hidden_size = 16
>>> masks = F.one_hot(torch.tensor([i % 2 for i in range(layers)])).float()
>>> layers = nn.ModuleList([RealNVPLayer(mask, hidden_size) for mask in masks])
>>> for layer in layers:
... _, inverse_log_det_jacobian = layer.inverse(tensor)
... inverse_log_det_jacobian = inverse_log_det_jacobian.detach().numpy()
>>> len(inverse_log_det_jacobian)
96
References
----------
.. [1] <NAME>., <NAME>., & <NAME>. (2021). Resampling Base
Distributions of Normalizing Flows. Retrieved from http://arxiv.org/abs/2110.15828
"""
def __init__(self, mask: torch.Tensor, hidden_size: int) -> None:
"""
Parameters
-----------
mask: torch.Tensor
Tensor of zeros and ones whose size depends on the number of layers
and dimensions the user requests.
hidden_size: int
The size of the outputs and inputs used on the internal nodes of the
transformation layer.
"""
super(RealNVPLayer, self).__init__()
self.mask = nn.Parameter(mask, requires_grad=False)
self.dim = len(mask)
self.s_func = nn.Sequential(
nn.Linear(in_features=self.dim, out_features=hidden_size),
nn.LeakyReLU(),
nn.Linear(in_features=hidden_size, out_features=hidden_size),
nn.LeakyReLU(),
nn.Linear(in_features=hidden_size, out_features=self.dim))
self.scale = nn.Parameter(torch.Tensor(self.dim))
self.t_func = nn.Sequential(
nn.Linear(in_features=self.dim, out_features=hidden_size),
nn.LeakyReLU(),
nn.Linear(in_features=hidden_size, out_features=hidden_size),
nn.LeakyReLU(),
nn.Linear(in_features=hidden_size, out_features=self.dim))
def forward(self, x: Sequence) -> Tuple[torch.Tensor, torch.Tensor]:
"""Forward pass.
This particular transformation is represented by the following function:
y = x * mask + (1 - mask) * (x * exp(s(x * mask)) + t(x * mask)),
where s and t are the learned scale and translation networks. This method also
returns the logarithm of the Jacobian determinant, which is needed when
inverting the transformation and when computing the probability of the
transformed sample.
Parameters
----------
x : Sequence
Tensor sample with the initial distribution data which will pass into
the normalizing algorithm
Returns
-------
y : torch.Tensor
Transformed tensor according to Real NVP layer with the shape of 'x'.
log_det_jacobian : torch.Tensor
Logarithm of the Jacobian determinant of the transformation, used when
computing the probability of the transformed sample.
"""
x_mask = x * self.mask
s = self.s_func(x_mask) * self.scale
t = self.t_func(x_mask)
y = x_mask + (1 - self.mask) * (x * torch.exp(s) + t)
log_det_jacobian = ((1 - self.mask) * s).sum(-1)
return y, log_det_jacobian
def inverse(self, y: Sequence) -> Tuple[torch.Tensor, torch.Tensor]:
""" Inverse pass
This method performs the inverse of the forward transformation. It also
returns the logarithm of the Jacobian determinant, which is used when fitting
the learnable parameters to the target distribution.
Parameters
----------
y : Sequence
Tensor sample with transformed distribution data which will be used in
the normalizing algorithm inverse pass.
Returns
-------
x : torch.Tensor
Transformed tensor according to Real NVP layer with the shape of 'y'.
inverse_log_det_jacobian : torch.Tensor
Logarithm of the Jacobian determinant of the inverse transformation.
"""
y_mask = y * self.mask
s = self.s_func(y_mask) * self.scale
t = self.t_func(y_mask)
x = y_mask + (1 - self.mask) * (y - t) * torch.exp(-s)
inverse_log_det_jacobian = ((1 - self.mask) * -s).sum(-1)
return x, inverse_log_det_jacobian
class NeighborList(nn.Module):
"""Computes a neighbor-list in PyTorch.
Neighbor-lists (also called Verlet Lists) are a tool for grouping
atoms which are close to each other spatially. This layer computes a
Neighbor List from a provided tensor of atomic coordinates. You can
think of this as a general "k-means" layer, but optimized for the
case `k==3`.
Examples
--------
>>> N_atoms = 5
>>> start = 0
>>> stop = 12
>>> nbr_cutoff = 3
>>> ndim = 3
>>> M_nbrs = 2
>>> coords = start + np.random.rand(N_atoms, ndim) * (stop - start)
>>> coords = torch.as_tensor(coords, dtype=torch.float)
>>> layer = NeighborList(N_atoms, M_nbrs, ndim, nbr_cutoff, start,
... stop)
>>> result = layer(coords)
>>> result.shape
torch.Size([5, 2])
TODO(rbharath): Make this layer support batching.
"""
def __init__(self, N_atoms: int, M_nbrs: int, ndim: int,
nbr_cutoff: Union[int,
float], start: int, stop: int, **kwargs):
"""
Parameters
----------
N_atoms: int
Maximum number of atoms this layer will neighbor-list.
M_nbrs: int
Maximum number of spatial neighbors possible for atom.
ndim: int
Dimensionality of space atoms live in. (Typically 3D, but sometimes will
want to use higher dimensional descriptors for atoms).
nbr_cutoff: int or float
Length in Angstroms (?) at which atom boxes are gridded.
start: int
Start of range for the box in which the locations of all grid points will be calculated in `self.get_cells()`.
stop: int
End of range for the box in which the locations of all grid points will be calculated in `self.get_cells()`.
"""
super(NeighborList, self).__init__(**kwargs)
self.N_atoms = N_atoms
self.M_nbrs = M_nbrs
self.ndim = ndim
# Number of grid cells
n_cells = int(((stop - start) / nbr_cutoff)**ndim)
self.n_cells = n_cells
self.nbr_cutoff = nbr_cutoff
self.start = start
self.stop = stop
def __repr__(self) -> str:
return (
f'{self.__class__.__name__}(N_atoms={self.N_atoms}, M_nbrs={self.M_nbrs}, ndim={self.ndim}, n_cells={self.n_cells}, nbr_cutoff={self.nbr_cutoff}, start={self.start}, stop={self.stop})'
)
def forward(self, inputs: torch.Tensor) -> torch.Tensor:
"""Invokes this layer.
Parameters
----------
inputs: torch.Tensor
Shape (num_atoms, ndim)
Returns
-------
neighbor_list: torch.Tensor
Shape `(N_atoms, M_nbrs)`
"""
if isinstance(inputs, SequenceCollection):
if len(inputs) != 1:
raise ValueError("NeighborList can only have one input")
inputs = inputs[0]
if len(inputs.shape) != 2:
# TODO(rbharath): Support batching
raise ValueError("Parent tensor must be (num_atoms, ndum)")
return self.compute_nbr_list(inputs)
def compute_nbr_list(self, coords: torch.Tensor) -> torch.Tensor:
"""Get closest neighbors for atoms.
Needs to handle padding for atoms with no neighbors.
Parameters
----------
coords: torch.Tensor
Shape (N_atoms, ndim)
Returns
-------
nbr_list: torch.Tensor
Shape (N_atoms, M_nbrs) of atom indices
"""
# Shape (n_cells, ndim)
cells = self.get_cells()
# List of length N_atoms, each element of different length uniques_i
nbrs = self.get_atoms_in_nbrs(coords, cells)
padding = torch.full((self.M_nbrs,), -1)
padded_nbrs = [
torch.concat([unique_nbrs, padding], 0) for unique_nbrs in nbrs
]
# List of length N_atoms, each element of different length uniques_i
# List of length N_atoms, each a tensor of shape
# (uniques_i, ndim)
nbr_coords = [
torch.index_select(coords, 0, atom_nbrs) for atom_nbrs in nbrs
]
# Add phantom atoms that exist far outside the box
coord_padding = torch.full((self.M_nbrs, self.ndim),
2 * self.stop).to(torch.float)
padded_nbr_coords = [
torch.cat([nbr_coord, coord_padding], 0) for nbr_coord in nbr_coords
]
# List of length N_atoms, each of shape (1, ndim)
atom_coords = torch.tensor_split(coords, self.N_atoms)
# TODO(rbharath): How does distance need to be modified here to
# account for periodic boundary conditions?
# List of length N_atoms each of shape (M_nbrs)
padded_dists = [
torch.sum((atom_coord - padded_nbr_coord)**2, dim=-1)
for (atom_coord,
padded_nbr_coord) in zip(atom_coords, padded_nbr_coords)
]
padded_closest_nbrs = [
torch.topk(padded_dist, k=self.M_nbrs, largest=False)[1]
for padded_dist in padded_dists
]
# N_atoms elts of size (M_nbrs,) each
padded_neighbor_list = [
torch.gather(padded_atom_nbrs, 0, padded_closest_nbr)
for (padded_atom_nbrs,
padded_closest_nbr) in zip(padded_nbrs, padded_closest_nbrs)
]
neighbor_list = torch.stack(padded_neighbor_list)
return neighbor_list
def get_atoms_in_nbrs(self, coords: torch.Tensor,
cells: torch.Tensor) -> List[torch.Tensor]:
"""Get the atoms in neighboring cells for each cells.
Parameters
----------
coords: torch.Tensor
Shape (N_atoms, ndim)
cells: torch.Tensor
Returns
-------
atoms_in_nbrs: List[torch.Tensor]
(N_atoms, n_nbr_cells, M_nbrs)
"""
# Shape (N_atoms, 1)
cells_for_atoms = self.get_cells_for_atoms(coords, cells)
# Find M_nbrs atoms closest to each cell
# Shape (n_cells, M_nbrs)
closest_atoms = self.get_closest_atoms(coords, cells)
# Associate each cell with its neighbor cells. Assumes periodic boundary
# conditions, so does wraparound. O(constant)
# Shape (n_cells, n_nbr_cells)
neighbor_cells = self.get_neighbor_cells(cells)
# Shape (N_atoms, n_nbr_cells)
neighbor_cells = torch.squeeze(
torch.index_select(neighbor_cells, 0,
torch.squeeze(cells_for_atoms)))
# Shape (N_atoms, n_nbr_cells, M_nbrs)
atoms_in_nbrs = torch.index_select(closest_atoms, 0,
neighbor_cells.flatten())
# Shape (N_atoms, n_nbr_cells*M_nbrs)
atoms_in_nbrs = torch.reshape(atoms_in_nbrs, [self.N_atoms, -1])
# List of length N_atoms, each element length uniques_i
nbrs_per_atom = torch.split(atoms_in_nbrs, self.N_atoms)
uniques = [
torch.unique(atom_nbrs, sorted=False)
for atom_nbrs in nbrs_per_atom[0]
]
# TODO(rbharath): FRAGILE! Uses fact that identity seems to be the first
# element removed to remove self from list of neighbors. Need to verify
# this holds more broadly or come up with robust alternative.
uniques = [unique[1:] for unique in uniques]
return uniques
def get_closest_atoms(self, coords: torch.Tensor,
cells: torch.Tensor) -> torch.Tensor:
"""For each cell, find M_nbrs closest atoms.
Let N_atoms be the number of atoms.
Parameters
----------
coords: torch.Tensor
(N_atoms, ndim) shape.
cells: torch.Tensor
(n_cells, ndim) shape.
Returns
-------
closest_inds: torch.Tensor
Of shape (n_cells, M_nbrs)
"""
N_atoms, n_cells, ndim, M_nbrs = (self.N_atoms, self.n_cells, self.ndim,
self.M_nbrs)
# Tile both cells and coords to form arrays of size (N_atoms*n_cells, ndim)
tiled_cells = torch.reshape(torch.tile(cells, (1, N_atoms)),
(N_atoms * n_cells, ndim))
# Shape (N_atoms*n_cells, ndim) after tile
tiled_coords = torch.tile(coords, (n_cells, 1))
# Shape (N_atoms*n_cells)
coords_vec = torch.sum((tiled_coords - tiled_cells)**2, dim=-1)
# Shape (n_cells, N_atoms)
coords_norm = torch.reshape(coords_vec, (n_cells, N_atoms))
# Find k atoms closest to this cell.
# Tensor of shape (n_cells, M_nbrs)
closest_inds = torch.topk(coords_norm, k=M_nbrs, largest=False)[1]
return closest_inds
def get_cells_for_atoms(self, coords: torch.Tensor,
cells: torch.Tensor) -> torch.Tensor:
"""Compute the cells each atom belongs to.
Parameters
----------
coords: torch.Tensor
Shape (N_atoms, ndim)
cells: torch.Tensor
(n_cells, ndim) shape.
Returns
-------
cells_for_atoms: torch.Tensor
Shape (N_atoms, 1)
"""
N_atoms, n_cells, ndim = self.N_atoms, self.n_cells, self.ndim
n_cells = int(n_cells)
# Tile both cells and coords to form arrays of size (N_atoms*n_cells, ndim)
tiled_cells = torch.tile(cells, (N_atoms, 1))
# Shape (N_atoms*n_cells, 1) after tile
tiled_coords = torch.reshape(torch.tile(coords, (1, n_cells)),
(n_cells * N_atoms, ndim))
coords_vec = torch.sum((tiled_coords - tiled_cells)**2, dim=-1)
coords_norm = torch.reshape(coords_vec, (N_atoms, n_cells))
closest_inds = torch.topk(coords_norm, k=1, largest=False)[1]
return closest_inds
def _get_num_nbrs(self) -> int:
"""Get number of neighbors in current dimensionality space."""
return 3**self.ndim
def get_neighbor_cells(self, cells: torch.Tensor) -> torch.Tensor:
"""Compute neighbors of cells in grid.
# TODO(rbharath): Do we need to handle periodic boundary conditions
properly here?
# TODO(rbharath): This doesn't handle boundaries well. We hard-code
# looking for n_nbr_cells neighbors, which isn't right for boundary cells in
# the cube.
Parameters
----------
cells: torch.Tensor
(n_cells, ndim) shape.
Returns
-------
nbr_cells: torch.Tensor
(n_cells, n_nbr_cells)
"""
ndim, n_cells = self.ndim, self.n_cells
n_nbr_cells = self._get_num_nbrs()
# Tile cells to form arrays of size (n_cells*n_cells, ndim)
# Two tilings (a, b, c, a, b, c, ...) vs. (a, a, a, b, b, b, etc.)
# Tile (a, a, a, b, b, b, etc.)
tiled_centers = torch.reshape(torch.tile(cells, (1, n_cells)),
(n_cells * n_cells, ndim))
# Tile (a, b, c, a, b, c, ...)
tiled_cells = torch.tile(cells, (n_cells, 1))
coords_vec = torch.sum((tiled_centers - tiled_cells)**2, dim=-1)
coords_norm = torch.reshape(coords_vec, (n_cells, n_cells))
closest_inds = torch.topk(coords_norm, k=n_nbr_cells, largest=False)[1]
return closest_inds
def get_cells(self) -> torch.Tensor:
"""Returns the locations of all grid points in box.
Suppose start is -10 Angstrom, stop is 10 Angstrom, nbr_cutoff is 1.
Then would return a list of length 20^3 whose entries would be
[(-10, -10, -10), (-10, -10, -9), ..., (9, 9, 9)]
Returns
-------
cells: torch.Tensor
(n_cells, ndim) shape.
"""
start, stop, nbr_cutoff = self.start, self.stop, self.nbr_cutoff
mesh_args = [
torch.arange(start, stop, nbr_cutoff) for _ in range(self.ndim)
]
return torch.reshape(
torch.permute(
torch.stack(torch.meshgrid(*mesh_args, indexing='xy')),
tuple(range(self.ndim, -1, -1))),
(self.n_cells, self.ndim)).to(torch.float)
class LSTMStep(nn.Module):
"""Layer that performs a single step LSTM update.
This is the Torch equivalent of the original implementation using Keras.
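Examples
--------
A minimal sketch (shapes and sizes below are chosen purely for illustration;
the layer converts array inputs to tensors internally):
>>> import numpy as np
>>> layer = LSTMStep(4, 6)
>>> x = np.random.rand(3, 6).astype(np.float32)
>>> h_0 = np.zeros((3, 4), dtype=np.float32)
>>> c_0 = np.zeros((3, 4), dtype=np.float32)
>>> h, state = layer([x, h_0, c_0])
>>> h.shape
torch.Size([3, 4])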
"""
def __init__(self,
output_dim,
input_dim,
init_fn='xavier_uniform_',
inner_init_fn='orthogonal_',
activation_fn='tanh',
inner_activation_fn='hardsigmoid',
**kwargs):
"""
Parameters
----------
output_dim: int
Dimensionality of output vectors.
input_dim: int
Dimensionality of input vectors.
init_fn: str
PyTorch initialization to use for W.
inner_init_fn: str
PyTorch initialization to use for U.
activation_fn: str
PyTorch activation to use for output.
inner_activation_fn: str
PyTorch activation to use for inner steps.
"""
super(LSTMStep, self).__init__(**kwargs)
self.init = init_fn
self.inner_init = inner_init_fn
self.output_dim = output_dim
# No other forget biases supported right now.
self.activation = activation_fn
self.inner_activation = inner_activation_fn
self.activation_fn = get_activation(activation_fn)
self.inner_activation_fn = get_activation(inner_activation_fn)
self.input_dim = input_dim
self.build()
def get_config(self):
config = super(LSTMStep, self).get_config()
config['output_dim'] = self.output_dim
config['input_dim'] = self.input_dim
config['init_fn'] = self.init
config['inner_init_fn'] = self.inner_init
config['activation_fn'] = self.activation
config['inner_activation_fn'] = self.inner_activation
return config
def get_initial_states(self, input_shape):
return [torch.zeros(input_shape), torch.zeros(input_shape)]
def build(self):
"""Constructs learnable weights for this layer."""
init = getattr(initializers, self.init)
inner_init = getattr(initializers, self.inner_init)
self.W = init(torch.empty(self.input_dim, 4 * self.output_dim))
self.U = inner_init(torch.empty(self.output_dim, 4 * self.output_dim))
self.b = torch.tensor(np.hstack(
(np.zeros(self.output_dim), np.ones(self.output_dim),
np.zeros(self.output_dim), np.zeros(self.output_dim))),
dtype=torch.float32)
def forward(self, inputs):
"""Execute this layer on input tensors.
Parameters
----------
inputs: list
List of three tensors (x, h_tm1, c_tm1). h_tm1 means "h, t-1".
Returns
-------
list
Returns h, [h, c]
"""
x, h_tm1, c_tm1 = inputs
x, h_tm1, c_tm1 = torch.tensor(x), torch.tensor(h_tm1), torch.tensor(
c_tm1)
z = torch.matmul(x, self.W) + torch.matmul(h_tm1, self.U) + self.b
z0 = z[:, :self.output_dim]
z1 = z[:, self.output_dim:2 * self.output_dim]
z2 = z[:, 2 * self.output_dim:3 * self.output_dim]
z3 = z[:, 3 * self.output_dim:]
i = self.inner_activation_fn(z0)
f = self.inner_activation_fn(z1)
c = f * c_tm1 + i * self.activation_fn(z2)
o = self.inner_activation_fn(z3)
h = o * self.activation_fn(c)
return h, [h, c]
class AtomicConvolution(nn.Module):
"""Implements the Atomic Convolutional transform, introduced in
Gomes, Joseph, et al. "Atomic convolutional networks for predicting
protein-ligand binding affinity." arXiv preprint arXiv:1703.10603
(2017).
At a high level, this transform performs a graph convolution
on the nearest neighbors graph in 3D space.
Examples
--------
>>> batch_size = 4
>>> max_atoms = 5
>>> max_neighbors = 2
>>> dimensions = 3
>>> radial_params = torch.tensor([[5.0, 2.0, 0.5], [10.0, 2.0, 0.5],
... [5.0, 1.0, 0.2]])
>>> input1 = np.random.rand(batch_size, max_atoms, dimensions).astype(np.float32)
>>> input2 = np.random.randint(max_atoms,
... size=(batch_size, max_atoms, max_neighbors))
>>> input3 = np.random.randint(1, 10, size=(batch_size, max_atoms, max_neighbors))
>>> layer = AtomicConvolution(radial_params=radial_params)
>>> result = layer([input1, input2, input3])
>>> result.shape
torch.Size([4, 5, 3])
"""
def __init__(self,
atom_types: Optional[Union[ArrayLike, torch.Tensor]] = None,
radial_params: Union[ArrayLike, torch.Tensor] = list(),
box_size: Optional[Union[ArrayLike, torch.Tensor]] = None,
**kwargs):
"""Initialize this layer.
Parameters
----------
atom_types : Union[ArrayLike, torch.Tensor], optional
List of atom types.
radial_params : Union[ArrayLike, torch.Tensor], optional
List of radial params.
box_size : Union[ArrayLike, torch.Tensor], optional
Length must be equal to the number of features.
"""
super(AtomicConvolution, self).__init__(**kwargs)
self.atom_types = atom_types
self.radial_params = radial_params
if box_size is None or isinstance(box_size, torch.Tensor):
self.box_size = box_size
else:
self.box_size = torch.tensor(box_size)
vars = []
for i in range(3):
val = np.array([p[i] for p in self.radial_params]).reshape(
(-1, 1, 1, 1))
vars.append(torch.tensor(val, dtype=torch.float))
self.rc = nn.Parameter(vars[0])
self.rs = nn.Parameter(vars[1])
self.re = nn.Parameter(vars[2])
def __repr__(self):
return (
f'{self.__class__.__name__}(atom_types={self.atom_types}, radial_params={self.radial_params}, box_size={self.box_size}, rc={self.rc}, rs={self.rs}, re={self.re})'
)
def forward(
self, inputs: Sequence[Union[ArrayLike,
torch.Tensor]]) -> torch.Tensor:
"""Invoke this layer.
B, N, M, d, l = batch_size, max_num_atoms, max_num_neighbors, num_features, len(radial_params) * len(atom_types)
Parameters
----------
inputs: Sequence[Union[ArrayLike, torch.Tensor]]
First input are the coordinates/features, of shape (B, N, d)
Second input is the neighbor list, of shape (B, N, M)
Third input are the atomic numbers of neighbor atoms, of shape (B, N, M)
Returns
-------
torch.Tensor of shape (B, N, l)
Output of atomic convolution layer.
Raises
------
ValueError
When the length of `inputs` is not equal to 3.
"""
if len(inputs) != 3:
raise ValueError(
f"`inputs` has to be of length 3, got: {len(inputs)}")
X = torch.tensor(inputs[0])
Nbrs = torch.tensor(inputs[1], dtype=torch.int64)
Nbrs_Z = torch.tensor(inputs[2])
B, N, d = X.shape
M = Nbrs.shape[-1]
D = self.distance_tensor(X, Nbrs, self.box_size, B, N, M, d)
R = self.distance_matrix(D)
R = torch.unsqueeze(R, 0)
rsf = self.radial_symmetry_function(R, self.rc, self.rs, self.re)
if not self.atom_types:
cond = torch.not_equal(Nbrs_Z, 0).to(torch.float).reshape(
(1, -1, N, M))
layer = torch.sum(cond * rsf, 3)
else:
# Sum the pairwise-interactions between atoms that are of `atom_type` and its neighbors for each atom type in `atom_types`.
symmetries = []
for atom_type in self.atom_types:
cond = torch.eq(Nbrs_Z, atom_type).to(torch.float).reshape(
(1, -1, N, M))
symmetries.append(torch.sum(cond * rsf, 3))
layer = torch.concat(symmetries, 0)
layer = torch.permute(layer, [1, 2, 0])
var, mean = torch.var_mean(layer, [0, 2])
var, mean = var.detach(), mean.detach()
return F.batch_norm(layer, mean, var)
def distance_tensor(self, X: torch.Tensor, Nbrs: torch.Tensor,
box_size: Union[torch.Tensor, None], B: int, N: int,
M: int, d: int) -> torch.Tensor:
"""Calculate distance tensor for a batch of molecules.
B, N, M, d = batch_size, max_num_atoms, max_num_neighbors, num_features
Parameters
----------
X : torch.Tensor of shape (B, N, d)
Coordinates/features.
Nbrs : torch.Tensor of shape (B, N, M)
Neighbor list.
box_size : torch.Tensor
Length must be equal to `d`.
B : int
Batch size
N : int
Maximum number of atoms
M : int
Maximum number of neighbors
d : int
Number of features
Returns
-------
torch.Tensor of shape (B, N, M, d)
Coordinates/features distance tensor.
Raises
------
ValueError
When the length of `box_size` is not equal to `d`.
"""
if box_size is not None and len(box_size) != d:
raise ValueError("Length of `box_size` must be equal to `d`")
flat_neighbors = torch.reshape(Nbrs, (-1, N * M))
neighbor_coords = torch.stack(
[X[b, flat_neighbors[b]] for b in range(B)])
neighbor_coords = torch.reshape(neighbor_coords, (-1, N, M, d))
D = neighbor_coords - torch.unsqueeze(X, 2)
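# If a periodic box is provided, wrap each displacement to its nearest
# periodic image (minimum-image convention).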
if box_size is not None:
box_size = torch.reshape(box_size, (1, 1, 1, d))
D -= torch.round(D / box_size) * box_size
return D
def distance_matrix(self, D: torch.Tensor) -> torch.Tensor:
"""Calculate a distance matrix, given a distance tensor.
B, N, M, d = batch_size, max_num_atoms, max_num_neighbors, num_features
Parameters
----------
D : torch.Tensor of shape (B, N, M, d)
Distance tensor
Returns
-------
torch.Tensor of shape (B, N, M)
Distance matrix.
"""
return torch.sqrt(torch.sum(torch.mul(D, D), 3))
def gaussian_distance_matrix(self, R: torch.Tensor, rs: torch.Tensor,
re: torch.Tensor) -> torch.Tensor:
"""Calculate a Gaussian distance matrix.
B, N, M, l = batch_size, max_num_atoms, max_num_neighbors, len(radial_params)
Parameters
----------
R : torch.Tensor of shape (B, N, M)
Distance matrix.
rs : torch.Tensor of shape (l, 1, 1, 1)
Gaussian distance matrix mean.
re : torch.Tensor of shape (l, 1, 1, 1)
Gaussian distance matrix width.
Returns
-------
torch.Tensor of shape (B, N, M)
Gaussian distance matrix.
"""
return torch.exp(-re * (R - rs)**2)
def radial_cutoff(self, R: torch.Tensor, rc: torch.Tensor) -> torch.Tensor:
"""Calculate a radial cut-off matrix.
B, N, M, l = batch_size, max_num_atoms, max_num_neighbors, len(radial_params)
Parameters
----------
R : torch.Tensor of shape (B, N, M)
Distance matrix.
rc : torch.Tensor of shape (l, 1, 1, 1)
Interaction cutoff (in angstrom).
Returns
-------
torch.Tensor of shape (B, N, M)
Radial cutoff matrix.
"""
T = 0.5 * (torch.cos(np.pi * R / rc) + 1)
E = torch.zeros_like(T)
cond = torch.less_equal(R, rc)
FC = torch.where(cond, T, E)
return FC
def radial_symmetry_function(self, R: torch.Tensor, rc: torch.Tensor,
rs: torch.Tensor,
re: torch.Tensor) -> torch.Tensor:
"""Calculate a radial symmetry function.
B, N, M, l = batch_size, max_num_atoms, max_num_neighbors, len(radial_params)
Parameters
----------
R : torch.Tensor of shape (B, N, M)
Distance matrix.
rc : torch.Tensor of shape (l, 1, 1, 1)
Interaction cutoff (in angstrom).
rs : torch.Tensor of shape (l, 1, 1, 1)
Gaussian distance matrix mean.
re : torch.Tensor of shape (l, 1, 1, 1)
Gaussian distance matrix width.
Returns
-------
torch.Tensor of shape (B, N, M)
Pre-summation radial symmetry function.
"""
K = self.gaussian_distance_matrix(R, rs, re)
FC = self.radial_cutoff(R, rc)
return torch.mul(K, FC)
class CombineMeanStd(nn.Module):
"""Generate Gaussian noise.
This is the Torch equivalent of the original implementation using Keras.
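Examples
--------
A minimal sketch (input shapes are illustrative; with ``training=False`` and
``training_only=True`` the output equals the means):
>>> import numpy as np
>>> means = np.random.rand(5, 3).astype(np.float32)
>>> stds = np.random.rand(5, 3).astype(np.float32)
>>> layer = CombineMeanStd(training_only=True, noise_epsilon=0.01)
>>> output = layer([means, stds], training=False)
>>> output.shape
torch.Size([5, 3])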
"""
def __init__(self,
training_only: bool = False,
noise_epsilon: float = 1.0,
**kwargs):
"""Create a CombineMeanStd layer.
This layer should have two inputs with the same shape, and its
output also has the same shape. Each element of the output is a
Gaussian distributed random number whose mean is the corresponding
element of the first input, and whose standard deviation is the
corresponding element of the second input.
Parameters
----------
training_only: bool, optional (default False).
if True, noise is only generated during training. During
prediction, the output is simply equal to the first input (that
is, the mean of the distribution used during training).
noise_epsilon: float, optional (default 1.0).
The noise is scaled by this factor
"""
super(CombineMeanStd, self).__init__(**kwargs)
self.training_only = training_only
self.noise_epsilon = noise_epsilon
def __repr__(self) -> str:
return (
f'{self.__class__.__name__}(training_only={self.training_only}, noise_epsilon={self.noise_epsilon})'
)
def forward(self,
inputs: Sequence[ArrayLike],
training: bool = True) -> torch.Tensor:
"""Invoke this layer.
Parameters
----------
inputs: Sequence[ArrayLike]
First element are the means for the random generated numbers.
Second element are the standard deviations for the random generated numbers.
training: bool, optional (default True).
Specifies whether to generate noise.
Noise is only added when training.
Returns
-------
Tensor of Gaussian distributed random numbers: torch.Tensor
Same shape as the means and standard deviations from `inputs`.
"""
if len(inputs) != 2:
raise ValueError("Must have two in_layers")
mean_parent, std_parent = torch.tensor(inputs[0]), torch.tensor(
inputs[1])
noise_scale = torch.tensor(training or
not self.training_only).to(torch.float)
sample_noise = torch.normal(0.0, self.noise_epsilon, mean_parent.shape)
return mean_parent + noise_scale * std_parent * sample_noise
class GatedRecurrentUnit(nn.Module):
""" Submodule for Message Passing """
def __init__(self, n_hidden=100, init='xavier_uniform_', **kwargs):
super(GatedRecurrentUnit, self).__init__(**kwargs)
self.n_hidden = n_hidden
self.init = init
init = getattr(initializers, self.init)
self.Wz = init(torch.empty(n_hidden, n_hidden))
self.Wr = init(torch.empty(n_hidden, n_hidden))
self.Wh = init(torch.empty(n_hidden, n_hidden))
self.Uz = init(torch.empty(n_hidden, n_hidden))
self.Ur = init(torch.empty(n_hidden, n_hidden))
self.Uh = init(torch.empty(n_hidden, n_hidden))
self.bz = torch.zeros((n_hidden,))
self.br = torch.zeros((n_hidden,))
self.bh = torch.zeros((n_hidden,))
def forward(self, inputs):
sigmoid = get_activation('sigmoid')
tanh = get_activation('tanh')
h_tm1, x = inputs
z = sigmoid(
torch.matmul(x, self.Wz) + torch.matmul(h_tm1, self.Uz) + self.bz)
r = sigmoid(
torch.matmul(x, self.Wr) + torch.matmul(h_tm1, self.Ur) + self.br)
h = (1 - z) * tanh(
torch.matmul(x, self.Wh) + torch.matmul(h_tm1 * r, self.Uh) +
self.bh) + z * x
return h
class WeightedLinearCombo(nn.Module):
"""Compute a weighted linear combination of input layers, where the weight variables are trained.
Examples
--------
>>> input1 = np.random.rand(5, 10).astype(np.float32)
>>> input2 = np.random.rand(5, 10).astype(np.float32)
>>> layer = WeightedLinearCombo(len([input1, input2]))
>>> result = layer([input1, input2])
>>> result.shape
torch.Size([5, 10])
"""
def __init__(self, num_inputs: int, std: float = 0.3, **kwargs):
"""
Parameters
----------
num_inputs: int
Number of inputs given to `self.forward()`
This is used to initialize the correct amount of weight variables to be trained.
std: float
The standard deviation for the normal distribution that is used to initialize the trainable weights.
"""
super(WeightedLinearCombo, self).__init__(**kwargs)
self.num_inputs = num_inputs
self.std = std
self.input_weights = nn.Parameter(torch.empty(self.num_inputs))
nn.init.normal_(self.input_weights, std=std)
def __repr__(self):
return (
f'{self.__class__.__name__}(num_inputs={self.num_inputs}, std={self.std}, input_weights={self.input_weights})'
)
def forward(
self, inputs: Sequence[Union[ArrayLike,
torch.Tensor]]) -> Optional[torch.Tensor]:
"""
Parameters
----------
inputs: Sequence[Union[ArrayLike, torch.Tensor]]
The initial input layers.
The length must be the same as `self.num_inputs`.
Returns
-------
out_tensor: torch.Tensor or None
The tensor containing the weighted linear combination.
"""
out_tensor = None
for in_tensor, w in zip(inputs, self.input_weights):
in_tensor = torch.FloatTensor(in_tensor)
if out_tensor is None:
out_tensor = w * in_tensor
else:
out_tensor += w * in_tensor
return out_tensor
class SetGather(nn.Module):
"""set2set gather layer for graph-based model
Models using this layer must set `pad_batches=True`
Torch Equivalent of Keras SetGather layer
Parameters
----------
M: int
Number of LSTM steps
batch_size: int
Number of samples in a batch(all batches must have same size)
n_hidden: int, optional
number of hidden units in the passing phase
Examples
--------
>>> import deepchem as dc
>>> import numpy as np
>>> from deepchem.models.torch_models import layers
>>> total_n_atoms = 4
>>> n_atom_feat = 4
>>> atom_feat = np.random.rand(total_n_atoms, n_atom_feat)
>>> atom_split = np.array([0, 0, 1, 1], dtype=np.int32)
>>> gather = layers.SetGather(2, 2, n_hidden=4)
>>> output_molecules = gather([atom_feat, atom_split])
>>> print(output_molecules.shape)
torch.Size([2, 8])
"""
def __init__(self,
M: int,
batch_size: int,
n_hidden: int = 100,
init='orthogonal',
**kwargs):
super(SetGather, self).__init__(**kwargs)
self.M = M
self.batch_size = batch_size
self.n_hidden = n_hidden
self.init = init
self.U = nn.Parameter(
torch.Tensor(2 * self.n_hidden, 4 * self.n_hidden).normal_(mean=0.0,
std=0.1))
self.b = nn.Parameter(
torch.cat((torch.zeros(self.n_hidden), torch.ones(self.n_hidden),
torch.zeros(self.n_hidden), torch.zeros(self.n_hidden))))
self.built = True
def __repr__(self) -> str:
return (
f'{self.__class__.__name__}(M={self.M}, batch_size={self.batch_size}, n_hidden={self.n_hidden}, init={self.init})'
)
def forward(self, inputs: List) -> torch.Tensor:
"""Perform M steps of set2set gather,
Detailed descriptions in: https://arxiv.org/abs/1511.06391
Parameters
----------
inputs: List
This contains two elements.
atom_features: np.ndarray
atom_split: np.ndarray
Returns
-------
q_star: torch.Tensor
Final state of the model after all M steps.
"""
atom_features, atom_split = inputs
c = torch.zeros((self.batch_size, self.n_hidden))
h = torch.zeros((self.batch_size, self.n_hidden))
for i in range(self.M):
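# Attention read: score each atom against the current query state `h`, softmax
# the scores within each molecule, and form the weighted readout `r`.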
q_expanded = h[atom_split]
e = (torch.from_numpy(atom_features) * q_expanded).sum(dim=-1)
e_mols = self._dynamic_partition(e, atom_split, self.batch_size)
# Add another value(~-Inf) to prevent error in softmax
e_mols = [
torch.cat([e_mol, torch.tensor([-1000.])], dim=0)
for e_mol in e_mols
]
a = torch.cat([
torch.nn.functional.softmax(e_mol[:-1], dim=0)
for e_mol in e_mols
],
dim=0)
r = scatter(torch.reshape(a, [-1, 1]) * atom_features,
torch.from_numpy(atom_split).long(),
dim=0)
# Model using this layer must set `pad_batches=True`
q_star = torch.cat([h, r], dim=1)
h, c = self._LSTMStep(q_star, c)
return q_star
def _LSTMStep(self,
h: torch.Tensor,
c: torch.Tensor,
x=None) -> Tuple[torch.Tensor, torch.Tensor]:
"""This methord performs a single step of a Long Short-Term Memory (LSTM) cell,
Parameters
----------
h: torch.Tensor
The hidden state of the LSTM cell.
c: torch.Tensor
The cell state of the LSTM cell.
Returns
-------
h_out: torch.Tensor
The updated hidden state of the LSTM cell.
c_out: torch.Tensor
The updated cell state of the LSTM cell.
"""
# z = torch.mm(h, self.U) + self.b
z = F.linear(h.float().detach(),
self.U.float().T.detach(), self.b.detach())
i = torch.sigmoid(z[:, :self.n_hidden])
f = torch.sigmoid(z[:, self.n_hidden:2 * self.n_hidden])
o = torch.sigmoid(z[:, 2 * self.n_hidden:3 * self.n_hidden])
z3 = z[:, 3 * self.n_hidden:]
c_out = f * c + i * torch.tanh(z3)
h_out = o * torch.tanh(c_out)
return h_out, c_out
def _dynamic_partition(self, input_tensor: torch.Tensor,
partition_tensor: np.ndarray,
num_partitions: int) -> List[torch.Tensor]:
"""Partitions `data` into `num_partitions` tensors using indices from `partitions`.
Parameters
----------
input_tensor: torch.Tensor
The tensor to be partitioned.
partition_tensor: np.ndarray
A 1-D tensor whose size is equal to the first dimension of `input_tensor`.
num_partitions: int
The number of partitions to output.
Returns
-------
partitions: List[torch.Tensor]
A list of `num_partitions` `Tensor` objects with the same type as `data`.
"""
# create a boolean mask for each partition
partition_masks = [partition_tensor == i for i in range(num_partitions)]
# partition the input tensor using the masks
partitions = [input_tensor[mask] for mask in partition_masks]
return partitions
class DTNNEmbedding(nn.Module):
"""DTNNEmbedding layer for DTNN model.
Assign initial atomic descriptors. [1]_
This layer creates 'n' embeddings as initial atomic descriptors, according to the requested weight initializer and periodic_table_length (the total number of unique atoms).
References
----------
[1] Schütt, <NAME>., et al. "Quantum-chemical insights from deep
tensor neural networks." Nature communications 8.1 (2017): 1-8.
Examples
--------
>>> from deepchem.models.torch_models import layers
>>> import torch
>>> layer = layers.DTNNEmbedding(30, 30, 'xavier_uniform_')
>>> output = layer(torch.tensor([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]))
>>> output.shape
torch.Size([10, 30])
"""
def __init__(self,
n_embedding: int = 30,
periodic_table_length: int = 30,
initalizer: str = 'xavier_uniform_',
**kwargs):
"""
Parameters
----------
n_embedding: int, optional
Number of features for each atom
periodic_table_length: int, optional
Length of embedding, 83=Bi
initalizer: str, optional
Weight initialization for filters.
Options: {xavier_uniform_, xavier_normal_, kaiming_uniform_, kaiming_normal_, trunc_normal_}
"""
super(DTNNEmbedding, self).__init__(**kwargs)
self.n_embedding = n_embedding
self.periodic_table_length = periodic_table_length
self.initalizer = initalizer # Set weight initialization
init_func: Callable = getattr(initializers, self.initalizer)
self.embedding_list: nn.Parameter = nn.Parameter(
init_func(
torch.empty([self.periodic_table_length, self.n_embedding])))
def __repr__(self) -> str:
"""Returns a string representing the configuration of the layer.
Returns
-------
n_embedding: int, optional
Number of features for each atom
periodic_table_length: int, optional
Length of embedding, 83=Bi
initalizer: str, optional
Weight initialization for filters.
Options: {xavier_uniform_, xavier_normal_, kaiming_uniform_, kaiming_normal_, trunc_normal_}
"""
return f'{self.__class__.__name__}(n_embedding={self.n_embedding}, periodic_table_length={self.periodic_table_length}, initalizer={self.initalizer})'
def forward(self, inputs: torch.Tensor):
"""Returns Embeddings according to indices.
Parameters
----------
inputs: torch.Tensor
Indices of Atoms whose embeddings are requested.
Returns
-------
atom_embeddings: torch.Tensor
Embeddings of atoms according to the given indices.
"""
atom_number = inputs
atom_embeddings = torch.nn.functional.embedding(atom_number,
self.embedding_list)
return atom_embeddings
class MolGANConvolutionLayer(nn.Module):
"""
Graph convolution layer used in MolGAN model.
MolGAN is a WGAN type model for generation of small molecules.
Not used directly, higher level layers like MolGANMultiConvolutionLayer use it.
This layer performs basic convolution on one-hot encoded matrices containing
atom and bond information. This layer also accepts three inputs for the case
when convolution is performed more than once and the results of the previous
convolution need to be reused. This avoids creating a separate layer that
accepts three inputs rather than two. The last input is the so-called
hidden tensor, which holds the results of the previous convolution, while the
first two are the unchanged input tensors.
Examples
--------
See: MolGANMultiConvolutionLayer for using in layers.
>>> import torch
>>> import torch.nn as nn
>>> import torch.nn.functional as F
>>> vertices = 9
>>> nodes = 5
>>> edges = 5
>>> units = 128
>>> layer1 = MolGANConvolutionLayer(units=units, edges=edges, nodes=nodes, name='layer1')
>>> adjacency_tensor = torch.randn((1, vertices, vertices, edges))
>>> node_tensor = torch.randn((1, vertices, nodes))
>>> output = layer1([adjacency_tensor, node_tensor])
References
----------
.. [1] <NAME> et al. "MolGAN: An implicit generative model
for small molecular graphs", https://arxiv.org/abs/1805.11973
"""
def __init__(self,
units: int,
nodes: int,
activation=torch.tanh,
dropout_rate: float = 0.0,
edges: int = 5,
name: str = "",
prev_shape: int = 0):
"""
Initialize this layer.
Parameters
---------
units: int
Dimension of dense layers used for convolution
nodes: int
Number of features in node tensor
activation: function, optional (default=Tanh)
activation function used across model, default is Tanh
dropout_rate: float, optional (default=0.0)
Dropout rate used by dropout layer
edges: int, optional (default=5)
How many dense layers to use in convolution.
Typically equal to number of bond types used in the model.
name: string, optional (default="")
Name of the layer
prev_shape: int, optional (default=0)
Shape of the previous layer, used when more than two inputs are passed
"""
super(MolGANConvolutionLayer, self).__init__()
self.activation = activation
self.dropout_rate: float = dropout_rate
self.units: int = units
self.edges: int = edges
self.name: str = name
self.nodes: int = nodes
# Case when >2 inputs are passed
if prev_shape:
self.dense1 = nn.ModuleList([
nn.Linear(prev_shape + self.nodes, self.units)
for _ in range(edges - 1)
])
else:
self.dense1 = nn.ModuleList(
[nn.Linear(self.nodes, self.units) for _ in range(edges - 1)])
self.dense2: nn.Linear = nn.Linear(nodes, self.units)
self.dropout: nn.Dropout = nn.Dropout(self.dropout_rate)
def __repr__(self) -> str:
"""
Returns a string representing the configuration of the layer.
Returns
-------
str
String representation of the layer
"""
return (
f'{self.__class__.__name__}(Units={self.units}, Nodes={self.nodes}, Activation={self.activation}, Dropout_rate={self.dropout_rate}, Edges={self.edges}, Name={self.name})'
)
def forward(
self,
inputs: List) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
"""
Invoke this layer
Parameters
----------
inputs: list
List of two input matrices, adjacency tensor and node features tensors
in one-hot encoding format.
Returns
--------
tuple(torch.Tensor,torch.Tensor,torch.Tensor)
First and second are original input tensors
Third is the result of convolution
"""
ic: int = len(inputs)
if ic < 2:
raise ValueError(
"MolGANConvolutionLayer requires at least two inputs: [adjacency_tensor, node_features_tensor]"
)
adjacency_tensor: torch.Tensor = inputs[0]
node_tensor: torch.Tensor = inputs[1]
if ic > 2:
hidden_tensor: torch.Tensor = inputs[2]
annotations = torch.cat((hidden_tensor, node_tensor), -1)
else:
annotations = node_tensor
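# One dense transform per bond type (the first adjacency channel, conventionally
# the "no bond" type, is dropped below); the transformed annotations are
# propagated along the adjacency slices, summed over bond types, and combined
# with a skip transform of the node features.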
output_dense: torch.Tensor = torch.stack(
[dense(annotations) for dense in self.dense1], 1)
adj: torch.Tensor = adjacency_tensor.permute(0, 3, 1, 2)[:, 1:, :, :]
output_mul: torch.Tensor = torch.matmul(adj, output_dense)
output_sum: torch.Tensor = torch.sum(output_mul,
dim=1) + self.dense2(node_tensor)
output_act: torch.Tensor = self.activation(output_sum)
output = self.dropout(output_act)
return adjacency_tensor, node_tensor, output
class MolGANAggregationLayer(nn.Module):
"""
Graph Aggregation layer used in MolGAN model.
MolGAN is a WGAN type model for generation of small molecules.
Performs aggregation on tensor resulting from convolution layers.
Given its simple nature it might be removed in future and moved to
MolGANEncoderLayer.
Examples
--------
>>> import torch
>>> import torch.nn as nn
>>> import torch.nn.functional as F
>>> vertices = 9
>>> nodes = 5
>>> edges = 5
>>> units = 128
>>> layer_1 = MolGANConvolutionLayer(units=units,nodes=nodes,edges=edges, name='layer1')
>>> layer_2 = MolGANAggregationLayer(units=128, name='layer2')
>>> adjacency_tensor = torch.randn((1, vertices, vertices, edges))
>>> node_tensor = torch.randn((1, vertices, nodes))
>>> hidden_1 = layer_1([adjacency_tensor, node_tensor])
>>> output = layer_2(hidden_1[2])
References
----------
.. [1] <NAME> et al. "MolGAN: An implicit generative model
for small molecular graphs", https://arxiv.org/abs/1805.11973
"""
def __init__(self,
units: int = 128,
activation=torch.tanh,
dropout_rate: float = 0.0,
name: str = "",
prev_shape: int = 0):
"""
Initialize the layer
Parameters
---------
units: int, optional (default=128)
Dimension of dense layers used for aggregation
activation: function, optional (default=Tanh)
activation function used across model, default is Tanh
dropout_rate: float, optional (default=0.0)
Used by dropout layer
name: string, optional (default="")
Name of the layer
prev_shape: int, optional (default=0)
Shape of the input tensor
"""
super(MolGANAggregationLayer, self).__init__()
self.units: int = units
self.activation = activation
self.dropout_rate: float = dropout_rate
self.name: str = name
if prev_shape:
self.d1 = nn.Linear(prev_shape, self.units)
self.d2 = nn.Linear(prev_shape, self.units)
else:
self.d1 = nn.Linear(self.units, self.units)
self.d2 = nn.Linear(self.units, self.units)
self.dropout_layer = nn.Dropout(dropout_rate)
def __repr__(self) -> str:
"""
String representation of the layer
Returns
-------
string
String representation of the layer
"""
return f"{self.__class__.__name__}(units={self.units}, activation={self.activation}, dropout_rate={self.dropout_rate})"
def forward(self, inputs: List) -> torch.Tensor:
"""
Invoke this layer
Parameters
----------
inputs: List
Single tensor resulting from graph convolution layer
Returns
--------
aggregation tensor: torch.Tensor
Result of aggregation function on input convolution tensor.
"""
i = torch.sigmoid(self.d1(inputs))
j = self.activation(self.d2(inputs))
output = torch.sum(i * j, dim=1)
output = self.activation(output)
output = self.dropout_layer(output)
return output
class MolGANMultiConvolutionLayer(nn.Module):
"""
Multiple pass convolution layer used in MolGAN model.
MolGAN is a WGAN type model for generation of small molecules.
It takes outputs of previous convolution layer and uses
them as inputs for the next one.
It simplifies the overall framework, but might be moved to
MolGANEncoderLayer in the future in order to reduce number of layers.
Example
-------
>>> import torch
>>> import torch.nn as nn
>>> import torch.nn.functional as F
>>> vertices = 9
>>> nodes = 5
>>> edges = 5
>>> units = (128,64)
>>> layer_1 = MolGANMultiConvolutionLayer(units=units, nodes=nodes, edges=edges, name='layer1')
>>> adjacency_tensor = torch.randn((1, vertices, vertices, edges))
>>> node_tensor = torch.randn((1, vertices, nodes))
>>> output = layer_1([adjacency_tensor, node_tensor])
References
----------
.. [1] <NAME> et al. "MolGAN: An implicit generative model
for small molecular graphs", https://arxiv.org/abs/1805.11973
"""
def __init__(self,
units: Tuple = (128, 64),
nodes: int = 5,
activation=torch.tanh,
dropout_rate: float = 0.0,
edges: int = 5,
name: str = "",
**kwargs):
"""
Initialize the layer
Parameters
---------
units: Tuple, optional (default=(128,64)), min_length=2
List of dimensions used by consecutive convolution layers.
The more values the more convolution layers invoked.
nodes: int, optional (default=5)
Number of features in node tensor
activation: function, optional (default=Tanh)
activation function used across model, default is Tanh
dropout_rate: float, optional (default=0.0)
Used by dropout layer
edges: int, optional (default=5)
Controls how many dense layers are used for a single convolution unit.
Typically matches number of bond types used in the molecule.
name: string, optional (default="")
Name of the layer
"""
super(MolGANMultiConvolutionLayer, self).__init__()
if len(units) < 2:
raise ValueError("units parameter must contain at least two values")
self.nodes: int = nodes
self.units: Tuple = units
self.activation = activation
self.dropout_rate: float = dropout_rate
self.edges: int = edges
self.name: str = name
self.first_convolution = MolGANConvolutionLayer(
units=self.units[0],
nodes=self.nodes,
activation=self.activation,
dropout_rate=self.dropout_rate,
edges=self.edges)
self.gcl = nn.ModuleList([
MolGANConvolutionLayer(units=u,
nodes=self.nodes,
activation=self.activation,
dropout_rate=self.dropout_rate,
edges=self.edges,
prev_shape=self.units[count])
for count, u in enumerate(self.units[1:])
])
def __repr__(self) -> str:
"""
String representation of the layer
Returns
-------
string
String representation of the layer
"""
return f"{self.__class__.__name__}(units={self.units}, activation={self.activation}, dropout_rate={self.dropout_rate}), edges={self.edges})"
def forward(self, inputs: List) -> torch.Tensor:
"""
Invoke this layer
Parameters
----------
inputs: list
List of two input matrices, adjacency tensor and node features tensors
in one-hot encoding format.
Returns
--------
convolution tensor: torch.Tensor
Result of input tensors going through convolution a number of times.
"""
adjacency_tensor = inputs[0]
node_tensor = inputs[1]
tensors = self.first_convolution([adjacency_tensor, node_tensor])
# Loop over the remaining convolution layers
for layer in self.gcl:
# Apply the current layer to the outputs from the previous layer
tensors = layer(tensors)
_, _, hidden_tensor = tensors
return hidden_tensor
class MolGANEncoderLayer(nn.Module):
"""
Main learning layer used by MolGAN model.
MolGAN is a WGAN type model for generation of small molecules.
Its role is to further simplify the model.
This layer can be manually built by stacking graph convolution layers
followed by graph aggregation.
Example
-------
>>> import torch
>>> import torch.nn as nn
>>> import torch.nn.functional as F
>>> vertices = 9
>>> nodes = 5
>>> edges = 5
>>> dropout_rate = 0.0
>>> adjacency_tensor = torch.randn((1, vertices, vertices, edges))
>>> node_tensor = torch.randn((1, vertices, nodes))
>>> graph = MolGANEncoderLayer(units = [(128,64),128], dropout_rate= dropout_rate, edges=edges, nodes=nodes)([adjacency_tensor,node_tensor])
>>> dense = nn.Linear(128,128)(graph)
>>> dense = torch.tanh(dense)
>>> dense = nn.Dropout(dropout_rate)(dense)
>>> dense = nn.Linear(128,64)(dense)
>>> dense = torch.tanh(dense)
>>> dense = nn.Dropout(dropout_rate)(dense)
>>> output = nn.Linear(64,1)(dense)
References
----------
.. [1] <NAME> et al. "MolGAN: An implicit generative model
for small molecular graphs", https://arxiv.org/abs/1805.11973
"""
def __init__(self,
units: List = [(128, 64), 128],
activation: Callable = torch.tanh,
dropout_rate: float = 0.0,
edges: int = 5,
nodes: int = 5,
name: str = ""):
"""
Initialize the layer
Parameters
----------
units: List, optional (default=[(128,64),128])
List of dimensions used by consecutive convolution layers.
The more values the more convolution layers invoked.
activation: function, optional (default=Tanh)
activation function used across model, default is Tanh
dropout_rate: float, optional (default=0.0)
Used by dropout layer
edges: int, optional (default=5)
Controls how many dense layers are used for a single convolution unit.
Typically matches number of bond types used in the molecule.
nodes: int, optional (default=5)
Number of features in node tensor
name: string, optional (default="")
Name of the layer
"""
super(MolGANEncoderLayer, self).__init__()
if len(units) != 2:
raise ValueError("units parameter must contain two values")
self.graph_convolution_units, self.auxiliary_units = units
self.activation = activation
self.dropout_rate = dropout_rate
self.edges = edges
self.multi_graph_convolution_layer = MolGANMultiConvolutionLayer(
units=self.graph_convolution_units,
nodes=nodes,
activation=self.activation,
dropout_rate=self.dropout_rate,
edges=self.edges)
self.graph_aggregation_layer = MolGANAggregationLayer(
units=self.auxiliary_units,
activation=self.activation,
dropout_rate=self.dropout_rate,
prev_shape=self.graph_convolution_units[-1] + nodes)
def __repr__(self) -> str:
"""
String representation of the layer
Returns
-------
string
String representation of the layer
"""
return f"{self.__class__.__name__}(units={self.units}, activation={self.activation}, dropout_rate={self.dropout_rate}), edges={self.edges})"
def forward(self, inputs: List) -> torch.Tensor:
"""
Invoke this layer
Parameters
----------
inputs: list
List of two input matrices, adjacency tensor and node features tensors
in one-hot encoding format.
Returns
--------
encoder tensor: torch.Tensor
Tensor that been through number of convolutions followed
by aggregation.
"""
output = self.multi_graph_convolution_layer(inputs)
node_tensor = inputs[1]
if len(inputs) > 2:
hidden_tensor = inputs[2]
annotations = torch.cat((output, hidden_tensor, node_tensor), -1)
else:
_, node_tensor = inputs
annotations = torch.cat((output, node_tensor), -1)
output = self.graph_aggregation_layer(annotations)
return output
class DTNNStep(nn.Module):
"""DTNNStep Layer for DTNN model.
Encodes the atom's interaction with other atoms according to distance relationships. [1]_
This layer implements Eq (7) from the DTNN paper and then sums the results to get the final output using Eq (6) from the DTNN paper.
Eq (7): V_ij = tanh[W_fc . ((W_cf . C_j + b_cf) * (W_df . d_ij + b_df))]
Eq (6): C_i = C_i + sum(V_ij)
Here : '.'=Matrix Multiplication , '*'=Multiplication
References
----------
[1] Schütt, <NAME>., et al. "Quantum-chemical insights from deep
tensor neural networks." Nature communications 8.1 (2017): 1-8.
Examples
--------
>>> from deepchem.models.torch_models import layers
>>> import torch
>>> embedding_layer = layers.DTNNEmbedding(4, 4)
>>> emb = embedding_layer(torch.Tensor([0,1,2,3]).to(torch.int64))
>>> step_layer = layers.DTNNStep(4, 6, 8)
>>> output_torch = step_layer([
... torch.Tensor(emb),
... torch.Tensor([0, 1, 2, 3, 4, 5]).to(torch.float32),
... torch.Tensor([1]).to(torch.int64),
... torch.Tensor([[1]]).to(torch.int64)
... ])
>>> output_torch.shape
torch.Size([2, 4, 4])
"""
def __init__(self,
n_embedding: int = 30,
n_distance: int = 100,
n_hidden: int = 60,
initializer: str = 'xavier_uniform_',
activation='tanh',
**kwargs):
"""
Parameters
----------
n_embedding: int, optional
Number of features for each atom
n_distance: int, optional
granularity of distance matrix
n_hidden: int, optional
Number of nodes in hidden layer
initializer: str, optional
Weight initialization for filters.
Options: {xavier_uniform_, xavier_normal_, kaiming_uniform_, kaiming_normal_, trunc_normal_}
activation: str, optional
Activation function applied
"""
super(DTNNStep, self).__init__(**kwargs)
self.n_embedding = n_embedding
self.n_distance = n_distance
self.n_hidden = n_hidden
self.initializer = initializer # Set weight initialization
self.activation = activation # Get activations
self.activation_fn = get_activation(self.activation)
init_func: Callable = getattr(initializers, self.initializer)
self.W_cf = nn.Parameter(
init_func(torch.empty([self.n_embedding, self.n_hidden])))
self.W_df = nn.Parameter(
init_func(torch.empty([self.n_distance, self.n_hidden])))
self.W_fc = nn.Parameter(
init_func(torch.empty([self.n_hidden, self.n_embedding])))
self.b_cf = nn.Parameter(torch.zeros(size=[
self.n_hidden,
]))
self.b_df = nn.Parameter(torch.zeros(size=[
self.n_hidden,
]))
def __repr__(self):
"""Returns a string representing the configuration of the layer.
Returns
-------
n_embedding: int, optional
Number of features for each atom
n_distance: int, optional
granularity of distance matrix
n_hidden: int, optional
Number of nodes in hidden layer
initializer: str, optional
Weight initialization for filters.
Options: {xavier_uniform_, xavier_normal_, kaiming_uniform_, kaiming_normal_, trunc_normal_}
activation: str, optional
Activation function applied
"""
return f'{self.__class__.__name__}(n_embedding={self.n_embedding}, n_distance={self.n_distance}, n_hidden={self.n_hidden}, initializer={self.initializer}, activation={self.activation})'
def forward(self, inputs):
"""Executes the equations and Returns the intraction vector of the atom with other atoms.
Parameters
----------
inputs: torch.Tensor
List of Tensors having atom_features, distance, distance_membership_i, distance_membership_j.
Returns
-------
interaction_vector: torch.Tensor
interaction of the atom with other atoms based on distance and distance_membership.
"""
atom_features = inputs[0]
distance = inputs[1]
distance_membership_i = inputs[2]
distance_membership_j = inputs[3]
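# Project the distance features and the atom embeddings into the hidden space (the two inner terms of Eq (7))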
distance_hidden = torch.matmul(distance, self.W_df) + self.b_df
atom_features_hidden = torch.matmul(atom_features,
self.W_cf) + self.b_cf
outputs = torch.mul(
distance_hidden,
torch.embedding(atom_features_hidden, distance_membership_j))
# for atom i in a molecule m, this step multiplies together distance info of atom pair(i,j)
# and embeddings of atom j(both gone through a hidden layer)
outputs = torch.matmul(outputs, self.W_fc)
outputs = self.activation_fn(outputs)
output_ii = torch.mul(self.b_df, atom_features_hidden)
output_ii = torch.matmul(output_ii, self.W_fc)
output_ii = self.activation_fn(output_ii)
# for atom i, sum the influence from all other atom j in the molecule
interaction_vector = scatter(outputs, distance_membership_i,
dim=0) - output_ii + atom_features
return interaction_vector
class DTNNGather(nn.Module):
"""DTNNGather Layer for DTNN Model.
Predict Molecular Energy using atom_features and atom_membership. [1]_
This layer gathers the inputs obtained from the step layer according to atom_membership and calculates the total Molecular Energy.
References
----------
.. [1] Schütt, Kristof T., et al. "Quantum-chemical insights from deep
tensor neural networks." Nature communications 8.1 (2017): 1-8.
Examples
--------
>>> from deepchem.models.torch_models import layers as layers_torch
>>> import torch
>>> gather_layer_torch = layers_torch.DTNNGather(3, 3, [10])
>>> result = gather_layer_torch([torch.Tensor([[3, 2, 1]]).to(torch.float32), torch.Tensor([0]).to(torch.int64)])
>>> result.shape
torch.Size([1, 3])
"""
def __init__(self,
n_embedding=30,
n_outputs=100,
layer_sizes=[100],
output_activation=True,
initializer='xavier_uniform_',
activation='tanh',
**kwargs):
"""
Parameters
----------
n_embedding: int, optional
Number of features for each atom
n_outputs: int, optional
Number of features for each molecule(output)
layer_sizes: list of int, optional(default=[100])
Structure of hidden layer(s)
initializer: str, optional
Weight initialization for filters.
activation: str, optional
Activation function applied
"""
super(DTNNGather, self).__init__(**kwargs)
self.n_embedding = n_embedding
self.n_outputs = n_outputs
self.layer_sizes = layer_sizes
self.output_activation = output_activation
self.initializer = initializer # Set weight initialization
self.activation = activation # Get activations
self.activation_fn = get_activation(self.activation)
self.W_list = nn.ParameterList()
self.b_list = nn.ParameterList()
init_func: Callable = getattr(initializers, self.initializer)
prev_layer_size = self.n_embedding
for i, layer_size in enumerate(self.layer_sizes):
self.W_list.append(
nn.Parameter(
init_func(torch.empty([prev_layer_size, layer_size]))))
self.b_list.append(nn.Parameter(torch.zeros(size=[
layer_size,
])))
prev_layer_size = layer_size
self.W_list.append(
nn.Parameter(
init_func(torch.empty([prev_layer_size, self.n_outputs]))))
self.b_list.append(nn.Parameter(torch.zeros(size=[
self.n_outputs,
])))
def __repr__(self):
"""Returns a string representing the configuration of the layer.
Returns
----------
n_embedding: int, optional
Number of features for each atom
n_outputs: int, optional
Number of features for each molecule(output)
layer_sizes: list of int, optional(default=[100])
Structure of hidden layer(s)
initializer: str, optional
Weight initialization for filters.
activation: str, optional
Activation function applied
"""
return f'{self.__class__.__name__}(n_embedding={self.n_embedding}, n_outputs={self.n_outputs}, layer_sizes={self.layer_sizes}, output_activation={self.output_activation}, initializer={self.initializer}, activation={self.activation})'
def forward(self, inputs):
"""Executes the equation and Returns Molecular Energies according to atom_membership.
Parameters
----------
inputs: torch.Tensor
List of Tensor containing atom_features and atom_membership
Returns
-------
molecular_energies: torch.Tensor
Tensor containing the Molecular Energies according to atom_membership.
"""
output = inputs[0]
atom_membership = inputs[1]
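# Pass the per-atom features through the hidden layers, then sum them per molecule according to atom_membership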
for i, W in enumerate(self.W_list[:-1]):
output = torch.matmul(output, W) + self.b_list[i]
output = self.activation_fn(output)
output = torch.matmul(output, self.W_list[-1]) + self.b_list[-1]
if self.output_activation:
output = self.activation_fn(output)
return scatter(output, atom_membership)
class EdgeNetwork(nn.Module):
"""The EdgeNetwork module is a PyTorch submodule designed for message passing in graph neural networks.
Examples
--------
>>> pair_features = torch.rand((4, 2), dtype=torch.float32)
>>> atom_features = torch.rand((5, 2), dtype=torch.float32)
>>> atom_to_pair = []
>>> n_atoms = 2
>>> start = 0
>>> C0, C1 = np.meshgrid(np.arange(n_atoms), np.arange(n_atoms))
>>> atom_to_pair.append(np.transpose(np.array([C1.flatten() + start, C0.flatten() + start])))
>>> atom_to_pair = torch.Tensor(atom_to_pair)
>>> atom_to_pair = torch.squeeze(atom_to_pair.to(torch.int64), dim=0)
>>> inputs = [pair_features, atom_features, atom_to_pair]
>>> n_pair_features = 2
>>> n_hidden = 2
>>> init = 'xavier_uniform_'
>>> layer = EdgeNetwork(n_pair_features, n_hidden, init)
>>> result = layer(inputs)
>>> result.shape[1]
2
"""
def __init__(self,
n_pair_features: int = 8,
n_hidden: int = 100,
init: str = 'xavier_uniform_',
**kwargs):
"""Initalises a EdgeNetwork Layer
Parameters
----------
n_pair_features: int, optional
The length of the pair features vector.
n_hidden: int, optional
number of hidden units in the passing phase
init: str, optional
Initialization function to be used in the message passing layer.
"""
super(EdgeNetwork, self).__init__(**kwargs)
self.n_pair_features: int = n_pair_features
self.n_hidden: int = n_hidden
self.init: str = init
init_func: Callable = getattr(initializers, self.init)
self.W: torch.Tensor = init_func(
torch.empty([self.n_pair_features, self.n_hidden * self.n_hidden]))
self.b: torch.Tensor = torch.zeros((self.n_hidden * self.n_hidden,))
self.built: bool = True
def __repr__(self) -> str:
return (
f'{self.__class__.__name__}(n_pair_features:{self.n_pair_features},n_hidden:{self.n_hidden},init:{self.init})'
)
def forward(self, inputs: List[torch.Tensor]) -> torch.Tensor:
"""
Parameters
----------
inputs: List[torch.Tensor]
A list of three tensors: [pair_features, atom_features, atom_to_pair], where `pair_features` and `atom_to_pair` have one row per atom pair.
Returns
-------
result: torch.Tensor
Tensor containing the mapping of the edge vector to a d × d matrix, where d denotes the dimension of the internal hidden representation of each node in the graph.
"""
pair_features: torch.Tensor
atom_features: torch.Tensor
atom_to_pair: torch.Tensor
pair_features, atom_features, atom_to_pair = inputs
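# Map each pair feature vector to an (n_hidden x n_hidden) matrix, apply it to the neighbouring atom's features, and sum the resulting messages per target atom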
A: torch.Tensor = torch.add(torch.matmul(pair_features, self.W), self.b)
A = torch.reshape(A, (-1, self.n_hidden, self.n_hidden))
out: torch.Tensor = torch.unsqueeze(atom_features[atom_to_pair[:, 1]],
dim=2)
out_squeeze: torch.Tensor = torch.squeeze(torch.matmul(A, out), dim=2)
ind: torch.Tensor = atom_to_pair[:, 0]
result: torch.Tensor = segment_sum(out_squeeze, ind)
return result
class WeaveLayer(nn.Module):
"""This class implements the core Weave convolution from the Google graph convolution paper [1]_
This is the Torch equivalent of the original implementation using Keras.
This model handles atom features and bond features
separately. Here, bond features are also called pair features.
There are 4 types of transformations, atom->atom, atom->pair, pair->atom and pair->pair, that this model implements.
Examples
--------
This layer expects 4 inputs in a list of the form `[atom_features,
pair_features, pair_split, atom_to_pair]`. We'll walk through the structure of these inputs. Let's start with some basic definitions.
>>> import deepchem as dc
>>> import numpy as np
>>> smiles = ["CCC", "C"]
Note that there are 4 atoms in total in this system. This layer expects its input molecules to be batched together.
>>> total_n_atoms = 4
Let's suppose that we have a featurizer that computes `n_atom_feat` features per atom.
>>> n_atom_feat = 75
Then conceptually, `atom_feat` is the array of shape `(total_n_atoms,
n_atom_feat)` of atomic features. For simplicity, let's just go with a
random such matrix.
>>> atom_feat = np.random.rand(total_n_atoms, n_atom_feat)
Let's suppose we have `n_pair_feat` pairwise features
>>> n_pair_feat = 14
For each molecule, we compute a matrix of shape `(n_atoms*n_atoms,
n_pair_feat)` of pairwise features for each pair of atoms in the molecule.
Let's construct this conceptually for our example.
>>> pair_feat = [np.random.rand(3*3, n_pair_feat), np.random.rand(1*1,n_pair_feat)]
>>> pair_feat = np.concatenate(pair_feat, axis=0)
>>> pair_feat.shape
(10, 14)
`pair_split` is an index into `pair_feat` which tells us which atom each row belongs to. In our case, we have
>>> pair_split = np.array([0, 0, 0, 1, 1, 1, 2, 2, 2, 3])
That is, the first 9 entries belong to "CCC" and the last entry to "C". The
final entry `atom_to_pair` goes into a little more depth than `pair_split`
and tells us the precise pair each pair feature belongs to. In our case
>>> atom_to_pair = np.array([[0, 0],
... [0, 1],
... [0, 2],
... [1, 0],
... [1, 1],
... [1, 2],
... [2, 0],
... [2, 1],
... [2, 2],
... [3, 3]])
Let's now define the actual layer
>>> layer = WeaveLayer()
And invoke it
>>> [A, P] = layer([atom_feat, pair_feat, pair_split, atom_to_pair])
The weave layer produces new atom/pair features. Let's check their shapes
>>> A = A.detach().numpy()
>>> A.shape
(4, 50)
>>> P = P.detach().numpy()
>>> P.shape
(10, 50)
The 4 is `total_n_atoms` and the 10 is the total number of pairs. Where
does `50` come from? It's from the default arguments `n_atom_output_feat` and
`n_pair_output_feat`.
References
----------
.. [1] Kearnes, Steven, et al. "Molecular graph convolutions: moving beyond
fingerprints." Journal of computer-aided molecular design 30.8 (2016):
595-608.
"""
def __init__(self,
n_atom_input_feat: int = 75,
n_pair_input_feat: int = 14,
n_atom_output_feat: int = 50,
n_pair_output_feat: int = 50,
n_hidden_AA: int = 50,
n_hidden_PA: int = 50,
n_hidden_AP: int = 50,
n_hidden_PP: int = 50,
update_pair: bool = True,
init_: str = 'xavier_uniform_',
activation: str = 'relu',
batch_normalize: bool = True,
**kwargs):
"""
Parameters
----------
n_atom_input_feat: int, optional (default 75)
Number of features for each atom in input.
n_pair_input_feat: int, optional (default 14)
Number of features for each pair of atoms in input.
n_atom_output_feat: int, optional (default 50)
Number of features for each atom in output.
n_pair_output_feat: int, optional (default 50)
Number of features for each pair of atoms in output.
n_hidden_AA: int, optional (default 50)
Number of units(convolution depths) in corresponding hidden layer
n_hidden_PA: int, optional (default 50)
Number of units(convolution depths) in corresponding hidden layer
n_hidden_AP: int, optional (default 50)
Number of units(convolution depths) in corresponding hidden layer
n_hidden_PP: int, optional (default 50)
Number of units(convolution depths) in corresponding hidden layer
update_pair: bool, optional (default True)
Whether to calculate for pair features,
could be turned off for last layer
init_: str, optional (default 'xavier_uniform_')
Weight initialization for filters.
activation: str, optional (default 'relu')
Activation function applied
batch_normalize: bool, optional (default True)
If this is turned on, apply batch normalization before applying
activation functions on convolutional layers.
"""
super(WeaveLayer, self).__init__(**kwargs)
self.init: str = init_ # Set weight initialization
self.activation: str = activation # Get activations
self.activation_fn: torch.nn.Module = get_activation(activation)
self.update_pair: bool = update_pair # last weave layer does not need to update
self.n_hidden_AA: int = n_hidden_AA
self.n_hidden_PA: int = n_hidden_PA
self.n_hidden_AP: int = n_hidden_AP
self.n_hidden_PP: int = n_hidden_PP
self.n_hidden_A: int = n_hidden_AA + n_hidden_PA
self.n_hidden_P: int = n_hidden_AP + n_hidden_PP
self.batch_normalize: bool = batch_normalize
self.n_atom_input_feat: int = n_atom_input_feat
self.n_pair_input_feat: int = n_pair_input_feat
self.n_atom_output_feat: int = n_atom_output_feat
self.n_pair_output_feat: int = n_pair_output_feat
# Construct internal trainable weights
init = getattr(initializers, self.init)
# Weight matrix and bias matrix required to compute new atom layer from the previous atom layer
self.W_AA: torch.Tensor = init(
torch.empty(self.n_atom_input_feat, self.n_hidden_AA))
self.b_AA: torch.Tensor = torch.zeros((self.n_hidden_AA,))
self.AA_bn: nn.BatchNorm1d = nn.BatchNorm1d(
num_features=self.n_hidden_AA,
eps=1e-3,
momentum=0.99,
affine=True,
track_running_stats=True)
# Weight matrix and bias matrix required to compute new atom layer from the previous pair layer
self.W_PA: torch.Tensor = init(
torch.empty(self.n_pair_input_feat, self.n_hidden_PA))
self.b_PA: torch.Tensor = torch.zeros((self.n_hidden_PA,))
self.PA_bn: nn.BatchNorm1d = nn.BatchNorm1d(
num_features=self.n_hidden_PA,
eps=1e-3,
momentum=0.99,
affine=True,
track_running_stats=True)
self.W_A: torch.Tensor = init(
torch.empty(self.n_hidden_A, self.n_atom_output_feat))
self.b_A: torch.Tensor = torch.zeros((self.n_atom_output_feat,))
self.A_bn: nn.BatchNorm1d = nn.BatchNorm1d(
num_features=self.n_atom_output_feat,
eps=1e-3,
momentum=0.99,
affine=True,
track_running_stats=True)
if self.update_pair:
# Weight matrix and bias matrix required to compute new pair layer from the previous atom layer
self.W_AP: torch.Tensor = init(
torch.empty(self.n_atom_input_feat * 2, self.n_hidden_AP))
self.b_AP: torch.Tensor = torch.zeros((self.n_hidden_AP,))
self.AP_bn: nn.BatchNorm1d = nn.BatchNorm1d(
num_features=self.n_hidden_AP,
eps=1e-3,
momentum=0.99,
affine=True,
track_running_stats=True)
# Weight matrix and bias matrix required to compute new pair layer from the previous pair layer
self.W_PP: torch.Tensor = init(
torch.empty(self.n_pair_input_feat, self.n_hidden_PP))
self.b_PP: torch.Tensor = torch.zeros((self.n_hidden_PP,))
self.PP_bn: nn.BatchNorm1d = nn.BatchNorm1d(
num_features=self.n_hidden_PP,
eps=1e-3,
momentum=0.99,
affine=True,
track_running_stats=True)
self.W_P: torch.Tensor = init(
torch.empty(self.n_hidden_P, self.n_pair_output_feat))
self.b_P: torch.Tensor = torch.zeros((self.n_pair_output_feat,))
self.P_bn: nn.BatchNorm1d = nn.BatchNorm1d(
num_features=self.n_pair_output_feat,
eps=1e-3,
momentum=0.99,
affine=True,
track_running_stats=True)
self.built = True
def __repr__(self) -> str:
"""
Returns a string representation of the object.
Returns:
-------
str: A string that contains the class name followed by the values of its instance variable.
"""
# flake8: noqa
return (
f'{self.__class__.__name__}(n_atom_input_feat:{self.n_atom_input_feat},n_pair_input_feat:{self.n_pair_input_feat},n_atom_output_feat:{self.n_atom_output_feat},n_pair_output_feat:{self.n_pair_output_feat},n_hidden_AA:{self.n_hidden_AA},n_hidden_PA:{self.n_hidden_PA},n_hidden_AP:{self.n_hidden_AP},n_hidden_PP:{self.n_hidden_PP},batch_normalize:{self.batch_normalize},update_pair:{self.update_pair},init:{self.init},activation:{self.activation})'
)
def forward(
self, inputs: List[Union[np.ndarray, np.ndarray, np.ndarray,
np.ndarray]]
) -> List[Union[torch.Tensor, torch.Tensor]]:
"""
Creates weave tensors.
Parameters
----------
inputs: List[Union[np.ndarray, np.ndarray, np.ndarray, np.ndarray]]
Should contain 4 tensors [atom_features, pair_features, pair_split,
atom_to_pair]
Returns:
-------
List[Union[torch.Tensor, torch.Tensor]]
A: Atom features tensor with shape [total_num_atoms, atom feature size]
P: Pair features tensor with shape [total num of pairs, bond feature size]
"""
# Converting the input to torch tensors
atom_features: torch.Tensor = torch.tensor(inputs[0])
pair_features: torch.Tensor = torch.tensor(inputs[1])
pair_split: torch.Tensor = torch.tensor(inputs[2])
atom_to_pair: torch.Tensor = torch.tensor(inputs[3])
activation = self.activation_fn
# AA is a tensor with shape[total_num_atoms,n_hidden_AA]
AA: torch.Tensor = torch.matmul(atom_features.type(torch.float32),
self.W_AA) + self.b_AA
if self.batch_normalize:
self.AA_bn.eval()
AA = self.AA_bn(AA)
AA = activation(AA)
# PA is a tensor with shape[total number of pairs,n_hidden_PA]
PA: torch.Tensor = torch.matmul(pair_features.type(torch.float32),
self.W_PA) + self.b_PA
if self.batch_normalize:
self.PA_bn.eval()
PA = self.PA_bn(PA)
PA = activation(PA)
# Split the PA tensor according to the 'pair_split' tensor
t_grp: Dict[Tensor, Tensor] = {}
idx: int = 0
for i, s_id in enumerate(pair_split):
s_id = s_id.item()
if s_id in t_grp:
t_grp[s_id] = t_grp[s_id] + PA[idx]
else:
t_grp[s_id] = PA[idx]
idx = i + 1
lst = list(t_grp.values())
tensor = torch.stack(lst)
PA = tensor
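# Combine the atom->atom (AA) and pair->atom (PA) contributions to produce the new atom features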
A: torch.Tensor = torch.matmul(torch.concat([AA, PA], 1),
self.W_A) + self.b_A
if self.batch_normalize:
self.A_bn.eval()
A = self.A_bn(A)
A = activation(A)
if self.update_pair:
# Note that AP_ij and AP_ji share the same self.AP_bn batch
# normalization
AP_ij: torch.Tensor = torch.matmul(
torch.reshape(atom_features[atom_to_pair],
[-1, 2 * self.n_atom_input_feat]).type(
torch.float32), self.W_AP) + self.b_AP
if self.batch_normalize:
self.AP_bn.eval()
AP_ij = self.AP_bn(AP_ij)
AP_ij = activation(AP_ij)
AP_ji: torch.Tensor = torch.matmul(
torch.reshape(atom_features[torch.flip(atom_to_pair, [1])],
[-1, 2 * self.n_atom_input_feat]).type(
torch.float32), self.W_AP) + self.b_AP
if self.batch_normalize:
self.AP_bn.eval()
AP_ji = self.AP_bn(AP_ji)
AP_ji = activation(AP_ji)
# PP is a tensor with shape [total number of pairs,n_hidden_PP]
PP: torch.Tensor = torch.matmul(pair_features.type(torch.float32),
self.W_PP) + self.b_PP
if self.batch_normalize:
self.PP_bn.eval()
PP = self.PP_bn(PP)
PP = activation(PP)
P: torch.Tensor = torch.matmul(
torch.concat([AP_ij + AP_ji, PP], 1).type(torch.float32),
self.W_P) + self.b_P
if self.batch_normalize:
self.P_bn.eval()
P = self.P_bn(P)
P = activation(P)
else:
P = pair_features
return [A, P]
class WeaveGather(nn.Module):
"""Implements the weave-gathering section of weave convolutions.
This is the Torch equivalent of the original implementation using Keras.
Implements the gathering layer from [1]_. The weave gathering layer gathers
per-atom features to create a molecule-level fingerprint in a weave
convolutional network. This layer can also perform Gaussian histogram
expansion as detailed in [1]_. Note that the gathering function here is
simply addition as in [1]_.
Examples
--------
This layer expects 2 inputs in a list of the form `[atom_features,
pair_features]`. We'll walk through the structure
of these inputs. Let's start with some basic definitions.
>>> import deepchem as dc
>>> import numpy as np
Suppose you have a batch of molecules
>>> smiles = ["CCC", "C"]
Note that there are 4 atoms in total in this system. This layer expects its
input molecules to be batched together.
>>> total_n_atoms = 4
Let's suppose that we have `n_atom_feat` features per atom.
>>> n_atom_feat = 75
Then conceptually, `atom_feat` is the array of shape `(total_n_atoms,
n_atom_feat)` of atomic features. For simplicity, let's just go with a
random such matrix.
>>> atom_feat = np.random.rand(total_n_atoms, n_atom_feat)
We then need to provide a mapping that tells us which molecule each atom
belongs to. In our case this would be
>>> atom_split = np.array([0, 0, 0, 1])
Let's now define the actual layer
>>> gather = WeaveGather(batch_size=2, n_input=n_atom_feat)
>>> output_molecules = gather([atom_feat, atom_split])
>>> len(output_molecules)
2
References
----------
.. [1] Kearnes, Steven, et al. "Molecular graph convolutions: moving beyond
fingerprints." Journal of computer-aided molecular design 30.8 (2016):
595-608.
"""
def __init__(self,
batch_size: int,
n_input: int = 128,
gaussian_expand: bool = True,
compress_post_gaussian_expansion: bool = False,
init_: str = 'xavier_uniform_',
activation: str = 'tanh',
**kwargs):
"""
Parameters
----------
batch_size: int
number of molecules in a batch
n_input: int, optional (default 128)
number of features for each input molecule
gaussian_expand: boolean, optional (default True)
Whether to expand each dimension of atomic features by gaussian histogram
compress_post_gaussian_expansion: bool, optional (default False)
If True, compress the results of the Gaussian expansion back to the
original dimensions of the input by using a linear layer with specified
activation function. Note that this compression was not in the original
paper, but was present in the original DeepChem implementation so is
left present for backwards compatibility.
init_: str, optional (default 'xavier_uniform_')
Weight initialization for filters if `compress_post_gaussian_expansion`
is True.
activation: str, optional (default 'tanh')
Activation function applied for filters if
`compress_post_gaussian_expansion` is True.
"""
super(WeaveGather, self).__init__(**kwargs)
self.n_input: int = n_input
self.batch_size: int = batch_size
self.gaussian_expand: bool = gaussian_expand
self.compress_post_gaussian_expansion: bool = compress_post_gaussian_expansion
self.init: str = init_ # Set weight initialization
self.activation: str = activation # Get activations
self.activation_fn: torch.nn.Module = get_activation(activation)
if self.compress_post_gaussian_expansion:
init = getattr(initializers, self.init)
self.W: torch.Tensor = init(
torch.empty([self.n_input * 11, self.n_input]))
self.b: torch.Tensor = torch.zeros((self.n_input,))
self.built = True
def __repr__(self):
"""
Returns a string representation of the object.
Returns:
-------
str: A string that contains the class name followed by the values of its instance variable.
"""
return (
f'{self.__class__.__name__}(batch_size:{self.batch_size},n_input:{self.n_input},gaussian_expand:{self.gaussian_expand},init:{self.init},activation:{self.activation},compress_post_gaussian_expansion:{self.compress_post_gaussian_expansion})'
)
def forward(self, inputs: List[Union[np.ndarray,
np.ndarray]]) -> torch.Tensor:
"""Creates weave tensors.
Parameters
----------
inputs: List[Union[np.ndarray,np.ndarray]]
Should contain 2 tensors [atom_features, atom_split]
Returns
-------
output_molecules: torch.Tensor
Tensor with one row per molecule. Each row is of shape `(self.n_input,)`
(or `(11 * self.n_input,)` when Gaussian expansion is applied without compression).
"""
outputs: torch.Tensor = torch.tensor(inputs[0])
atom_split: torch.Tensor = torch.tensor(inputs[1])
if self.gaussian_expand:
outputs = self.gaussian_histogram(outputs)
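# Segment sum: accumulate the per-atom features into per-molecule features keyed by atom_split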
t_grp: Dict[Tensor, Tensor] = {}
idx: int = 0
for i, s_id in enumerate(atom_split):
s_id = s_id.item()
if s_id in t_grp:
t_grp[s_id] = t_grp[s_id] + outputs[idx]
else:
t_grp[s_id] = outputs[idx]
idx = i + 1
lst = list(t_grp.values())
tensor = torch.stack(lst)
output_molecules: torch.Tensor = tensor
if self.compress_post_gaussian_expansion:
output_molecules = torch.matmul(
output_molecules.type(torch.float32), self.W) + self.b
output_molecules = self.activation_fn(output_molecules)
return output_molecules
def gaussian_histogram(self, x: torch.Tensor) -> torch.Tensor:
"""Expands input into a set of gaussian histogram bins.
Parameters
----------
x: torch.Tensor
Of shape `(N, n_feat)`
Examples
--------
This method uses 11 bins spanning portions of a Gaussian with zero mean
and unit standard deviation.
>>> gaussian_memberships = [(-1.645, 0.283), (-1.080, 0.170),
... (-0.739, 0.134), (-0.468, 0.118),
... (-0.228, 0.114), (0., 0.114),
... (0.228, 0.114), (0.468, 0.118),
... (0.739, 0.134), (1.080, 0.170),
... (1.645, 0.283)]
We construct a Gaussian at `gaussian_memberships[i][0]` with standard
deviation `gaussian_memberships[i][1]`. Each feature in `x` is assigned
the probability of falling in each Gaussian, and probabilities are
normalized across the 11 different Gaussians.
Returns
-------
outputs: torch.Tensor
Of shape `(N, 11*n_feat)`
"""
import torch.distributions as dist
gaussian_memberships: List[Tuple[float, float]] = [(-1.645, 0.283),
(-1.080, 0.170),
(-0.739, 0.134),
(-0.468, 0.118),
(-0.228, 0.114),
(0., 0.114),
(0.228, 0.114),
(0.468, 0.118),
(0.739, 0.134),
(1.080, 0.170),
(1.645, 0.283)]
distributions: List[dist.Normal] = [
dist.Normal(torch.tensor(p[0]), torch.tensor(p[1]))
for p in gaussian_memberships
]
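# dist_max holds each Gaussian's peak density (its value at the mean), used to normalize the memberships below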
dist_max: List[torch.Tensor] = [
distributions[i].log_prob(torch.tensor(
gaussian_memberships[i][0])).exp() for i in range(11)
]
outputs: List[torch.Tensor] = [
distributions[i].log_prob(torch.tensor(x)).exp() / dist_max[i]
for i in range(11)
]
output: torch.Tensor = torch.stack(outputs, dim=2)
output = output / torch.sum(output, dim=2, keepdim=True)
output = output.view(-1, self.n_input * 11)
return output
class _MXMNetEnvelope(torch.nn.Module):
"""
A PyTorch module implementing an envelope function. This is a helper class for MXMNetSphericalBasisLayer and MXMNetBesselBasisLayer to be used in MXMNet Model.
The envelope function is defined as follows:
env(x) = 1 / x + a * x^e + b * x^(e+1) + c * x^(e+2) if x < 1
env(x) = 0 if x >= 1
where
'x' is the input tensor
'e' is the exponent parameter
'a' = -(e + 1) * (e + 2) / 2
'b' = e * (e + 2)
'c' = -e * (e + 1) / 2
Examples
--------
>>> env = _MXMNetEnvelope(exponent=2)
>>> input_tensor = torch.tensor([0.5, 1.0, 2.0, 3.0])
>>> output = env(input_tensor)
>>> output.shape
torch.Size([4])
"""
def __init__(self, exponent: int):
"""
Parameters
----------
exponent: int
The exponent 'e' used in the envelope function.
"""
super(_MXMNetEnvelope, self).__init__()
self.e: int = exponent
self.a: float = -(self.e + 1) * (self.e + 2) / 2
self.b: float = self.e * (self.e + 2)
self.c: float = -self.e * (self.e + 1) / 2
def forward(self, x: torch.Tensor) -> torch.Tensor:
"""
Compute the envelope function for the input tensor 'x'.
Parameters
----------
x: torch.Tensor
The Input tensor
Returns
-------
output: torch.Tensor
The tensor containing the computed envelope values for each element of 'x'.
"""
e: int = self.e
a: float = self.a
b: float = self.b
c: float = self.c
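# Evaluate the polynomial envelope 1/x + a*x^e + b*x^(e+1) + c*x^(e+2), then zero it out wherever x >= 1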
x_pow_p0: torch.Tensor = x.pow(e)
x_pow_p1: torch.Tensor = x_pow_p0 * x
env_val: torch.Tensor = 1. / x + a * x_pow_p0 + b * x_pow_p1 + c * x_pow_p1 * x
zero: torch.Tensor = torch.zeros_like(x)
output: torch.Tensor = torch.where(x < 1, env_val, zero)
return output
class MXMNetGlobalMessagePassing(MessagePassing):
"""This class implements the Global Message Passing Layer from the Molecular Mechanics-Driven Graph Neural Network
with Multiplex Graph for Molecular Structures(MXMNet) paper [1]_.
This layer consists of two message passing steps and an update step between them.
Let:
- **x_i** : ``The node to be updated``
- **h_i** : ``The hidden state of x_i``
- **x_j** : ``The neighbour node connected to x_i by edge e_ij``
- **h_j** : ``The hidden state of x_j``
- **W** : ``The edge weights``
- **m_ij** : ``The message between x_i and x_j``
- **h_j (self_loop)** : ``The set of hidden states of atom features``
- **mlp** : ``MultilayerPerceptron``
- **res** : ``ResidualBlock``
**In each message passing step**
.. code-block:: python
m_ij = mlp1([h_i || h_j || e_ij])*(e_ij W)
**To handle self loops**
.. code-block:: python
m_ij = m_ij + h_j(self_loop)
**In each update step**
.. code-block:: python
hm_j = res1(sum(m_ij))
h_j_new = mlp2(hm_j) + h_j
h_j_new = res2(h_j_new)
h_j_new = res3(h_j_new)
.. note::
Message passing and message aggregation(sum) is handled by ``self.propagate()``.
References
----------
.. [1] Molecular Mechanics-Driven Graph Neural Network with Multiplex Graph for Molecular Structures. https://arxiv.org/pdf/2011.07457.pdf
Examples
--------
The provided example demonstrates how to use the GlobalMessagePassing layer by creating an instance, passing input tensors (node_features, edge_attributes, edge_indices) through it, and checking the shape of the output.
The example below initializes the input tensors with specific values.
>>> dim = 1
>>> node_features = torch.tensor([[0.8343], [1.2713], [1.2713], [1.2713], [1.2713]])
>>> edge_attributes = torch.tensor([[1.0004], [1.0004], [1.0005], [1.0004], [1.0004],[-0.2644], [-0.2644], [-0.2644], [1.0004],[-0.2644], [-0.2644], [-0.2644], [1.0005],[-0.2644], [-0.2644], [-0.2644], [1.0004],[-0.2644], [-0.2644], [-0.2644]])
>>> edge_indices = torch.tensor([[0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4],[1, 2, 3, 4, 0, 2, 3, 4, 0, 1, 3, 4, 0, 1, 2, 4, 0, 1, 2, 3]])
>>> out = MXMNetGlobalMessagePassing(dim)
>>> output = out(node_features, edge_attributes, edge_indices)
>>> output.shape
torch.Size([5, 1])
"""
def __init__(self, dim: int, activation_fn: Union[Callable, str] = 'silu'):
"""Initializes the MXMNETGlobalMessagePassing layer.
Parameters
-----------
dim: int
The dimension of the input and output features.
activation_fn: Union[Callable, str], optional (default 'silu')
The activation function to be used in the internal multilayer perceptrons (MLPs).
"""
super(MXMNetGlobalMessagePassing, self).__init__()
activation_fn = get_activation(activation_fn)
self.h_mlp: MultilayerPerceptron = MultilayerPerceptron(
d_input=dim, d_output=dim, activation_fn=activation_fn)
self.res1: MultilayerPerceptron = MultilayerPerceptron(
d_input=dim,
d_hidden=(dim,),
d_output=dim,
activation_fn=activation_fn,
skip_connection=True,
weighted_skip=False)
self.res2: MultilayerPerceptron = MultilayerPerceptron(
d_input=dim,
d_hidden=(dim,),
d_output=dim,
activation_fn=activation_fn,
skip_connection=True,
weighted_skip=False)
self.res3: MultilayerPerceptron = MultilayerPerceptron(
d_input=dim,
d_hidden=(dim,),
d_output=dim,
activation_fn=activation_fn,
skip_connection=True,
weighted_skip=False)
self.mlp: MultilayerPerceptron = MultilayerPerceptron(
d_input=dim, d_output=dim, activation_fn=activation_fn)
self.x_edge_mlp: MultilayerPerceptron = MultilayerPerceptron(
d_input=dim * 3, d_output=dim, activation_fn=activation_fn)
self.linear: nn.Linear = nn.Linear(dim, dim, bias=False)
def forward(self, node_features: torch.Tensor,
edge_attributes: torch.Tensor,
edge_indices: torch.Tensor) -> torch.Tensor:
"""
Performs the forward pass of the GlobalMessagePassing layer.
Parameters
-----------
node_features: torch.Tensor
The input node features tensor of shape (num_nodes, feature_dim).
edge_attributes: torch.Tensor
The input edge attribute tensor of shape (num_edges, attribute_dim).
edge_indices: torch.Tensor
The input edge index tensor of shape (2, num_edges).
Returns
--------
torch.Tensor
The updated node features tensor after message passing of shape (num_nodes, feature_dim).
"""
edge_indices, _ = add_self_loops(edge_indices,
num_nodes=node_features.size(0))
residual_node_features: torch.Tensor = node_features
# Integrate the Cross Layer Mapping inside the Global Message Passing
node_features = self.h_mlp(node_features)
# Message Passing operation
node_features = self.propagate(edge_indices,
x=node_features,
num_nodes=node_features.size(0),
edge_attr=edge_attributes)
# Update function f_u
node_features = self.res1(node_features)
node_features = self.mlp(node_features) + residual_node_features
node_features = self.res2(node_features)
node_features = self.res3(node_features)
# Message Passing operation
node_features = self.propagate(edge_indices,
x=node_features,
num_nodes=node_features.size(0),
edge_attr=edge_attributes)
return node_features
def message(self, x_i: torch.Tensor, x_j: torch.Tensor,
edge_attr: torch.Tensor) -> torch.Tensor:
"""Constructs messages to be passed along the edges in the graph.
Parameters
-----------
x_i: torch.Tensor
The target (central) node features tensor of shape (num_edges+num_nodes, feature_dim).
x_j: torch.Tensor
The source (neighbouring) node features tensor of shape (num_edges+num_nodes, feature_dim).
edge_attr: torch.Tensor
The edge attribute tensor of shape (num_edges, attribute_dim).
Returns
--------
torch.Tensor
The constructed messages tensor.
"""
num_edge: int = edge_attr.size()[0]
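# Only the first num_edge rows of x_i/x_j correspond to real edges; the remaining rows come from the self-loops added in forward()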
x_edge: torch.Tensor = torch.cat(
(x_i[:num_edge], x_j[:num_edge], edge_attr), -1)
x_edge = self.x_edge_mlp(x_edge)
x_j = torch.cat((self.linear(edge_attr) * x_edge, x_j[num_edge:]),
dim=0)
return x_j
class MXMNetBesselBasisLayer(torch.nn.Module):
"""This layer implements a basis layer for the MXMNet model using Bessel functions.
The basis layer is used to model radial symmetry in molecular systems.
The output of the layer is given by:
output = envelope(dist / cutoff) * (freq * dist / cutoff).sin()
Examples
--------
>>> radial_layer = MXMNetBesselBasisLayer(num_radial=2, cutoff=2.0, envelope_exponent=2)
>>> distances = torch.tensor([0.5, 1.0, 2.0, 3.0])
>>> output = radial_layer(distances)
>>> output.shape
torch.Size([4, 2])
"""
def __init__(self,
num_radial: int,
cutoff: float = 5.0,
envelope_exponent: int = 5):
"""Initialize the MXMNet Bessel Basis Layer.
Parameters
----------
num_radial: int
The number of radial basis functions to use.
cutoff: float, optional (default 5.0)
The radial cutoff distance used to scale the distances.
envelope_exponent: int, optional (default 5)
The exponent of the envelope function.
"""
super(MXMNetBesselBasisLayer, self).__init__()
self.cutoff = cutoff
self.envelope: _MXMNetEnvelope = _MXMNetEnvelope(envelope_exponent)
self.freq: torch.Tensor = torch.nn.Parameter(torch.empty(num_radial))
self.reset_parameters()
def reset_parameters(self):
"""Reset and initialize the learnable parameters of the MXMNet Bessel Basis Layer.
The 'freq' tensor, representing the frequencies of the Bessel functions, is set up with initial values proportional to π (PI) and becomes a learnable parameter.
The 'freq' tensor will be updated during the training process to optimize the performance of the MXMNet model for the specific task it is being trained on.
"""
with torch.no_grad():
torch.arange(1, self.freq.numel() + 1, out=self.freq).mul_(PI)
self.freq.requires_grad_()
def forward(self, dist: torch.Tensor) -> torch.Tensor:
"""Compute the output of the MXMNet Bessel Basis Layer.
Parameters
----------
dist: torch.Tensor
The input tensor representing the pairwise distances between atoms.
Returns
-------
output: torch.Tensor
The output tensor representing the radial basis functions applied to the input distances.
"""
dist = dist.unsqueeze(-1) / self.cutoff
output: torch.Tensor = self.envelope(dist) * (self.freq * dist).sin()
return output
class EncoderRNN(nn.Module):
"""Encoder Layer for SeqToSeq Model.
It takes input sequences and converts them into a fixed-size context vector
called the "embedding". This vector contains all relevant information from
the input sequence. This context vector is then used by the decoder to
generate the output sequence and can also be used as a representation of the
input sequence for other Models.
Examples
--------
>>> from deepchem.models.torch_models.layers import EncoderRNN
>>> import torch
>>> embedding_dimensions = 7
>>> num_input_token = 4
>>> input = torch.tensor([[1, 0, 2, 3, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0]])
>>> layer = EncoderRNN(num_input_token, embedding_dimensions)
>>> emb, hidden = layer(input)
>>> emb.shape
torch.Size([3, 5, 7])
References
----------
.. [1] Sutskever et al., "Sequence to Sequence Learning with Neural Networks"
"""
def __init__(self,
input_size: int,
hidden_size: int,
dropout_p: float = 0.1,
**kwargs):
"""Initialize the EncoderRNN layer.
Parameters
----------
input_size: int
The number of expected features.
hidden_size: int
The number of features in the hidden state.
dropout_p: float (default 0.1)
The dropout probability to use during training.
"""
super(EncoderRNN, self).__init__(**kwargs)
self.input_size = input_size
self.hidden_size = hidden_size
self.dropout_p = dropout_p
self.embedding = nn.Embedding(input_size, hidden_size)
self.gru = nn.GRU(hidden_size, hidden_size, batch_first=True)
self.dropout = nn.Dropout(dropout_p)
def __repr__(self) -> str:
"""Returns a string representing the configuration of the layer.
Returns
-------
input_size: int
Number of expected features.
hidden_size: int
Number of features in the hidden state.
dropout_p: float (default 0.1)
Dropout probability to use during training.
"""
return f'{self.__class__.__name__}(input_size={self.input_size}, hidden_size={self.hidden_size}, dropout_p={self.dropout_p})'
def forward(self, input: torch.Tensor):
"""Returns Embeddings according to provided sequences.
Parameters
----------
input: torch.Tensor
Batch of input sequences.
Returns
-------
output: torch.Tensor
Batch of Embeddings.
hidden: torch.Tensor
Batch of hidden states.
"""
embedded = self.dropout(self.embedding(input))
output, hidden = self.gru(embedded)
return output, hidden
class FerminetElectronFeature(torch.nn.Module):
"""
A Pytorch Module implementing the Ferminet's electron features interaction layer [1]_. This is a helper class for the Ferminet model.
The layer consists of 2 types of linear layers - v for the one electron features and w for the two electron features. The number and dimensions
of each layer depend on the number of atoms and electrons in the molecule system.
References
----------
.. [1] Spencer, James S., et al. Better, Faster Fermionic Neural Networks. arXiv:2011.07125, arXiv, 13 Nov. 2020. arXiv.org, http://arxiv.org/abs/2011.07125.
Examples
--------
>>> import torch
>>> import deepchem as dc
>>> electron_layer = dc.models.torch_models.layers.FerminetElectronFeature([32,32,32],[16,16,16], 4, 8, 10, [5,5])
>>> one_electron_test = torch.randn(8, 10, 4*4)
>>> two_electron_test = torch.randn(8, 10, 10, 4)
>>> one, two = electron_layer.forward(one_electron_test, two_electron_test)
>>> one.size()
torch.Size([8, 10, 32])
>>> two.size()
torch.Size([8, 10, 10, 16])
"""
def __init__(self, n_one: List[int], n_two: List[int], no_of_atoms: int,
batch_size: int, total_electron: int, spin: List[int]):
"""
Parameters
----------
n_one: List[int]
List of integer values containing the dimensions of each n_one layer's output
n_two: List[int]
List of integer values containing the dimensions of each n_two layer's output
no_of_atoms: int
Value containing the number of atoms in the molecule system
batch_size: int
Value containing the number of batches for the input provided
total_electron: int
Value containing the total number of electrons in the molecule system
spin: List[int]
List data structure in the format of [number of up-spin electrons, number of down-spin electrons]
v: torch.nn.ModuleList
torch ModuleList containing the linear layer with the n_one layer's dimension size.
w: torch.nn.ModuleList
torch ModuleList containing the linear layer with the n_two layer's dimension size.
layer_size: int
Value containing the number of n_one and n_two layers
"""
super(FerminetElectronFeature, self).__init__()
self.n_one = n_one
self.n_two = n_two
self.no_of_atoms = no_of_atoms
self.batch_size = batch_size
self.total_electron = total_electron
self.spin = spin
self.v: torch.nn.ModuleList = torch.nn.ModuleList()
self.w: torch.nn.ModuleList = torch.nn.ModuleList()
self.layer_size: int = len(self.n_one)
# Initializing the first layer (first layer has different dims than others)
self.v.append(
nn.Linear(8 + 3 * 4 * self.no_of_atoms, self.n_one[0], bias=True))
#filling the weights with 1e-9 for faster convergence
self.v[0].weight.data.fill_(1e-9)
self.v[0].bias.data.fill_(1e-9)
self.w.append(nn.Linear(4, self.n_two[0], bias=True))
self.w[0].weight.data.fill_(1e-9)
self.w[0].bias.data.fill_(1e-9)
for i in range(1, self.layer_size):
self.v.append(
nn.Linear(3 * self.n_one[i - 1] + 2 * self.n_two[i - 1],
n_one[i],
bias=True))
self.v[i].weight.data.fill_(1e-9)
self.v[i].bias.data.fill_(1e-9)
self.w.append(nn.Linear(self.n_two[i - 1], self.n_two[i],
bias=True))
self.w[i].weight.data.fill_(1e-9)
self.w[i].bias.data.fill_(1e-9)
def forward(self, one_electron: torch.Tensor, two_electron: torch.Tensor):
"""
Parameters
----------
one_electron: torch.Tensor
The one electron feature which has the shape (batch_size, number of electrons, number of atoms * 4). Here the last dimension contains
the electron's distance from each of the atoms as a vector, concatenated with the norm of that vector.
two_electron: torch.Tensor
The two electron feature which has the shape (batch_size, number of electrons, number of electrons, 4). Here the last dimension contains
the electron's distance from the other electrons as a vector, concatenated with the norm of that vector.
Returns
-------
one_electron: torch.Tensor
The one electron feature after passing through the layer which has the shape (batch_size, number of electrons, n_one shape).
two_electron: torch.Tensor
The two electron feature after passing through the layer, which has the shape (batch_size, number of electrons, number of electrons, n_two shape).
"""
for l in range(self.layer_size):
# Calculating one-electron feature's average
g_one_up: torch.Tensor = torch.mean(
one_electron[:, :self.spin[0], :], dim=-2)
g_one_down: torch.Tensor = torch.mean(
one_electron[:, self.spin[0]:, :], dim=-2)
one_electron_tmp: torch.Tensor = torch.zeros(
self.batch_size, self.total_electron, self.n_one[l])
two_electron_tmp: torch.Tensor = torch.zeros(
self.batch_size, self.total_electron, self.total_electron,
self.n_two[l])
for i in range(self.total_electron):
# Calculating two-electron feature's average
g_two_up: torch.Tensor = torch.mean(
two_electron[:, i, :self.spin[0], :], dim=1)
g_two_down: torch.Tensor = torch.mean(
two_electron[:, i, self.spin[0]:, :], dim=1)
f: torch.Tensor = torch.cat((one_electron[:, i, :], g_one_up,
g_one_down, g_two_up, g_two_down),
dim=1)
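# f concatenates this electron's one-electron features with the spin-averaged one- and two-electron summaries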
if l == 0 or (self.n_one[l] != self.n_one[l - 1]) or (
self.n_two[l] != self.n_two[l - 1]):
one_electron_tmp[:, i, :] = torch.tanh(self.v[l](f.to(
torch.float32)))
two_electron_tmp[:, i, :, :] = torch.tanh(self.w[l](
two_electron[:, i, :, :].to(torch.float32)))
else:
one_electron_tmp[:, i, :] = torch.tanh(self.v[l](f.to(
torch.float32))) + one_electron[:, i, :].to(
torch.float32)
two_electron_tmp[:, i, :, :] = torch.tanh(self.w[l](
two_electron[:, i, :, :].to(
torch.float32))) + two_electron[:, i, :].to(
torch.float32)
one_electron = one_electron_tmp
two_electron = two_electron_tmp
return one_electron, two_electron
class FerminetEnvelope(torch.nn.Module):
"""
A Pytorch Module implementing the Ferminet's envelope layer [1]_, which is used to calculate the spin up and spin down orbital values.
This is a helper class for the Ferminet model.
The layer consists of 4 types of parameter lists - envelope_w, envelope_g, sigma and pi, which help to calculate the orbital values.
References
----------
.. [1] Spencer, James S., et al. Better, Faster Fermionic Neural Networks. arXiv:2011.07125, arXiv, 13 Nov. 2020. arXiv.org, http://arxiv.org/abs/2011.07125.
Examples
--------
>>> import torch
>>> import deepchem as dc
>>> envelope_layer = dc.models.torch_models.layers.FerminetEnvelope([32, 32, 32], [16, 16, 16], 10, 8, [5, 5], 5, 16)
>>> one_electron = torch.randn(8, 10, 32)
>>> one_electron_permuted = torch.randn(8, 10, 5, 3)
>>> psi_up, psi_down = envelope_layer.forward(one_electron, one_electron_permuted)
>>> psi_up.size()
torch.Size([8, 16, 5, 5])
>>> psi_down.size()
torch.Size([8, 16, 5, 5])
"""
def __init__(self, n_one: List[int], n_two: List[int], total_electron: int,
batch_size: int, spin: List[int], no_of_atoms: int,
determinant: int):
"""
Parameters
----------
n_one: List[int]
List of integer values containing the dimensions of each n_one layer's output
n_two: List[int]
List of integer values containing the dimensions of each n_two layer's output
total_electron: int
Value containing the total number of electrons in the molecule system
batch_size: int
Value containing the number of batches for the input provided
spin: List[int]
List data structure in the format of [number of up-spin electrons, number of down-spin electrons]
no_of_atoms: int
Value containing the number of atoms in the molecule system
determinant: int
The number of determinants to be incorporated in the post-HF solution.
envelope_w: torch.nn.ParameterList
torch ParameterList containing the torch Tensor with n_one layer's dimension size.
envelope_g: torch.nn.ParameterList
torch ParameterList containing the torch Tensor with the unit dimension size, which acts as bias.
sigma: torch.nn.ParameterList
torch ParameterList containing the torch Tensor with the unit dimension size.
pi: torch.nn.ParameterList
torch ParameterList containing the linear layer with the n_two layer's dimension size.
layer_size: int
Value containing the number of n_one and n_two layers
"""
super(FerminetEnvelope, self).__init__()
self.n_one = n_one
self.n_two = n_two
self.total_electron = total_electron
self.batch_size = batch_size
self.spin = spin
self.no_of_atoms = no_of_atoms
self.determinant = determinant
self.layer_size: int = len(self.n_one)
self.envelope_w = torch.nn.ParameterList()
self.envelope_g = torch.nn.ParameterList()
self.sigma = torch.nn.ParameterList()
self.pi = torch.nn.ParameterList()
for i in range(self.determinant):
for j in range(self.total_electron):
self.envelope_w.append(
torch.nn.init.uniform(torch.empty(n_one[-1], 1),
b=0.00001).squeeze(-1))
self.envelope_g.append(
torch.nn.init.uniform(torch.empty(1),
b=0.000001).squeeze(0))
for k in range(self.no_of_atoms):
self.sigma.append(
torch.nn.init.uniform(torch.empty(self.no_of_atoms, 1),
b=0.000001).squeeze(0))
self.pi.append(
torch.nn.init.uniform(torch.empty(self.no_of_atoms, 1),
b=0.00001).squeeze(0))
def forward(self, one_electron: torch.Tensor,
one_electron_vector_permuted: torch.Tensor):
"""
Parameters
----------
one_electron: torch.Tensor
Torch tensor which is the output of the FerminetElectronFeature layer, in the shape of (batch_size, number of electrons, n_one layer size).
one_electron_vector_permuted: torch.Tensor
Torch tensor which is shape permuted vector of the original one_electron vector tensor. shape of the tensor should be (batch_size, number of atoms, number of electrons, 3).
Returns
-------
psi_up: torch.Tensor
Torch tensor with up spin electron values in the shape of (batch_size, determinant, up_spin, up_spin)
psi_down: torch.Tensor
Torch tensor with down spin electron values in the shape of (batch_size, determinant, down_spin, down_spin)
"""
psi_up = torch.zeros(self.batch_size, self.determinant, self.spin[0],
self.spin[0])
psi_down = torch.zeros(self.batch_size, self.determinant, self.spin[1],
self.spin[1])
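# For each determinant k and each same-spin pair (i, j), the orbital entry is (w . h_j + g) scaled by a sum over atoms of pi * exp(-|sigma * r_j|)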
for k in range(self.determinant):
for i in range(self.spin[0]):
one_d_index = (k * (self.total_electron)) + i
for j in range(self.spin[0]):
psi_up[:, k, i, j] = (torch.sum(
(self.envelope_w[one_d_index] * one_electron[:, j, :]) +
self.envelope_g[one_d_index],
dim=1)) * torch.sum(torch.exp(-torch.abs(
torch.norm(self.sigma[one_d_index] *
one_electron_vector_permuted[:, j, :, :],
dim=2))) * self.pi[one_d_index].T,
dim=1)
for i in range(self.spin[0], self.spin[0] + self.spin[1]):
one_d_index = (k * (self.total_electron)) + i
for j in range(self.spin[0], self.spin[0] + self.spin[1]):
psi_down[:, k, i - self.spin[0], j - self.spin[0]] = (
torch.sum((self.envelope_w[one_d_index] *
one_electron[:, j, :]) +
self.envelope_g[one_d_index],
dim=1)
) * torch.sum(torch.exp(-torch.abs(
torch.norm(self.sigma[one_d_index] *
one_electron_vector_permuted[:, j, :, :],
dim=2))) * self.pi[one_d_index].T,
dim=1)
return psi_up, psi_down
class MXMNetLocalMessagePassing(nn.Module):
"""
The MXMNetLocalMessagePassing class defines a local message passing layer used in the MXMNet model [1]_.
This layer integrates cross-layer mappings inside the local message passing, allowing for the transformation
of input tensors representing pairwise distances and angles between atoms in a molecular system.
The layer aggregates information using message passing and updates atom representations accordingly.
The 3-step message passing scheme is proposed in the paper [1]_.
1. Step 1 contains Message Passing 1 that captures the two-hop angles and related pairwise distances to update edge-level embeddings {mji}.
2. Step 2 contains Message Passing 2 that captures the one-hop angles and related pairwise distances to further update {mji}.
3. Step 3 finally aggregates {mji} to update the node-level embedding hi.
These steps in the t-th iteration can be formulated as follows:
Let:
- **mlp** : ``MultilayerPerceptron``
- **res** : ``ResidualBlock``
- **h** : ``node_features``
- **m** : ``message with radial basis function``
- **idx_kj**: ``Tensor containing indices for the k and j atoms``
- **x_i** : ``The node to be updated``
- **h_i** : ``The hidden state of x_i``
- **x_j** : ``The neighbour node connected to x_i by edge e_ij``
- **h_j** : ``The hidden state of x_j``
- **rbf** : ``Input tensor representing radial basis functions``
- **sbf** : ``Input tensor representing the spherical basis functions``
- **idx_jj** : ``Tensor containing indices for the j and j' where j' is other neighbours of i``
Step 1: Message Passing 1
.. code-block:: python
m = [h[i] || h[j] || rbf]
m_kj = mlp_kj(m[idx_kj]) * (rbf*W) * mlp_sbf1(sbf1)
m_ji = mlp_ji_1(m) + reduce_sum(m_kj)
Step 2: Message Passing 2
.. code-block:: python
m_ji = mlp_jj(m_ji[idx_jj]) * (rbf*W) * mlp_sbf2(sbf2)
m_ji = mlp_ji_2(m_ji) + reduce_sum(m_ji)
Step 3: Aggregation and Update
**In each aggregation step**
.. code-block:: python
m = reduce_sum(m_ji*(rbf*W))
**In each update step**
.. code-block:: python
hm_i = res1(m)
h_i_new = mlp2(hm_i) + h_i
h_i_new = res2(h_i_new)
h_i_new = res3(h_i_new)
References
----------
.. [1] Molecular Mechanics-Driven Graph Neural Network with Multiplex Graph for Molecular Structures. https://arxiv.org/pdf/2011.07457
Examples
--------
>>> dim = 1
>>> h = torch.tensor([[0.8343], [1.2713], [1.2713], [1.2713], [1.2713]])
>>> rbf = torch.tensor([[-0.2628], [-0.2628], [-0.2628], [-0.2628],
... [-0.2629], [-0.2629], [-0.2628], [-0.2628]])
>>> sbf1 = torch.tensor([[-0.2767], [-0.2767], [-0.2767], [-0.2767],
... [-0.2767], [-0.2767], [-0.2767], [-0.2767],
... [-0.2767], [-0.2767], [-0.2767], [-0.2767]])
>>> sbf2 = torch.tensor([[-0.0301], [-0.0301], [-0.1483], [-0.1486], [-0.1484],
... [-0.0301], [-0.1483], [-0.0301], [-0.1485], [-0.1483],
... [-0.0301], [-0.1486], [-0.1485], [-0.0301], [-0.1486],
... [-0.0301], [-0.1484], [-0.1483], [-0.1486], [-0.0301]])
>>> idx_kj = torch.tensor([3, 5, 7, 1, 5, 7, 1, 3, 7, 1, 3, 5])
>>> idx_ji_1 = torch.tensor([0, 0, 0, 2, 2, 2, 4, 4, 4, 6, 6, 6])
>>> idx_jj = torch.tensor([0, 1, 3, 5, 7, 2, 1, 3, 5, 7, 4, 1, 3, 5, 7, 6, 1, 3, 5, 7])
>>> idx_ji_2 = torch.tensor([0, 1, 1, 1, 1, 2, 3, 3, 3, 3, 4, 5, 5, 5, 5, 6, 7, 7, 7, 7])
>>> edge_index = torch.tensor([[0, 1, 0, 2, 0, 3, 0, 4],
... [1, 0, 2, 0, 3, 0, 4, 0]])
>>> out = MXMNetLocalMessagePassing(dim, activation_fn='silu')
>>> output = out(h,
... rbf,
... sbf1,
... sbf2,
... idx_kj,
... idx_ji_1,
... idx_jj,
... idx_ji_2,
... edge_index)
>>> output[0].shape
torch.Size([5, 1])
>>> output[1].shape
torch.Size([5, 1])
"""
def __init__(self, dim: int, activation_fn: Union[Callable, str] = 'silu'):
"""Initializes the MXMNetLocalMessagePassing layer.
Parameters
----------
dim : int
The dimension of the input and output tensors for the local message passing layer.
activation_fn : Union[Callable, str], optional (default: 'silu')
The activation function to be used in the multilayer perceptrons (MLPs) within the layer.
"""
super(MXMNetLocalMessagePassing, self).__init__()
activation_fn = get_activation(activation_fn)
self.h_mlp: MultilayerPerceptron = MultilayerPerceptron(
d_input=dim, d_output=dim, activation_fn=activation_fn)
self.mlp_kj: MultilayerPerceptron = MultilayerPerceptron(
d_input=3 * dim, d_output=dim, activation_fn=activation_fn)
self.mlp_ji_1: MultilayerPerceptron = MultilayerPerceptron(
d_input=3 * dim, d_output=dim, activation_fn=activation_fn)
self.mlp_ji_2: MultilayerPerceptron = MultilayerPerceptron(
d_input=dim, d_output=dim, activation_fn=activation_fn)
self.mlp_jj: MultilayerPerceptron = MultilayerPerceptron(
d_input=dim, d_output=dim, activation_fn=activation_fn)
self.mlp_sbf1: MultilayerPerceptron = MultilayerPerceptron(
d_input=dim,
d_hidden=(dim,),
d_output=dim,
activation_fn=activation_fn)
self.mlp_sbf2: MultilayerPerceptron = MultilayerPerceptron(
d_input=dim,
d_hidden=(dim,),
d_output=dim,
activation_fn=activation_fn)
self.res1: MultilayerPerceptron = MultilayerPerceptron(
d_input=dim,
d_hidden=(dim,),
d_output=dim,
activation_fn=activation_fn,
skip_connection=True,
weighted_skip=False)
self.res2: MultilayerPerceptron = MultilayerPerceptron(
d_input=dim,
d_hidden=(dim,),
d_output=dim,
activation_fn=activation_fn,
skip_connection=True,
weighted_skip=False)
self.res3: MultilayerPerceptron = MultilayerPerceptron(
d_input=dim,
d_hidden=(dim,),
d_output=dim,
activation_fn=activation_fn,
skip_connection=True,
weighted_skip=False)
self.lin_rbf1: nn.Linear = nn.Linear(dim, dim, bias=False)
self.lin_rbf2: nn.Linear = nn.Linear(dim, dim, bias=False)
self.lin_rbf_out: nn.Linear = nn.Linear(dim, dim, bias=False)
self.mlp: MultilayerPerceptron = MultilayerPerceptron(
d_input=dim, d_output=dim, activation_fn=activation_fn)
self.out_mlp: MultilayerPerceptron = MultilayerPerceptron(
d_input=dim,
d_hidden=(dim, dim),
d_output=dim,
activation_fn=activation_fn)
self.out_W: nn.Linear = nn.Linear(dim, 1)
def forward(self, node_features: torch.Tensor, rbf: torch.Tensor,
sbf1: torch.Tensor, sbf2: torch.Tensor, idx_kj: torch.Tensor,
idx_ji_1: torch.Tensor, idx_jj: torch.Tensor,
idx_ji_2: torch.Tensor,
edge_index: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
"""The forward method performs the computation for the MXMNetLocalMessagePassing Layer.
This method processes the input tensors representing atom features, radial basis functions (RBF), and spherical basis functions (SBF) using message passing over the molecular graph. The message passing updates the atom representations, and the resulting tensor represents the updated atom feature after local message passing.
Parameters
----------
node_features : torch.Tensor
Input tensor representing atom features.
rbf : torch.Tensor
Input tensor representing radial basis functions.
sbf1 : torch.Tensor
Input tensor representing the first set of spherical basis functions.
sbf2 : torch.Tensor
Input tensor representing the second set of spherical basis functions.
idx_kj : torch.Tensor
Tensor containing indices for the k and j atoms involved in each interaction.
idx_ji_1 : torch.Tensor
Tensor containing indices for the j and i atoms involved in the first message passing step.
idx_jj : torch.Tensor
Tensor containing indices for the j and j' atoms involved in the second message passing step.
idx_ji_2 : torch.Tensor
Tensor containing indices for the j and i atoms involved in the second message passing step.
edge_index : torch.Tensor
Tensor containing the edge indices of the molecular graph, with shape (2, M), where M is the number of edges.
Returns
-------
node_features: torch.Tensor
Updated atom representations after local message passing.
output: torch.Tensor
Output tensor representing a fixed-size representation, with shape (N, 1).
"""
residual_node_features: torch.Tensor = node_features
# Integrate the Cross Layer Mapping inside the Local Message Passing
node_features = self.h_mlp(node_features)
# Message Passing 1
j, i = edge_index
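# Build per-edge messages by concatenating the features of both edge endpoints with the radial basis expansion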
m: torch.Tensor = torch.cat([node_features[i], node_features[j], rbf],
dim=-1)
m_kj: torch.Tensor = self.mlp_kj(m)
m_kj = m_kj * self.lin_rbf1(rbf)
m_kj = m_kj[idx_kj] * self.mlp_sbf1(sbf1)
m_kj = scatter(m_kj, idx_ji_1, dim=0, dim_size=m.size(0), reduce='add')
m_ji_1: torch.Tensor = self.mlp_ji_1(m)
m = m_ji_1 + m_kj
# Message Passing 2
m_jj: torch.Tensor = self.mlp_jj(m)
m_jj = m_jj * self.lin_rbf2(rbf)
m_jj = m_jj[idx_jj] * self.mlp_sbf2(sbf2)
m_jj = scatter(m_jj, idx_ji_2, dim=0, dim_size=m.size(0), reduce='add')
m_ji_2: torch.Tensor = self.mlp_ji_2(m)
m = m_ji_2 + m_jj
# Aggregation
m = self.lin_rbf_out(rbf) * m
node_features = scatter(m,
i,
dim=0,
dim_size=node_features.size(0),
reduce='add')
# Update function f_u
node_features = self.res1(node_features)
node_features = self.mlp(node_features) + residual_node_features
node_features = self.res2(node_features)
node_features = self.res3(node_features)
# Output Module
out: torch.Tensor = self.out_mlp(node_features)
output: torch.Tensor = self.out_W(out)
return node_features, output
<file_sep>"""
Tests for Docking
"""
import os
import platform
import unittest
import pytest
import logging
import numpy as np
import deepchem as dc
from deepchem.feat import ComplexFeaturizer
from deepchem.models import Model
from deepchem.dock.pose_generation import PoseGenerator
IS_WINDOWS = platform.system() == 'Windows'
class TestDocking(unittest.TestCase):
"""Does sanity checks on pose generation."""
def setUp(self):
current_dir = os.path.dirname(os.path.realpath(__file__))
self.protein_file = os.path.join(current_dir, "1jld_protein.pdb")
self.ligand_file = os.path.join(current_dir, "1jld_ligand.sdf")
@pytest.mark.slow
def test_docker_init(self):
"""Test that Docker can be initialized."""
vpg = dc.dock.VinaPoseGenerator()
dc.dock.Docker(vpg)
@unittest.skipIf(IS_WINDOWS, "vina is not supported in windows")
@pytest.mark.slow
def test_docker_dock(self):
"""Test that Docker can dock."""
# We provide no scoring model so the docker won't score
vpg = dc.dock.VinaPoseGenerator()
docker = dc.dock.Docker(vpg)
docked_outputs = docker.dock((self.protein_file, self.ligand_file),
exhaustiveness=1,
num_modes=1,
out_dir="/tmp")
# Check only one output since num_modes==1
assert len(list(docked_outputs)) == 1
@unittest.skipIf(IS_WINDOWS, "vina is not supported in windows")
@pytest.mark.slow
def test_docker_pose_generator_scores(self):
"""Test that Docker can get scores from pose_generator."""
# No scoring model is provided; the scores come from the pose generator itself
vpg = dc.dock.VinaPoseGenerator()
docker = dc.dock.Docker(vpg)
docked_outputs = docker.dock((self.protein_file, self.ligand_file),
exhaustiveness=1,
num_modes=1,
out_dir="/tmp",
use_pose_generator_scores=True)
# Check only one output since num_modes==1
docked_outputs = list(docked_outputs)
assert len(docked_outputs) == 1
assert len(docked_outputs[0]) == 2
@unittest.skipIf(IS_WINDOWS, "vina is not supported in windows")
@pytest.mark.slow
def test_docker_specified_pocket(self):
"""Test that Docker can dock into spec. pocket."""
# Let's turn on logging since this test will run for a while
logging.basicConfig(level=logging.INFO)
vpg = dc.dock.VinaPoseGenerator()
docker = dc.dock.Docker(vpg)
docked_outputs = docker.dock((self.protein_file, self.ligand_file),
centroid=(10, 10, 10),
box_dims=(10, 10, 10),
exhaustiveness=1,
num_modes=1,
out_dir="/tmp")
# Check returned files exist
assert len(list(docked_outputs)) == 1
@unittest.skipIf(IS_WINDOWS, "vina is not supported in windows")
@pytest.mark.slow
def test_pocket_docker_dock(self):
"""Test that Docker can find pockets and dock dock."""
# Let's turn on logging since this test will run for a while
logging.basicConfig(level=logging.INFO)
pocket_finder = dc.dock.ConvexHullPocketFinder()
vpg = dc.dock.VinaPoseGenerator(pocket_finder=pocket_finder)
docker = dc.dock.Docker(vpg)
docked_outputs = docker.dock((self.protein_file, self.ligand_file),
exhaustiveness=1,
num_modes=1,
num_pockets=1,
out_dir="/tmp")
# Check returned files exist
assert len(list(docked_outputs)) == 1
@pytest.mark.slow
def test_scoring_model_and_featurizer(self):
"""Test that scoring model and featurizer are invoked correctly."""
class DummyFeaturizer(ComplexFeaturizer):
def featurize(self, complexes, *args, **kwargs):
return np.zeros((len(complexes), 5))
class DummyModel(Model):
def predict(self, dataset, *args, **kwargs):
return np.zeros(len(dataset))
class DummyPoseGenerator(PoseGenerator):
def generate_poses(self, *args, **kwargs):
return [None]
featurizer = DummyFeaturizer()
scoring_model = DummyModel()
pose_generator = DummyPoseGenerator()
docker = dc.dock.Docker(pose_generator, featurizer, scoring_model)
outputs = docker.dock(None)
assert list(outputs) == [(None, np.array([0.]))]
<file_sep># Hyperparameter Optimization
In this folder we provide examples of performing hyperparameter optimization
with DeepChem.
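As a quick illustration of the idea, the snippet below runs a hand-rolled grid search over two hyperparameters of a `MultitaskClassifier` and keeps the settings with the best validation ROC AUC. It is a minimal sketch rather than a copy of the scripts in this folder: the ClinTox dataset and the parameter grid are arbitrary choices made for the example, and DeepChem also provides dedicated utilities such as `dc.hyper.GridHyperparamOpt` that automate the same loop.

```python
import numpy as np
import deepchem as dc

# ClinTox (ECFP features, scaffold split) is used purely as an illustrative dataset.
tasks, (train, valid, test), transformers = dc.molnet.load_clintox()
metric = dc.metrics.Metric(dc.metrics.roc_auc_score, np.mean)

best_score, best_params = -np.inf, None
for layer_size in (500, 1000):
    for dropout in (0.25, 0.5):
        model = dc.models.MultitaskClassifier(n_tasks=len(tasks),
                                              n_features=1024,
                                              layer_sizes=[layer_size],
                                              dropouts=dropout)
        model.fit(train, nb_epoch=10)
        # Keep the hyperparameters with the best validation ROC AUC.
        score = model.evaluate(valid, [metric], transformers)['mean-roc_auc_score']
        if score > best_score:
            best_score = score
            best_params = {'layer_sizes': [layer_size], 'dropouts': dropout}

print(best_params, best_score)
```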
<file_sep>import unittest
import pytest
import tempfile
import deepchem as dc
import numpy as np
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
class TestCallbacks(unittest.TestCase):
@pytest.mark.torch
def test_validation(self):
"""Test ValidationCallback."""
tasks, datasets, transformers = dc.molnet.load_clintox()
train_dataset, valid_dataset, test_dataset = datasets
n_features = 1024
model = dc.models.MultitaskClassifier(len(tasks),
n_features,
dropouts=0.5)
# Train the model while logging the validation ROC AUC.
metric = dc.metrics.Metric(dc.metrics.roc_auc_score, np.mean)
log = StringIO()
save_dir = tempfile.mkdtemp()
callback = dc.models.ValidationCallback(valid_dataset,
30, [metric],
log,
save_dir=save_dir,
save_on_minimum=False,
transformers=transformers)
model.fit(train_dataset, callbacks=callback)
# Parse the log to pull out the AUC scores.
log.seek(0)
scores = []
for line in log:
score = float(line.split('=')[-1])
scores.append(score)
# The last reported score should match the current performance of the model.
valid_score = model.evaluate(valid_dataset, [metric], transformers)
self.assertAlmostEqual(valid_score['mean-roc_auc_score'],
scores[-1],
places=5)
# The highest recorded score should match get_best_score().
self.assertAlmostEqual(max(scores), callback.get_best_score(), places=5)
# Reload the saved model and confirm that it matches the best logged score.
model.restore(model_dir=save_dir)
valid_score = model.evaluate(valid_dataset, [metric], transformers)
self.assertAlmostEqual(valid_score['mean-roc_auc_score'],
max(scores),
places=5)
# Make sure get_best_score() still works when save_dir is not specified
callback = dc.models.ValidationCallback(valid_dataset,
30, [metric],
log,
save_on_minimum=False,
transformers=transformers)
model.fit(train_dataset, callbacks=callback)
log.seek(0)
scores = []
for line in log:
score = float(line.split('=')[-1])
scores.append(score)
self.assertTrue(abs(max(scores) - callback.get_best_score()) < 0.05)
<file_sep>import unittest
import numpy as np
import pytest
from deepchem.feat.graph_data import BatchGraphData, GraphData, shortest_path_length
class TestGraph(unittest.TestCase):
@pytest.mark.torch
def test_graph_data(self):
num_nodes, num_node_features = 5, 32
num_edges, num_edge_features = 6, 32
node_features = np.random.random_sample((num_nodes, num_node_features))
edge_features = np.random.random_sample((num_edges, num_edge_features))
edge_index = np.array([
[0, 1, 2, 2, 3, 4],
[1, 2, 0, 3, 4, 0],
])
node_pos_features = None
# z is kwargs
z = np.random.random(5)
graph = GraphData(node_features=node_features,
edge_index=edge_index,
edge_features=edge_features,
node_pos_features=node_pos_features,
z=z)
assert graph.num_nodes == num_nodes
assert graph.num_node_features == num_node_features
assert graph.num_edges == num_edges
assert graph.num_edge_features == num_edge_features
assert graph.z.shape == z.shape
assert str(
graph
) == 'GraphData(node_features=[5, 32], edge_index=[2, 6], edge_features=[6, 32], z=[5])'
# check convert function
pyg_graph = graph.to_pyg_graph()
from torch_geometric.data import Data
assert isinstance(pyg_graph, Data)
assert tuple(pyg_graph.z.shape) == z.shape
dgl_graph = graph.to_dgl_graph()
from dgl import DGLGraph
assert isinstance(dgl_graph, DGLGraph)
@pytest.mark.torch
def test_invalid_graph_data(self):
with self.assertRaises(ValueError):
invalid_node_features_type = list(np.random.random_sample((5, 32)))
edge_index = np.array([
[0, 1, 2, 2, 3, 4],
[1, 2, 0, 3, 4, 0],
])
_ = GraphData(
node_features=invalid_node_features_type,
edge_index=edge_index,
)
with self.assertRaises(ValueError):
node_features = np.random.random_sample((5, 32))
invalid_edge_index_shape = np.array([
[0, 1, 2, 2, 3, 4],
[1, 2, 0, 3, 4, 5],
])
_ = GraphData(
node_features=node_features,
edge_index=invalid_edge_index_shape,
)
with self.assertRaises(ValueError):
node_features = np.random.random_sample((5, 5))
invalid_edge_index_shape = np.array([
[0, 1, 2, 2, 3, 4],
[1, 2, 0, 3, 4, 0],
[2, 2, 1, 4, 0, 3],
],)
_ = GraphData(
node_features=node_features,
edge_index=invalid_edge_index_shape,
)
with self.assertRaises(TypeError):
node_features = np.random.random_sample((5, 32))
_ = GraphData(node_features=node_features)
@pytest.mark.torch
def test_batch_graph_data(self):
num_nodes_list, num_edge_list = [3, 4, 5], [2, 4, 5]
num_node_features, num_edge_features = 32, 32
edge_index_list = [
np.array([[0, 1], [1, 2]]),
np.array([[0, 1, 2, 3], [1, 2, 0, 2]]),
np.array([[0, 1, 2, 3, 4], [1, 2, 3, 4, 0]]),
]
graph_list = [
GraphData(node_features=np.random.random_sample(
(num_nodes_list[i], num_node_features)),
edge_index=edge_index_list[i],
edge_features=np.random.random_sample(
(num_edge_list[i], num_edge_features)),
node_pos_features=None) for i in range(len(num_edge_list))
]
batch = BatchGraphData(graph_list)
assert batch.num_nodes == sum(num_nodes_list)
assert batch.num_node_features == num_node_features
assert batch.num_edges == sum(num_edge_list)
assert batch.num_edge_features == num_edge_features
assert batch.graph_index.shape == (sum(num_nodes_list),)
assert batch.edge_index.max() == sum(num_edge_list)
assert batch.edge_index.shape == (2, sum(num_edge_list))
@pytest.mark.torch
def test_graph_data_single_atom_mol(self):
"""
Test for graph data when there are no edges in the graph (for example, a single-atom molecule).
"""
num_nodes, num_node_features = 1, 32
num_edges = 0
node_features = np.random.random_sample((num_nodes, num_node_features))
edge_index = np.empty((2, 0), dtype=int)
graph = GraphData(node_features=node_features, edge_index=edge_index)
assert graph.num_nodes == num_nodes
assert graph.num_node_features == num_node_features
assert graph.num_edges == num_edges
assert str(
graph
) == 'GraphData(node_features=[1, 32], edge_index=[2, 0], edge_features=None)'
@pytest.mark.torch
def test_graphdata_numpy_to_torch(self):
"""
Test for converting GraphData numpy arrays to torch tensors
"""
import torch
num_nodes, num_node_features = 5, 32
num_edges, num_edge_features = 6, 32
node_features = np.random.random_sample((num_nodes, num_node_features))
edge_features = np.random.random_sample((num_edges, num_edge_features))
edge_index = np.array([
[0, 1, 2, 2, 3, 4],
[1, 2, 0, 3, 4, 0],
])
node_pos_features = None
# z is kwargs
z = np.random.random(5)
graph_np = GraphData(node_features=node_features,
edge_index=edge_index,
edge_features=edge_features,
node_pos_features=node_pos_features,
z=z)
graph = graph_np.numpy_to_torch()
assert isinstance(graph.node_features, torch.Tensor)
assert isinstance(graph.edge_index, torch.Tensor)
assert isinstance(graph.edge_features, torch.Tensor)
assert graph.node_pos_features is None
assert isinstance(graph.z, torch.Tensor)
@pytest.mark.torch
def test_batchgraphdata_numpy_to_torch(self):
import torch
num_nodes_list, num_edge_list = [3, 4, 5], [2, 4, 5]
num_node_features, num_edge_features = 32, 32
edge_index_list = [
np.array([[0, 1], [1, 2]]),
np.array([[0, 1, 2, 3], [1, 2, 0, 2]]),
np.array([[0, 1, 2, 3, 4], [1, 2, 3, 4, 0]]),
]
graph_list = [
GraphData(node_features=np.random.random_sample(
(num_nodes_list[i], num_node_features)),
edge_index=edge_index_list[i],
edge_features=np.random.random_sample(
(num_edge_list[i], num_edge_features)),
node_pos_features=None) for i in range(len(num_edge_list))
]
batched_graph = BatchGraphData(graph_list)
batched_graph = batched_graph.numpy_to_torch()
assert isinstance(batched_graph, BatchGraphData)
assert isinstance(batched_graph.node_features, torch.Tensor)
assert isinstance(batched_graph.edge_index, torch.Tensor)
assert isinstance(batched_graph.edge_features, torch.Tensor)
assert batched_graph.node_pos_features is None
def test_batch_graph_data_with_user_defined_attributes(self):
edge_index = np.array([[0, 1], [1, 0]])
node_features_shape = 5
n_nodes = 2
g1 = GraphData(node_features=np.random.randn(n_nodes,
node_features_shape),
edge_index=edge_index,
user_defined_attribute1=[0, 1])
g2 = GraphData(node_features=np.random.randn(n_nodes,
node_features_shape),
edge_index=edge_index,
user_defined_attribute1=[2, 3])
g3 = GraphData(node_features=np.random.randn(n_nodes,
node_features_shape),
edge_index=edge_index,
user_defined_attribute1=[4, 5])
g = BatchGraphData([g1, g2, g3])
assert hasattr(g, 'user_defined_attribute1')
assert (g.user_defined_attribute1 == np.array([[0, 1], [2, 3],
[4, 5]])).all()
def test_shortest_path_length(self):
node_features = np.random.rand(5, 10)
edge_index = np.array([[0, 1, 2, 3, 4], [1, 2, 3, 4, 0]],
dtype=np.int64)
graph_data = GraphData(node_features, edge_index)
lengths = shortest_path_length(graph_data, 0)
assert lengths == {0: 0, 1: 1, 2: 2, 3: 2, 4: 1}
lengths_cutoff = shortest_path_length(graph_data, 0, cutoff=1)
assert lengths_cutoff == {0: 0, 1: 1, 4: 1}
def test_subgraph(self):
node_features = np.random.rand(5, 10)
edge_index = np.array([[0, 1, 2, 3, 4], [1, 2, 3, 4, 0]],
dtype=np.int64)
edge_features = np.random.rand(5, 3)
graph_data = GraphData(node_features, edge_index, edge_features)
nodes = [0, 1, 2, 4]
subgraph, node_mapping = graph_data.subgraph(nodes)
assert subgraph.num_nodes == len(nodes)
assert subgraph.num_edges == 3
expected_node_features = node_features[nodes]
np.testing.assert_array_equal(subgraph.node_features,
expected_node_features)
expected_edge_index = np.array([[0, 1, 3], [1, 2, 0]], dtype=np.int64)
np.testing.assert_array_equal(subgraph.edge_index, expected_edge_index)
expected_edge_features = edge_features[[0, 1, 4]]
np.testing.assert_array_equal(subgraph.edge_features,
expected_edge_features)
expected_node_mapping = {0: 0, 1: 1, 2: 2, 4: 3}
assert node_mapping == expected_node_mapping
<file_sep># Clintox dataset models
The Clintox dataset is a collection of "clinical toxicity" data that compares drugs approved by the FDA with drugs that have failed clinical trials for toxicity reasons. It contains two classification tasks for 1491 compounds:
1) Clinical trial toxicity/non-toxicity
2) FDA approval status
In this example, we construct fully connected deep networks and
graph convolutional models for the task of predicting clinical
toxicity/FDA approval status from molecular structure.
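The sketch below shows the fully connected variant of this workflow; it is a minimal example rather than a full training script (the number of epochs and the 1024-bit ECFP featurization are arbitrary choices made here). The graph convolutional variant follows the same pattern with `featurizer='GraphConv'` and `dc.models.GraphConvModel`.

```python
import numpy as np
import deepchem as dc

# Load ClinTox with 1024-bit ECFP fingerprints and the default scaffold split.
tasks, (train, valid, test), transformers = dc.molnet.load_clintox(featurizer='ECFP')

# Fully connected multitask network over the two classification tasks.
model = dc.models.MultitaskClassifier(n_tasks=len(tasks), n_features=1024, dropouts=0.5)
model.fit(train, nb_epoch=10)

# Report the mean ROC AUC on the validation split.
metric = dc.metrics.Metric(dc.metrics.roc_auc_score, np.mean)
print(model.evaluate(valid, [metric], transformers))
```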
<file_sep>"""
Cell Counting Dataset.
Loads the cell counting dataset from
http://www.robots.ox.ac.uk/~vgg/research/counting/index_org.html. Labels aren't
available for this dataset, so only raw images are provided.
"""
import os
import deepchem as dc
from deepchem.molnet.load_function.molnet_loader import TransformerGenerator, _MolnetLoader
from deepchem.data import Dataset
from typing import List, Optional, Tuple, Union
CELL_COUNTING_URL = 'http://www.robots.ox.ac.uk/~vgg/research/counting/cells.zip'
CELL_COUNTING_TASKS: List[str] = []
class _CellCountingLoader(_MolnetLoader):
def create_dataset(self) -> Dataset:
dataset_file = os.path.join(self.data_dir, "cells.zip")
if not os.path.exists(dataset_file):
dc.utils.data_utils.download_url(url=CELL_COUNTING_URL,
dest_dir=self.data_dir)
loader = dc.data.ImageLoader()
return loader.featurize(dataset_file)
def load_cell_counting(
splitter: Union[dc.splits.Splitter, str, None] = None,
transformers: List[Union[TransformerGenerator, str]] = [],
reload: bool = True,
data_dir: Optional[str] = None,
save_dir: Optional[str] = None,
**kwargs
) -> Tuple[List[str], Tuple[Dataset, ...], List[dc.trans.Transformer]]:
"""Load Cell Counting dataset.
Loads the cell counting dataset from http://www.robots.ox.ac.uk/~vgg/research/counting/index_org.html.
Parameters
----------
splitter: Splitter or str
the splitter to use for splitting the data into training, validation, and
test sets. Alternatively you can pass one of the names from
dc.molnet.splitters as a shortcut. If this is None, all the data
will be included in a single dataset.
transformers: list of TransformerGenerators or strings
the Transformers to apply to the data. Each one is specified by a
TransformerGenerator or, as a shortcut, one of the names from
dc.molnet.transformers.
reload: bool
if True, the first call for a particular featurizer and splitter will cache
the datasets to disk, and subsequent calls will reload the cached datasets.
data_dir: str
a directory to save the raw data in
save_dir: str
a directory to save the dataset in
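Examples
--------
A minimal sketch; the raw images are downloaded on the first call, and
passing splitter=None returns all images as a single dataset.

>>> tasks, datasets, transformers = dc.molnet.load_cell_counting(splitter=None)  # doctest: +SKIP
>>> (dataset,) = datasets  # doctest: +SKIP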
"""
featurizer = dc.feat.UserDefinedFeaturizer([]) # Not actually used
loader = _CellCountingLoader(featurizer, splitter, transformers,
CELL_COUNTING_TASKS, data_dir, save_dir,
**kwargs)
return loader.load_dataset('cell_counting', reload)
<file_sep>"""Proximal Policy Optimization (PPO) algorithm for reinforcement learning."""
import copy
import time
from collections.abc import Sequence as SequenceCollection
from multiprocessing.dummy import Pool
import numpy as np
import tensorflow as tf
from deepchem.models import KerasModel
from deepchem.models.optimizers import Adam
class PPOLoss(object):
"""This class computes the loss function for PPO."""
def __init__(self, value_weight, entropy_weight, clipping_width,
action_prob_index, value_index):
self.value_weight = value_weight
self.entropy_weight = entropy_weight
self.clipping_width = clipping_width
self.action_prob_index = action_prob_index
self.value_index = value_index
def __call__(self, outputs, labels, weights):
prob = outputs[self.action_prob_index]
value = outputs[self.value_index]
reward, advantage, old_prob = weights
action = labels[0]
advantage = tf.expand_dims(advantage, axis=1)
machine_eps = np.finfo(np.float32).eps
prob += machine_eps
old_prob += machine_eps
# Probability ratio between the current and old policy for the action taken.
ratio = tf.reduce_sum(action * prob, axis=1) / old_prob
# Clipped surrogate objective: take the pessimistic minimum of the clipped
# and unclipped advantage-weighted ratios.
clipped_ratio = tf.clip_by_value(ratio, 1 - self.clipping_width,
1 + self.clipping_width)
policy_loss = -tf.reduce_mean(
tf.minimum(ratio * advantage, clipped_ratio * advantage))
# Squared error between the discounted reward and the value estimate.
value_loss = tf.reduce_mean(tf.square(reward - value))
# Entropy of the action distribution, used as an exploration bonus.
entropy = -tf.reduce_mean(
tf.reduce_sum(prob * tf.math.log(prob), axis=1))
return policy_loss + self.value_weight * value_loss - self.entropy_weight * entropy
class PPO(object):
"""
Implements the Proximal Policy Optimization (PPO) algorithm for reinforcement learning.
The algorithm is described in Schulman et al, "Proximal Policy Optimization Algorithms"
(https://openai-public.s3-us-west-2.amazonaws.com/blog/2017-07/ppo/ppo-arxiv.pdf).
This class requires the policy to output two quantities: a vector giving the probability of
taking each action, and an estimate of the value function for the current state. It
optimizes both outputs at once using a loss that is the sum of three terms:
1. The policy loss, which seeks to maximize the discounted reward for each action.
2. The value loss, which tries to make the value estimate match the actual discounted reward
that was attained at each step.
3. An entropy term to encourage exploration.
This class only supports environments with discrete action spaces, not continuous ones. The
"action" argument passed to the environment is an integer, giving the index of the action to perform.
This class supports Generalized Advantage Estimation as described in Schulman et al., "High-Dimensional
Continuous Control Using Generalized Advantage Estimation" (https://arxiv.org/abs/1506.02438).
This is a method of trading off bias and variance in the advantage estimate, which can sometimes
improve the rate of convergence. Use the advantage_lambda parameter to adjust the tradeoff.
This class supports Hindsight Experience Replay as described in Andrychowicz et al., "Hindsight
Experience Replay" (https://arxiv.org/abs/1707.01495). This is a method that can enormously
accelerate learning when rewards are very rare. It requires that the environment state contains
information about the goal the agent is trying to achieve. Each time it generates a rollout, it
processes that rollout twice: once using the actual goal the agent was pursuing while generating
it, and again using the final state of that rollout as the goal. This guarantees that half of
all rollouts processed will be ones that achieved their goals, and hence received a reward.
To use this feature, specify use_hindsight=True to the constructor. The environment must have
a method defined as follows:
def apply_hindsight(self, states, actions, goal):
...
return new_states, rewards
The method receives the list of states generated during the rollout, the action taken for each one,
and a new goal state. It should generate a new list of states that are identical to the input ones,
except specifying the new goal. It should return that list of states, and the rewards that would
have been received for taking the specified actions from those states. The output arrays may be
shorter than the input ones, if the modified rollout would have terminated sooner.
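A minimal usage sketch is shown below; here `env` stands for a user-defined Environment
subclass and `policy` for a Policy whose outputs are named 'action_prob' and 'value'
(both are placeholders, not objects provided by this module):

>>> ppo = PPO(env, policy, max_rollout_length=20, batch_size=64)  # doctest: +SKIP
>>> ppo.fit(100000)  # doctest: +SKIP
>>> action = ppo.select_action(env.state)  # doctest: +SKIP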
"""
def __init__(self,
env,
policy,
max_rollout_length=20,
optimization_rollouts=8,
optimization_epochs=4,
batch_size=64,
clipping_width=0.2,
discount_factor=0.99,
advantage_lambda=0.98,
value_weight=1.0,
entropy_weight=0.01,
optimizer=None,
model_dir=None,
use_hindsight=False):
"""Create an object for optimizing a policy.
Parameters
----------
env: Environment
the Environment to interact with
policy: Policy
the Policy to optimize. It must have outputs with the names 'action_prob'
and 'value', corresponding to the action probabilities and value estimate
max_rollout_length: int
the maximum length of rollouts to generate
optimization_rollouts: int
the number of rollouts to generate for each iteration of optimization
optimization_epochs: int
the number of epochs of optimization to perform within each iteration
batch_size: int
the batch size to use during optimization. If this is 0, each rollout will be used as a
separate batch.
clipping_width: float
in computing the PPO loss function, the probability ratio is clipped to the range
(1-clipping_width, 1+clipping_width)
discount_factor: float
the discount factor to use when computing rewards
advantage_lambda: float
the parameter for trading bias vs. variance in Generalized Advantage Estimation
value_weight: float
a scale factor for the value loss term in the loss function
entropy_weight: float
a scale factor for the entropy term in the loss function
optimizer: Optimizer
the optimizer to use. If None, a default optimizer is used.
model_dir: str
the directory in which the model will be saved. If None, a temporary directory will be created.
use_hindsight: bool
if True, use Hindsight Experience Replay
"""
self._env = env
self._policy = policy
self.max_rollout_length = max_rollout_length
self.optimization_rollouts = optimization_rollouts
self.optimization_epochs = optimization_epochs
self.batch_size = batch_size
self.clipping_width = clipping_width
self.discount_factor = discount_factor
self.advantage_lambda = advantage_lambda
self.value_weight = value_weight
self.entropy_weight = entropy_weight
self.use_hindsight = use_hindsight
self._state_is_list = isinstance(env.state_shape[0], SequenceCollection)
if optimizer is None:
self._optimizer = Adam(learning_rate=0.001, beta1=0.9, beta2=0.999)
else:
self._optimizer = optimizer
output_names = policy.output_names
self._value_index = output_names.index('value')
self._action_prob_index = output_names.index('action_prob')
self._rnn_final_state_indices = [
i for i, n in enumerate(output_names) if n == 'rnn_state'
]
self._rnn_states = policy.rnn_initial_states
if len(self._rnn_states) > 0 and batch_size != 0:
raise ValueError(
'Cannot batch rollouts when the policy contains a recurrent layer. Set batch_size to 0.'
)
self._model = self._build_model(model_dir)
self._checkpoint = tf.train.Checkpoint()
self._checkpoint.save_counter # Ensure the variable has been created
self._checkpoint.listed = self._model.model.trainable_variables
def _build_model(self, model_dir):
"""Construct a KerasModel containing the policy and loss calculations."""
policy_model = self._policy.create_model()
loss = PPOLoss(self.value_weight, self.entropy_weight,
self.clipping_width, self._action_prob_index,
self._value_index)
model = KerasModel(policy_model,
loss,
batch_size=self.max_rollout_length,
model_dir=model_dir,
optimize=self._optimizer)
model._ensure_built()
return model
def fit(self,
total_steps,
max_checkpoints_to_keep=5,
checkpoint_interval=600,
restore=False):
"""Train the policy.
Parameters
----------
total_steps: int
the total number of time steps to perform on the environment, across all rollouts
on all threads
max_checkpoints_to_keep: int
the maximum number of checkpoint files to keep. When this number is reached, older
files are deleted.
checkpoint_interval: float
the time interval at which to save checkpoints, measured in seconds
restore: bool
if True, restore the model from the most recent checkpoint and continue training
from there. If False, retrain the model from scratch.
"""
step_count = 0
workers = []
for i in range(self.optimization_rollouts):
workers.append(_Worker(self, i))
if restore:
self.restore()
pool = Pool()
manager = tf.train.CheckpointManager(self._checkpoint,
self._model.model_dir,
max_checkpoints_to_keep)
checkpoint_time = time.time()
while step_count < total_steps:
# Have the worker threads generate the rollouts for this iteration.
rollouts = []
pool.map(lambda x: rollouts.extend(x.run()), workers)
# Perform optimization.
for epoch in range(self.optimization_epochs):
if self.batch_size == 0:
batches = rollouts
else:
batches = self._iter_batches(rollouts)
for batch in batches:
initial_rnn_states, state_arrays, discounted_rewards, actions_matrix, action_prob, advantages = batch
# Build the inputs and run the optimizer.
state_arrays = [np.stack(s) for s in state_arrays]
inputs = state_arrays + [
np.expand_dims(s, axis=0) for s in initial_rnn_states
]
self._apply_gradients(inputs, actions_matrix,
discounted_rewards, advantages,
action_prob)
# Update the number of steps taken so far and perform checkpointing.
new_steps = sum(len(r[3]) for r in rollouts)
if self.use_hindsight:
new_steps /= 2
step_count += new_steps
if step_count >= total_steps or time.time(
) >= checkpoint_time + checkpoint_interval:
manager.save()
checkpoint_time = time.time()
@tf.function(experimental_relax_shapes=True)
def _apply_gradients(self, inputs, actions_matrix, discounted_rewards,
advantages, action_prob):
"""Compute the gradient of the loss function for a batch and update the model."""
vars = self._model.model.trainable_variables
with tf.GradientTape() as tape:
outputs = self._model.model(inputs)
loss = self._model._loss_fn(
outputs, [actions_matrix],
[discounted_rewards, advantages, action_prob])
gradients = tape.gradient(loss, vars)
self._model._tf_optimizer.apply_gradients(zip(gradients, vars))
def _iter_batches(self, rollouts):
"""Given a set of rollouts, merge them into batches for optimization."""
# Merge all the rollouts into a single set of arrays.
state_arrays = []
for i in range(len(rollouts[0][1])):
state_arrays.append(np.concatenate([r[1][i] for r in rollouts]))
discounted_rewards = np.concatenate([r[2] for r in rollouts])
actions_matrix = np.concatenate([r[3] for r in rollouts])
action_prob = np.concatenate([r[4] for r in rollouts])
advantages = np.concatenate([r[5] for r in rollouts])
total_length = len(discounted_rewards)
# Iterate slices.
start = 0
while start < total_length:
end = min(start + self.batch_size, total_length)
batch = [[]]
batch.append([s[start:end] for s in state_arrays])
batch.append(discounted_rewards[start:end])
batch.append(actions_matrix[start:end])
batch.append(action_prob[start:end])
batch.append(advantages[start:end])
start = end
yield batch
def predict(self, state, use_saved_states=True, save_states=True):
"""Compute the policy's output predictions for a state.
If the policy involves recurrent layers, this method can preserve their internal
states between calls. Use the use_saved_states and save_states arguments to specify
how it should behave.
Parameters
----------
state: array or list of arrays
the state of the environment for which to generate predictions
use_saved_states: bool
if True, the states most recently saved by a previous call to predict() or select_action()
will be used as the initial states. If False, the internal states of all recurrent layers
will be set to the initial values defined by the policy before computing the predictions.
save_states: bool
if True, the internal states of all recurrent layers at the end of the calculation
will be saved, and any previously saved states will be discarded. If False, the
states at the end of the calculation will be discarded, and any previously saved
states will be kept.
Returns
-------
the array of action probabilities, and the estimated value function
"""
results = self._predict_outputs(state, use_saved_states, save_states)
return [
results[i] for i in (self._action_prob_index, self._value_index)
]
def select_action(self,
state,
deterministic=False,
use_saved_states=True,
save_states=True):
"""Select an action to perform based on the environment's state.
If the policy involves recurrent layers, this method can preserve their internal
states between calls. Use the use_saved_states and save_states arguments to specify
how it should behave.
Parameters
----------
state: array or list of arrays
the state of the environment for which to select an action
deterministic: bool
if True, always return the best action (that is, the one with highest probability).
If False, randomly select an action based on the computed probabilities.
use_saved_states: bool
if True, the states most recently saved by a previous call to predict() or select_action()
will be used as the initial states. If False, the internal states of all recurrent layers
will be set to the initial values defined by the policy before computing the predictions.
save_states: bool
if True, the internal states of all recurrent layers at the end of the calculation
will be saved, and any previously saved states will be discarded. If False, the
states at the end of the calculation will be discarded, and any previously saved
states will be kept.
Returns
-------
the index of the selected action
"""
outputs = self._predict_outputs(state, use_saved_states, save_states)
return self._select_action_from_outputs(outputs, deterministic)
def restore(self):
"""Reload the model parameters from the most recent checkpoint file."""
last_checkpoint = tf.train.latest_checkpoint(self._model.model_dir)
if last_checkpoint is None:
raise ValueError('No checkpoint found')
self._checkpoint.restore(last_checkpoint)
def _predict_outputs(self, state, use_saved_states, save_states):
"""Compute a set of outputs for a state. """
if not self._state_is_list:
state = [state]
if use_saved_states:
state = state + list(self._rnn_states)
else:
state = state + list(self._policy.rnn_initial_states)
inputs = [np.expand_dims(s, axis=0) for s in state]
results = self._compute_model(inputs)
results = [r.numpy() for r in results]
if save_states:
self._rnn_states = [
np.squeeze(results[i], 0) for i in self._rnn_final_state_indices
]
return results
@tf.function(experimental_relax_shapes=True)
def _compute_model(self, inputs):
return self._model.model(inputs)
def _select_action_from_outputs(self, outputs, deterministic):
"""Given the policy outputs, select an action to perform."""
action_prob = outputs[self._action_prob_index]
if deterministic:
return action_prob.argmax()
else:
action_prob = action_prob.flatten()
return np.random.choice(np.arange(len(action_prob)), p=action_prob)
def _create_feed_dict(self, state, use_saved_states):
"""Create a feed dict for use by predict() or select_action()."""
if use_saved_states:
state = state + list(self._rnn_states)
else:
state = state + list(self._policy.rnn_initial_states)
return dict((f, np.expand_dims(s, axis=0))
for f, s in zip(self._model._input_placeholders, state))
class _Worker(object):
"""A Worker object is created for each training thread."""
def __init__(self, ppo, index):
self.ppo = ppo
self.index = index
self.scope = 'worker%d' % index
self.env = copy.deepcopy(ppo._env)
self.env.reset()
self.model = ppo._build_model(None)
self.rnn_states = ppo._policy.rnn_initial_states
def run(self):
rollouts = []
local_vars = self.model.model.trainable_variables
global_vars = self.ppo._model.model.trainable_variables
for v1, v2 in zip(local_vars, global_vars):
v1.assign(v2)
initial_rnn_states = self.rnn_states
states, actions, action_prob, rewards, values = self.create_rollout()
rollouts.append(
self.process_rollout(states, actions, action_prob, rewards, values,
initial_rnn_states))
if self.ppo.use_hindsight:
rollouts.append(
self.process_rollout_with_hindsight(states, actions,
initial_rnn_states))
return rollouts
def create_rollout(self):
"""Generate a rollout."""
states = []
action_prob = []
actions = []
rewards = []
values = []
# Generate the rollout.
for i in range(self.ppo.max_rollout_length):
if self.env.terminated:
break
state = self.env.state
states.append(state)
results = self._compute_model(
self._create_model_inputs(state, self.rnn_states))
results = [r.numpy() for r in results]
value = results[self.ppo._value_index]
probabilities = np.squeeze(results[self.ppo._action_prob_index])
self.rnn_states = [
np.squeeze(results[i], 0)
for i in self.ppo._rnn_final_state_indices
]
action = self.ppo._select_action_from_outputs(results, False)
actions.append(action)
action_prob.append(probabilities[action])
values.append(float(value))
rewards.append(self.env.step(action))
# Compute an estimate of the reward for the rest of the episode.
if not self.env.terminated:
results = self._compute_model(
self._create_model_inputs(self.env.state, self.rnn_states))
final_value = self.ppo.discount_factor * results[
self.ppo._value_index].numpy()[0]
else:
final_value = 0.0
values.append(final_value)
if self.env.terminated:
self.env.reset()
self.rnn_states = self.ppo._policy.rnn_initial_states
return states, np.array(actions, dtype=np.int32), np.array(
action_prob, dtype=np.float32), np.array(
rewards, dtype=np.float32), np.array(values, dtype=np.float32)
def process_rollout(self, states, actions, action_prob, rewards, values,
initial_rnn_states):
"""Construct the arrays needed for training."""
# Compute the discounted rewards and advantages.
discounted_rewards = rewards.copy()
discounted_rewards[-1] += values[-1]
advantages = rewards - values[:-1] + self.ppo.discount_factor * np.array(
values[1:])
for j in range(len(rewards) - 1, 0, -1):
discounted_rewards[
j - 1] += self.ppo.discount_factor * discounted_rewards[j]
advantages[
j -
1] += self.ppo.discount_factor * self.ppo.advantage_lambda * advantages[
j]
# Convert the actions to one-hot.
n_actions = self.env.n_actions
actions_matrix = []
for action in actions:
a = np.zeros(n_actions, np.float32)
a[action] = 1.0
actions_matrix.append(a)
actions_matrix = np.array(actions_matrix, dtype=np.float32)
# Rearrange the states into the proper set of arrays.
if self.ppo._state_is_list:
state_arrays = [[] for i in range(len(self.env.state_shape))]
for state in states:
for j in range(len(state)):
state_arrays[j].append(state[j])
else:
state_arrays = [states]
# Return the processed arrays.
return (initial_rnn_states, state_arrays, discounted_rewards,
actions_matrix, action_prob, advantages)
def process_rollout_with_hindsight(self, states, actions,
initial_rnn_states):
"""Create a new rollout by applying hindsight to an existing one, then process it."""
hindsight_states, rewards = self.env.apply_hindsight(
states, actions, states[-1])
if self.ppo._state_is_list:
state_arrays = [[] for i in range(len(self.env.state_shape))]
for state in hindsight_states:
for j in range(len(state)):
state_arrays[j].append(state[j])
else:
state_arrays = [hindsight_states]
state_arrays += initial_rnn_states
state_arrays = [np.stack(s) for s in state_arrays]
inputs = state_arrays + [
np.expand_dims(s, axis=0) for s in initial_rnn_states
]
outputs = self._compute_model(inputs)
values = outputs[self.ppo._value_index].numpy()
values = np.append(values.flatten(), 0.0)
probabilities = outputs[self.ppo._action_prob_index].numpy()
actions = actions[:len(rewards)]
action_prob = probabilities[np.arange(len(actions)), actions]
return self.process_rollout(hindsight_states, actions, action_prob,
np.array(rewards, dtype=np.float32),
np.array(values, dtype=np.float32),
initial_rnn_states)
def _create_model_inputs(self, state, rnn_states):
"""Create the inputs to the model for use during a rollout."""
if not self.ppo._state_is_list:
state = [state]
state = state + rnn_states
return [np.expand_dims(s, axis=0) for s in state]
@tf.function(experimental_relax_shapes=True)
def _compute_model(self, inputs):
return self.model.model(inputs)
<file_sep>import pytest
try:
import jax
import jax.numpy as jnp
from jax import random # noqa: F401
import haiku as hk
except:
has_haiku_and_optax = False
@pytest.mark.jax
def test_linear():
from deepchem.models.jax_models import layers as jax_models_layers
def forward(x):
layer = jax_models_layers.Linear(2)
return layer(x)
forward = hk.transform(forward)
rng = jax.random.PRNGKey(42)
x = jnp.ones([8, 28 * 28])
params = forward.init(rng, x)
output = forward.apply(params, rng, x)
assert output.shape == (8, 2)
<file_sep>"""
KINASE dataset loader.
"""
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
import os
import shutil
import time
import numpy as np
import deepchem as dc
from kinase_features import kinase_descriptors
def remove_missing_entries(dataset):
"""Remove missing entries.
Some of the datasets have missing entries that sneak in as zero'd out
feature vectors. Get rid of them.
"""
for i, (X, y, w, ids) in enumerate(dataset.itershards()):
available_rows = X.any(axis=1)
print("Shard %d has %d missing entries."
% (i, np.count_nonzero(~available_rows)))
X = X[available_rows]
y = y[available_rows]
w = w[available_rows]
ids = ids[available_rows]
dataset.set_shard(i, X, y, w, ids)
def get_transformers(train_dataset):
"""Get transformers applied to datasets."""
transformers = []
return transformers
def gen_kinase(KINASE_tasks, raw_train_dir, train_dir, valid_dir, test_dir,
shard_size=10000):
"""Load Kinase datasets."""
train_files = ("KINASE_training_disguised_combined_full.csv.gz")
valid_files = ("KINASE_test1_disguised_combined_full.csv.gz")
test_files = ("KINASE_test2_disguised_combined_full.csv.gz")
# Featurize Kinase dataset
print("About to featurize KINASE dataset.")
featurizer = dc.feat.UserDefinedFeaturizer(kinase_descriptors)
loader = dc.data.UserCSVLoader(
tasks=KINASE_tasks, id_field="Molecule", featurizer=featurizer)
train_datasets, valid_datasets, test_datasets = [], [], []
print("Featurizing train datasets")
train_dataset = loader.featurize(train_files, shard_size=shard_size)
print("Featurizing valid datasets")
valid_dataset = loader.featurize(valid_files, shard_size=shard_size)
print("Featurizing test datasets")
test_dataset = loader.featurize(test_files, shard_size=shard_size)
print("Remove missing entries from datasets.")
remove_missing_entries(train_dataset)
remove_missing_entries(valid_dataset)
remove_missing_entries(test_dataset)
print("Transforming datasets with transformers.")
transformers = get_transformers(train_dataset)
raw_train_dataset = train_dataset
for transformer in transformers:
print("Performing transformations with %s"
% transformer.__class__.__name__)
print("Transforming datasets")
train_dataset = transformer.transform(train_dataset)
valid_dataset = transformer.transform(valid_dataset)
test_dataset = transformer.transform(test_dataset)
print("Shuffling order of train dataset.")
train_dataset.sparse_shuffle()
print("Moving directories")
raw_train_dataset.move(raw_train_dir)
train_dataset.move(train_dir)
valid_dataset.move(valid_dir)
test_dataset.move(test_dir)
return (raw_train_dataset, train_dataset, valid_dataset, test_dataset)
def load_kinase(shard_size):
"""Loads kinase datasets. Generates if not stored already."""
KINASE_tasks = (['T_000%d' % i for i in range(13, 100)]
+ ['T_00%d' % i for i in range(100, 112)])
current_dir = os.path.dirname(os.path.realpath(__file__))
raw_train_dir = os.path.join(current_dir, "raw_train_dir")
train_dir = os.path.join(current_dir, "train_dir")
valid_dir = os.path.join(current_dir, "valid_dir")
test_dir = os.path.join(current_dir, "test_dir")
if (os.path.exists(raw_train_dir) and
os.path.exists(train_dir) and
os.path.exists(valid_dir) and
os.path.exists(test_dir)):
print("Reloading existing datasets")
raw_train_dataset = dc.data.DiskDataset(raw_train_dir)
train_dataset = dc.data.DiskDataset(train_dir)
valid_dataset = dc.data.DiskDataset(valid_dir)
test_dataset = dc.data.DiskDataset(test_dir)
else:
print("Featurizing datasets")
(raw_train_dataset, train_dataset, valid_dataset, test_dataset) = \
gen_kinase(KINASE_tasks, raw_train_dir, train_dir, valid_dir, test_dir,
shard_size=shard_size)
transformers = get_transformers(raw_train_dataset)
return KINASE_tasks, (train_dataset, valid_dataset, test_dataset), transformers
<file_sep>import unittest
import pytest
try:
import tensorflow as tf # noqa: F401
from deepchem.models import TextCNNModel
from deepchem.models.text_cnn import default_dict
has_tensorflow = True
except:
has_tensorflow = False
class TestTextCNNModel(unittest.TestCase):
@pytest.mark.tensorflow
def test_set_length(self):
model = TextCNNModel(1, default_dict, 1)
self.assertEqual(model.seq_length, max(model.kernel_sizes))
large_length = 500
model = TextCNNModel(1, default_dict, large_length)
self.assertEqual(model.seq_length, large_length)
<file_sep>import deepchem as dc
import numpy as np
import pytest
import tempfile
from flaky import flaky
try:
import tensorflow as tf
from tensorflow.keras.layers import Input, Concatenate, Dense
class ExampleGAN(dc.models.GAN):
def get_noise_input_shape(self):
return (2,)
def get_data_input_shapes(self):
return [(1,)]
def get_conditional_input_shapes(self):
return [(1,)]
def create_generator(self):
noise_input = Input(self.get_noise_input_shape())
conditional_input = Input(self.get_conditional_input_shapes()[0])
inputs = [noise_input, conditional_input]
gen_in = Concatenate(axis=1)(inputs)
output = Dense(1)(gen_in)
return tf.keras.Model(inputs=inputs, outputs=output)
def create_discriminator(self):
data_input = Input(self.get_data_input_shapes()[0])
conditional_input = Input(self.get_conditional_input_shapes()[0])
inputs = [data_input, conditional_input]
discrim_in = Concatenate(axis=1)(inputs)
dense = Dense(10, activation=tf.nn.relu)(discrim_in)
output = Dense(1, activation=tf.sigmoid)(dense)
return tf.keras.Model(inputs=inputs, outputs=output)
has_tensorflow = True
except:
has_tensorflow = False
@pytest.mark.tensorflow
def generate_batch(batch_size):
"""Draw training data from a Gaussian distribution, where the mean is a conditional input."""
means = 10 * np.random.random([batch_size, 1])
values = np.random.normal(means, scale=2.0)
return means, values
@pytest.mark.tensorflow
def generate_data(gan, batches, batch_size):
for i in range(batches):
means, values = generate_batch(batch_size)
batch = {gan.data_inputs[0]: values, gan.conditional_inputs[0]: means}
yield batch
@flaky
@pytest.mark.tensorflow
def test_cgan():
"""Test fitting a conditional GAN."""
gan = ExampleGAN(learning_rate=0.01)
gan.fit_gan(generate_data(gan, 500, 100),
generator_steps=0.5,
checkpoint_interval=0)
# See if it has done a plausible job of learning the distribution.
means = 10 * np.random.random([1000, 1])
values = gan.predict_gan_generator(conditional_inputs=[means])
deltas = values - means
assert abs(np.mean(deltas)) < 1.0
assert np.std(deltas) > 1.0
assert gan.get_global_step() == 500
@flaky
@pytest.mark.tensorflow
def test_cgan_reload():
"""Test reloading a conditional GAN."""
model_dir = tempfile.mkdtemp()
gan = ExampleGAN(learning_rate=0.01, model_dir=model_dir)
gan.fit_gan(generate_data(gan, 500, 100), generator_steps=0.5)
# See if it has done a plausible job of learning the distribution.
means = 10 * np.random.random([1000, 1])
batch_size = len(means)
noise_input = gan.get_noise_batch(batch_size=batch_size)
values = gan.predict_gan_generator(noise_input=noise_input,
conditional_inputs=[means])
deltas = values - means
assert abs(np.mean(deltas)) < 1.0
assert np.std(deltas) > 1.0
assert gan.get_global_step() == 500
reloaded_gan = ExampleGAN(learning_rate=0.01, model_dir=model_dir)
reloaded_gan.restore()
reloaded_values = reloaded_gan.predict_gan_generator(
noise_input=noise_input, conditional_inputs=[means])
assert np.all(values == reloaded_values)
@flaky
@pytest.mark.tensorflow
def test_mix_gan_reload():
"""Test reloading a GAN with multiple generators and discriminators."""
model_dir = tempfile.mkdtemp()
gan = ExampleGAN(n_generators=2,
n_discriminators=2,
learning_rate=0.01,
model_dir=model_dir)
gan.fit_gan(generate_data(gan, 1000, 100), generator_steps=0.5)
reloaded_gan = ExampleGAN(n_generators=2,
n_discriminators=2,
learning_rate=0.01,
model_dir=model_dir)
reloaded_gan.restore()
# See if it has done a plausible job of learning the distribution.
means = 10 * np.random.random([1000, 1])
batch_size = len(means)
noise_input = gan.get_noise_batch(batch_size=batch_size)
for i in range(2):
values = gan.predict_gan_generator(noise_input=noise_input,
conditional_inputs=[means],
generator_index=i)
reloaded_values = reloaded_gan.predict_gan_generator(
noise_input=noise_input,
conditional_inputs=[means],
generator_index=i)
assert np.all(values == reloaded_values)
assert gan.get_global_step() == 1000
# No training has been done after reload
assert reloaded_gan.get_global_step() == 0
@flaky
@pytest.mark.tensorflow
def test_mix_gan():
"""Test a GAN with multiple generators and discriminators."""
gan = ExampleGAN(n_generators=2, n_discriminators=2, learning_rate=0.01)
gan.fit_gan(generate_data(gan, 1000, 100),
generator_steps=0.5,
checkpoint_interval=0)
# See if it has done a plausible job of learning the distribution.
means = 10 * np.random.random([1000, 1])
for i in range(2):
values = gan.predict_gan_generator(conditional_inputs=[means],
generator_index=i)
deltas = values - means
assert abs(np.mean(deltas)) < 1.0
assert np.std(deltas) > 1.0
assert gan.get_global_step() == 1000
@flaky
@pytest.mark.tensorflow
def test_wgan():
"""Test fitting a conditional WGAN."""
class ExampleWGAN(dc.models.WGAN):
def get_noise_input_shape(self):
return (2,)
def get_data_input_shapes(self):
return [(1,)]
def get_conditional_input_shapes(self):
return [(1,)]
def create_generator(self):
noise_input = Input(self.get_noise_input_shape())
conditional_input = Input(self.get_conditional_input_shapes()[0])
inputs = [noise_input, conditional_input]
gen_in = Concatenate(axis=1)(inputs)
output = Dense(1)(gen_in)
return tf.keras.Model(inputs=inputs, outputs=output)
def create_discriminator(self):
data_input = Input(self.get_data_input_shapes()[0])
conditional_input = Input(self.get_conditional_input_shapes()[0])
inputs = [data_input, conditional_input]
discrim_in = Concatenate(axis=1)(inputs)
dense = Dense(10, activation=tf.nn.relu)(discrim_in)
output = Dense(1)(dense)
return tf.keras.Model(inputs=inputs, outputs=output)
# We have to set the gradient penalty very small because the generator's
# output is only a single number, so the default penalty would constrain
# it far too much.
gan = ExampleWGAN(learning_rate=0.01, gradient_penalty=0.1)
gan.fit_gan(generate_data(gan, 1000, 100), generator_steps=0.1)
# See if it has done a plausible job of learning the distribution.
means = 10 * np.random.random([1000, 1])
values = gan.predict_gan_generator(conditional_inputs=[means])
deltas = values - means
assert abs(np.mean(deltas)) < 1.0
assert np.std(deltas) > 1.0
@flaky
@pytest.mark.tensorflow
def test_wgan_reload():
"""Test fitting a conditional WGAN."""
class ExampleWGAN(dc.models.WGAN):
def get_noise_input_shape(self):
return (2,)
def get_data_input_shapes(self):
return [(1,)]
def get_conditional_input_shapes(self):
return [(1,)]
def create_generator(self):
noise_input = Input(self.get_noise_input_shape())
conditional_input = Input(self.get_conditional_input_shapes()[0])
inputs = [noise_input, conditional_input]
gen_in = Concatenate(axis=1)(inputs)
output = Dense(1)(gen_in)
return tf.keras.Model(inputs=inputs, outputs=output)
def create_discriminator(self):
data_input = Input(self.get_data_input_shapes()[0])
conditional_input = Input(self.get_conditional_input_shapes()[0])
inputs = [data_input, conditional_input]
discrim_in = Concatenate(axis=1)(inputs)
dense = Dense(10, activation=tf.nn.relu)(discrim_in)
output = Dense(1)(dense)
return tf.keras.Model(inputs=inputs, outputs=output)
# We have to set the gradient penalty very small because the generator's
# output is only a single number, so the default penalty would constrain
# it far too much.
model_dir = tempfile.mkdtemp()
gan = ExampleWGAN(learning_rate=0.01,
gradient_penalty=0.1,
model_dir=model_dir)
gan.fit_gan(generate_data(gan, 1000, 100), generator_steps=0.1)
reloaded_gan = ExampleWGAN(learning_rate=0.01,
gradient_penalty=0.1,
model_dir=model_dir)
reloaded_gan.restore()
# See if it has done a plausible job of learning the distribution.
means = 10 * np.random.random([1000, 1])
batch_size = len(means)
noise_input = gan.get_noise_batch(batch_size=batch_size)
values = gan.predict_gan_generator(noise_input=noise_input,
conditional_inputs=[means])
reloaded_values = reloaded_gan.predict_gan_generator(
noise_input=noise_input, conditional_inputs=[means])
assert np.all(values == reloaded_values)
<file_sep>"""
Script that trains Sklearn multitask models on the sider dataset
@Author <NAME>, <NAME>
"""
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
import os
import shutil
import numpy as np
import deepchem as dc
from sklearn.ensemble import RandomForestClassifier
sider_tasks, datasets, transformers = dc.molnet.load_sider()
train_dataset, valid_dataset, test_dataset = datasets
metric = dc.metrics.Metric(
dc.metrics.roc_auc_score, np.mean, mode="classification")
def model_builder(model_dir):
sklearn_model = RandomForestClassifier(
class_weight="balanced", n_estimators=100)
return dc.models.SklearnModel(sklearn_model, model_dir)
model = dc.models.SingletaskToMultitask(sider_tasks, model_builder)
# Fit trained model
model.fit(train_dataset)
model.save()
print("About to evaluate model")
train_scores = model.evaluate(train_dataset, [metric], transformers)
valid_scores = model.evaluate(valid_dataset, [metric], transformers)
print("Train scores")
print(train_scores)
print("Validation scores")
print(valid_scores)
<file_sep>#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 7 00:07:10 2017
@author: zqwu
"""
import deepchem
hps = {}
hps['tf'] = {
'layer_sizes': [1500],
'weight_init_stddevs': [0.02],
'bias_init_consts': [1.],
'dropouts': [0.5],
'penalty': 0.1,
'penalty_type': 'l2',
'batch_size': 50,
'nb_epoch': 10,
'learning_rate': 0.001
}
hps['tf_robust'] = {
'layer_sizes': [1500],
'weight_init_stddevs': [0.02],
'bias_init_consts': [1.],
'dropouts': [0.5],
'bypass_layer_sizes': [200],
'bypass_weight_init_stddevs': [0.02],
'bypass_bias_init_consts': [1.],
'bypass_dropouts': [0.5],
'penalty': 0.1,
'penalty_type': 'l2',
'batch_size': 50,
'nb_epoch': 10,
'learning_rate': 0.0005
}
hps['logreg'] = {
'penalty': 1.,
'penalty_type': 'l2',
}
hps['irv'] = {
'penalty': 0.,
'batch_size': 50,
'nb_epoch': 10,
'learning_rate': 0.001,
'n_K': 10
}
hps['graphconv'] = {
'batch_size': 64,
'nb_epoch': 40,
'learning_rate': 0.0005,
'n_filters': 64,
'n_fully_connected_nodes': 128,
'seed': 123
}
hps['dag'] = {
'batch_size': 64,
'nb_epoch': 50,
'learning_rate': 0.0005,
'n_graph_feat': 30,
'default_max_atoms': 60,
'seed': 123
}
hps['weave'] = {
'batch_size': 64,
'nb_epoch': 40,
'learning_rate': 0.0005,
'n_graph_feat': 128,
'n_pair_feat': 14,
'seed': 123
}
hps['textcnn'] = {
'batch_size': 64,
'nb_epoch': 40,
'learning_rate': 0.0005,
'n_embedding': 75,
'filter_sizes': [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 15, 20],
'num_filters': [100, 200, 200, 200, 200, 100, 100, 100, 100, 100, 160, 160],
'seed': 123
}
hps['rf'] = {'n_estimators': 500}
hps['kernelsvm'] = {'C': 1.0, 'gamma': 0.05}
hps['xgb'] = {
'max_depth': 5,
'learning_rate': 0.05,
'n_estimators': 3000,
'gamma': 0.,
'min_child_weight': 5,
'max_delta_step': 1,
'subsample': 0.53,
'colsample_bytree': 0.66,
'colsample_bylevel': 1,
'reg_alpha': 0,
'reg_lambda': 1,
'scale_pos_weight': 1,
'base_score': 0.5,
'seed': 2016,
'early_stopping_rounds': 100
}
hps['tf_regression'] = {
'layer_sizes': [1000, 1000],
'weight_init_stddevs': [0.02, 0.02],
'bias_init_consts': [1., 1.],
'dropouts': [0.25, 0.25],
'penalty': 0.0005,
'penalty_type': 'l2',
'batch_size': 128,
'nb_epoch': 50,
'learning_rate': 0.0008
}
hps['tf_regression_ft'] = {
'layer_sizes': [400, 100, 100],
'weight_init_stddevs': [0.05, 0.1, 0.1],
'bias_init_consts': [0., 0., 0.],
'dropouts': [0.01, 0.01, 0.01],
'penalty': 0.,
'penalty_type': 'l2',
'batch_size': 25,
'nb_epoch': 50,
'learning_rate': 0.001,
'fit_transformers': deepchem.trans.CoulombFitTransformer
}
hps['rf_regression'] = {'n_estimators': 500}
hps['krr'] = {'alpha': 1e-3}
hps['krr_ft'] = {'alpha': 1e-3}
hps['graphconvreg'] = {
'batch_size': 128,
'nb_epoch': 100,
'learning_rate': 0.0005,
'n_filters': 128,
'n_fully_connected_nodes': 256,
'seed': 123
}
hps['dtnn'] = {
'batch_size': 64,
'nb_epoch': 100,
'learning_rate': 0.001,
'n_embedding': 50,
'n_distance': 170,
'seed': 123
}
hps['dag_regression'] = {
'batch_size': 64,
'nb_epoch': 100,
'learning_rate': 0.0005,
'n_graph_feat': 30,
'default_max_atoms': 60,
'seed': 123
}
hps['weave_regression'] = {
'batch_size': 64,
'nb_epoch': 100,
'learning_rate': 0.0005,
'n_graph_feat': 128,
'n_pair_feat': 14,
'seed': 123
}
hps['textcnn_regression'] = {
'batch_size': 64,
'nb_epoch': 100,
'learning_rate': 0.0005,
'n_embedding': 75,
'filter_sizes': [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 15, 20],
'num_filters': [100, 200, 200, 200, 200, 100, 100, 100, 100, 100, 160, 160],
'seed': 123
}
hps['ani'] = {
'batch_size': 32,
'nb_epoch': 100,
'learning_rate': 0.00005,
'layer_structures': [20, 10, 10],
'seed': 123
}
hps['mpnn'] = {
'batch_size': 16,
'nb_epoch': 50,
'learning_rate': 0.001,
'T': 2,
'M': 5,
'seed': 123
}
hps['xgb_regression'] = {
'max_depth': 5,
'learning_rate': 0.05,
'n_estimators': 3000,
'gamma': 0.,
'min_child_weight': 5,
'max_delta_step': 1,
'subsample': 0.53,
'colsample_bytree': 0.66,
'colsample_bylevel': 1,
'reg_alpha': 0,
'reg_lambda': 1,
'scale_pos_weight': 1,
'base_score': 0.5,
'seed': 2016,
'early_stopping_rounds': 100
}
hps['siamese'] = {
'n_pos': 1,
'n_neg': 1,
'test_batch_size': 128,
'n_filters': [64, 128, 64],
'n_fully_connected_nodes': [128],
'nb_epochs': 1,
'n_train_trials': 2000,
'n_eval_trials': 20,
'learning_rate': 1e-4
}
hps['res'] = {
'n_pos': 1,
'n_neg': 1,
'test_batch_size': 128,
'n_filters': [64, 128, 64],
'n_fully_connected_nodes': [128],
'max_depth': 3,
'nb_epochs': 1,
'n_train_trials': 2000,
'n_eval_trials': 20,
'learning_rate': 1e-4
}
hps['attn'] = {
'n_pos': 1,
'n_neg': 1,
'test_batch_size': 128,
'n_filters': [64, 128, 64],
'n_fully_connected_nodes': [128],
'max_depth': 3,
'nb_epochs': 1,
'n_train_trials': 2000,
'n_eval_trials': 20,
'learning_rate': 1e-4
}
<file_sep>import pytest
try:
import torch
# import torch.nn as nn
# import torch.nn.functional as F
import numpy as np
has_torch = True
except ModuleNotFoundError:
has_torch = False
pass
@pytest.mark.torch
def test_graph_convolution_layer():
from deepchem.models.torch_models.layers import MolGANConvolutionLayer
vertices = 9
nodes = 5
edges = 5
units = 128
layer = MolGANConvolutionLayer(units=units, edges=edges, nodes=nodes)
adjacency_tensor = torch.randn((1, vertices, vertices, edges))
node_tensor = torch.randn((1, vertices, nodes))
output = layer([adjacency_tensor, node_tensor])
output_tf = [
(1, 9, 9, 5), (1, 9, 5), (1, 9, 128)
] # None has been converted to 1 as batch size is taken as 1 in torch
# Testing Shapes
assert output[0].shape == output_tf[0] # adjacency_tensor
assert output[1].shape == output_tf[1] # node_tensor
assert output[2].shape == output_tf[2] # output of the layer
assert output[0].shape == torch.Size([1, vertices, vertices,
edges]) # adjacency_tensor
assert output[1].shape == torch.Size([1, vertices, nodes]) # node_tensor
assert output[2].shape == torch.Size([1, vertices,
units]) # output of the layer
# Testing values
assert layer.units == units
assert layer.activation == torch.tanh
assert layer.edges == 5
assert layer.dropout_rate == 0.0
@pytest.mark.torch
def test_graph_convolution_layer_values():
from deepchem.models.torch_models.layers import MolGANConvolutionLayer
vertices = 9
nodes = 5
edges = 5
units = 128
torch.manual_seed(21) # Setting seed for reproducibility
layer = MolGANConvolutionLayer(units=units, edges=edges, nodes=nodes)
tf_weights = np.load(
'deepchem/models/tests/assets/molgan_conv_layer_weights.npy',
allow_pickle=True).item()
with torch.no_grad():
for idx, dense in enumerate(layer.dense1):
# Dense1 is a list of dense layers
weight_name = f'layer1/dense_{idx+4}/kernel:0'
bias_name = f'layer1/dense_{idx+4}/bias:0'
dense.weight.data = torch.from_numpy(
np.transpose(tf_weights[weight_name]))
dense.bias.data = torch.from_numpy(tf_weights[bias_name])
layer.dense2.weight.data = torch.from_numpy(
np.transpose(tf_weights['layer1/dense_8/kernel:0']))
layer.dense2.bias.data = torch.from_numpy(
tf_weights['layer1/dense_8/bias:0'])
adjacency_tensor = torch.randn((1, vertices, vertices, edges))
node_tensor = torch.randn((1, vertices, nodes))
output = layer([adjacency_tensor, node_tensor])
adjacency_tensor = torch.from_numpy(
np.load('deepchem/models/tests/assets/molgan_adj_tensor.npy').astype(
np.float32))
node_tensor = torch.from_numpy(
np.load('deepchem/models/tests/assets/molgan_nod_tensor.npy').astype(
np.float32))
output = layer([adjacency_tensor, node_tensor])
output_tensor = torch.from_numpy(
np.load('deepchem/models/tests/assets/molgan_conv_layer_op.npy').astype(
np.float32))
# Testing Values
assert torch.allclose(output[0], adjacency_tensor, atol=1e-06)
assert torch.allclose(output[1], node_tensor, atol=1e-06)
assert torch.allclose(output[2], output_tensor, atol=1e-04)
@pytest.mark.torch
def test_aggregation_layer_shape():
from deepchem.models.torch_models.layers import MolGANAggregationLayer
vertices = 9
units = 128
layer = MolGANAggregationLayer(units=units)
hidden_tensor = torch.randn((1, vertices, units))
output = layer(hidden_tensor)
output_tf = (
1, 128
) # None has been converted to 1 as batch size is taken as 1 in torch
# Testing Shapes with TF Model Output
assert output.shape == output_tf
# Testing Shapes
assert output.shape == (1, units)
assert layer.units == units
assert layer.activation == torch.tanh
assert layer.dropout_rate == 0.0
@pytest.mark.torch
def test_aggregation_layer_values():
from deepchem.models.torch_models.layers import MolGANAggregationLayer
units = 128
torch.manual_seed(21) # Setting seed for reproducibility
layer = MolGANAggregationLayer(units=units, name='layer1')
tf_weights = np.load(
'deepchem/models/tests/assets/molgan_agg_layer_weights.npy',
allow_pickle=True).item()
with torch.no_grad():
layer.d1.weight.data = torch.from_numpy(
np.transpose(tf_weights['layer1/dense_27/kernel:0']))
layer.d1.bias.data = torch.from_numpy(
tf_weights['layer1/dense_27/bias:0'])
layer.d2.weight.data = torch.from_numpy(
np.transpose(tf_weights['layer1/dense_28/kernel:0']))
layer.d2.bias.data = torch.from_numpy(
tf_weights['layer1/dense_28/bias:0'])
hidden_tensor = torch.from_numpy(
np.load('deepchem/models/tests/assets/molgan_agg_tensor.npy').astype(
np.float32))
output = layer(hidden_tensor)
output_tensor = torch.from_numpy(
np.load('deepchem/models/tests/assets/molgan_agg_layer_op.npy').astype(
np.float32))
# Testing Values
assert torch.allclose(output, output_tensor, atol=1e-04)
@pytest.mark.torch
def test_multigraph_convolution_layer_shape():
from deepchem.models.torch_models.layers import MolGANMultiConvolutionLayer
vertices = 9
nodes = 5
edges = 5
first_convolution_unit = 128
second_convolution_unit = 64
units = [first_convolution_unit, second_convolution_unit]
layer = MolGANMultiConvolutionLayer(units=units, edges=edges)
adjacency_tensor = torch.randn((1, vertices, vertices, edges))
node_tensor = torch.randn((1, vertices, nodes))
model = layer([adjacency_tensor, node_tensor])
assert model.shape == (1, vertices, second_convolution_unit)
assert layer.units == units
assert layer.activation == torch.tanh
assert layer.edges == 5
assert layer.dropout_rate == 0.0
@pytest.mark.torch
def test_multigraph_convolution_layer_values():
from deepchem.models.torch_models.layers import MolGANMultiConvolutionLayer
nodes = 5
edges = 5
first_convolution_unit = 128
second_convolution_unit = 64
units = [first_convolution_unit, second_convolution_unit]
torch.manual_seed(21) # Setting seed for reproducibility
layer_multi_conv = MolGANMultiConvolutionLayer(units=units,
nodes=nodes,
edges=edges,
name='layer1')
tf_weights = np.load(
'deepchem/models/tests/assets/molgan_multi_conv_layer_weights.npy',
allow_pickle=True).item()
with torch.no_grad():
x = 10
# testing first convolution layer
# dense1 layer - list of dense layers
for idx, dense in enumerate(layer_multi_conv.first_convolution.dense1):
weight_name = f'layer1//dense_{idx+x}/kernel:0'
bias_name = f'layer1//dense_{idx+x}/bias:0'
dense.weight.data = torch.from_numpy(
np.transpose(tf_weights[weight_name]))
dense.bias.data = torch.from_numpy(tf_weights[bias_name])
idx += 1
# dense2 layer - single dense layer
layer_multi_conv.first_convolution.dense2.weight.data = torch.from_numpy(
np.transpose(tf_weights[f'layer1//dense_{idx+x}/kernel:0']))
layer_multi_conv.first_convolution.dense2.bias.data = torch.from_numpy(
tf_weights[f'layer1//dense_{idx+x}/bias:0'])
x += 5
# testing rest of the convolution layer
for idx_, layer in enumerate(layer_multi_conv.gcl):
# dense1 layer - list of dense layers
for idx, dense in enumerate(layer.dense1):
weight_name = f'layer1//dense_{idx+x}/kernel:0'
bias_name = f'layer1//dense_{idx+x}/bias:0'
dense.weight.data = torch.from_numpy(
np.transpose(tf_weights[weight_name]))
dense.bias.data = torch.from_numpy(tf_weights[bias_name])
x += 1
# dense2 layer - single dense layer
layer.dense2.weight.data = torch.from_numpy(
np.transpose(tf_weights[f'layer1//dense_{idx+x}/kernel:0']))
layer.dense2.bias.data = torch.from_numpy(
tf_weights[f'layer1//dense_{idx+x}/bias:0'])
# Loading input tensors
adjacency_tensor = torch.from_numpy(
np.load('deepchem/models/tests/assets/molgan_adj_tensor.npy').astype(
np.float32))
node_tensor = torch.from_numpy(
np.load('deepchem/models/tests/assets/molgan_nod_tensor.npy').astype(
np.float32))
# Testing output
output = layer_multi_conv([adjacency_tensor, node_tensor])
output_tensor = torch.from_numpy(
np.load('deepchem/models/tests/assets/molgan_multi_conv_layer_op.npy').
astype(np.float32))
assert torch.allclose(output, output_tensor, atol=1e-04)
@pytest.mark.torch
def test_graph_encoder_layer_shape():
from deepchem.models.torch_models.layers import MolGANEncoderLayer
vertices = 9
nodes = 5
edges = 5
first_convolution_unit = 128
second_convolution_unit = 64
aggregation_unit = 128
units = [(first_convolution_unit, second_convolution_unit),
aggregation_unit]
layer = MolGANEncoderLayer(units=units, edges=edges)
adjacency_tensor = torch.randn((1, vertices, vertices, edges))
node_tensor = torch.randn((1, vertices, nodes))
model = layer([adjacency_tensor, node_tensor])
assert model.shape == (1, aggregation_unit)
assert layer.graph_convolution_units == (first_convolution_unit,
second_convolution_unit)
assert layer.auxiliary_units == aggregation_unit
assert layer.activation == torch.tanh
assert layer.edges == 5
assert layer.dropout_rate == 0.0
@pytest.mark.torch
def test_graph_encoder_layer_values():
"""
Test to check the Values of the Graph Encoder Layer
It first loads the weights of the TF model
    Then it starts transferring the weights to the torch model
1. MultiConvolution Layer
1.1 First Convolution Layer
1.2 Rest of the Convolution Layers
2. Aggregation Layer
Then it loads the input tensors and checks the output
"""
from deepchem.models.torch_models.layers import MolGANEncoderLayer
nodes = 5
edges = 5
first_convolution_unit = 128
second_convolution_unit = 64
aggregation_unit = 128
units = [(first_convolution_unit, second_convolution_unit),
aggregation_unit]
torch.manual_seed(21)
tf_weights = np.load(
'deepchem/models/tests/assets/molgan_encoder_layer_weights.npy',
allow_pickle=True).item()
torch_model_encoder = MolGANEncoderLayer(units=units,
nodes=nodes,
edges=edges,
name='layer1')
x = 12 # the starting number for the dense layers in the tf model weights
with torch.no_grad():
# Testing MultiConvolution Layer
# Testing First Convolution Layer
# dense1 layer - list of dense layers
for idx, dense in enumerate(
torch_model_encoder.multi_graph_convolution_layer.
first_convolution.dense1):
weight_name = f'layer1///dense_{idx+x}/kernel:0'
bias_name = f'layer1///dense_{idx+x}/bias:0'
dense.weight.data = torch.from_numpy(
np.transpose(tf_weights[weight_name]))
dense.bias.data = torch.from_numpy(tf_weights[bias_name])
idx += 1
# dense2 layer - single dense layer
torch_model_encoder.multi_graph_convolution_layer.first_convolution.dense2.weight.data = torch.from_numpy(
np.transpose(tf_weights[f'layer1///dense_{idx+x}/kernel:0']))
torch_model_encoder.multi_graph_convolution_layer.first_convolution.dense2.bias.data = torch.from_numpy(
tf_weights[f'layer1///dense_{idx+x}/bias:0'])
x += 5
# Testing rest of the Multi convolution layer
for idx_, layer in enumerate(
torch_model_encoder.multi_graph_convolution_layer.gcl):
# dense1 layer - list of dense layers
for idx, dense in enumerate(layer.dense1):
weight_name = f'layer1///dense_{idx+x}/kernel:0'
bias_name = f'layer1///dense_{idx+x}/bias:0'
dense.weight.data = torch.from_numpy(
np.transpose(tf_weights[weight_name]))
dense.bias.data = torch.from_numpy(tf_weights[bias_name])
x += 1
# dense2 layer - single dense layer
layer.dense2.weight.data = torch.from_numpy(
np.transpose(tf_weights[f'layer1///dense_{idx+x}/kernel:0']))
layer.dense2.bias.data = torch.from_numpy(
tf_weights[f'layer1///dense_{idx+x}/bias:0'])
# Testing Aggregation Layer
torch_model_encoder.graph_aggregation_layer.d1.weight.data = torch.from_numpy(
np.transpose(tf_weights['layer1//dense_22/kernel:0']))
torch_model_encoder.graph_aggregation_layer.d1.bias.data = torch.from_numpy(
tf_weights['layer1//dense_22/bias:0'])
torch_model_encoder.graph_aggregation_layer.d2.weight.data = torch.from_numpy(
np.transpose(tf_weights['layer1//dense_23/kernel:0']))
torch_model_encoder.graph_aggregation_layer.d2.bias.data = torch.from_numpy(
tf_weights['layer1//dense_23/bias:0'])
# Loading input tensors
adjacency_tensor = torch.from_numpy(
np.load('deepchem/models/tests/assets/molgan_adj_tensor.npy').astype(
np.float32))
node_tensor = torch.from_numpy(
np.load('deepchem/models/tests/assets/molgan_nod_tensor.npy').astype(
np.float32))
# Testing output
output = torch_model_encoder([adjacency_tensor, node_tensor])
output_tensor = torch.from_numpy(
np.load(
'deepchem/models/tests/assets/molgan_encoder_layer_op.npy').astype(
np.float32))
assert torch.allclose(output, output_tensor, atol=1e-04)
<file_sep>import unittest
import numpy as np
from deepchem.utils import geometry_utils
from deepchem.utils.geometry_utils import unit_vector
from deepchem.utils.geometry_utils import angle_between
from deepchem.utils.geometry_utils import compute_pairwise_distances
from deepchem.utils.geometry_utils import generate_random_unit_vector
from deepchem.utils.geometry_utils import generate_random_rotation_matrix
from deepchem.utils.geometry_utils import is_angle_within_cutoff
class TestGeometryUtils(unittest.TestCase):
def test_generate_random_unit_vector(self):
for _ in range(100):
u = generate_random_unit_vector()
# 3D vector with unit length
self.assertEqual(u.shape, (3,))
self.assertAlmostEqual(np.linalg.norm(u), 1.0)
def test_generate_random_rotation_matrix(self):
# very basic test, we check if rotations actually work in test_rotate_molecules
for _ in range(100):
m = generate_random_rotation_matrix()
self.assertEqual(m.shape, (3, 3))
def test_unit_vector(self):
for _ in range(10):
vector = np.random.rand(3)
norm_vector = unit_vector(vector)
self.assertAlmostEqual(np.linalg.norm(norm_vector), 1.0)
def test_angle_between(self):
for _ in range(10):
v1 = np.random.rand(3,)
v2 = np.random.rand(3,)
angle = angle_between(v1, v2)
self.assertLessEqual(angle, np.pi)
self.assertGreaterEqual(angle, 0.0)
self.assertAlmostEqual(angle_between(v1, v1), 0.0)
self.assertAlmostEqual(angle_between(v1, -v1), np.pi)
def test_is_angle_within_cutoff(self):
v1 = np.array([1, 0, 0])
v2 = np.array([-1, 0, 0])
angle_cutoff = 10
assert is_angle_within_cutoff(v1, v2, angle_cutoff)
def test_compute_pairwise_distances(self):
n1 = 10
n2 = 50
coords1 = np.random.rand(n1, 3)
coords2 = np.random.rand(n2, 3)
distance = compute_pairwise_distances(coords1, coords2)
self.assertEqual(distance.shape, (n1, n2))
self.assertTrue((distance >= 0).all())
        # random coords between 0 and 1, so the max possible distance is sqrt(3)
self.assertTrue((distance <= 3.0**0.5).all())
# check if correct distance metric was used
coords1 = np.array([[0, 0, 0], [1, 0, 0]])
coords2 = np.array([[1, 0, 0], [2, 0, 0], [3, 0, 0]])
distance = compute_pairwise_distances(coords1, coords2)
self.assertTrue((distance == [[1, 2, 3], [0, 1, 2]]).all())
def test_compute_centroid(self):
N = 10
coords = np.random.rand(N, 3)
centroid = geometry_utils.compute_centroid(coords)
assert centroid.shape == (3,)
    def test_subtract_centroid(self):
N = 10
coords = np.random.rand(N, 3)
centroid = geometry_utils.compute_centroid(coords)
new_coords = geometry_utils.subtract_centroid(coords, centroid)
assert new_coords.shape == (N, 3)
new_centroid = geometry_utils.compute_centroid(new_coords)
assert new_centroid.shape == (3,)
np.testing.assert_almost_equal(new_centroid,
np.zeros_like(new_centroid))
<file_sep>#!/usr/bin/python
#
# Copyright 2015 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tempfile
import numpy as np
import scipy.stats
import tensorflow as tf
from google.protobuf import text_format
from tensorflow.python.framework import test_util
from tensorflow.python.platform import googletest
from tensorflow.python.training import checkpoint_state_pb2
from deepchem.models.tensorflow_models import utils
test_random_seed = 20151102
class UtilsTest(test_util.TensorFlowTestCase):
def setUp(self):
super(UtilsTest, self).setUp()
np.random.seed(test_random_seed)
def testParseCheckpoint(self):
# parse CheckpointState proto
with tempfile.NamedTemporaryFile(mode='w+') as f:
cp = checkpoint_state_pb2.CheckpointState()
cp.model_checkpoint_path = 'my-checkpoint'
f.write(text_format.MessageToString(cp))
f.file.flush()
self.assertEqual(utils.ParseCheckpoint(f.name), 'my-checkpoint')
# parse path to actual checkpoint
with tempfile.NamedTemporaryFile(mode='w+') as f:
f.write('This is not a CheckpointState proto.')
f.file.flush()
self.assertEqual(utils.ParseCheckpoint(f.name), f.name)
def PrepareFeatures(self, features):
features = np.asarray(features, dtype=float)
features_t = tf.constant(features, dtype=tf.float32)
return features, features_t
def PrepareMask(self, features, mask):
mask = np.asarray(mask, dtype=float)
mask_t = tf.constant(mask, dtype=tf.float32)
# the provided mask has to be the same shape as features
expanded_mask = np.logical_not(
np.ones_like(features) * np.expand_dims(mask, -1))
masked_features = np.ma.masked_array(features, mask=expanded_mask)
return masked_features, mask_t
def Check(self, func, features, expected, axis=None, mask=None):
with self.session() as sess:
features, features_t = self.PrepareFeatures(features)
if mask is not None:
features, mask = self.PrepareMask(features, mask)
self.assertAllClose(
sess.run(func(features_t, reduction_indices=axis, mask=mask)),
expected)
def testMean(self):
self.Check(utils.Mean, features=[0, 1], expected=0.5)
self.Check(
utils.Mean, features=[[0, 1], [2, 3]], expected=[0.5, 2.5], axis=1)
self.Check(
utils.Mean,
features=[[[0, 1], [2, 3]], [[4, 5], [6, 7]]],
expected=[2.5, 4.5],
axis=[0, 2])
def testMeanWithMask(self):
self.Check(
utils.Mean, features=[[9999], [1], [2]], expected=1.5, mask=[0, 1, 1])
self.Check(
utils.Mean,
features=[[0, 1], [9999, 9999]],
expected=[0, 1],
axis=0,
mask=[1, 0])
self.Check(
utils.Mean,
features=[[[0, 1], [9999, 9999]], [[9999, 9999], [6, 7]]],
expected=[0.5, 6.5],
axis=[0, 2],
mask=[[1, 0], [0, 1]])
def testVariance(self):
self.Check(utils.Variance, features=[0, 1], expected=0.25)
self.Check(
utils.Variance, features=[[0, 2], [2, 3]], expected=[1, 0.25], axis=1)
self.Check(
utils.Variance,
features=[[[0, 1], [2, 3]], [[4, 5], [6, 7]]],
expected=[4.25, 4.25],
axis=[0, 2])
def testVarianceWithMask(self):
self.Check(
utils.Variance, features=[[0], [1], [2]], expected=0.25, mask=[0, 1, 1])
self.Check(
utils.Variance,
features=[[0, 2], [9999, 9999], [4, 4]],
expected=[4, 1],
axis=0,
mask=[1, 0, 1])
self.Check(
utils.Variance,
features=[[[0, 1], [9999, 9999]], [[9999, 9999], [6, 8]]],
expected=[0.25, 1],
axis=[0, 2],
mask=[[1, 0], [0, 1]])
def testMoment(self):
with self.session() as sess:
features = np.random.random((3, 4, 5))
features_t = tf.constant(features, dtype=tf.float32)
# test k = 1..4
for k in [1, 2, 3, 4]:
# central moments
self.assertAllClose(
sess.run(utils.Moment(k, features_t)[1]),
scipy.stats.moment(features, k, axis=None),
rtol=1e-5,
atol=1e-5)
# standardized moments
self.assertAllClose(
sess.run(utils.Moment(k, features_t, standardize=True)[1]),
np.divide(
scipy.stats.moment(features, k, axis=None),
np.power(features.std(), k)),
rtol=1e-5,
atol=1e-5)
# central across one axis
self.assertAllClose(
sess.run(utils.Moment(k, features_t, reduction_indices=1)[1]),
scipy.stats.moment(features, k, axis=1),
rtol=1e-5,
atol=1e-5)
# standardized across one axis
self.assertAllClose(
sess.run(
utils.Moment(
k, features_t, standardize=True, reduction_indices=1)[1]),
np.divide(
scipy.stats.moment(features, k, axis=1),
np.power(features.std(axis=1), k)),
rtol=1e-5,
atol=1e-5)
def testSkewness(self):
with self.session() as sess:
features = np.random.random((3, 4, 5))
features_t = tf.constant(features, dtype=tf.float32)
self.assertAllClose(
sess.run(utils.Skewness(features_t)),
scipy.stats.skew(features, axis=None),
rtol=1e-5,
atol=1e-5)
self.assertAllClose(
sess.run(utils.Skewness(features_t, 1)),
scipy.stats.skew(features, axis=1),
rtol=1e-5,
atol=1e-5)
def testKurtosis(self):
with self.session() as sess:
features = np.random.random((3, 4, 5))
features_t = tf.constant(features, dtype=tf.float32)
self.assertAllClose(
sess.run(utils.Kurtosis(features_t)),
scipy.stats.kurtosis(features, axis=None),
rtol=1e-5,
atol=1e-5)
self.assertAllClose(
sess.run(utils.Kurtosis(features_t, 1)),
scipy.stats.kurtosis(features, axis=1),
rtol=1e-5,
atol=1e-5)
if __name__ == '__main__':
googletest.main()
<file_sep># README for Kinase Example
The Kinase dataset is an in-house dataset from Merck that was first introduced in the following paper:
<NAME>, et al. "Is multitask deep learning practical for pharma?." Journal of chemical information and modeling 57.8 (2017): 2068-2076.
It contains 2500 Merck in-house compounds that were measured
for IC50 of inhibition on 99 protein kinases. Unlike most of
the other datasets featured in MoleculeNet, the Kinase
collection does not have structures for the compounds tested
since they were proprietary Merck compounds. However, the
collection does feature pre-computed descriptors for these
compounds.
Note that the original train/valid/test split from the source
data was preserved here, so the dataset loader doesn't allow for
alternate modes of splitting. Similarly, since the source data
came pre-featurized, it is not possible to apply alternative
featurizations.
This example features a few different models trained on this
dataset collection. In particular:
- `kinase_rf.py` trains a random forest model
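
As a rough sketch of what this might look like (illustrative only, not the exact
contents of `kinase_rf.py`; it assumes the `dc.molnet.load_kinase` loader and
scikit-learn are available):

```python
import numpy as np
import deepchem as dc
from sklearn.ensemble import RandomForestRegressor

# The loader returns the fixed train/valid/test split described above.
tasks, (train, valid, test), transformers = dc.molnet.load_kinase()

# One random forest per task, wrapped as a single multitask model.
def model_builder(model_dir):
    sklearn_model = RandomForestRegressor(n_estimators=100)
    return dc.models.SklearnModel(sklearn_model, model_dir)

model = dc.models.SingletaskToMultitask(tasks, model_builder)
model.fit(train)

metric = dc.metrics.Metric(dc.metrics.pearson_r2_score, np.mean)
print(model.evaluate(valid, [metric], transformers))
```

Because the collection is multitask, the sketch fits a separate forest per task
via `SingletaskToMultitask` rather than a single joint model.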
<file_sep>"""
NCI dataset loader.
Original Author - <NAME>
Author - <NAME>
"""
import os
import deepchem as dc
from deepchem.molnet.load_function.molnet_loader import TransformerGenerator, _MolnetLoader
from deepchem.data import Dataset
from typing import List, Optional, Tuple, Union
NCI_URL = "https://deepchemdata.s3-us-west-1.amazonaws.com/datasets/nci_unique.csv"
NCI_TASKS = [
'CCRF-CEM', 'HL-60(TB)', 'K-562', 'MOLT-4', 'RPMI-8226', 'SR', 'A549/ATCC',
'EKVX', 'HOP-62', 'HOP-92', 'NCI-H226', 'NCI-H23', 'NCI-H322M', 'NCI-H460',
'NCI-H522', 'COLO 205', 'HCC-2998', 'HCT-116', 'HCT-15', 'HT29', 'KM12',
'SW-620', 'SF-268', 'SF-295', 'SF-539', 'SNB-19', 'SNB-75', 'U251',
'LOX IMVI', 'MALME-3M', 'M14', 'MDA-MB-435', 'SK-MEL-2', 'SK-MEL-28',
'SK-MEL-5', 'UACC-257', 'UACC-62', 'IGR-OV1', 'OVCAR-3', 'OVCAR-4',
'OVCAR-5', 'OVCAR-8', 'NCI/ADR-RES', 'SK-OV-3', '786-0', 'A498', 'ACHN',
'CAKI-1', 'RXF 393', 'SN12C', 'TK-10', 'UO-31', 'PC-3', 'DU-145', 'MCF7',
'MDA-MB-231/ATCC', 'MDA-MB-468', 'HS 578T', 'BT-549', 'T-47D'
]
class _NCILoader(_MolnetLoader):
def create_dataset(self) -> Dataset:
dataset_file = os.path.join(self.data_dir, "nci_unique.csv")
if not os.path.exists(dataset_file):
dc.utils.data_utils.download_url(url=NCI_URL,
dest_dir=self.data_dir)
loader = dc.data.CSVLoader(tasks=self.tasks,
feature_field="smiles",
featurizer=self.featurizer)
return loader.create_dataset(dataset_file, shard_size=8192)
def load_nci(
featurizer: Union[dc.feat.Featurizer, str] = 'ECFP',
splitter: Union[dc.splits.Splitter, str, None] = 'random',
transformers: List[Union[TransformerGenerator, str]] = ['normalization'],
reload: bool = True,
data_dir: Optional[str] = None,
save_dir: Optional[str] = None,
**kwargs
) -> Tuple[List[str], Tuple[Dataset, ...], List[dc.trans.Transformer]]:
"""Load NCI dataset.
Parameters
----------
featurizer: Featurizer or str
the featurizer to use for processing the data. Alternatively you can pass
one of the names from dc.molnet.featurizers as a shortcut.
splitter: Splitter or str
the splitter to use for splitting the data into training, validation, and
test sets. Alternatively you can pass one of the names from
dc.molnet.splitters as a shortcut. If this is None, all the data
will be included in a single dataset.
transformers: list of TransformerGenerators or strings
the Transformers to apply to the data. Each one is specified by a
TransformerGenerator or, as a shortcut, one of the names from
dc.molnet.transformers.
reload: bool
if True, the first call for a particular featurizer and splitter will cache
the datasets to disk, and subsequent calls will reload the cached datasets.
data_dir: str
a directory to save the raw data in
save_dir: str
a directory to save the dataset in
"""
loader = _NCILoader(featurizer, splitter, transformers, NCI_TASKS, data_dir,
save_dir, **kwargs)
return loader.load_dataset('nci', reload)
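

# A minimal usage sketch (illustrative only; the featurizer and splitter shown
# are just example choices):
#
#   import deepchem as dc
#   tasks, (train, valid, test), transformers = dc.molnet.load_nci(
#       featurizer='ECFP', splitter='random')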
<file_sep>"""
Featurizers for inorganic crystals.
"""
# flake8: noqa
from deepchem.feat.material_featurizers.element_property_fingerprint import ElementPropertyFingerprint
from deepchem.feat.material_featurizers.sine_coulomb_matrix import SineCoulombMatrix
from deepchem.feat.material_featurizers.cgcnn_featurizer import CGCNNFeaturizer
from deepchem.feat.material_featurizers.elemnet_featurizer import ElemNetFeaturizer
from deepchem.feat.material_featurizers.lcnn_featurizer import LCNNFeaturizer
<file_sep>import unittest
import deepchem as dc
import numpy as np
try:
from deepchem.models import GCNModel, MultitaskClassifier
from deepchem.models.lightning.dc_lightning_module import DCLightningModule
from deepchem.models.lightning.dc_lightning_dataset_module import DCLightningDatasetModule, collate_dataset_wrapper
from deepchem.metrics import to_one_hot
import pytorch_lightning as pl # noqa
PYTORCH_LIGHTNING_IMPORT_FAILED = False
except ImportError:
PYTORCH_LIGHTNING_IMPORT_FAILED = True
class TestDCLightningModule(unittest.TestCase):
@unittest.skipIf(PYTORCH_LIGHTNING_IMPORT_FAILED,
'PyTorch Lightning is not installed')
def test_multitask_classifier(self):
class TestMultitaskDatasetBatch:
def __init__(self, batch):
X = [batch[0]]
y = [
np.array([
to_one_hot(b.flatten(), 2).reshape(2, 2)
for b in batch[1]
])
]
w = [batch[2]]
self.batch_list = [X, y, w]
def collate_dataset_wrapper(batch):
return TestMultitaskDatasetBatch(batch)
tasks, datasets, _ = dc.molnet.load_clintox()
_, valid_dataset, _ = datasets
model = MultitaskClassifier(n_tasks=len(tasks),
n_features=1024,
layer_sizes=[1000],
dropouts=0.2,
learning_rate=0.0001)
molnet_dataloader = DCLightningDatasetModule(valid_dataset, 6,
collate_dataset_wrapper)
lightning_module = DCLightningModule(model)
trainer = pl.Trainer(max_epochs=1)
trainer.fit(lightning_module, molnet_dataloader)
@unittest.skipIf(PYTORCH_LIGHTNING_IMPORT_FAILED,
'PyTorch Lightning is not installed')
def test_gcn_model(self):
train_smiles = [
"C1CCC1", "CCC", "C1CCC1", "CCC", "C1CCC1", "CCC", "C1CCC1", "CCC",
"C1CCC1", "CCC"
]
train_labels = [0., 1., 0., 1., 0., 1., 0., 1., 0., 1.]
model = GCNModel(mode='classification',
n_tasks=1,
batch_size=2,
learning_rate=0.001)
featurizer = dc.feat.MolGraphConvFeaturizer()
X = featurizer.featurize(train_smiles)
sample = dc.data.NumpyDataset(X=X, y=train_labels)
smiles_datasetmodule = DCLightningDatasetModule(
sample, 2, collate_dataset_wrapper)
lightning_module = DCLightningModule(model)
trainer = pl.Trainer(max_epochs=1)
trainer.fit(lightning_module, smiles_datasetmodule)
<file_sep>import numpy as np
import tensorflow as tf
from collections.abc import Sequence as SequenceCollection
import logging
from deepchem.models import KerasModel, layers
from deepchem.models.losses import L2Loss, SparseSoftmaxCrossEntropy
from deepchem.models.keras_model import _StandardLoss
from tensorflow.keras.layers import Input, Dense, Dropout, ReLU, Concatenate, Add, Multiply, Softmax
logger = logging.getLogger(__name__)
class ProgressiveMultitaskRegressor(KerasModel):
"""Implements a progressive multitask neural network for regression.
Progressive networks allow for multitask learning where each task
gets a new column of weights. As a result, there is no exponential
forgetting where previous tasks are ignored.
References
----------
See [1]_ for a full description of the progressive architecture
.. [1] Rusu, <NAME>., et al. "Progressive neural networks." arXiv preprint
arXiv:1606.04671 (2016).
"""
def __init__(self,
n_tasks,
n_features,
alpha_init_stddevs=0.02,
layer_sizes=[1000],
weight_init_stddevs=0.02,
bias_init_consts=1.0,
weight_decay_penalty=0.0,
weight_decay_penalty_type="l2",
dropouts=0.5,
activation_fns=tf.nn.relu,
n_outputs=1,
**kwargs):
"""Creates a progressive network.
Only listing parameters specific to progressive networks here.
Parameters
----------
n_tasks: int
Number of tasks
n_features: int
Number of input features
alpha_init_stddevs: list
List of standard-deviations for alpha in adapter layers.
layer_sizes: list
the size of each dense layer in the network. The length of this list determines the number of layers.
weight_init_stddevs: list or float
the standard deviation of the distribution to use for weight initialization of each layer. The length
of this list should equal len(layer_sizes)+1. The final element corresponds to the output layer.
Alternatively this may be a single value instead of a list, in which case the same value is used for every layer.
bias_init_consts: list or float
the value to initialize the biases in each layer to. The length of this list should equal len(layer_sizes)+1.
The final element corresponds to the output layer. Alternatively this may be a single value instead of a list,
in which case the same value is used for every layer.
weight_decay_penalty: float
the magnitude of the weight decay penalty to use
weight_decay_penalty_type: str
the type of penalty to use for weight decay, either 'l1' or 'l2'
dropouts: list or float
            the dropout probability to use for each layer. The length of this list should equal len(layer_sizes).
Alternatively this may be a single value instead of a list, in which case the same value is used for every layer.
activation_fns: list or object
the Tensorflow activation function to apply to each layer. The length of this list should equal
len(layer_sizes). Alternatively this may be a single value instead of a list, in which case the
same value is used for every layer.
"""
if weight_decay_penalty != 0.0:
raise ValueError('Weight decay is not currently supported')
self.n_tasks = n_tasks
self.n_features = n_features
self.layer_sizes = layer_sizes
self.alpha_init_stddevs = alpha_init_stddevs
self.weight_init_stddevs = weight_init_stddevs
self.bias_init_consts = bias_init_consts
self.dropouts = dropouts
self.activation_fns = activation_fns
self.n_outputs = n_outputs
n_layers = len(layer_sizes)
if not isinstance(weight_init_stddevs, SequenceCollection):
self.weight_init_stddevs = [weight_init_stddevs] * n_layers
if not isinstance(alpha_init_stddevs, SequenceCollection):
self.alpha_init_stddevs = [alpha_init_stddevs] * n_layers
if not isinstance(bias_init_consts, SequenceCollection):
self.bias_init_consts = [bias_init_consts] * n_layers
if not isinstance(dropouts, SequenceCollection):
self.dropouts = [dropouts] * n_layers
if not isinstance(activation_fns, SequenceCollection):
self.activation_fns = [activation_fns] * n_layers
# Add the input features.
mol_features = Input(shape=(n_features,))
all_layers = {}
outputs = []
self._task_layers = []
for task in range(self.n_tasks):
task_layers = []
for i in range(n_layers):
if i == 0:
prev_layer = mol_features
else:
prev_layer = all_layers[(i - 1, task)]
if task > 0:
lateral_contrib, trainables = self.add_adapter(
all_layers, task, i)
task_layers.extend(trainables)
dense = Dense(
layer_sizes[i],
kernel_initializer=tf.keras.initializers.TruncatedNormal(
stddev=self.weight_init_stddevs[i]),
bias_initializer=tf.constant_initializer(
value=self.bias_init_consts[i]))
layer = dense(prev_layer)
task_layers.append(dense)
if i > 0 and task > 0:
layer = Add()([layer, lateral_contrib])
assert self.activation_fns[
i] is tf.nn.relu, "Only ReLU is supported"
layer = ReLU()(layer)
if self.dropouts[i] > 0.0:
layer = Dropout(self.dropouts[i])(layer)
all_layers[(i, task)] = layer
prev_layer = all_layers[(n_layers - 1, task)]
dense = Dense(
n_outputs,
kernel_initializer=tf.keras.initializers.TruncatedNormal(
stddev=self.weight_init_stddevs[-1]),
bias_initializer=tf.constant_initializer(
value=self.bias_init_consts[-1]))
layer = dense(prev_layer)
task_layers.append(dense)
if task > 0:
lateral_contrib, trainables = self.add_adapter(
all_layers, task, n_layers)
task_layers.extend(trainables)
layer = Add()([layer, lateral_contrib])
output_layer = self.create_output(layer)
outputs.append(output_layer)
self._task_layers.append(task_layers)
outputs = layers.Stack(axis=1)(outputs)
model = tf.keras.Model(inputs=mol_features, outputs=outputs)
super(ProgressiveMultitaskRegressor,
self).__init__(model, self.create_loss(), **kwargs)
def create_loss(self):
return L2Loss()
def create_output(self, layer):
return layer
def add_adapter(self, all_layers, task, layer_num):
"""Add an adapter connection for given task/layer combo"""
i = layer_num
prev_layers = []
trainable_layers = []
# Handle output layer
if i < len(self.layer_sizes):
layer_sizes = self.layer_sizes
alpha_init_stddev = self.alpha_init_stddevs[i]
weight_init_stddev = self.weight_init_stddevs[i]
bias_init_const = self.bias_init_consts[i]
elif i == len(self.layer_sizes):
layer_sizes = self.layer_sizes + [self.n_outputs]
alpha_init_stddev = self.alpha_init_stddevs[-1]
weight_init_stddev = self.weight_init_stddevs[-1]
bias_init_const = self.bias_init_consts[-1]
else:
raise ValueError("layer_num too large for add_adapter.")
# Iterate over all previous tasks.
for prev_task in range(task):
prev_layers.append(all_layers[(i - 1, prev_task)])
# prev_layers is a list with elements of size
# (batch_size, layer_sizes[i-1])
if len(prev_layers) == 1:
prev_layer = prev_layers[0]
else:
prev_layer = Concatenate(axis=1)(prev_layers)
alpha = layers.Variable(
tf.random.truncated_normal((1,), stddev=alpha_init_stddev))
trainable_layers.append(alpha)
prev_layer = Multiply()([prev_layer, alpha([prev_layer])])
dense1 = Dense(
layer_sizes[i - 1],
kernel_initializer=tf.keras.initializers.TruncatedNormal(
stddev=weight_init_stddev),
bias_initializer=tf.constant_initializer(value=bias_init_const))
prev_layer = dense1(prev_layer)
trainable_layers.append(dense1)
dense2 = Dense(layer_sizes[i],
kernel_initializer=tf.keras.initializers.TruncatedNormal(
stddev=weight_init_stddev),
use_bias=False)
prev_layer = dense2(prev_layer)
trainable_layers.append(dense2)
return prev_layer, trainable_layers
def fit(self,
dataset,
nb_epoch=10,
max_checkpoints_to_keep=5,
checkpoint_interval=1000,
deterministic=False,
restore=False,
**kwargs):
for task in range(self.n_tasks):
self.fit_task(dataset,
task,
nb_epoch=nb_epoch,
max_checkpoints_to_keep=max_checkpoints_to_keep,
checkpoint_interval=checkpoint_interval,
deterministic=deterministic,
restore=restore,
**kwargs)
def fit_task(self,
dataset,
task,
nb_epoch=10,
max_checkpoints_to_keep=5,
checkpoint_interval=1000,
deterministic=False,
restore=False,
**kwargs):
"""Fit one task."""
shape = dataset.get_shape()
batch = [[np.zeros((self.batch_size,) + s[1:])] for s in shape]
self._create_training_ops(batch)
generator = self.default_generator(dataset,
epochs=nb_epoch,
deterministic=deterministic)
variables = []
for layer in self._task_layers[task]:
variables += layer.trainable_variables
loss = TaskLoss(self.model, self.create_loss(), task)
self.fit_generator(generator,
max_checkpoints_to_keep,
checkpoint_interval,
restore,
variables=variables,
loss=loss)
class ProgressiveMultitaskClassifier(ProgressiveMultitaskRegressor):
"""Implements a progressive multitask neural network for classification.
Progressive Networks: https://arxiv.org/pdf/1606.04671v3.pdf
Progressive networks allow for multitask learning where each task
gets a new column of weights. As a result, there is no exponential
forgetting where previous tasks are ignored.
"""
def __init__(self,
n_tasks,
n_features,
alpha_init_stddevs=0.02,
layer_sizes=[1000],
weight_init_stddevs=0.02,
bias_init_consts=1.0,
weight_decay_penalty=0.0,
weight_decay_penalty_type="l2",
dropouts=0.5,
activation_fns=tf.nn.relu,
**kwargs):
n_outputs = 2
super(ProgressiveMultitaskClassifier, self).__init__(
n_tasks,
n_features,
alpha_init_stddevs=alpha_init_stddevs,
layer_sizes=layer_sizes,
weight_init_stddevs=weight_init_stddevs,
bias_init_consts=bias_init_consts,
weight_decay_penalty=weight_decay_penalty,
weight_decay_penalty_type=weight_decay_penalty_type,
dropouts=dropouts,
activation_fns=activation_fns,
n_outputs=n_outputs,
**kwargs)
def create_loss(self):
return SparseSoftmaxCrossEntropy()
def create_output(self, layer):
return Softmax()(layer)
class TaskLoss(_StandardLoss):
def __init__(self, model, loss, task):
super(TaskLoss, self).__init__(model, loss)
self.task = task
def __call__(self, outputs, labels, weights):
outputs = [t[:, self.task] for t in outputs]
labels = [t[:, self.task] for t in labels]
weights = [t[:, self.task] for t in weights]
return super(TaskLoss, self).__call__(outputs, labels, weights)
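

# A minimal usage sketch for ProgressiveMultitaskRegressor (illustrative values
# only; shapes and hyperparameters are placeholders):
#
#   import numpy as np
#   import deepchem as dc
#   X = np.random.rand(20, 50)
#   y = np.random.rand(20, 2)
#   dataset = dc.data.NumpyDataset(X, y)
#   model = ProgressiveMultitaskRegressor(n_tasks=2, n_features=50,
#                                         layer_sizes=[32])
#   model.fit(dataset, nb_epoch=1)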
<file_sep>"""Evaluation Metrics for Genomics Datasets."""
from typing import List, Optional
import numpy as np
from scipy.signal import correlate2d
from deepchem.models import Model
from deepchem.data import NumpyDataset
def get_motif_scores(encoded_sequences: np.ndarray,
motif_names: List[str],
max_scores: Optional[int] = None,
return_positions: bool = False,
GC_fraction: float = 0.4) -> np.ndarray:
"""Computes pwm log odds.
Parameters
----------
encoded_sequences: np.ndarray
A numpy array of shape `(N_sequences, N_letters, sequence_length, 1)`.
motif_names: List[str]
List of motif file names.
max_scores: int, optional
Get top `max_scores` scores.
return_positions: bool, default False
        Whether to return positions or not.
GC_fraction: float, default 0.4
GC fraction in background sequence.
Returns
-------
np.ndarray
        A numpy array of scores. The shape is `(N_sequences, num_motifs, seq_length)` by default.
        If `max_scores` is given, the shape of the score array is `(N_sequences, num_motifs*max_scores)`.
        If both `max_scores` and `return_positions` are given, the array holds the max scores and
        their positions and has shape `(N_sequences, 2*num_motifs*max_scores)`.
Notes
-----
This method requires simdna to be installed.
"""
try:
import simdna
from simdna import synthetic
except ModuleNotFoundError:
raise ImportError("This function requires simdna to be installed.")
loaded_motifs = synthetic.LoadedEncodeMotifs(simdna.ENCODE_MOTIFS_PATH,
pseudocountProb=0.001)
num_samples, _, seq_length, _ = encoded_sequences.shape
scores = np.ones((num_samples, len(motif_names), seq_length))
for j, motif_name in enumerate(motif_names):
pwm = loaded_motifs.getPwm(motif_name).getRows().T
log_pwm = np.log(pwm)
gc_pwm = 0.5 * np.array(
[[1 - GC_fraction, GC_fraction, GC_fraction, 1 - GC_fraction]] *
len(pwm[0])).T
gc_log_pwm = np.log(gc_pwm)
log_scores = get_pssm_scores(encoded_sequences, log_pwm)
gc_log_scores = get_pssm_scores(encoded_sequences, gc_log_pwm)
scores[:, j, :] = log_scores - gc_log_scores
if max_scores is not None:
sorted_scores = np.sort(scores)[:, :, ::-1][:, :, :max_scores]
if return_positions:
sorted_positions = scores.argsort()[:, :, ::-1][:, :, :max_scores]
return np.concatenate(
(sorted_scores.reshape(
(num_samples, len(motif_names) * max_scores)),
sorted_positions.reshape(
(num_samples, len(motif_names) * max_scores))),
axis=1)
else:
return sorted_scores.reshape(
(num_samples, len(motif_names) * max_scores))
else:
return scores
def get_pssm_scores(encoded_sequences: np.ndarray,
pssm: np.ndarray) -> np.ndarray:
"""
Convolves pssm and its reverse complement with encoded sequences
and returns the maximum score at each position of each sequence.
Parameters
----------
encoded_sequences: np.ndarray
A numpy array of shape `(N_sequences, N_letters, sequence_length, 1)`.
pssm: np.ndarray
A numpy array of shape `(4, pssm_length)`.
Returns
-------
scores: np.ndarray
A numpy array of shape `(N_sequences, sequence_length)`.
"""
encoded_sequences = encoded_sequences.squeeze(axis=3)
# initialize fwd and reverse scores to -infinity
fwd_scores = np.full_like(encoded_sequences, -np.inf, float)
rc_scores = np.full_like(encoded_sequences, -np.inf, float)
# cross-correlate separately for each base,
# for both the PSSM and its reverse complement
for base_indx in range(encoded_sequences.shape[1]):
base_pssm = pssm[base_indx][None]
base_pssm_rc = base_pssm[:, ::-1]
fwd_scores[:,
base_indx, :] = correlate2d(encoded_sequences[:,
base_indx, :],
base_pssm,
mode='same')
rc_scores[:, base_indx, :] = correlate2d(
encoded_sequences[:, -(base_indx + 1), :],
base_pssm_rc,
mode='same')
# sum over the bases
fwd_scores_sum = fwd_scores.sum(axis=1)
rc_scores_sum = rc_scores.sum(axis=1)
# take max of fwd and reverse scores at each position
return np.maximum(fwd_scores_sum, rc_scores_sum)
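

# A minimal usage sketch for get_pssm_scores (the inputs below are synthetic and
# purely illustrative):
#
#   import numpy as np
#   seqs = np.zeros((2, 4, 10, 1))        # (N_sequences, N_letters, seq_len, 1)
#   seqs[:, 0, :, :] = 1.0                # every position is the first letter
#   pssm = np.log(np.full((4, 3), 0.25))  # uniform 4 x 3 log-PSSM
#   scores = get_pssm_scores(seqs, pssm)  # -> shape (2, 10)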
def in_silico_mutagenesis(model: Model,
encoded_sequences: np.ndarray) -> np.ndarray:
"""Computes in-silico-mutagenesis scores
Parameters
----------
model: Model
This can be any model that accepts inputs of the required shape and produces
an output of shape `(N_sequences, N_tasks)`.
encoded_sequences: np.ndarray
A numpy array of shape `(N_sequences, N_letters, sequence_length, 1)`
Returns
-------
np.ndarray
A numpy array of ISM scores. The shape is `(num_task, N_sequences, N_letters, sequence_length, 1)`.
"""
# Shape (N_sequences, num_tasks)
wild_type_predictions = model.predict(NumpyDataset(encoded_sequences))
# check whether wild_type_predictions is np.ndarray or not
assert isinstance(wild_type_predictions, np.ndarray)
num_tasks = wild_type_predictions.shape[1]
# Shape (N_sequences, N_letters, sequence_length, 1, num_tasks)
mutagenesis_scores = np.empty(encoded_sequences.shape + (num_tasks,),
dtype=np.float32)
    # Shape (N_sequences, 1, 1, 1, num_tasks)
wild_type_predictions = wild_type_predictions[:, np.newaxis, np.newaxis,
np.newaxis]
for sequence_index, (sequence, wild_type_prediction) in enumerate(
zip(encoded_sequences, wild_type_predictions)):
# Mutates every position of the sequence to every letter
# Shape (N_letters * sequence_length, N_letters, sequence_length, 1)
# Breakdown:
# Shape of sequence[np.newaxis] (1, N_letters, sequence_length, 1)
mutated_sequences = np.repeat(sequence[np.newaxis],
np.prod(sequence.shape),
axis=0)
# remove wild-type
# len(arange) = N_letters * sequence_length
arange = np.arange(len(mutated_sequences))
# len(horizontal cycle) = N_letters * sequence_length
horizontal_cycle = np.tile(np.arange(sequence.shape[1]),
sequence.shape[0])
mutated_sequences[arange, :, horizontal_cycle, :] = 0
# add mutant
vertical_repeat = np.repeat(np.arange(sequence.shape[0]),
sequence.shape[1])
mutated_sequences[arange, vertical_repeat, horizontal_cycle, :] = 1
# make mutant predictions
mutated_predictions = model.predict(NumpyDataset(mutated_sequences))
        # check whether mutated_predictions is np.ndarray or not
assert isinstance(mutated_predictions, np.ndarray)
mutated_predictions = mutated_predictions.reshape(sequence.shape +
(num_tasks,))
mutagenesis_scores[
sequence_index] = wild_type_prediction - mutated_predictions
rolled_scores = np.rollaxis(mutagenesis_scores, -1)
return rolled_scores
<file_sep>import unittest
import numpy as np
from rdkit import Chem
from deepchem.feat.molecule_featurizers.dmpnn_featurizer import generate_global_features
class TestGlobalFeatureGenerator(unittest.TestCase):
"""
Test for `generate_global_features` helper function which generates global features for DMPNN featurizer
"""
def setUp(self):
"""
Set up tests.
"""
smiles_list = ["C", "[H]", 'CC(=O)OC1=CC=CC=C1C(=O)O']
self.mol = [Chem.MolFromSmiles(smiles) for smiles in smiles_list]
self.feature_generators = [[''], ['morgan'], ['morgan', ''],
['morgan', 'morgan'], ['morgan_count'],
['rdkit_desc'], ['rdkit_desc_normalized']]
def test_generator_invalid_name(self):
"""
Test for generator when given name of feature generator is not in the list of available generators
"""
global_features = generate_global_features(self.mol[0],
self.feature_generators[0])
assert (global_features == np.empty(0)).all()
def test_generator_morgan(self):
"""
Test for generator when 'morgan' feature generator is provided
"""
global_features = generate_global_features(self.mol[0],
self.feature_generators[1])
assert len(global_features) == 2048
nonzero_features_indices = global_features.nonzero()[0]
assert len(nonzero_features_indices) == 1
assert nonzero_features_indices[0] == 1264
assert global_features[nonzero_features_indices[0]] == 1.0
def test_generator_morgan_with_invalid_name(self):
"""
Test for generator when 'morgan' feature generator and an unavailable generator name is provided
"""
global_features = generate_global_features(self.mol[0],
self.feature_generators[2])
assert len(global_features) == 2048
nonzero_features_indices = global_features.nonzero()[0]
assert len(nonzero_features_indices) == 1
assert nonzero_features_indices[0] == 1264
assert global_features[nonzero_features_indices[0]] == 1.0
def test_generator_morgan_twice(self):
"""
Test for generator when names of multiple generators are provided
"""
global_features = generate_global_features(self.mol[0],
self.feature_generators[3])
assert len(global_features) == 4096
nonzero_features_indices = global_features.nonzero()[0]
assert len(nonzero_features_indices) == 2
assert nonzero_features_indices[0] == 1264
assert nonzero_features_indices[1] == 1264 + 2048
assert global_features[nonzero_features_indices[0]] == 1.0
assert global_features[nonzero_features_indices[1]] == 1.0
def test_generator_hydrogen(self):
"""
Test for generator when provided RDKit mol contains only Hydrogen atoms
"""
global_features = generate_global_features(self.mol[1],
self.feature_generators[2])
assert (global_features == np.zeros(2048)).all()
def test_generator_morgan_count(self):
"""
Test for generator when 'morgan_count' feature generator is provided
"""
global_features = generate_global_features(self.mol[2],
self.feature_generators[4])
assert len(global_features) == 2048
nonzero_features_indices = global_features.nonzero()[0]
assert len(nonzero_features_indices) == 24
assert nonzero_features_indices[0] == 389
# number of indices where feature count is more than 1
assert len(np.where(global_features > 1.0)[0]) == 8
def test_generator_rdkit_desc(self):
"""
Test for generator when 'rdkit_desc' feature generator is provided
"""
global_features = generate_global_features(self.mol[2],
self.feature_generators[5])
assert len(global_features) == 200
def test_generator_rdkit_desc_normalized(self):
"""
Test for generator when 'rdkit_desc_normalized' feature generator is provided
"""
global_features = generate_global_features(self.mol[2],
self.feature_generators[6])
assert len(global_features) == 200
# no normalized feature value should be greater than 1.0
assert len(np.where(global_features > 1.0)[0]) == 0
<file_sep>This directory contains tutorials for DeepChem usage. If you'd like to contribute a new tutorial to DeepChem, please raise a pull request that adds a new IPython notebook to this directory.
Tips:
- To use images in your code, use:
    from IPython.display import Image, display
    display(Image(filename='filename'))
- Your notebook name cannot contain spaces.
- The first cell has to be markdown type and will be the title of the tutorial.
<file_sep>"""
SIDER dataset loader.
"""
import os
import deepchem as dc
from deepchem.molnet.load_function.molnet_loader import TransformerGenerator, _MolnetLoader
from deepchem.data import Dataset
from typing import List, Optional, Tuple, Union
SIDER_URL = "https://deepchemdata.s3-us-west-1.amazonaws.com/datasets/sider.csv.gz"
SIDER_TASKS = [
'Hepatobiliary disorders', 'Metabolism and nutrition disorders',
'Product issues', 'Eye disorders', 'Investigations',
'Musculoskeletal and connective tissue disorders',
'Gastrointestinal disorders', 'Social circumstances',
'Immune system disorders', 'Reproductive system and breast disorders',
'Neoplasms benign, malignant and unspecified (incl cysts and polyps)',
'General disorders and administration site conditions',
'Endocrine disorders', 'Surgical and medical procedures',
'Vascular disorders', 'Blood and lymphatic system disorders',
'Skin and subcutaneous tissue disorders',
'Congenital, familial and genetic disorders', 'Infections and infestations',
'Respiratory, thoracic and mediastinal disorders', 'Psychiatric disorders',
'Renal and urinary disorders',
'Pregnancy, puerperium and perinatal conditions',
'Ear and labyrinth disorders', 'Cardiac disorders',
'Nervous system disorders', 'Injury, poisoning and procedural complications'
]
class _SiderLoader(_MolnetLoader):
def create_dataset(self) -> Dataset:
dataset_file = os.path.join(self.data_dir, "sider.csv.gz")
if not os.path.exists(dataset_file):
dc.utils.data_utils.download_url(url=SIDER_URL,
dest_dir=self.data_dir)
loader = dc.data.CSVLoader(tasks=self.tasks,
feature_field="smiles",
featurizer=self.featurizer)
return loader.create_dataset(dataset_file, shard_size=8192)
def load_sider(
featurizer: Union[dc.feat.Featurizer, str] = 'ECFP',
splitter: Union[dc.splits.Splitter, str, None] = 'scaffold',
transformers: List[Union[TransformerGenerator, str]] = ['balancing'],
reload: bool = True,
data_dir: Optional[str] = None,
save_dir: Optional[str] = None,
**kwargs
) -> Tuple[List[str], Tuple[Dataset, ...], List[dc.trans.Transformer]]:
"""Load SIDER dataset
The Side Effect Resource (SIDER) is a database of marketed
drugs and adverse drug reactions (ADR). The version of the
SIDER dataset in DeepChem has grouped drug side effects into
27 system organ classes following MedDRA classifications
measured for 1427 approved drugs.
Random splitting is recommended for this dataset.
The raw data csv file contains columns below:
- "smiles": SMILES representation of the molecular structure
- "Hepatobiliary disorders" ~ "Injury, poisoning and procedural
complications": Recorded side effects for the drug. Please refer
to http://sideeffects.embl.de/se/?page=98 for details on ADRs.
Parameters
----------
featurizer: Featurizer or str
the featurizer to use for processing the data. Alternatively you can pass
one of the names from dc.molnet.featurizers as a shortcut.
splitter: Splitter or str
the splitter to use for splitting the data into training, validation, and
test sets. Alternatively you can pass one of the names from
dc.molnet.splitters as a shortcut. If this is None, all the data
will be included in a single dataset.
transformers: list of TransformerGenerators or strings
the Transformers to apply to the data. Each one is specified by a
TransformerGenerator or, as a shortcut, one of the names from
dc.molnet.transformers.
reload: bool
if True, the first call for a particular featurizer and splitter will cache
the datasets to disk, and subsequent calls will reload the cached datasets.
data_dir: str
a directory to save the raw data in
save_dir: str
a directory to save the dataset in
References
----------
.. [1] Kuhn, Michael, et al. "The SIDER database of drugs and side effects."
Nucleic acids research 44.D1 (2015): D1075-D1079.
.. [2] Altae-Tran, Han, et al. "Low data drug discovery with one-shot
learning." ACS central science 3.4 (2017): 283-293.
.. [3] Medical Dictionary for Regulatory Activities. http://www.meddra.org/
"""
loader = _SiderLoader(featurizer, splitter, transformers, SIDER_TASKS,
data_dir, save_dir, **kwargs)
return loader.load_dataset('sider', reload)
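

# A minimal usage sketch (illustrative only):
#
#   import deepchem as dc
#   # default scaffold split
#   tasks, (train, valid, test), transformers = dc.molnet.load_sider()
#   # or load everything as a single dataset by disabling splitting
#   tasks, (full_dataset,), transformers = dc.molnet.load_sider(splitter=None)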
<file_sep>"""
Script that trains multitask models on Tox21 dataset.
"""
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
import numpy as np
import deepchem as dc
from deepchem.molnet import load_tox21
# Only for debug!
np.random.seed(123)
# Load Tox21 dataset
n_features = 1024
tox21_tasks, tox21_datasets, transformers = load_tox21()
train_dataset, valid_dataset, test_dataset = tox21_datasets
# Fit models
metric = dc.metrics.Metric(
dc.metrics.roc_auc_score, np.mean, mode="classification")
n_layers = 1
n_bypass_layers = 1
nb_epoch = 10
model = dc.models.RobustMultitaskClassifier(
len(tox21_tasks),
train_dataset.get_data_shape()[0],
layer_sizes=[500] * n_layers,
bypass_layer_sizes=[50] * n_bypass_layers,
dropouts=[.25] * n_layers,
bypass_dropouts=[.25] * n_bypass_layers,
weight_init_stddevs=[.02] * n_layers,
bias_init_consts=[.5] * n_layers,
bypass_weight_init_stddevs=[.02] * n_bypass_layers,
bypass_bias_init_consts=[.5] * n_bypass_layers,
learning_rate=.0003,
weight_decay_penalty=.0001,
weight_decay_penalty_type="l2",
batch_size=100)
# Fit trained model
model.fit(train_dataset, nb_epoch=nb_epoch)
print("Evaluating model")
train_scores = model.evaluate(train_dataset, [metric], transformers)
valid_scores = model.evaluate(valid_dataset, [metric], transformers)
print("Train scores")
print(train_scores)
print("Validation scores")
print(valid_scores)
<file_sep>import numpy as np
from deepchem.utils.typing import RDKitMol
from deepchem.feat.base_classes import MolecularFeaturizer
class MACCSKeysFingerprint(MolecularFeaturizer):
"""MACCS Keys Fingerprint.
The MACCS (Molecular ACCess System) keys are one of the most commonly used structural keys.
Please confirm the details in [1]_, [2]_.
Examples
--------
>>> import deepchem as dc
>>> smiles = 'CC(=O)OC1=CC=CC=C1C(=O)O'
>>> featurizer = dc.feat.MACCSKeysFingerprint()
>>> features = featurizer.featurize([smiles])
>>> type(features[0])
<class 'numpy.ndarray'>
>>> features[0].shape
(167,)
References
----------
.. [1] <NAME>., et al. "Reoptimization of MDL keys for use in drug discovery."
Journal of chemical information and computer sciences 42.6 (2002): 1273-1280.
.. [2] https://github.com/rdkit/rdkit/blob/master/rdkit/Chem/MACCSkeys.py
Note
----
This class requires RDKit to be installed.
"""
def __init__(self):
"""Initialize this featurizer."""
self.calculator = None
def _featurize(self, datapoint: RDKitMol, **kwargs) -> np.ndarray:
"""
Calculate MACCS keys fingerprint.
Parameters
----------
datapoint: rdkit.Chem.rdchem.Mol
RDKit Mol object
Returns
-------
np.ndarray
1D array of RDKit descriptors for `mol`. The length is 167.
"""
if 'mol' in kwargs:
datapoint = kwargs.get("mol")
raise DeprecationWarning(
'Mol is being phased out as a parameter, please pass "datapoint" instead.'
)
if self.calculator is None:
try:
from rdkit.Chem.AllChem import GetMACCSKeysFingerprint
self.calculator = GetMACCSKeysFingerprint
except ModuleNotFoundError:
raise ImportError("This class requires RDKit to be installed.")
return self.calculator(datapoint)
<file_sep>---
name: "\U0001F4DA Installation"
about: Report an installation problem with DeepChem
---
## 📚 Installation
<!-- A clear and concise description of the installation error.
If you have installation log file, please provide it here as well. -->
## Environment
* OS:
* Package manager (PyPI or Conda):
* Python version:
* DeepChem version:
* TensorFlow version:
* PyTorch version (optional):
* CUDA/cuDNN version (optional):
* Any other relevant information:
## Checklist
- [ ] I followed the [installation guide](https://deepchem.readthedocs.io/en/latest/get_started/installation.html).
- [ ] I used Google Colab.
- [ ] I do have multiple CUDA versions on my machine.
## Additional context
<!-- Add any other context about the problem here. -->
<file_sep>"""
Gathers all transformers in one place for convenient imports
"""
# flake8: noqa
from deepchem.trans.transformers import undo_transforms
from deepchem.trans.transformers import undo_grad_transforms
from deepchem.trans.transformers import Transformer
from deepchem.trans.transformers import LogTransformer
from deepchem.trans.transformers import ClippingTransformer
from deepchem.trans.transformers import NormalizationTransformer
from deepchem.trans.transformers import BalancingTransformer
from deepchem.trans.transformers import CDFTransformer
from deepchem.trans.transformers import PowerTransformer
from deepchem.trans.transformers import CoulombFitTransformer
from deepchem.trans.transformers import IRVTransformer
from deepchem.trans.transformers import DAGTransformer
from deepchem.trans.transformers import MinMaxTransformer
from deepchem.trans.transformers import FeaturizationTransformer
from deepchem.trans.transformers import ImageTransformer
from deepchem.trans.transformers import DataTransforms
from deepchem.trans.transformers import FlatteningTransformer
from deepchem.trans.transformers import RxnSplitTransformer
from deepchem.trans.duplicate import DuplicateBalancingTransformer
<file_sep>"""
Process an input dataset into a format suitable for machine learning.
"""
import os
import tempfile
import zipfile
import time
import logging
import warnings
from typing import List, Optional, Tuple, Any, Sequence, Union, Iterator
import pandas as pd
import numpy as np
from deepchem.utils.typing import OneOrMany
from deepchem.utils.data_utils import load_image_files, load_csv_files, load_json_files, load_sdf_files, unzip_file
from deepchem.feat import UserDefinedFeaturizer, Featurizer
from deepchem.data import Dataset, DiskDataset, NumpyDataset, ImageDataset
from deepchem.feat.molecule_featurizers import OneHotFeaturizer
from deepchem.utils.genomics_utils import encode_bio_sequence
try:
from deepchem.feat.dft_data import DFTEntry
import yaml
from yaml.loader import SafeLoader
except ModuleNotFoundError:
pass
logger = logging.getLogger(__name__)
def _convert_df_to_numpy(df: pd.DataFrame,
tasks: List[str]) -> Tuple[np.ndarray, np.ndarray]:
"""Transforms a dataframe containing deepchem input into numpy arrays
This is a private helper method intended to help parse labels and
weights arrays from a pandas dataframe. Here `df` is a dataframe
which has columns for each task in `tasks`. These labels are
extracted into a labels array `y`. Weights `w` are initialized to
all ones, but weights for any missing labels are set to 0.
Parameters
----------
df: pd.DataFrame
Pandas dataframe with columns for all tasks
tasks: List[str]
List of tasks
Returns
-------
Tuple[np.ndarray, np.ndarray]
The tuple is `(w, y)`.
"""
n_samples = df.shape[0]
n_tasks = len(tasks)
y = np.hstack([
np.reshape(np.array(df[task].values), (n_samples, 1)) for task in tasks
])
w = np.ones((n_samples, n_tasks))
if y.dtype.kind in ['O', 'U']:
missing = (y == '')
y[missing] = 0
w[missing] = 0
return y.astype(float), w.astype(float)
class DataLoader(object):
"""Handles loading/featurizing of data from disk.
The main use of `DataLoader` and its child classes is to make it
    easier to load large datasets into `Dataset` objects.
`DataLoader` is an abstract superclass that provides a
general framework for loading data into DeepChem. This class should
never be instantiated directly. To load your own type of data, make
a subclass of `DataLoader` and provide your own implementation for
the `create_dataset()` method.
To construct a `Dataset` from input data, first instantiate a
concrete data loader (that is, an object which is an instance of a
subclass of `DataLoader`) with a given `Featurizer` object. Then
call the data loader's `create_dataset()` method on a list of input
files that hold the source data to process. Note that each subclass
of `DataLoader` is specialized to handle one type of input data so
you will have to pick the loader class suitable for your input data
type.
Note that it isn't necessary to use a data loader to process input
data. You can directly use `Featurizer` objects to featurize
provided input into numpy arrays, but note that this calculation
will be performed in memory, so you will have to write generators
that walk the source files and write featurized data to disk
yourself. `DataLoader` and its subclasses make this process easier
for you by performing this work under the hood.
"""
def __init__(self,
tasks: List[str],
featurizer: Featurizer,
id_field: Optional[str] = None,
log_every_n: int = 1000):
"""Construct a DataLoader object.
This constructor is provided as a template mainly. You
shouldn't ever call this constructor directly as a user.
Parameters
----------
tasks: List[str]
List of task names
featurizer: Featurizer
Featurizer to use to process data.
id_field: str, optional (default None)
Name of field that holds sample identifier. Note that the
meaning of "field" depends on the input data type and can have a
different meaning in different subclasses. For example, a CSV
file could have a field as a column, and an SDF file could have
a field as molecular property.
log_every_n: int, optional (default 1000)
Writes a logging statement this often.
"""
if self.__class__ is DataLoader:
raise ValueError(
"DataLoader should never be instantiated directly. Use a subclass instead."
)
if not isinstance(tasks, list):
raise ValueError("tasks must be a list.")
self.tasks = tasks
self.id_field = id_field
self.user_specified_features = None
if isinstance(featurizer, UserDefinedFeaturizer):
self.user_specified_features = featurizer.feature_fields
self.featurizer = featurizer
self.log_every_n = log_every_n
def featurize(self,
inputs: OneOrMany[Any],
data_dir: Optional[str] = None,
shard_size: Optional[int] = 8192) -> Dataset:
"""Featurize provided files and write to specified location.
DEPRECATED: This method is now a wrapper for `create_dataset()`
and calls that method under the hood.
For large datasets, automatically shards into smaller chunks
for convenience. This implementation assumes that the helper
methods `_get_shards` and `_featurize_shard` are implemented and
that each shard returned by `_get_shards` is a pandas dataframe.
You may choose to reuse or override this method in your subclass
implementations.
Parameters
----------
inputs: List
List of inputs to process. Entries can be filenames or arbitrary objects.
data_dir: str, default None
Directory to store featurized dataset.
shard_size: int, optional (default 8192)
Number of examples stored in each shard.
Returns
-------
Dataset
A `Dataset` object containing a featurized representation of data
from `inputs`.
"""
warnings.warn(
"featurize() is deprecated and has been renamed to create_dataset()."
"featurize() will be removed in DeepChem 3.0", FutureWarning)
return self.create_dataset(inputs, data_dir, shard_size)
def create_dataset(self,
inputs: OneOrMany[Any],
data_dir: Optional[str] = None,
shard_size: Optional[int] = 8192) -> Dataset:
"""Creates and returns a `Dataset` object by featurizing provided files.
Reads in `inputs` and uses `self.featurizer` to featurize the
data in these inputs. For large files, automatically shards
into smaller chunks of `shard_size` datapoints for convenience.
Returns a `Dataset` object that contains the featurized dataset.
This implementation assumes that the helper methods `_get_shards`
and `_featurize_shard` are implemented and that each shard
returned by `_get_shards` is a pandas dataframe. You may choose
to reuse or override this method in your subclass implementations.
Parameters
----------
inputs: List
List of inputs to process. Entries can be filenames or arbitrary objects.
data_dir: str, optional (default None)
Directory to store featurized dataset.
shard_size: int, optional (default 8192)
Number of examples stored in each shard.
Returns
-------
DiskDataset
A `DiskDataset` object containing a featurized representation of data
from `inputs`.
"""
logger.info("Loading raw samples now.")
logger.info("shard_size: %s" % str(shard_size))
# Special case handling of single input
if not isinstance(inputs, list):
inputs = [inputs]
def shard_generator():
for shard_num, shard in enumerate(
self._get_shards(inputs, shard_size)):
time1 = time.time()
X, valid_inds = self._featurize_shard(shard)
ids = shard[self.id_field].values
ids = ids[valid_inds]
if len(self.tasks) > 0:
# Featurize task results iff they exist.
y, w = _convert_df_to_numpy(shard, self.tasks)
# Filter out examples where featurization failed.
y, w = (y[valid_inds], w[valid_inds])
assert len(X) == len(ids) == len(y) == len(w)
else:
# For prospective data where results are unknown, it
# makes no sense to have y values or weights.
y, w = (None, None)
assert len(X) == len(ids)
time2 = time.time()
logger.info("TIMING: featurizing shard %d took %0.3f s" %
(shard_num, time2 - time1))
yield X, y, w, ids
return DiskDataset.create_dataset(shard_generator(), data_dir,
self.tasks)
def _get_shards(self, inputs: List, shard_size: Optional[int]) -> Iterator:
"""Stub for children classes.
Should implement a generator that walks over the source data in
`inputs` and returns a "shard" at a time. Here a shard is a
chunk of input data that can reasonably be handled in memory. For
example, this may be a set of rows from a CSV file or a set of
molecules from a SDF file. To re-use the
`DataLoader.create_dataset()` method, each shard must be a pandas
dataframe.
If you chose to override `create_dataset()` directly you don't
need to override this helper method.
Parameters
----------
inputs: list
List of inputs to process. Entries can be filenames or arbitrary objects.
shard_size: int, optional
Number of examples stored in each shard.
"""
raise NotImplementedError
def _featurize_shard(self, shard: Any):
"""Featurizes a shard of input data.
Recall a shard is a chunk of input data that can reasonably be
handled in memory. For example, this may be a set of rows from a
CSV file or a set of molecules from a SDF file. Featurize this
shard in memory and return the results.
Parameters
----------
shard: Any
A chunk of input data
"""
raise NotImplementedError
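# A minimal sketch (not part of the library) of how a custom subclass might
# implement the two helper stubs above. `MyListLoader` is a hypothetical name;
# it assumes the inputs are plain Python lists of SMILES strings and should be
# constructed with id_field="smiles" so create_dataset() can pull ids from each shard.
#
#   class MyListLoader(DataLoader):
#
#       def _get_shards(self, inputs, shard_size):
#           # Yield pandas dataframes of at most `shard_size` rows each.
#           step = shard_size or len(inputs)
#           for i in range(0, len(inputs), step):
#               yield pd.DataFrame({"smiles": inputs[i:i + step]})
#
#       def _featurize_shard(self, shard):
#           # Featurize the shard and flag rows where featurization failed.
#           features = self.featurizer(shard["smiles"])
#           valid_inds = np.array([np.array(f).size > 0 for f in features], dtype=bool)
#           features = [f for f, ok in zip(features, valid_inds) if ok]
#           return np.array(features), valid_inds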
class CSVLoader(DataLoader):
"""
Creates `Dataset` objects from input CSV files.
This class provides conveniences to load data from CSV files.
It's possible to directly featurize data from CSV files using
pandas, but this class may prove useful if you're processing
large CSV files that you don't want to manipulate directly in
memory. Note that samples which cannot be featurized are filtered
out in the creation of final dataset.
Examples
--------
Let's suppose we have some smiles and labels
>>> smiles = ["C", "CCC"]
>>> labels = [1.5, 2.3]
Let's put these in a dataframe.
>>> import pandas as pd
>>> df = pd.DataFrame(list(zip(smiles, labels)), columns=["smiles", "task1"])
Let's now write this to disk somewhere. We can now use `CSVLoader` to
process this CSV dataset.
>>> import tempfile
>>> import deepchem as dc
>>> with dc.utils.UniversalNamedTemporaryFile(mode='w') as tmpfile:
... df.to_csv(tmpfile.name)
... loader = dc.data.CSVLoader(["task1"], feature_field="smiles",
... featurizer=dc.feat.CircularFingerprint())
... dataset = loader.create_dataset(tmpfile.name)
>>> len(dataset)
2
Of course in practice you should already have your data in a CSV file if
you're using `CSVLoader`. If your data is already in memory, use
`InMemoryLoader` instead.
Sometimes there will be datasets without specific tasks, for example
datasets which are used in unsupervised learning tasks. Such datasets
can be loaded by leaving the `tasks` field empty.
Example
-------
>>> x1, x2 = [2, 3, 4], [4, 6, 8]
>>> df = pd.DataFrame({"x1":x1, "x2": x2}).reset_index()
>>> with dc.utils.UniversalNamedTemporaryFile(mode='w') as tmpfile:
... df.to_csv(tmpfile.name)
... loader = dc.data.CSVLoader(tasks=[], id_field="index", feature_field=["x1", "x2"],
... featurizer=dc.feat.DummyFeaturizer())
... dataset = loader.create_dataset(tmpfile.name)
>>> len(dataset)
3
"""
def __init__(self,
tasks: List[str],
featurizer: Featurizer,
feature_field: Optional[str] = None,
id_field: Optional[str] = None,
smiles_field: Optional[str] = None,
log_every_n: int = 1000):
"""Initializes CSVLoader.
Parameters
----------
tasks: List[str]
List of task names
featurizer: Featurizer
Featurizer to use to process data.
feature_field: str, optional (default None)
Field with data to be featurized.
id_field: str, optional, (default None)
CSV column that holds sample identifier
smiles_field: str, optional (default None) (DEPRECATED)
Name of field that holds smiles string.
log_every_n: int, optional (default 1000)
Writes a logging statement this often.
"""
if not isinstance(tasks, list):
raise ValueError("tasks must be a list.")
if smiles_field is not None:
logger.warning(
"smiles_field is deprecated and will be removed in a future version of DeepChem."
"Use feature_field instead.")
if feature_field is not None and smiles_field != feature_field:
raise ValueError(
"smiles_field and feature_field if both set must have the same value."
)
elif feature_field is None:
feature_field = smiles_field
self.tasks = tasks
self.feature_field = feature_field
        if id_field is None:
            self.id_field = feature_field  # Use features as unique ids if necessary
        else:
            self.id_field = id_field
self.user_specified_features = None
if isinstance(featurizer, UserDefinedFeaturizer):
self.user_specified_features = featurizer.feature_fields
self.featurizer = featurizer
self.log_every_n = log_every_n
def _get_shards(self, input_files: List[str],
shard_size: Optional[int]) -> Iterator[pd.DataFrame]:
"""Defines a generator which returns data for each shard
Parameters
----------
input_files: List[str]
List of filenames to process
shard_size: int, optional
The size of a shard of data to process at a time.
Returns
-------
Iterator[pd.DataFrame]
Iterator over shards
"""
return load_csv_files(input_files, shard_size)
def _featurize_shard(self,
shard: pd.DataFrame) -> Tuple[np.ndarray, np.ndarray]:
"""Featurizes a shard of an input dataframe.
Parameters
----------
shard: pd.DataFrame
DataFrame that holds a shard of the input CSV file
Returns
-------
features: np.ndarray
Features computed from CSV file.
valid_inds: np.ndarray
Indices of rows in source CSV with valid data.
"""
logger.info("About to featurize shard.")
if self.featurizer is None:
raise ValueError(
"featurizer must be specified in constructor to featurizer data/"
)
features = [elt for elt in self.featurizer(shard[self.feature_field])]
valid_inds = np.array(
[1 if np.array(elt).size > 0 else 0 for elt in features],
dtype=bool)
features = [
elt for (is_valid, elt) in zip(valid_inds, features) if is_valid
]
return np.array(features), valid_inds
class UserCSVLoader(CSVLoader):
"""
Handles loading of CSV files with user-defined features.
This is a convenience class that allows for descriptors already present in a
CSV file to be extracted without any featurization necessary.
Examples
--------
Let's suppose we have some descriptors and labels. (Imagine that these
descriptors have been computed by an external program.)
>>> desc1 = [1, 43]
>>> desc2 = [-2, -22]
>>> labels = [1.5, 2.3]
>>> ids = ["cp1", "cp2"]
Let's put these in a dataframe.
>>> import pandas as pd
>>> df = pd.DataFrame(list(zip(ids, desc1, desc2, labels)), columns=["id", "desc1", "desc2", "task1"])
Let's now write this to disk somewhere. We can now use `UserCSVLoader` to
process this CSV dataset.
>>> import tempfile
>>> import deepchem as dc
>>> featurizer = dc.feat.UserDefinedFeaturizer(["desc1", "desc2"])
>>> with dc.utils.UniversalNamedTemporaryFile(mode='w') as tmpfile:
... df.to_csv(tmpfile.name)
... loader = dc.data.UserCSVLoader(["task1"], id_field="id",
... featurizer=featurizer)
... dataset = loader.create_dataset(tmpfile.name)
>>> len(dataset)
2
>>> dataset.X[0, 0]
1
The difference between `UserCSVLoader` and `CSVLoader` is that our
descriptors (our features) have already been computed for us, but are spread
across multiple columns of the CSV file.
Of course in practice you should already have your data in a CSV file if
you're using `UserCSVLoader`. If your data is already in memory, use
`InMemoryLoader` instead.
"""
def _get_shards(self, input_files: List[str],
shard_size: Optional[int]) -> Iterator[pd.DataFrame]:
"""Defines a generator which returns data for each shard
Parameters
----------
input_files: List[str]
List of filenames to process
shard_size: int, optional
The size of a shard of data to process at a time.
Returns
-------
Iterator[pd.DataFrame]
Iterator over shards
"""
return load_csv_files(input_files, shard_size)
def _featurize_shard(self,
shard: pd.DataFrame) -> Tuple[np.ndarray, np.ndarray]:
"""Featurizes a shard of an input dataframe.
Parameters
----------
shard: pd.DataFrame
DataFrame that holds a shard of the input CSV file
Returns
-------
features: np.ndarray
Features extracted from CSV file.
valid_inds: np.ndarray
Indices of rows in source CSV with valid data.
"""
assert isinstance(self.featurizer, UserDefinedFeaturizer)
time1 = time.time()
feature_fields = self.featurizer.feature_fields
shard[feature_fields] = shard[feature_fields].apply(pd.to_numeric)
X_shard = shard[feature_fields].to_numpy()
time2 = time.time()
logger.info("TIMING: user specified processing took %0.3f s" %
(time2 - time1))
return (X_shard, np.ones(len(X_shard), dtype=bool))
class JsonLoader(DataLoader):
"""
Creates `Dataset` objects from input json files.
This class provides conveniences to load data from json files.
It's possible to directly featurize data from json files using
pandas, but this class may prove useful if you're processing
large json files that you don't want to manipulate directly in
memory.
It is meant to load JSON files formatted as "records" in line
delimited format, which allows for sharding.
``list like [{column -> value}, ... , {column -> value}]``.
Examples
--------
Let's create the sample dataframe.
>>> composition = ["LiCoO2", "MnO2"]
>>> labels = [1.5, 2.3]
>>> import pandas as pd
>>> df = pd.DataFrame(list(zip(composition, labels)), columns=["composition", "task"])
Dump the dataframe to the JSON file formatted as "records" in line delimited format and
load the json file by JsonLoader.
>>> import tempfile
>>> import deepchem as dc
>>> with dc.utils.UniversalNamedTemporaryFile(mode='w') as tmpfile:
... df.to_json(tmpfile.name, orient='records', lines=True)
... featurizer = dc.feat.ElementPropertyFingerprint()
... loader = dc.data.JsonLoader(["task"], feature_field="composition", featurizer=featurizer)
... dataset = loader.create_dataset(tmpfile.name)
>>> len(dataset)
2
"""
def __init__(self,
tasks: List[str],
feature_field: str,
featurizer: Featurizer,
label_field: Optional[str] = None,
weight_field: Optional[str] = None,
id_field: Optional[str] = None,
log_every_n: int = 1000):
"""Initializes JsonLoader.
Parameters
----------
tasks: List[str]
List of task names
feature_field: str
JSON field with data to be featurized.
featurizer: Featurizer
Featurizer to use to process data
label_field: str, optional (default None)
Field with target variables.
weight_field: str, optional (default None)
Field with weights.
id_field: str, optional (default None)
Field for identifying samples.
log_every_n: int, optional (default 1000)
Writes a logging statement this often.
"""
if not isinstance(tasks, list):
raise ValueError("Tasks must be a list.")
self.tasks = tasks
self.feature_field = feature_field
self.label_field = label_field
self.weight_field = weight_field
self.id_field = id_field
self.user_specified_features = None
if isinstance(featurizer, UserDefinedFeaturizer):
self.user_specified_features = featurizer.feature_fields
self.featurizer = featurizer
self.log_every_n = log_every_n
def create_dataset(self,
input_files: OneOrMany[str],
data_dir: Optional[str] = None,
shard_size: Optional[int] = 8192) -> DiskDataset:
"""Creates a `Dataset` from input JSON files.
Parameters
----------
input_files: OneOrMany[str]
List of JSON filenames.
data_dir: Optional[str], default None
Name of directory where featurized data is stored.
shard_size: int, optional (default 8192)
Shard size when loading data.
Returns
-------
DiskDataset
A `DiskDataset` object containing a featurized representation of data
from `input_files`.
"""
if not isinstance(input_files, list):
try:
if isinstance(input_files, str):
input_files = [input_files]
else:
input_files = list(input_files)
except TypeError:
raise ValueError(
"input_files is of an unrecognized form. Must be one filename or a list of filenames."
)
def shard_generator():
"""Yield X, y, w, and ids for shards."""
for shard_num, shard in enumerate(
self._get_shards(input_files, shard_size)):
time1 = time.time()
X, valid_inds = self._featurize_shard(shard)
if self.id_field:
ids = shard[self.id_field].values
else:
ids = np.ones(len(valid_inds))
ids = ids[valid_inds]
if len(self.tasks) > 0:
# Featurize task results if they exist.
y, w = _convert_df_to_numpy(shard, self.tasks)
if self.label_field:
y = shard[self.label_field]
if self.weight_field:
w = shard[self.weight_field]
# Filter out examples where featurization failed.
y, w = (y[valid_inds], w[valid_inds])
assert len(X) == len(ids) == len(y) == len(w)
else:
# For prospective data where results are unknown, it
# makes no sense to have y values or weights.
y, w = (None, None)
assert len(X) == len(ids)
time2 = time.time()
logger.info("TIMING: featurizing shard %d took %0.3f s" %
(shard_num, time2 - time1))
yield X, y, w, ids
return DiskDataset.create_dataset(shard_generator(), data_dir,
self.tasks)
def _get_shards(self, input_files: List[str],
shard_size: Optional[int]) -> Iterator[pd.DataFrame]:
"""Defines a generator which returns data for each shard
Parameters
----------
input_files: List[str]
List of filenames to process
shard_size: int, optional
The size of a shard of data to process at a time.
Returns
-------
Iterator[pd.DataFrame]
Iterator over shards
"""
return load_json_files(input_files, shard_size)
def _featurize_shard(self,
shard: pd.DataFrame) -> Tuple[np.ndarray, np.ndarray]:
"""Featurizes a shard of an input dataframe.
Helper that computes features for the given shard of data.
Parameters
----------
shard: pd.DataFrame
DataFrame that holds data to be featurized.
Returns
-------
features: np.ndarray
Array of feature vectors. Note that samples for which featurization has
failed will be filtered out.
valid_inds: np.ndarray
Boolean values indicating successful featurization for corresponding
sample in the source.
"""
logger.info("About to featurize shard.")
if self.featurizer is None:
raise ValueError(
"featurizer must be specified in constructor to featurizer data/"
)
features = [elt for elt in self.featurizer(shard[self.feature_field])]
valid_inds = np.array(
[1 if np.array(elt).size > 0 else 0 for elt in features],
dtype=bool)
features = [
elt for (is_valid, elt) in zip(valid_inds, features) if is_valid
]
return np.array(features), valid_inds
class SDFLoader(DataLoader):
"""Creates a `Dataset` object from SDF input files.
This class provides conveniences to load and featurize data from
Structure Data Files (SDFs). SDF is a standard format for structural
information (3D coordinates of atoms and bonds) of molecular compounds.
Examples
--------
>>> import deepchem as dc
>>> import os
>>> current_dir = os.path.dirname(os.path.realpath(__file__))
>>> featurizer = dc.feat.CircularFingerprint(size=16)
>>> loader = dc.data.SDFLoader(["LogP(RRCK)"], featurizer=featurizer, sanitize=True)
>>> dataset = loader.create_dataset(os.path.join(current_dir, "tests", "membrane_permeability.sdf")) # doctest:+ELLIPSIS
>>> len(dataset)
2
"""
def __init__(self,
tasks: List[str],
featurizer: Featurizer,
sanitize: bool = False,
log_every_n: int = 1000):
"""Initialize SDF Loader
Parameters
----------
tasks: list[str]
List of tasknames. These will be loaded from the SDF file.
featurizer: Featurizer
Featurizer to use to process data
sanitize: bool, optional (default False)
Whether to sanitize molecules.
log_every_n: int, optional (default 1000)
Writes a logging statement this often.
"""
self.featurizer = featurizer
self.sanitize = sanitize
self.tasks = tasks
        # The field in which load_sdf_files stores RDKit mol objects
self.mol_field = "mol"
# The field in which load_sdf_files return value stores smiles
self.id_field = "smiles"
self.log_every_n = log_every_n
def create_dataset(self,
inputs: OneOrMany[Any],
data_dir: Optional[str] = None,
shard_size: Optional[int] = 8192) -> Dataset:
"""Creates and returns a `Dataset` object by featurizing provided sdf files.
Parameters
----------
inputs: List
List of inputs to process. Entries can be filenames or arbitrary objects.
            Each file should be in a supported format (.sdf) or a compressed
            (.zip) folder of .sdf files.
data_dir: str, optional (default None)
Directory to store featurized dataset.
shard_size: int, optional (default 8192)
Number of examples stored in each shard.
Returns
-------
DiskDataset
A `DiskDataset` object containing a featurized representation of data
from `inputs`.
"""
logger.info("Loading raw samples now.")
logger.info("shard_size: %s" % str(shard_size))
# Special case handling of single input
if not isinstance(inputs, list):
inputs = [inputs]
processed_files = []
for input_file in inputs:
filename, extension = os.path.splitext(input_file)
extension = extension.lower()
if extension == ".sdf":
processed_files.append(input_file)
elif extension == ".zip":
zip_dir = tempfile.mkdtemp()
unzip_file(input_file, zip_dir)
zip_files = [
os.path.join(zip_dir, name) for name in os.listdir(zip_dir)
]
for zip_file in zip_files:
_, extension = os.path.splitext(zip_file)
extension = extension.lower()
if extension in [".sdf"]:
processed_files.append(zip_file)
else:
raise ValueError("Unsupported file format")
inputs = processed_files
def shard_generator():
for shard_num, shard in enumerate(
self._get_shards(inputs, shard_size)):
time1 = time.time()
X, valid_inds = self._featurize_shard(shard)
ids = shard[self.id_field].values
ids = ids[valid_inds]
if len(self.tasks) > 0:
# Featurize task results iff they exist.
y, w = _convert_df_to_numpy(shard, self.tasks)
# Filter out examples where featurization failed.
y, w = (y[valid_inds], w[valid_inds])
assert len(X) == len(ids) == len(y) == len(w)
else:
# For prospective data where results are unknown, it
# makes no sense to have y values or weights.
y, w = (None, None)
assert len(X) == len(ids)
time2 = time.time()
logger.info("TIMING: featurizing shard %d took %0.3f s" %
(shard_num, time2 - time1))
yield X, y, w, ids
return DiskDataset.create_dataset(shard_generator(), data_dir,
self.tasks)
def _get_shards(self, input_files: List[str],
shard_size: Optional[int]) -> Iterator[pd.DataFrame]:
"""Defines a generator which returns data for each shard
Parameters
----------
input_files: List[str]
List of filenames to process
shard_size: int, optional
The size of a shard of data to process at a time.
Returns
-------
Iterator[pd.DataFrame]
Iterator over shards
"""
return load_sdf_files(input_files=input_files,
clean_mols=self.sanitize,
tasks=self.tasks,
shard_size=shard_size)
def _featurize_shard(self,
shard: pd.DataFrame) -> Tuple[np.ndarray, np.ndarray]:
"""Featurizes a shard of an input dataframe.
Helper that computes features for the given shard of data.
Parameters
----------
shard: pd.DataFrame
DataFrame that holds data to be featurized.
Returns
-------
features: np.ndarray
Array of feature vectors. Note that samples for which featurization has
failed will be filtered out.
valid_inds: np.ndarray
Boolean values indicating successful featurization for corresponding
sample in the source.
"""
pos_cols = ['pos_x', 'pos_y', 'pos_z']
if set(pos_cols).issubset(shard.columns):
features = [
elt for elt in self.featurizer(shard[self.mol_field],
pos_x=shard['pos_x'],
pos_y=shard['pos_y'],
pos_z=shard['pos_z'])
]
else:
features = [elt for elt in self.featurizer(shard[self.mol_field])]
valid_inds = np.array(
[1 if np.array(elt).size > 0 else 0 for elt in features],
dtype=bool)
features = [
elt for (is_valid, elt) in zip(valid_inds, features) if is_valid
]
return np.array(features), valid_inds
class FASTALoader(DataLoader):
"""Handles loading of FASTA files.
FASTA files are commonly used to hold sequence data. This
    class provides convenience methods to load FASTA data and
one-hot encode the genomic sequences for use in downstream
learning tasks.
"""
def __init__(self,
featurizer: Optional[Featurizer] = None,
auto_add_annotations: bool = False,
legacy: bool = True):
"""Initialize FASTALoader.
Parameters
----------
featurizer: Featurizer (default: None)
The Featurizer to be used for the loaded FASTA data.
If featurizer is None and legacy is True, the original featurization
logic is used, creating a one hot encoding of all included FASTA strings
of shape
(number of FASTA sequences, number of channels + 1, sequence length, 1).
If featurizer is None and legacy is False, the featurizer is initialized
as a OneHotFeaturizer object with charset ("A", "C", "T", "G") and
max_length = None.
auto_add_annotations: bool (default False)
Whether create_dataset will automatically add [CLS] and [SEP] annotations
to the sequences it reads in order to assist tokenization.
Keep False if your FASTA file already includes [CLS] and [SEP] annotations.
legacy: bool (default True)
Whether to use legacy logic for featurization. Legacy mode will create
a one hot encoding of the FASTA content of shape
(number of FASTA sequences, number of channels + 1, max length, 1).
Legacy mode is only tested for ACTGN charsets, and will be deprecated.
"""
# Process legacy toggle
if legacy:
warnings.warn(
"""
Legacy mode is deprecated and will be removed in
DeepChem 3.0. Disable legacy mode by passing legacy=False
during construction of FASTALoader object.
""", FutureWarning)
if featurizer is not None or auto_add_annotations:
raise ValueError(f"""
featurizer option must be None and
auto_add_annotations must be false when legacy mode
is enabled. You set featurizer to {featurizer} and
auto_add_annotations to {auto_add_annotations}.
""")
# Set attributes
self.legacy = legacy
self.auto_add_annotations = auto_add_annotations
self.user_specified_features = None
# Handle special featurizer cases
if isinstance(featurizer,
UserDefinedFeaturizer): # User defined featurizer
self.user_specified_features = featurizer.feature_fields
elif featurizer is None: # Default featurizer
featurizer = OneHotFeaturizer(charset=["A", "C", "T", "G"],
max_length=None)
# Set self.featurizer
self.featurizer = featurizer
def create_dataset(self,
input_files: OneOrMany[str],
data_dir: Optional[str] = None,
shard_size: Optional[int] = None) -> DiskDataset:
"""Creates a `Dataset` from input FASTA files.
At present, FASTA support is limited and doesn't allow for sharding.
Parameters
----------
input_files: List[str]
List of fasta files.
data_dir: str, optional (default None)
Name of directory where featurized data is stored.
shard_size: int, optional (default None)
For now, this argument is ignored and each FASTA file gets its
own shard.
Returns
-------
DiskDataset
A `DiskDataset` object containing a featurized representation of data
from `input_files`.
"""
if isinstance(input_files, str):
input_files = [input_files]
def shard_generator(): # TODO Enable sharding with shard size parameter
for input_file in input_files:
if self.legacy:
X = encode_bio_sequence(input_file)
else:
sequences = _read_file(input_file)
X = self.featurizer(sequences)
ids = np.ones(len(X))
# (X, y, w, ids)
yield X, None, None, ids
def _read_file(input_file: str):
"""
Convert the FASTA file to a numpy array of FASTA-format strings.
"""
# TODO don't convert all sequences into np array (allow shards)
def _generate_sequences(fasta_file, header_mark=">") -> np.ndarray:
"""
Uses a fasta_file to create a numpy array of annotated FASTA-format strings
"""
sequences: np.ndarray = np.array([])
sequence: np.ndarray = np.array([])
header_read = False
for line in fasta_file:
# Check if line is a header
if line.startswith(header_mark): # New header line
header_read = True
sequences = _add_sequence(sequences, sequence)
sequence = np.array([])
elif header_read: # Line contains sequence in FASTA format
if line[-1:] == '\n': # Check last character in string
line = line[0:-1] # Remove last character
sequence = np.append(sequence, line)
sequences = _add_sequence(sequences,
sequence) # Add last sequence
return sequences
def _add_sequence(sequences: np.ndarray,
sequence: np.ndarray) -> np.ndarray:
# Handle empty sequence
if sequence is None or len(sequence) <= 0:
return np.array([])
# Annotate start/stop of sequence
if self.auto_add_annotations:
sequence = np.insert(sequence, 0, "[CLS]")
sequence = np.append(sequence, "[SEP]")
new_sequence = ''.join(sequence)
new_sequences = np.append(sequences, new_sequence)
return new_sequences
with open(input_file, 'r') as f: # Read FASTA file
return _generate_sequences(f)
return DiskDataset.create_dataset(shard_generator(), data_dir)
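# Example usage sketch (not part of the library); "sequences.fasta" is a
# hypothetical path to a FASTA file on disk:
#
#   loader = FASTALoader(legacy=False)  # one-hot encode A/C/T/G
#   dataset = loader.create_dataset("sequences.fasta")
#   # dataset.X holds one one-hot encoded array per FASTA sequence.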
def _fastq_load_files(input_files: List[str],
shard_size: Optional[int] = 4096) -> Iterator:
"""Load data as Iterator.
Parameters
----------
input_files: List[str]
List of fastq filenames.
shard_size: int, optional (default 4096)
Chunksize for reading fastq files.
Yields
-------
Iterator
Generator which yields the data which is the same shard size.
"""
shard_num = 0
for input_file in input_files:
logger.info("About to start loading fastq from %s." % input_file)
# Open index file
with open(input_file, 'r') as f:
# create an empty list to store lines in files.
df = []
line_number = 0
# iterate through each line in the input file
for num, line in enumerate(f):
                # Each FASTQ record spans four lines, so one shard holds up to shard_size * 4 lines.
                if (shard_size is None) or ((num + 1) - line_number <=
                                            (shard_size * 4)):
# append to list
df.append(line)
else:
# else yield the list
shard_num += 1
logger.info("Loading shard %d of size %s." %
(shard_num, str(shard_size)))
# set the line_number variable to the last line number (num) before 'yield' was called
line_number = num
# yield list (shard/batch)
yield df
# Re-initialize list with the index line to begin a new shard.
df = [line]
if len(df) > 0:
yield df
class FASTQLoader(DataLoader):
"""Handles loading of FASTQ files.
    FASTQ files are commonly used to hold very large sequence data. The format
    is a variant of the FASTA format.
    This class provides convenience methods to load FASTQ data and one-hot encode
    the genomic sequences for use in downstream learning tasks.
Example
-------
>>> import os
>>> from deepchem.feat.molecule_featurizers import OneHotFeaturizer
>>> from deepchem.data.data_loader import FASTQLoader
>>> current_dir = os.path.dirname(os.path.abspath(__file__))
>>> input_file = os.path.join(current_dir, "tests", "sample1.fastq")
>>> loader = FASTQLoader()
>>> sequences = loader.create_dataset(input_file)
See Also
--------
`Info on the structure of FASTQ files <https://support.illumina.com/bulletins/2016/04/fastq-files-explained.html>`
"""
def __init__(self,
featurizer: Optional[Featurizer] = None,
auto_add_annotations: bool = False,
return_quality_scores: bool = False):
"""Initialize FASTQLoader.
Parameters
----------
featurizer: Featurizer (default: None)
The Featurizer to be used for the loaded FASTQ data.
The featurizer is initialized as a OneHotFeaturizer object with charset ("A", "C", "T", "G") and
max_length = None.
auto_add_annotations: bool (default False)
Whether create_dataset will automatically add [CLS] and [SEP] annotations
to the sequences it reads in order to assist tokenization.
Keep False if your FASTQ file already includes [CLS] and [SEP] annotations.
        return_quality_scores: bool (default False)
            If True, also return the quality (likelihood) scores of the nucleotides in each sequence.
"""
# Set attributes
self.auto_add_annotations = auto_add_annotations
self.user_specified_features = None
# Handle special featurizer cases
if isinstance(featurizer,
UserDefinedFeaturizer): # User defined featurizer
self.user_specified_features = featurizer.feature_fields
elif featurizer is None: # Default featurizer
featurizer = OneHotFeaturizer(charset=["A", "C", "T", "G"],
max_length=None)
# Set self.featurizer
self.featurizer = featurizer
# Set self.return_quality_scores
self.return_quality_scores = return_quality_scores
def _get_shards(self,
input_files: List[str],
shard_size: Optional[int] = 4096) -> Iterator:
"""Defines a generator which returns data for each shard
Parameters
----------
input_files: List[str]
List of file names to process
        shard_size: int, optional (default 4096)
            The size of a shard of data to process at a time. Here, shard_size is
            the number of FASTQ records (groups of four lines) to fetch per shard.
Yields
-------
Iterator
Iterator over shards
"""
return _fastq_load_files(input_files, shard_size)
def create_dataset(self,
input_files: OneOrMany[str],
data_dir: Optional[str] = None,
shard_size: Optional[int] = 4096) -> DiskDataset:
"""Creates a `Dataset` from input FASTQ files.
Parameters
----------
input_files: List[str]
List of fastQ files.
data_dir: str, optional (default None)
Name of directory where featurized data is stored.
shard_size: int, optional (default 4096)
Returns
-------
DiskDataset
A `DiskDataset` object containing a featurized representation of data
from `input_files`.
"""
if isinstance(input_files, str):
input_files = [input_files]
def shard_generator():
for shard_num, shard in enumerate(
self._get_shards(input_files, shard_size)):
if self.return_quality_scores:
sequences, quality_scores = _generate_sequences(shard)
# Featurize sequences
X = self.featurizer(sequences)
ids = np.ones(len(X))
# (X, y , w, ids)
yield X, None, quality_scores, ids
else:
sequences = _generate_sequences(shard)
# Featurize sequences
X = self.featurizer(sequences)
ids = np.ones(len(X))
# (X, y , w, ids)
yield X, None, None, ids
def _generate_sequences(shard: List) -> OneOrMany[np.ndarray]:
"""
Creates a numpy array of annotated FASTQ-format strings.
"""
assert len(
shard
) % 4 == 0, f'Sharded length not divisible by four: Length of shard = {len(shard)}. File is possibly incomplete'
sequences: np.ndarray = np.array([], dtype='object')
if self.return_quality_scores:
quality_scores: np.ndarray = np.array([], dtype='object')
# Go through each sequence entity in the fastq_file: each sequence consists of 4 lines
# First line : header description
# second line : sequence
# third line : more description usually the same as the first line
# fourth line: quality scores of the sequence
for start_index in range(0, len(shard), 4):
each_sequence = shard[start_index:start_index + 4]
# Second line : add sequence to the sequence array
sequences = _add_sequence(
sequences, np.array([each_sequence[1].strip("\n")]))
# Fourth line
if self.return_quality_scores:
quality_scores = _add_sequence(
quality_scores,
np.array([each_sequence[3].strip("\n")]))
if self.return_quality_scores:
return sequences, quality_scores
else:
return sequences
def _add_sequence(sequences: np.ndarray,
sequence: np.ndarray) -> np.ndarray:
# Handle empty sequence
if sequence is None or len(sequence) <= 0:
return np.array([])
# Annotate start/stop of sequence
if self.auto_add_annotations:
sequence = np.insert(sequence, 0, "[CLS]")
sequence = np.append(sequence, "[SEP]")
new_sequence = ''.join(sequence)
new_sequences = np.append(sequences, new_sequence)
return new_sequences
return DiskDataset.create_dataset(shard_generator(), data_dir)
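# Example usage sketch (not part of the library); "reads.fastq" is a
# hypothetical path. When return_quality_scores=True, the raw quality strings
# are yielded in the weights slot of each shard:
#
#   loader = FASTQLoader(return_quality_scores=True)
#   dataset = loader.create_dataset("reads.fastq")
#   # dataset.X -> one-hot encoded reads, dataset.w -> per-read quality strings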
class ImageLoader(DataLoader):
"""Handles loading of image files.
This class allows for loading of images in various formats.
For user convenience, also accepts zip-files and directories
of images and uses some limited intelligence to attempt to
traverse subdirectories which contain images.
"""
def __init__(self, tasks: Optional[List[str]] = None):
"""Initialize image loader.
At present, custom image featurizers aren't supported by this
loader class.
Parameters
----------
tasks: List[str], optional (default None)
List of task names for image labels.
"""
if tasks is None:
tasks = []
self.tasks = tasks
def create_dataset(self,
inputs: Union[OneOrMany[str], Tuple[Any]],
data_dir: Optional[str] = None,
shard_size: Optional[int] = 8192,
in_memory: bool = False) -> Dataset:
"""Creates and returns a `Dataset` object by featurizing provided image files and labels/weights.
Parameters
----------
inputs: `Union[OneOrMany[str], Tuple[Any]]`
The inputs provided should be one of the following
- filename
- list of filenames
- Tuple (list of filenames, labels)
- Tuple (list of filenames, labels, weights)
Each file in a given list of filenames should either be of a supported
image format (.png, .tif only for now) or of a compressed folder of
image files (only .zip for now). If `labels` or `weights` are provided,
they must correspond to the sorted order of all filenames provided, with
one label/weight per file.
data_dir: str, optional (default None)
Directory to store featurized dataset.
shard_size: int, optional (default 8192)
Shard size when loading data.
        in_memory: bool, optional (default False)
If true, return in-memory NumpyDataset. Else return ImageDataset.
Returns
-------
ImageDataset or NumpyDataset or DiskDataset
- if `in_memory == False`, the return value is ImageDataset.
- if `in_memory == True` and `data_dir is None`, the return value is NumpyDataset.
- if `in_memory == True` and `data_dir is not None`, the return value is DiskDataset.
"""
labels, weights = None, None
if isinstance(inputs, tuple):
if len(inputs) == 1:
                input_files = inputs[0]
                if isinstance(input_files, str):
                    input_files = [input_files]
elif len(inputs) == 2:
input_files, labels = inputs
elif len(inputs) == 3:
input_files, labels, weights = inputs
else:
raise ValueError("Input must be a tuple of length 1, 2, or 3")
else:
input_files = inputs
if isinstance(input_files, str):
input_files = [input_files]
image_files = []
# Sometimes zip files contain directories within. Traverse directories
while len(input_files) > 0:
remainder = []
for input_file in input_files:
filename, extension = os.path.splitext(input_file)
extension = extension.lower()
# TODO(rbharath): Add support for more extensions
if os.path.isdir(input_file):
dirfiles = [
os.path.join(input_file, subfile)
for subfile in os.listdir(input_file)
]
remainder += dirfiles
elif extension == ".zip":
zip_dir = tempfile.mkdtemp()
zip_ref = zipfile.ZipFile(input_file, 'r')
zip_ref.extractall(path=zip_dir)
zip_ref.close()
zip_files = [
os.path.join(zip_dir, name)
for name in zip_ref.namelist()
]
for zip_file in zip_files:
_, extension = os.path.splitext(zip_file)
extension = extension.lower()
if extension in [".png", ".tif"]:
image_files.append(zip_file)
elif extension in [".png", ".tif"]:
image_files.append(input_file)
else:
raise ValueError("Unsupported file format")
input_files = remainder
# Sort image files
image_files = sorted(image_files)
if in_memory:
if data_dir is None:
return NumpyDataset(load_image_files(image_files),
y=labels,
w=weights,
ids=image_files)
else:
dataset = DiskDataset.from_numpy(load_image_files(image_files),
y=labels,
w=weights,
ids=image_files,
tasks=self.tasks,
data_dir=data_dir)
if shard_size is not None:
dataset.reshard(shard_size)
return dataset
else:
return ImageDataset(image_files,
y=labels,
w=weights,
ids=image_files)
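# Example usage sketch (not part of the library); the archive name, task name
# and labels below are hypothetical:
#
#   loader = ImageLoader(tasks=["cell_count"])
#   dataset = loader.create_dataset(("images.zip", np.array([1.0, 2.0, 3.0])))
#   # Returns an ImageDataset; pass in_memory=True for an in-memory dataset instead.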
class InMemoryLoader(DataLoader):
"""Facilitate Featurization of In-memory objects.
When featurizing a dataset, it's often the case that the initial set of
data (pre-featurization) fits handily within memory. (For example, perhaps
it fits within a column of a pandas DataFrame.) In this case, it would be
convenient to directly be able to featurize this column of data. However,
the process of featurization often generates large arrays which quickly eat
up available memory. This class provides convenient capabilities to process
such in-memory data by checkpointing generated features periodically to
disk.
Example
-------
Here's an example with only datapoints and no labels or weights.
>>> import deepchem as dc
>>> smiles = ["C", "CC", "CCC", "CCCC"]
>>> featurizer = dc.feat.CircularFingerprint()
>>> loader = dc.data.InMemoryLoader(tasks=["task1"], featurizer=featurizer)
>>> dataset = loader.create_dataset(smiles, shard_size=2)
>>> len(dataset)
4
Here's an example with both datapoints and labels
>>> import deepchem as dc
>>> smiles = ["C", "CC", "CCC", "CCCC"]
>>> labels = [1, 0, 1, 0]
>>> featurizer = dc.feat.CircularFingerprint()
>>> loader = dc.data.InMemoryLoader(tasks=["task1"], featurizer=featurizer)
>>> dataset = loader.create_dataset(zip(smiles, labels), shard_size=2)
>>> len(dataset)
4
Here's an example with datapoints, labels, weights and ids all provided.
>>> import deepchem as dc
>>> smiles = ["C", "CC", "CCC", "CCCC"]
>>> labels = [1, 0, 1, 0]
>>> weights = [1.5, 0, 1.5, 0]
>>> ids = ["C", "CC", "CCC", "CCCC"]
>>> featurizer = dc.feat.CircularFingerprint()
>>> loader = dc.data.InMemoryLoader(tasks=["task1"], featurizer=featurizer)
>>> dataset = loader.create_dataset(zip(smiles, labels, weights, ids), shard_size=2)
>>> len(dataset)
4
"""
def create_dataset(self,
inputs: Sequence[Any],
data_dir: Optional[str] = None,
shard_size: Optional[int] = 8192) -> DiskDataset:
"""Creates and returns a `Dataset` object by featurizing provided files.
Reads in `inputs` and uses `self.featurizer` to featurize the
data in these input files. For large files, automatically shards
into smaller chunks of `shard_size` datapoints for convenience.
Returns a `Dataset` object that contains the featurized dataset.
This implementation assumes that the helper methods `_get_shards`
and `_featurize_shard` are implemented and that each shard
returned by `_get_shards` is a pandas dataframe. You may choose
to reuse or override this method in your subclass implementations.
Parameters
----------
inputs: Sequence[Any]
List of inputs to process. Entries can be arbitrary objects so long as
they are understood by `self.featurizer`
data_dir: str, optional (default None)
Directory to store featurized dataset.
shard_size: int, optional (default 8192)
Number of examples stored in each shard.
Returns
-------
DiskDataset
A `DiskDataset` object containing a featurized representation of data
from `inputs`.
"""
logger.info("Loading raw samples now.")
logger.info("shard_size: %s" % str(shard_size))
if not isinstance(inputs, list):
try:
inputs = list(inputs)
except TypeError:
inputs = [inputs]
def shard_generator():
global_index = 0
for shard_num, shard in enumerate(
self._get_shards(inputs, shard_size)):
time1 = time.time()
X, y, w, ids = self._featurize_shard(shard, global_index)
global_index += len(shard)
time2 = time.time()
logger.info("TIMING: featurizing shard %d took %0.3f s" %
(shard_num, time2 - time1))
yield X, y, w, ids
return DiskDataset.create_dataset(shard_generator(), data_dir,
self.tasks)
def _get_shards(self, inputs: List,
shard_size: Optional[int]) -> Iterator[pd.DataFrame]:
"""Break up input into shards.
Parameters
----------
inputs: List
Each entry in this list must be of the form `(featurization_input,
label, weight, id)` or `(featurization_input, label, weight)` or
`(featurization_input, label)` or `featurization_input` for one
datapoint, where `featurization_input` is any input that is recognized
by `self.featurizer`.
shard_size: int, optional
The size of shard to generate.
Returns
-------
Iterator[pd.DataFrame]
Iterator which iterates over shards of data.
"""
current_shard: List = []
for i, datapoint in enumerate(inputs):
if i != 0 and shard_size is not None and i % shard_size == 0:
shard_data = current_shard
current_shard = []
yield shard_data
current_shard.append(datapoint)
yield current_shard
# FIXME: Signature of "_featurize_shard" incompatible with supertype "DataLoader"
def _featurize_shard( # type: ignore[override]
self, shard: List, global_index: int) -> Tuple[np.ndarray, np.ndarray,
np.ndarray, np.ndarray]:
"""Featurizes a shard of an input data.
Parameters
----------
shard: List
List each entry of which must be of the form `(featurization_input,
label, weight, id)` or `(featurization_input, label, weight)` or
`(featurization_input, label)` or `featurization_input` for one
datapoint, where `featurization_input` is any input that is recognized
by `self.featurizer`.
global_index: int
The starting index for this shard in the full set of provided inputs
Returns
------
Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]
The tuple is `(X, y, w, ids)`. All values are numpy arrays.
"""
features = []
labels = []
weights = []
ids = []
n_tasks = len(self.tasks)
for i, entry in enumerate(shard):
if not isinstance(entry, tuple):
entry = (entry,)
if len(entry) > 4:
raise ValueError(
"Entry is malformed and must be of length 1-4 containing featurization_input"
"and optionally label, weight, and id.")
if len(entry) == 4:
featurization_input, label, weight, entry_id = entry
elif len(entry) == 3:
featurization_input, label, weight = entry
entry_id = global_index + i
elif len(entry) == 2:
featurization_input, label = entry
weight = np.ones((n_tasks), np.float32)
entry_id = global_index + i
elif len(entry) == 1:
featurization_input = entry
label = np.zeros((n_tasks), np.float32)
weight = np.zeros((n_tasks), np.float32)
entry_id = global_index + i
feature = self.featurizer(featurization_input)
features.append(feature)
weights.append(weight)
labels.append(label)
ids.append(entry_id)
X = np.concatenate(features, axis=0)
return X, np.array(labels), np.array(weights), np.array(ids)
class DFTYamlLoader(DataLoader):
"""
Creates a `Dataset` object from YAML input files.
This class provides methods to load and featurize data from a YAML file.
    In this class, however, we focus only on a specific input format
    that can be used to perform Density Functional Theory calculations.
Examples
--------
>>> from deepchem.data.data_loader import DFTYamlLoader
>>> import deepchem as dc
>>> import pytest
>>> inputs = 'deepchem/data/tests/dftdata.yaml'
>>> data = DFTYamlLoader()
>>> output = data.create_dataset(inputs)
Notes
-----
Format (and example) for the YAML file:
- e_type : 'ae'
true_val : '0.09194410469'
systems : [{
'moldesc': 'Li 1.5070 0 0; H -1.5070 0 0',
'basis': '6-311++G(3df,3pd)'
}]
    Each entry in the YAML file must contain the three parameters: e_type,
    true_val and systems, in this particular order.
    One entry object may contain one or more systems.
    This data class does not support or require an additional featurizer,
    since the datapoints are featurized within its methods.
To read more about the parameters and their possible values please refer to
deepchem.feat.dft_data.
"""
def __init__(self):
"""
Initialize DFTYAML loader
"""
def create_dataset(self,
inputs: OneOrMany[Any],
data_dir: Optional[str] = None,
shard_size: Optional[int] = 1) -> Dataset:
"""
Creates and returns a `Dataset` object by featurizing provided YAML
files.
Parameters
----------
        inputs: OneOrMany[str]
List of YAML filenames.
data_dir: Optional[str], default None
Name of directory where featurized data is stored.
shard_size: int, optional (default 1)
Shard size when loading data.
Returns
-------
DiskDataset
A `DiskDataset` object containing a featurized representation
of data from `inputs`.
"""
def shard_generator():
entries = self._get_shards(inputs)
for i, shard in enumerate(entries):
X = np.array(self._featurize_shard(shard))
y = X[0].get_true_val()
w = np.array([X[0].get_weight()])
ids = np.array([i])
yield X, y, w, ids
return DiskDataset.create_dataset(shard_generator(), data_dir)
def _get_shards(self, inputs):
"""
Loads and divides the .yaml file into shards.
Parameters
----------
        inputs: str
.yaml file to be processed.
Returns
-------
data
list of dictionaries where each dictionary corresponds to one
shard and is then featurized into one entry object.
"""
with open(inputs) as f:
data = yaml.load(f, Loader=SafeLoader)
return (data)
def _featurize_shard(self, shard):
"""
Featurizes shards in the dataset
Parameters
----------
shard: dict
Dictionary containing values to initialize the DFTEntry object.
Returns
-------
x: featurized shard (DFTEntry objects)
"""
try:
e_type = shard['e_type']
if 'true_val' in shard.keys():
true_val = shard['true_val']
else:
true_val = '0.0'
systems = shard['systems']
except KeyError:
raise ValueError(
"Unknown key in yaml file. Please check format for correctness."
)
if 'weight' in shard.keys():
weight = shard['weight']
x = DFTEntry.create(e_type, true_val, systems, weight)
else:
x = DFTEntry.create(e_type, true_val, systems)
return [x]
<file_sep>import scipy.stats as st
import scipy
import unittest
class TestVoxelUtils(unittest.TestCase):
def test_gibrat(self):
"""
Test fix of function name 'gilbrat' to 'gibrat' of scipy.stats
https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.gilbrat.html
"""
assert isinstance(st.gibrat, scipy.stats._continuous_distns.gibrat_gen)
<file_sep>"""
Test topological fingerprints.
"""
import unittest
from deepchem.feat import CircularFingerprint
import numpy as np
class TestCircularFingerprint(unittest.TestCase):
"""
Tests for CircularFingerprint.
"""
def setUp(self):
"""
Set up tests.
"""
from rdkit import Chem
smiles = 'CC(=O)OC1=CC=CC=C1C(=O)O'
self.mol = Chem.MolFromSmiles(smiles)
def test_circular_fingerprints(self):
"""
Test CircularFingerprint.
"""
featurizer = CircularFingerprint()
rval = featurizer([self.mol])
assert rval.shape == (1, 2048)
# number of indices, where feature count is more than 1, should be 0
assert len(np.where(rval[0] > 1.0)[0]) == 0
def test_count_based_circular_fingerprints(self):
"""
Test CircularFingerprint with counts-based encoding
"""
featurizer = CircularFingerprint(is_counts_based=True)
rval = featurizer([self.mol])
assert rval.shape == (1, 2048)
# number of indices where feature count is more than 1
assert len(np.where(rval[0] > 1.0)[0]) == 8
def test_circular_fingerprints_with_1024(self):
"""
Test CircularFingerprint with 1024 size.
"""
featurizer = CircularFingerprint(size=1024)
rval = featurizer([self.mol])
assert rval.shape == (1, 1024)
def test_sparse_circular_fingerprints(self):
"""
Test CircularFingerprint with sparse encoding.
"""
featurizer = CircularFingerprint(sparse=True)
rval = featurizer([self.mol])
assert rval.shape == (1,)
assert isinstance(rval[0], dict)
assert len(rval[0])
def test_sparse_circular_fingerprints_with_smiles(self):
"""
Test CircularFingerprint with sparse encoding and SMILES for each
fragment.
"""
featurizer = CircularFingerprint(sparse=True, smiles=True)
rval = featurizer([self.mol])
assert rval.shape == (1,)
assert isinstance(rval[0], dict)
assert len(rval[0])
# check for separate count and SMILES entries for each fragment
for fragment_id, value in rval[0].items():
assert 'count' in value
assert 'smiles' in value
<file_sep># mol2vec implementation
In the recent mol2vec [paper](https://chemrxiv.org/articles/Mol2vec_Unsupervised_Machine_Learning_Approach_with_Chemical_Intuition/5513581), the authors Jaeger et al. treat the features returned by the RDKit Morgan fingerprint as "words" and a compound as a "sentence" to generate fixed-length embeddings. Here we reproduce 200-element embeddings by training on all SDF files downloaded from the PubChem compound database.
## Setup
Ensure that gensim is installed via:
```bash
pip install gensim
```
## Creating training corpus
First, download the PubChem compound SDF corpus by running:
```bash
bash ../pubchem_dataset/download_pubchem_ftp.sh
```
Note - the script assumes that a /media/data/pubchem directory exists for this large download (approx 19 GB as of November 2017)
Then generate the embeddings file via:
```bash
./train_mol2vec.sh
```
Then you can use these embeddings as a fixed-length alternative to fingerprints derived directly from RDKit. A full implementation as a featurizer for deepchem is WIP.
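As a rough sketch of how the resulting embeddings might be consumed (assuming the training script writes a word2vec-format `vec.txt` and that `gensim` is installed), one could load them with gensim's `KeyedVectors`:

```python
from gensim.models import KeyedVectors

# Load the Morgan-substructure "word" vectors produced by train_mol2vec.sh
# (vec.txt is assumed to be in word2vec text format).
model = KeyedVectors.load_word2vec_format("vec.txt", binary=False)

# Each Morgan fingerprint identifier maps to a 200-dimensional vector;
# a simple molecule embedding is the sum (or mean) of its substructure vectors.
print(model.vector_size)
```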
Example code for using the vec.txt file that is created by the above script can be found in eval_mol2vec_results.<file_sep>import os
import subprocess
def system_call(command):
""" Wrapper for system command call """
p = subprocess.Popen([command], stdout=subprocess.PIPE, shell=True)
return p.stdout.read()
def hhblits(dataset_path,
database=None,
data_dir=None,
evalue=0.001,
num_iterations=2,
num_threads=4):
"""
    Run an hhblits multiple sequence alignment search on a dataset. This function
requires the hhblits binary to be installed and in the path. This function
also requires a Hidden Markov Model reference database to be provided. Both can be
found here: https://github.com/soedinglab/hh-suite
The database should be in the deepchem data directory or specified as an argument.
To set the deepchem data directory, run this command in your environment:
export DEEPCHEM_DATA_DIR=<path to data directory>
Parameters
----------
dataset_path: str
Path to single sequence or multiple sequence alignment (MSA) dataset. Results will be saved in this directory.
database: str
Name of database to search against. Note this is not the path, but the name of the database.
data_dir: str
Path to database directory.
evalue: float
E-value cutoff.
num_iterations: int
Number of iterations.
num_threads: int
Number of threads.
Returns
-------
results: .a3m file
MSA file containing the results of the hhblits search.
results: .hhr file
hhsuite results file containing the results of the hhblits search.
Examples
--------
>>> from deepchem.utils.sequence_utils import hhblits
>>> msa_path = hhblits('test/data/example.fasta', database='example_db', data_dir='test/data/', evalue=0.001, num_iterations=2, num_threads=4)
"""
if data_dir is None:
data_dir = os.environ['DEEPCHEM_DATA_DIR']
if len(data_dir) == 0:
        raise ValueError(
            'hhblits requires a database. Please follow the instructions here '
            'to download a database: https://github.com/soedinglab/hh-suite/wiki#hh-suite-databases'
        )
_, dataset_file_type = os.path.splitext(dataset_path)
save_dir = os.path.dirname(os.path.realpath(dataset_path))
    if dataset_file_type in ('.fas', '.fasta'):
        command = 'hhblits ' + \
            ' -i ' + os.path.abspath(dataset_path) + \
            ' -d ' + os.path.join(data_dir, database) + \
            ' -oa3m ' + os.path.join(save_dir, 'results.a3m') + \
            ' -cpu ' + str(num_threads) + \
            ' -n ' + str(num_iterations) + \
            ' -e ' + str(evalue) + \
            ' -M first'
    elif dataset_file_type in ('.a3m', '.a2m', '.hmm'):
        command = 'hhblits ' + \
            ' -i ' + os.path.abspath(dataset_path) + \
            ' -d ' + os.path.join(data_dir, database) + \
            ' -oa3m ' + os.path.join(save_dir, 'results.a3m') + \
            ' -cpu ' + str(num_threads) + \
            ' -n ' + str(num_iterations) + \
            ' -e ' + str(evalue)
    else:
        raise ValueError('Unsupported file type')
system_call(command)
msa_path = os.path.join(save_dir, 'results.a3m')
return msa_path
def hhsearch(dataset_path,
database=None,
data_dir=None,
evalue=0.001,
num_iterations=2,
num_threads=4):
"""
    Run an hhsearch multiple sequence alignment search on a dataset. This function
    requires the hhsearch binary to be installed and in the path. This function
also requires a Hidden Markov Model reference database to be provided. Both can be
found here: https://github.com/soedinglab/hh-suite
The database should be in the deepchem data directory or specified as an argument.
To set the deepchem data directory, run this command in your environment:
export DEEPCHEM_DATA_DIR=<path to data directory>
Examples
--------
>>> from deepchem.utils.sequence_utils import hhsearch
>>> msa_path = hhsearch('test/data/example.fasta', database='example_db', data_dir='test/data/', evalue=0.001, num_iterations=2, num_threads=4)
Parameters
----------
dataset_path: str
Path to multiple sequence alignment dataset. Results will be saved in this directory.
database: str
Name of database to search against. Note this is not the path, but the name of the database.
data_dir: str
Path to database directory.
evalue: float
E-value cutoff.
num_iterations: int
Number of iterations.
num_threads: int
Number of threads.
Returns
-------
results: .a3m file
MSA file containing the results of the hhblits search.
results: .hhr file
hhsuite results file containing the results of the hhblits search.
"""
if data_dir is None:
data_dir = os.environ['DEEPCHEM_DATA_DIR']
if len(data_dir) == 0:
        raise ValueError(
            'hhsearch requires a database. Please follow the instructions here '
            'to download a database: https://github.com/soedinglab/hh-suite/wiki#hh-suite-databases'
        )
_, dataset_file_type = os.path.splitext(dataset_path)
save_dir = os.path.dirname(os.path.abspath(dataset_path))
    if dataset_file_type in ('.fas', '.fasta'):
        command = 'hhsearch ' + \
            ' -i ' + os.path.abspath(dataset_path) + \
            ' -d ' + os.path.join(data_dir, database) + \
            ' -oa3m ' + os.path.join(save_dir, 'results.a3m') + \
            ' -cpu ' + str(num_threads) + \
            ' -e ' + str(evalue) + \
            ' -M first'
    elif dataset_file_type in ('.a3m', '.a2m', '.hmm'):
        command = 'hhsearch ' + \
            ' -i ' + os.path.abspath(dataset_path) + \
            ' -d ' + os.path.join(data_dir, database) + \
            ' -oa3m ' + os.path.join(save_dir, 'results.a3m') + \
            ' -cpu ' + str(num_threads) + \
            ' -e ' + str(evalue)
    else:
        raise ValueError('Unsupported file type')
system_call(command)
msa_path = os.path.join(save_dir, 'results.a3m')
return msa_path
def MSA_to_dataset(msa_path):
"""
    Convert a multiple sequence alignment file (such as the .a3m output of hhblits or
    hhsearch) into a NumpyDataset whose X values are the aligned sequences, one residue
    per column, and whose ids are the sequence identifiers.
"""
from deepchem.data.datasets import NumpyDataset # NumpyDataset depends on utils, so imported here to prevent circular import
from Bio import SeqIO
with open(msa_path, 'r') as f:
ids = []
sequences = []
for record in SeqIO.parse(f, 'fasta'):
ids.append(record.id)
seq = []
for res in record:
seq.append(res)
sequences.append(seq)
dataset = NumpyDataset(X=sequences, ids=ids)
return dataset
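# --- Illustrative sketch (not part of the original module) ---------------------
# A minimal, self-contained demonstration of MSA_to_dataset on a tiny two-sequence
# alignment written to a temporary file. Biopython is assumed to be installed (it
# is already required by MSA_to_dataset itself); the sequences and file suffix
# below are made up for illustration.
def _example_msa_to_dataset():
    import tempfile
    fasta = ">seq1\nMK-AV\n>seq2\nMKTAV\n"
    with tempfile.NamedTemporaryFile('w', suffix='.a3m', delete=False) as f:
        f.write(fasta)
        path = f.name
    dataset = MSA_to_dataset(path)
    # Two aligned sequences of length 5 each; ids come from the FASTA headers.
    print(dataset.X.shape, list(dataset.ids))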
<file_sep>"""
Tests for molnet function
"""
import csv
import tempfile
import unittest
import numpy as np
import os
import pytest
import deepchem as dc
from deepchem.molnet.run_benchmark import run_benchmark
try:
import torch # noqa
has_pytorch = True
except:
has_pytorch = False
class TestMolnet(unittest.TestCase):
"""
Test basic function of molnet
"""
def setUp(self):
super(TestMolnet, self).setUp()
self.current_dir = os.path.dirname(os.path.abspath(__file__))
@pytest.mark.slow
@pytest.mark.tensorflow
def test_delaney_graphconvreg(self):
"""Tests molnet benchmarking on delaney with graphconvreg."""
datasets = ['delaney']
model = 'graphconvreg'
split = 'random'
out_path = tempfile.mkdtemp()
metric = [dc.metrics.Metric(dc.metrics.pearson_r2_score, np.mean)]
run_benchmark(datasets,
str(model),
metric=metric,
split=split,
out_path=out_path)
with open(os.path.join(out_path, 'results.csv'), newline='\n') as f:
reader = csv.reader(f)
for lastrow in reader:
pass
assert lastrow[-4] == 'valid'
assert float(lastrow[-3]) > 0.65
os.remove(os.path.join(out_path, 'results.csv'))
@pytest.mark.slow
@pytest.mark.torch
def test_qm7_multitask(self):
"""Tests molnet benchmarking on qm7 with multitask network."""
datasets = ['qm7']
model = 'tf_regression_ft'
split = 'random'
out_path = tempfile.mkdtemp()
metric = [dc.metrics.Metric(dc.metrics.pearson_r2_score, np.mean)]
run_benchmark(datasets,
str(model),
metric=metric,
split=split,
out_path=out_path)
with open(os.path.join(out_path, 'results.csv'), newline='\n') as f:
reader = csv.reader(f)
for lastrow in reader:
pass
assert lastrow[-4] == 'valid'
# TODO For this dataset and model, the R2-scores are less than 0.3.
# This has to be improved.
# See: https://github.com/deepchem/deepchem/issues/2776
assert float(lastrow[-3]) > 0.15
os.remove(os.path.join(out_path, 'results.csv'))
@pytest.mark.torch
def test_clintox_multitask(self):
"""Tests molnet benchmarking on clintox with multitask network."""
datasets = ['clintox']
model = 'tf'
split = 'random'
out_path = tempfile.mkdtemp()
metric = [dc.metrics.Metric(dc.metrics.roc_auc_score, np.mean)]
run_benchmark(datasets,
str(model),
metric=metric,
split=split,
out_path=out_path,
test=True)
with open(os.path.join(out_path, 'results.csv'), newline='\n') as f:
reader = csv.reader(f)
for lastrow in reader:
pass
assert lastrow[-4] == 'test'
assert float(lastrow[-3]) > 0.7
os.remove(os.path.join(out_path, 'results.csv'))
<file_sep>from deepchem.data import Dataset
class VocabularyBuilder():
"""Abstract class for building a vocabulary from a dataset."""
def build(self, dataset: Dataset):
"""Builds vocabulary from a dataset
Parameters
----------
dataset: Dataset
dataset to build vocabulary from.
"""
raise NotImplementedError()
@classmethod
def load(cls, fname: str):
"""Loads vocabulary from the specified file
Parameters
----------
fname: str
Path containing pre-build vocabulary.
"""
raise NotImplementedError()
def save(self, fname: str):
"""Dump vocabulary to the specified file.
Parameters
----------
fname: str
A json file fname to save vocabulary.
"""
raise NotImplementedError()
def extend(self, dataset: Dataset):
"""Extends vocabulary from a dataset
Parameters
----------
dataset: Dataset
dataset used for extending vocabulary
"""
raise NotImplementedError()
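# --- Illustrative sketch (not part of this module's public API) ----------------
# A minimal concrete VocabularyBuilder that treats dataset ids as the "tokens" of
# interest, only to show how the abstract methods fit together. It is not one of
# DeepChem's real vocabulary builders; the class name is made up.
class _IdVocabularyBuilder(VocabularyBuilder):

    def __init__(self):
        # Maps each token (here, a dataset id) to an integer index.
        self.vocab = {}

    def build(self, dataset: Dataset):
        for id_ in dataset.ids:
            self.vocab.setdefault(str(id_), len(self.vocab))

    def extend(self, dataset: Dataset):
        # Extending simply adds unseen ids on top of the existing mapping.
        self.build(dataset)

    def save(self, fname: str):
        import json
        with open(fname, 'w') as f:
            json.dump(self.vocab, f)

    @classmethod
    def load(cls, fname: str):
        import json
        builder = cls()
        with open(fname) as f:
            builder.vocab = json.load(f)
        return builder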
<file_sep>""" High-Order and Adaptive Graph Convolutional Network (HA-GCN) model, defined in https://arxiv.org/pdf/1706.09916"""
from __future__ import division
from __future__ import unicode_literals
from __future__ import print_function
__author__ = "<NAME>"
__license__ = "MIT"
import numpy as np
import tensorflow as tf
from deepchem.models.tensorgraph.tensor_graph import TensorGraph
from deepchem.models.tensorgraph.layers import Feature, Label, Weights
from deepchem.models.tensorgraph.layers import Concat
from deepchem.models.tensorgraph.layers import ReduceSum, Dense, ReLU, Flatten, Reshape
from deepchem.models.tensorgraph.layers import L2Loss, WeightedError
from deepchem.feat.mol_graphs import ConvMol
from hagcn_layers import KOrderGraphConv, AdaptiveFilter
class HAGCN(TensorGraph):
def __init__(self,
max_nodes,
num_node_features,
n_tasks=1,
k_max=1,
task_mode='graph',
combine_method='linear',
**kwargs):
"""
Parameters
----------
max_nodes: int
Maximum number of nodes (atoms) graphs in dataset can have
num_node_features: int
Number of features per node
atoms: list
List of atoms available across train, valid, test
k_max: int, optional
Largest k-hop neighborhood per atom
batch_size: int, optional
Batch size used
        task_mode: str, optional
            Whether the model is used for node-level, edge-level, or graph-level tasks
        combine_method: str, optional
            Method used to combine the inputs in the AdaptiveFilter layer
"""
if task_mode not in ['graph', 'node', 'edge']:
raise ValueError('task_mode must be one of graph, node, edge')
self.k_max = k_max
self.n_tasks = n_tasks
self.max_nodes = max_nodes
self.num_node_features = num_node_features
self.task_mode = task_mode
self.combine_method = combine_method
super(HAGCN, self).__init__(**kwargs)
self._build()
def _build(self):
self.A_tilda_k = list()
for k in range(1, self.k_max + 1):
self.A_tilda_k.append(
Feature(
name="graph_adjacency_{}".format(k),
dtype=tf.float32,
shape=[None, self.max_nodes, self.max_nodes]))
self.X = Feature(
name='atom_features',
dtype=tf.float32,
shape=[None, self.max_nodes, self.num_node_features])
graph_layers = list()
adaptive_filters = list()
for index, k in enumerate(range(1, self.k_max + 1)):
in_layers = [self.A_tilda_k[index], self.X]
adaptive_filters.append(
AdaptiveFilter(
batch_size=self.batch_size,
in_layers=in_layers,
num_nodes=self.max_nodes,
num_node_features=self.num_node_features,
combine_method=self.combine_method))
graph_layers.append(
KOrderGraphConv(
batch_size=self.batch_size,
in_layers=in_layers + [adaptive_filters[index]],
num_nodes=self.max_nodes,
num_node_features=self.num_node_features,
init='glorot_uniform'))
graph_features = Concat(in_layers=graph_layers, axis=2)
graph_features = ReLU(in_layers=[graph_features])
flattened = Flatten(in_layers=[graph_features])
dense1 = Dense(
in_layers=[flattened], out_channels=64, activation_fn=tf.nn.relu)
dense2 = Dense(
in_layers=[dense1], out_channels=16, activation_fn=tf.nn.relu)
dense3 = Dense(
in_layers=[dense2], out_channels=1 * self.n_tasks, activation_fn=None)
output = Reshape(in_layers=[dense3], shape=(-1, self.n_tasks, 1))
self.add_output(output)
label = Label(shape=(None, self.n_tasks, 1))
weights = Weights(shape=(None, self.n_tasks))
loss = ReduceSum(L2Loss(in_layers=[label, output]))
weighted_loss = WeightedError(in_layers=[loss, weights])
self.set_loss(weighted_loss)
    @staticmethod
    def pow_k(inputs, k=1):
        """Computes the kth power of inputs (the adjacency matrix) by repeated squaring."""
        if k == 0:
            # A^0 is the identity matrix.
            return np.eye(inputs.shape[-1])
        if k == 1:
            return inputs
        if k % 2 == 0:
            half = HAGCN.pow_k(inputs, k=k // 2)
            return np.matmul(half, half)
        else:
            # For odd k, peel off one factor; the remaining exponent k - 1 is even.
            return np.matmul(inputs, HAGCN.pow_k(inputs, k - 1))
def compute_adjacency_matrix(self, mol):
"""Computes the adjacency matrix for a mol."""
assert isinstance(mol, ConvMol)
canon_adj_lists = mol.get_adjacency_list()
adjacency = np.zeros((self.max_nodes, self.max_nodes))
for atom_idx, connections in enumerate(canon_adj_lists):
for neighbor_idx in connections:
adjacency[atom_idx, neighbor_idx] = 1
return adjacency
@staticmethod
def compute_a_tilda_k(inputs, k=1):
A_k = HAGCN.pow_k(inputs, k)
A_k_I = A_k + np.eye(inputs.shape[-1])
A_tilda_k = np.minimum(A_k_I, 1)
return A_tilda_k
def default_generator(self,
dataset,
epochs=1,
predict=False,
deterministic=True,
pad_batches=True):
for epoch in range(epochs):
for (X_b, y_b, w_b, ids_b) in dataset.iterbatches(
batch_size=self.batch_size,
deterministic=deterministic,
pad_batches=pad_batches):
feed_dict = {}
if w_b is not None and not predict:
feed_dict[self.task_weights[0]] = w_b
if y_b is not None:
feed_dict[self.labels[0]] = y_b
atom_features = list()
A_tilda_k = [[] for _ in range(1, self.k_max + 1)]
for im, mol in enumerate(X_b):
# Atom features with padding
num_atoms = mol.get_num_atoms()
atom_feats = mol.get_atom_features()
num_to_pad = self.max_nodes - num_atoms
if num_to_pad > 0:
to_pad = np.zeros((num_to_pad, self.num_node_features))
atom_feats = np.concatenate([atom_feats, to_pad], axis=0)
atom_features.append(atom_feats)
# A_tilda_k computation
adjacency = self.compute_adjacency_matrix(mol)
for i, k in enumerate(range(1, self.k_max + 1)):
A_tilda_k[i].append(HAGCN.compute_a_tilda_k(adjacency, k=k))
# Final feed_dict setup
atom_features = np.asarray(atom_features)
for i, k in enumerate(range(1, self.k_max + 1)):
val = np.asarray(A_tilda_k[i])
# assert val.shape == (self.batch_size, self.max_nodes, self.max_nodes)
feed_dict[self.A_tilda_k[i]] = val
#assert atom_features.shape == (self.batch_size, self.max_nodes,
# self.num_node_features)
feed_dict[self.X] = atom_features
yield feed_dict
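# --- Illustrative sketch (not part of the original contrib model) --------------
# Demonstrates the adjacency preprocessing used above on a tiny 3-node path graph
# (0-1-2), using only the static helpers so no TensorGraph setup is needed.
# A_tilda_k = min(A^k + I, 1) marks node pairs joined by a walk of length exactly
# k, plus self-connections from the identity.
def _example_a_tilda_k():
    adjacency = np.array([[0., 1., 0.],
                          [1., 0., 1.],
                          [0., 1., 0.]])
    for k in (1, 2):
        a_tilda = HAGCN.compute_a_tilda_k(adjacency, k=k)
        print('k=%d' % k)
        print(a_tilda)
    # For k=2 the two end nodes become linked (walk 0-1-2), while directly bonded
    # pairs drop out because no walk of length exactly 2 joins them.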
<file_sep>"""Advantage Actor-Critic (A2C) algorithm for reinforcement learning."""
import time
from collections.abc import Sequence as SequenceCollection
import numpy as np
import tensorflow as tf
from deepchem.models import KerasModel
from deepchem.models.optimizers import Adam
class A2CLossDiscrete(object):
"""This class computes the loss function for A2C with discrete action spaces."""
def __init__(self, value_weight, entropy_weight, action_prob_index,
value_index):
self.value_weight = value_weight
self.entropy_weight = entropy_weight
self.action_prob_index = action_prob_index
self.value_index = value_index
def __call__(self, outputs, labels, weights):
prob = outputs[self.action_prob_index]
value = outputs[self.value_index]
reward, advantage = weights
action = labels[0]
advantage = tf.expand_dims(advantage, axis=1)
prob = prob + np.finfo(np.float32).eps
log_prob = tf.math.log(prob)
policy_loss = -tf.reduce_mean(
advantage * tf.reduce_sum(action * log_prob, axis=1))
value_loss = tf.reduce_mean(tf.square(reward - value))
entropy = -tf.reduce_mean(tf.reduce_sum(prob * log_prob, axis=1))
return policy_loss + self.value_weight * value_loss - self.entropy_weight * entropy
class A2CLossContinuous(object):
"""This class computes the loss function for A2C with continuous action spaces.
Note
----
This class requires tensorflow-probability to be installed.
"""
def __init__(self, value_weight, entropy_weight, mean_index, std_index,
value_index):
try:
import tensorflow_probability as tfp # noqa: F401
except ModuleNotFoundError:
raise ValueError(
"This class requires tensorflow-probability to be installed.")
self.value_weight = value_weight
self.entropy_weight = entropy_weight
self.mean_index = mean_index
self.std_index = std_index
self.value_index = value_index
def __call__(self, outputs, labels, weights):
import tensorflow_probability as tfp
mean = outputs[self.mean_index]
std = outputs[self.std_index]
value = outputs[self.value_index]
reward, advantage = weights
action = labels[0]
distrib = tfp.distributions.Normal(mean, std)
reduce_axes = list(range(1, len(action.shape)))
log_prob = tf.reduce_sum(distrib.log_prob(action), reduce_axes)
policy_loss = -tf.reduce_mean(advantage * log_prob)
value_loss = tf.reduce_mean(tf.square(reward - value))
entropy = tf.reduce_mean(distrib.entropy())
return policy_loss + self.value_weight * value_loss - self.entropy_weight * entropy
class A2C(object):
"""
Implements the Advantage Actor-Critic (A2C) algorithm for reinforcement learning.
The algorithm is described in Mnih et al, "Asynchronous Methods for Deep Reinforcement Learning"
(https://arxiv.org/abs/1602.01783). This class supports environments with both discrete and
continuous action spaces. For discrete action spaces, the "action" argument passed to the
environment is an integer giving the index of the action to perform. The policy must output
a vector called "action_prob" giving the probability of taking each action. For continuous
action spaces, the action is an array where each element is chosen independently from a
normal distribution. The policy must output two arrays of the same shape: "action_mean"
gives the mean value for each element, and "action_std" gives the standard deviation for
each element. In either case, the policy must also output a scalar called "value" which
is an estimate of the value function for the current state.
The algorithm optimizes all outputs at once using a loss that is the sum of three terms:
1. The policy loss, which seeks to maximize the discounted reward for each action.
2. The value loss, which tries to make the value estimate match the actual discounted reward
that was attained at each step.
3. An entropy term to encourage exploration.
This class supports Generalized Advantage Estimation as described in Schulman et al., "High-Dimensional
Continuous Control Using Generalized Advantage Estimation" (https://arxiv.org/abs/1506.02438).
This is a method of trading off bias and variance in the advantage estimate, which can sometimes
    improve the rate of convergence. Use the advantage_lambda parameter to adjust the tradeoff.
This class supports Hindsight Experience Replay as described in Andrychowicz et al., "Hindsight
Experience Replay" (https://arxiv.org/abs/1707.01495). This is a method that can enormously
accelerate learning when rewards are very rare. It requires that the environment state contains
information about the goal the agent is trying to achieve. Each time it generates a rollout, it
processes that rollout twice: once using the actual goal the agent was pursuing while generating
it, and again using the final state of that rollout as the goal. This guarantees that half of
all rollouts processed will be ones that achieved their goals, and hence received a reward.
To use this feature, specify use_hindsight=True to the constructor. The environment must have
a method defined as follows:
def apply_hindsight(self, states, actions, goal):
...
return new_states, rewards
The method receives the list of states generated during the rollout, the action taken for each one,
and a new goal state. It should generate a new list of states that are identical to the input ones,
except specifying the new goal. It should return that list of states, and the rewards that would
have been received for taking the specified actions from those states. The output arrays may be
shorter than the input ones, if the modified rollout would have terminated sooner.
Note
----
Using this class on continuous action spaces requires that `tensorflow_probability` be installed.
"""
def __init__(self,
env,
policy,
max_rollout_length=20,
discount_factor=0.99,
advantage_lambda=0.98,
value_weight=1.0,
entropy_weight=0.01,
optimizer=None,
model_dir=None,
use_hindsight=False):
"""Create an object for optimizing a policy.
Parameters
----------
env: Environment
the Environment to interact with
policy: Policy
the Policy to optimize. It must have outputs with the names 'action_prob'
and 'value' (for discrete action spaces) or 'action_mean', 'action_std',
and 'value' (for continuous action spaces)
max_rollout_length: int
the maximum length of rollouts to generate
discount_factor: float
the discount factor to use when computing rewards
advantage_lambda: float
the parameter for trading bias vs. variance in Generalized Advantage Estimation
value_weight: float
a scale factor for the value loss term in the loss function
entropy_weight: float
a scale factor for the entropy term in the loss function
optimizer: Optimizer
the optimizer to use. If None, a default optimizer is used.
model_dir: str
the directory in which the model will be saved. If None, a temporary directory will be created.
use_hindsight: bool
if True, use Hindsight Experience Replay
"""
self._env = env
self._policy = policy
self.max_rollout_length = max_rollout_length
self.discount_factor = discount_factor
self.advantage_lambda = advantage_lambda
self.value_weight = value_weight
self.entropy_weight = entropy_weight
self.use_hindsight = use_hindsight
self._state_is_list = isinstance(env.state_shape[0], SequenceCollection)
if optimizer is None:
self._optimizer = Adam(learning_rate=0.001, beta1=0.9, beta2=0.999)
else:
self._optimizer = optimizer
output_names = policy.output_names
self.continuous = ('action_mean' in output_names)
self._value_index = output_names.index('value')
if self.continuous:
self._action_mean_index = output_names.index('action_mean')
self._action_std_index = output_names.index('action_std')
else:
self._action_prob_index = output_names.index('action_prob')
self._rnn_final_state_indices = [
i for i, n in enumerate(output_names) if n == 'rnn_state'
]
self._rnn_states = policy.rnn_initial_states
self._model = self._build_model(model_dir)
self._checkpoint = tf.train.Checkpoint()
self._checkpoint.save_counter # Ensure the variable has been created
self._checkpoint.listed = self._model.model.trainable_variables
def _build_model(self, model_dir):
"""Construct a KerasModel containing the policy and loss calculations."""
policy_model = self._policy.create_model()
if self.continuous:
loss = A2CLossContinuous(self.value_weight, self.entropy_weight,
self._action_mean_index,
self._action_std_index, self._value_index)
else:
loss = A2CLossDiscrete(self.value_weight, self.entropy_weight,
self._action_prob_index, self._value_index)
model = KerasModel(policy_model,
loss,
batch_size=self.max_rollout_length,
model_dir=model_dir,
optimize=self._optimizer)
model._ensure_built()
return model
def fit(self,
total_steps,
max_checkpoints_to_keep=5,
checkpoint_interval=600,
restore=False):
"""Train the policy.
Parameters
----------
total_steps: int
the total number of time steps to perform on the environment, across all rollouts
on all threads
max_checkpoints_to_keep: int
the maximum number of checkpoint files to keep. When this number is reached, older
files are deleted.
checkpoint_interval: float
the time interval at which to save checkpoints, measured in seconds
restore: bool
if True, restore the model from the most recent checkpoint and continue training
from there. If False, retrain the model from scratch.
"""
if restore:
self.restore()
manager = tf.train.CheckpointManager(self._checkpoint,
self._model.model_dir,
max_checkpoints_to_keep)
checkpoint_time = time.time()
self._env.reset()
rnn_states = self._policy.rnn_initial_states
# Training loop.
step_count = 0
while step_count < total_steps:
initial_rnn_states = rnn_states
states, actions, rewards, values, rnn_states = self._create_rollout(
rnn_states)
self._process_rollout(states, actions, rewards, values,
initial_rnn_states)
if self.use_hindsight:
self._process_rollout_with_hindsight(states, actions,
initial_rnn_states)
step_count += len(actions)
self._model._global_step.assign_add(len(actions))
# Do checkpointing.
if step_count >= total_steps or time.time(
) >= checkpoint_time + checkpoint_interval:
manager.save()
checkpoint_time = time.time()
def predict(self, state, use_saved_states=True, save_states=True):
"""Compute the policy's output predictions for a state.
If the policy involves recurrent layers, this method can preserve their internal
states between calls. Use the use_saved_states and save_states arguments to specify
how it should behave.
Parameters
----------
state: array or list of arrays
the state of the environment for which to generate predictions
use_saved_states: bool
if True, the states most recently saved by a previous call to predict() or select_action()
will be used as the initial states. If False, the internal states of all recurrent layers
will be set to the initial values defined by the policy before computing the predictions.
save_states: bool
if True, the internal states of all recurrent layers at the end of the calculation
will be saved, and any previously saved states will be discarded. If False, the
states at the end of the calculation will be discarded, and any previously saved
states will be kept.
Returns
-------
the array of action probabilities, and the estimated value function
"""
results = self._predict_outputs(state, use_saved_states, save_states)
if self.continuous:
return [
results[i] for i in (self._action_mean_index,
self._action_std_index, self._value_index)
]
else:
return [
results[i] for i in (self._action_prob_index, self._value_index)
]
def select_action(self,
state,
deterministic=False,
use_saved_states=True,
save_states=True):
"""Select an action to perform based on the environment's state.
If the policy involves recurrent layers, this method can preserve their internal
states between calls. Use the use_saved_states and save_states arguments to specify
how it should behave.
Parameters
----------
state: array or list of arrays
the state of the environment for which to select an action
deterministic: bool
if True, always return the best action (that is, the one with highest probability).
If False, randomly select an action based on the computed probabilities.
use_saved_states: bool
if True, the states most recently saved by a previous call to predict() or select_action()
will be used as the initial states. If False, the internal states of all recurrent layers
will be set to the initial values defined by the policy before computing the predictions.
save_states: bool
if True, the internal states of all recurrent layers at the end of the calculation
will be saved, and any previously saved states will be discarded. If False, the
states at the end of the calculation will be discarded, and any previously saved
states will be kept.
Returns
-------
the index of the selected action
"""
outputs = self._predict_outputs(state, use_saved_states, save_states)
return self._select_action_from_outputs(outputs, deterministic)
def restore(self):
"""Reload the model parameters from the most recent checkpoint file."""
last_checkpoint = tf.train.latest_checkpoint(self._model.model_dir)
if last_checkpoint is None:
raise ValueError('No checkpoint found')
self._checkpoint.restore(last_checkpoint)
def _predict_outputs(self, state, use_saved_states, save_states):
"""Compute a set of outputs for a state. """
if not self._state_is_list:
state = [state]
if use_saved_states:
state = state + list(self._rnn_states)
else:
state = state + list(self._policy.rnn_initial_states)
inputs = [np.expand_dims(s, axis=0) for s in state]
results = self._compute_model(inputs)
results = [r.numpy() for r in results]
if save_states:
self._rnn_states = [
np.squeeze(results[i], 0) for i in self._rnn_final_state_indices
]
return results
@tf.function(experimental_relax_shapes=True)
def _compute_model(self, inputs):
return self._model.model(inputs)
def _select_action_from_outputs(self, outputs, deterministic):
"""Given the policy outputs, select an action to perform."""
if self.continuous:
action_mean = outputs[self._action_mean_index]
action_std = outputs[self._action_std_index]
if deterministic:
return action_mean[0]
else:
return np.random.normal(action_mean[0], action_std[0])
else:
action_prob = outputs[self._action_prob_index]
if deterministic:
return action_prob.argmax()
else:
action_prob = action_prob.flatten()
return np.random.choice(np.arange(len(action_prob)),
p=action_prob)
def _create_rollout(self, rnn_states):
"""Generate a rollout."""
states = []
actions = []
rewards = []
values = []
# Generate the rollout.
for i in range(self.max_rollout_length):
if self._env.terminated:
break
state = self._env.state
states.append(state)
results = self._compute_model(
self._create_model_inputs(state, rnn_states))
results = [r.numpy() for r in results]
value = results[self._value_index]
rnn_states = [
np.squeeze(results[i], 0) for i in self._rnn_final_state_indices
]
action = self._select_action_from_outputs(results, False)
actions.append(action)
values.append(float(value))
rewards.append(self._env.step(action))
# Compute an estimate of the reward for the rest of the episode.
if not self._env.terminated:
results = self._compute_model(
self._create_model_inputs(self._env.state, rnn_states))
final_value = self.discount_factor * results[
self._value_index].numpy()[0]
else:
final_value = 0.0
values.append(final_value)
if self._env.terminated:
self._env.reset()
rnn_states = self._policy.rnn_initial_states
return states, actions, np.array(rewards, dtype=np.float32), np.array(
values, dtype=np.float32), rnn_states
def _process_rollout(self, states, actions, rewards, values,
initial_rnn_states):
"""Train the network based on a rollout."""
# Compute the discounted rewards and advantages.
discounted_rewards = rewards.copy()
discounted_rewards[-1] += values[-1]
advantages = rewards - values[:-1] + self.discount_factor * np.array(
values[1:])
for j in range(len(rewards) - 1, 0, -1):
discounted_rewards[
j - 1] += self.discount_factor * discounted_rewards[j]
advantages[
j -
1] += self.discount_factor * self.advantage_lambda * advantages[
j]
# Record the actions, converting to one-hot if necessary.
actions_matrix = []
if self.continuous:
for action in actions:
actions_matrix.append(action)
else:
n_actions = self._env.n_actions
for action in actions:
a = np.zeros(n_actions, np.float32)
a[action] = 1.0
actions_matrix.append(a)
actions_matrix = np.array(actions_matrix, dtype=np.float32)
# Rearrange the states into the proper set of arrays.
if self._state_is_list:
state_arrays = [[] for i in range(len(self._env.state_shape))]
for state in states:
for j in range(len(state)):
state_arrays[j].append(state[j])
else:
state_arrays = [states]
state_arrays = [np.stack(s) for s in state_arrays]
# Build the inputs and apply gradients.
inputs = state_arrays + [
np.expand_dims(s, axis=0) for s in initial_rnn_states
]
self._apply_gradients(inputs, actions_matrix, discounted_rewards,
advantages)
@tf.function(experimental_relax_shapes=True)
def _apply_gradients(self, inputs, actions_matrix, discounted_rewards,
advantages):
"""Compute the gradient of the loss function for a rollout and update the model."""
vars = self._model.model.trainable_variables
with tf.GradientTape() as tape:
outputs = self._model.model(inputs)
loss = self._model._loss_fn(outputs, [actions_matrix],
[discounted_rewards, advantages])
gradients = tape.gradient(loss, vars)
self._model._tf_optimizer.apply_gradients(zip(gradients, vars))
def _process_rollout_with_hindsight(self, states, actions,
initial_rnn_states):
"""Create a new rollout by applying hindsight to an existing one, then train the network."""
hindsight_states, rewards = self._env.apply_hindsight(
states, actions, states[-1])
if self._state_is_list:
state_arrays = [[] for i in range(len(self._env.state_shape))]
for state in hindsight_states:
for j in range(len(state)):
state_arrays[j].append(state[j])
else:
state_arrays = [hindsight_states]
state_arrays = [np.stack(s) for s in state_arrays]
inputs = state_arrays + [
np.expand_dims(s, axis=0) for s in initial_rnn_states
]
outputs = self._compute_model(inputs)
values = outputs[self._value_index].numpy()
values = np.append(values.flatten(), 0.0)
self._process_rollout(hindsight_states, actions[:len(rewards)],
np.array(rewards, dtype=np.float32),
np.array(values, dtype=np.float32),
initial_rnn_states)
def _create_model_inputs(self, state, rnn_states):
"""Create the inputs to the model for use during a rollout."""
if not self._state_is_list:
state = [state]
state = state + rnn_states
return [np.expand_dims(s, axis=0) for s in state]
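# --- Illustrative sketch (not part of the original module) ---------------------
# A numpy-only rendition of the reward processing performed in
# A2C._process_rollout above: rewards become discounted returns, and Generalized
# Advantage Estimation blends one-step advantages via advantage_lambda. The
# helper name is made up; `values` must hold one more entry than `rewards`
# (the bootstrap value appended at the end of the rollout).
def _example_gae(rewards, values, discount_factor=0.99, advantage_lambda=0.98):
    rewards = np.asarray(rewards, dtype=np.float32)
    values = np.asarray(values, dtype=np.float32)
    discounted = rewards.copy()
    discounted[-1] += values[-1]
    advantages = rewards - values[:-1] + discount_factor * values[1:]
    for j in range(len(rewards) - 1, 0, -1):
        discounted[j - 1] += discount_factor * discounted[j]
        advantages[j - 1] += discount_factor * advantage_lambda * advantages[j]
    return discounted, advantages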
<file_sep>try:
import torch
from deepchem.models.dft.nnxc import HybridXC
from deepchem.models.dft.dftxc import _construct_nn_model
from deepchem.models.dft.scf import XCNNSCF
from deepchem.feat.dft_data import DFTEntry
has_dqc = True
except ModuleNotFoundError:
has_dqc = False
import pytest
@pytest.mark.dqc
def test_pbe():
input_size = 3
hidden_size = 3
n_layers = 3
modeltype = 1
nnmodel = _construct_nn_model(input_size, hidden_size, n_layers,
modeltype).to(torch.double)
e_type = 'ae'
true_val = 0.237898
systems = [{
'moldesc': 'Be 0 0 0; H -2.5065 0 0; H 2.5065 0 0',
'basis': '6-311++G(3df,3pd)'
}, {
'moldesc': 'H 0 0 0',
'basis': '6-311++G(3df,3pd)',
'spin': '1',
'number': '2'
}, {
'moldesc': 'Be 0 0 0',
'basis': '6-311++G(3df,3pd)'
}]
entry = DFTEntry.create(e_type, true_val, systems)
hybridxc = HybridXC("gga_x_pbe", nnmodel, aweight0=0.0)
evl = XCNNSCF(hybridxc, entry)
qcs = []
for system in entry.get_systems():
qcs.append(evl.run(system))
output = qcs[0].energy()
expected_output = torch.tensor(-15.7262, dtype=torch.float64)
torch.testing.assert_close(output, expected_output, atol=1e-4, rtol=0)
<file_sep>"""
Convenience class that lets singletask models fit on multitask data.
"""
import os
import numpy as np
import shutil
import logging
from deepchem.models import Model
from deepchem.data import DiskDataset
from deepchem.trans import undo_transforms
logger = logging.getLogger(__name__)
class SingletaskToMultitask(Model):
"""Convenience class to let singletask models be fit on multitask data.
This wrapper class groups a set of singletask `SklearnModel` objects to
create a multitask model. This class exists primarily to facilitate
benchmarking.
Note
----
This current implementation is only functional for sklearn models.
"""
def __init__(self, tasks, model_builder, model_dir=None):
super(SingletaskToMultitask, self).__init__(self, model_dir=model_dir)
self.tasks = tasks
self.task_model_dirs = {}
self.model_builder = model_builder
logger.info("About to initialize singletask to multitask model")
for task in self.tasks:
task_model_dir = os.path.join(self.model_dir, str(task))
if not os.path.exists(task_model_dir):
os.makedirs(task_model_dir)
logger.info("Initializing directory for task %s" % task)
self.task_model_dirs[task] = task_model_dir
def _create_task_datasets(self, dataset):
"""Make directories to hold data for tasks"""
task_data_dirs = []
for task in self.tasks:
task_data_dir = os.path.join(self.model_dir, str(task) + "_data")
if os.path.exists(task_data_dir):
shutil.rmtree(task_data_dir)
os.makedirs(task_data_dir)
task_data_dirs.append(task_data_dir)
task_datasets = self._to_singletask(dataset, task_data_dirs)
for task, task_dataset in zip(self.tasks, task_datasets):
logger.info("Dataset for task %s has shape %s" %
(task, str(task_dataset.get_shape())))
return task_datasets
@staticmethod
def _to_singletask(dataset, task_dirs):
"""Transforms a multitask dataset to a collection of singletask datasets."""
tasks = dataset.get_task_names()
assert len(tasks) == len(task_dirs)
logger.info("Splitting multitask dataset into singletask datasets")
task_datasets = [
DiskDataset.create_dataset([], task_dirs[task_num], [task.item()])
for (task_num, task) in enumerate(tasks)
]
# task_metadata_rows = {task: [] for task in tasks}
for shard_num, (X, y, w, ids) in enumerate(dataset.itershards()):
logger.info("Processing shard %d" % shard_num)
for task_num, task in enumerate(tasks):
logger.info("\tTask %s" % task)
if len(w.shape) == 1:
w_task = w
elif w.shape[1] == 1:
w_task = w[:, 0]
else:
w_task = w[:, task_num]
y_task = y[:, task_num]
# Extract those datapoints which are present for this task
X_nonzero = X[w_task != 0]
num_datapoints = X_nonzero.shape[0]
y_nonzero = np.reshape(y_task[w_task != 0], (num_datapoints, 1))
w_nonzero = np.reshape(w_task[w_task != 0], (num_datapoints, 1))
ids_nonzero = ids[w_task != 0]
task_datasets[task_num].add_shard(X_nonzero, y_nonzero,
w_nonzero, ids_nonzero)
return task_datasets
def fit(self, dataset, **kwargs):
"""Updates all singletask models with new information.
Note
----
This current implementation is only functional for sklearn models.
"""
if not isinstance(dataset, DiskDataset):
raise ValueError(
'SingletaskToMultitask only works with DiskDatasets')
logger.info("About to create task-specific datasets")
task_datasets = self._create_task_datasets(dataset)
for ind, task in enumerate(self.tasks):
logger.info("Fitting model for task %s" % task)
task_model = self.model_builder(self.task_model_dirs[task])
task_model.fit(task_datasets[ind], **kwargs)
task_model.save()
def predict_on_batch(self, X):
"""Concatenates results from all singletask models."""
y_preds = []
for ind, task in enumerate(self.tasks):
task_model = self.model_builder(self.task_model_dirs[task])
task_model.reload()
y_preds.append(task_model.predict_on_batch(X))
y_pred = np.stack(y_preds, axis=1)
return y_pred
def predict(self, dataset, transformers=[]):
"""Prediction for multitask models."""
y_preds = []
for ind, task in enumerate(self.tasks):
task_model = self.model_builder(self.task_model_dirs[task])
task_model.reload()
y_preds.append(task_model.predict(dataset, []))
y_pred = np.stack(y_preds, axis=1)
y_pred = undo_transforms(y_pred, transformers)
return y_pred
def save(self):
"""Save all models
TODO(rbharath): Saving is not yet supported for this model.
"""
pass
def reload(self):
"""Load all models"""
# Loading is done on-the-fly
pass
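# --- Illustrative sketch (not part of the original module) ---------------------
# Shows the per-task masking performed inside _to_singletask: for each task, only
# datapoints with a nonzero weight are kept, and y/w are reshaped into
# single-column arrays. The toy arrays and helper name are made up.
def _example_task_masking():
    y = np.array([[1.0, 0.0], [0.0, 1.0], [1.0, 1.0]])
    w = np.array([[1.0, 0.0], [1.0, 1.0], [0.0, 1.0]])
    X = np.arange(6).reshape(3, 2)
    ids = np.array(['a', 'b', 'c'])
    for task_num in range(y.shape[1]):
        w_task = w[:, task_num]
        X_nonzero = X[w_task != 0]
        y_nonzero = y[w_task != 0, task_num].reshape(-1, 1)
        ids_nonzero = ids[w_task != 0]
        print('task', task_num, X_nonzero.shape, y_nonzero.ravel(), ids_nonzero)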
<file_sep>import deepchem as dc
import numpy as np
def test_setshard_with_X_y():
"""Test setsharding on a simple example"""
X = np.random.rand(10, 3)
y = np.random.rand(10,)
dataset = dc.data.DiskDataset.from_numpy(X, y)
X_shape, y_shape, _, _ = dataset.get_shape()
assert X_shape[0] == 10
assert y_shape[0] == 10
for i, (X, y, w, ids) in enumerate(dataset.itershards()):
X = X[1:]
y = y[1:]
w = w[1:]
ids = ids[1:]
dataset.set_shard(i, X, y, w, ids)
X_shape, y_shape, _, _ = dataset.get_shape()
assert X_shape[0] == 9
assert y_shape[0] == 9
<file_sep>"""
Topological fingerprints for macromolecular structures.
"""
import numpy as np
import logging
import itertools
from deepchem.utils.hash_utils import hash_ecfp
from deepchem.feat import ComplexFeaturizer
from deepchem.utils.rdkit_utils import load_complex
from deepchem.utils.hash_utils import vectorize
from deepchem.utils.voxel_utils import voxelize
from deepchem.utils.voxel_utils import convert_atom_to_voxel
from deepchem.utils.rdkit_utils import compute_all_ecfp
from deepchem.utils.rdkit_utils import compute_contact_centroid
from deepchem.utils.rdkit_utils import MoleculeLoadException
from deepchem.utils.geometry_utils import compute_pairwise_distances
from deepchem.utils.geometry_utils import subtract_centroid
from typing import Optional, Tuple, Dict
logger = logging.getLogger(__name__)
def featurize_contacts_ecfp(
frag1: Tuple,
frag2: Tuple,
pairwise_distances: Optional[np.ndarray] = None,
cutoff: float = 4.5,
ecfp_degree: int = 2) -> Tuple[Dict[int, str], Dict[int, str]]:
"""Computes ECFP dicts for pairwise interaction between two molecular fragments.
Parameters
----------
frag1: Tuple
A tuple of (coords, mol) returned by `load_molecule`.
frag2: Tuple
A tuple of (coords, mol) returned by `load_molecule`.
pairwise_distances: np.ndarray
Array of pairwise fragment-fragment distances (Angstroms)
cutoff: float
Cutoff distance for contact consideration
ecfp_degree: int
ECFP radius
Returns
-------
Tuple of dictionaries of ECFP contact fragments
"""
if pairwise_distances is None:
pairwise_distances = compute_pairwise_distances(frag1[0], frag2[0])
# contacts is of form (x_coords, y_coords), a tuple of 2 lists
contacts = np.nonzero((pairwise_distances < cutoff))
# contacts[0] is the x_coords, that is the frag1 atoms that have
# nonzero contact.
frag1_atoms = set([int(c) for c in contacts[0].tolist()])
# contacts[1] is the y_coords, the frag2 atoms with nonzero contacts
frag2_atoms = set([int(c) for c in contacts[1].tolist()])
frag1_ecfp_dict = compute_all_ecfp(frag1[1],
indices=frag1_atoms,
degree=ecfp_degree)
frag2_ecfp_dict = compute_all_ecfp(frag2[1],
indices=frag2_atoms,
degree=ecfp_degree)
return (frag1_ecfp_dict, frag2_ecfp_dict)
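# --- Illustrative sketch (not part of the original module) ---------------------
# Demonstrates the contact-extraction step used above with plain numpy: atoms of
# two fragments whose pairwise distance falls below the cutoff are collected as
# contact indices. The coordinates and helper name are made up.
def _example_contact_indices(cutoff=4.5):
    frag1_xyz = np.array([[0.0, 0.0, 0.0], [10.0, 0.0, 0.0]])
    frag2_xyz = np.array([[1.0, 0.0, 0.0], [20.0, 0.0, 0.0]])
    distances = compute_pairwise_distances(frag1_xyz, frag2_xyz)
    contacts = np.nonzero(distances < cutoff)
    frag1_atoms = sorted(set(int(c) for c in contacts[0]))
    frag2_atoms = sorted(set(int(c) for c in contacts[1]))
    # Only atom 0 of each fragment lies within 4.5 A of the other fragment.
    return frag1_atoms, frag2_atoms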
class ContactCircularFingerprint(ComplexFeaturizer):
"""Compute (Morgan) fingerprints near contact points of macromolecular complexes.
Given a macromolecular complex made up of multiple
constituent molecules, first compute the contact points where
atoms from different molecules come close to one another. For
atoms within "contact regions," compute radial "ECFP"
fragments which are sub-molecules centered at atoms in the
contact region.
For a macromolecular complex, returns a vector of shape
`(2*size,)`
"""
def __init__(self, cutoff: float = 4.5, radius: int = 2, size: int = 8):
"""
Parameters
----------
cutoff: float (default 4.5)
Distance cutoff in angstroms for molecules in complex.
radius: int, optional (default 2)
Fingerprint radius.
size: int, optional (default 8)
Length of generated bit vector.
"""
self.cutoff = cutoff
self.radius = radius
self.size = size
def _featurize(self, datapoint, **kwargs):
"""
Compute featurization for a molecular complex
Parameters
----------
datapoint: Tuple[str, str]
Filenames for molecule and protein.
"""
if 'complex' in kwargs:
datapoint = kwargs.get("complex")
raise DeprecationWarning(
'Complex is being phased out as a parameter, please pass "datapoint" instead.'
)
try:
fragments = load_complex(datapoint, add_hydrogens=False)
except MoleculeLoadException:
logger.warning(
"This molecule cannot be loaded by Rdkit. Returning None")
return None
pairwise_features = []
# We compute pairwise contact fingerprints
for (frag1, frag2) in itertools.combinations(fragments, 2):
# Get coordinates
distances = compute_pairwise_distances(frag1[0], frag2[0])
vector = [
vectorize(hash_ecfp, feature_dict=ecfp_dict, size=self.size) for
ecfp_dict in featurize_contacts_ecfp(frag1,
frag2,
distances,
cutoff=self.cutoff,
ecfp_degree=self.radius)
]
pairwise_features += vector
pairwise_features = np.concatenate(pairwise_features)
return pairwise_features
class ContactCircularVoxelizer(ComplexFeaturizer):
"""Computes ECFP fingerprints on a voxel grid.
Given a macromolecular complex made up of multiple
constituent molecules, first compute the contact points where
atoms from different molecules come close to one another. For
atoms within "contact regions," compute radial "ECFP"
fragments which are sub-molecules centered at atoms in the
    contact region. Localize these ECFP fingerprints at the voxel
in which they originated.
Featurizes a macromolecular complex into a tensor of shape
`(voxels_per_edge, voxels_per_edge, voxels_per_edge, size)` where
`voxels_per_edge = int(box_width/voxel_width)`. If `flatten==True`,
then returns a flattened version of this tensor of length
`size*voxels_per_edge**3`
"""
def __init__(self,
cutoff: float = 4.5,
radius: int = 2,
size: int = 8,
box_width: float = 16.0,
voxel_width: float = 1.0,
flatten: bool = False):
"""
Parameters
----------
cutoff: float (default 4.5)
Distance cutoff in angstroms for molecules in complex.
radius : int, optional (default 2)
Fingerprint radius.
size : int, optional (default 8)
Length of generated bit vector.
box_width: float, optional (default 16.0)
Size of a box in which voxel features are calculated. Box
is centered on a ligand centroid.
voxel_width: float, optional (default 1.0)
Size of a 3D voxel in a grid.
flatten: bool, optional (default False)
If True, then returns a flat feature vector rather than voxel grid. This
feature vector is constructed by flattening the usual voxel grid.
"""
self.cutoff = cutoff
self.radius = radius
self.size = size
self.box_width = box_width
self.voxel_width = voxel_width
self.voxels_per_edge = int(self.box_width / self.voxel_width)
self.flatten = flatten
def _featurize(self, complex):
"""
Compute featurization for a molecular complex
Parameters
----------
complex: Tuple[str, str]
Filenames for molecule and protein.
"""
try:
fragments = load_complex(complex, add_hydrogens=False)
except MoleculeLoadException:
logger.warning(
"This molecule cannot be loaded by Rdkit. Returning None")
return None
pairwise_features = []
# We compute pairwise contact fingerprints
centroid = compute_contact_centroid(fragments, cutoff=self.cutoff)
for (frag1, frag2) in itertools.combinations(fragments, 2):
distances = compute_pairwise_distances(frag1[0], frag2[0])
frag1_xyz = subtract_centroid(frag1[0], centroid)
frag2_xyz = subtract_centroid(frag2[0], centroid)
xyzs = [frag1_xyz, frag2_xyz]
pairwise_features.append(
sum([
voxelize(convert_atom_to_voxel,
xyz,
self.box_width,
self.voxel_width,
hash_function=hash_ecfp,
feature_dict=ecfp_dict,
nb_channel=self.size)
for xyz, ecfp_dict in zip(
xyzs,
featurize_contacts_ecfp(frag1,
frag2,
distances,
cutoff=self.cutoff,
ecfp_degree=self.radius))
]))
if self.flatten:
return np.concatenate(
[features.flatten() for features in pairwise_features])
else:
# Features are of shape (voxels_per_edge, voxels_per_edge,
# voxels_per_edge, num_feat) so we should concatenate on the last
# axis.
return np.concatenate(pairwise_features, axis=-1)
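# --- Illustrative sketch (not part of the original module) ---------------------
# The voxel-grid shape arithmetic used by ContactCircularVoxelizer above. With
# the default box_width=16.0, voxel_width=1.0 and size=8, the unflattened tensor
# per fragment pair is (16, 16, 16, 8) and its flattened length is 8 * 16**3.
# The helper name is made up.
def _example_voxel_shape(box_width=16.0, voxel_width=1.0, size=8):
    voxels_per_edge = int(box_width / voxel_width)
    grid_shape = (voxels_per_edge, voxels_per_edge, voxels_per_edge, size)
    flat_length = size * voxels_per_edge**3
    return grid_shape, flat_length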
def compute_all_sybyl(mol, indices=None):
"""Computes Sybyl atom types for atoms in molecule."""
raise NotImplementedError("This function is not implemented yet")
def featurize_binding_pocket_sybyl(protein_xyz,
protein,
ligand_xyz,
ligand,
pairwise_distances=None,
cutoff=7.0):
"""Computes Sybyl dicts for ligand and binding pocket of the protein.
Parameters
----------
protein_xyz: np.ndarray
Of shape (N_protein_atoms, 3)
protein: Rdkit Molecule
Contains more metadata.
ligand_xyz: np.ndarray
Of shape (N_ligand_atoms, 3)
ligand: Rdkit Molecule
Contains more metadata
pairwise_distances: np.ndarray
Array of pairwise protein-ligand distances (Angstroms)
cutoff: float
Cutoff distance for contact consideration.
"""
if pairwise_distances is None:
pairwise_distances = compute_pairwise_distances(protein_xyz, ligand_xyz)
contacts = np.nonzero((pairwise_distances < cutoff))
protein_atoms = set([int(c) for c in contacts[0].tolist()])
protein_sybyl_dict = compute_all_sybyl(protein, indices=protein_atoms)
ligand_sybyl_dict = compute_all_sybyl(ligand)
return (protein_sybyl_dict, ligand_sybyl_dict)
<file_sep>import unittest
from deepchem.feat import MATFeaturizer
import numpy as np
class TestMATFeaturizer(unittest.TestCase):
"""
Test MATFeaturizer.
"""
def setUp(self):
"""
Set up tests.
"""
from rdkit import Chem
smiles = 'CC'
self.mol = Chem.MolFromSmiles(smiles)
def test_mat_featurizer(self):
"""
Test featurizer.py
"""
featurizer = MATFeaturizer()
out = featurizer.featurize(self.mol)
assert isinstance(out, np.ndarray)
assert (out[0].node_features.shape == (3, 36))
assert (out[0].adjacency_matrix.shape == (3, 3))
assert (out[0].distance_matrix.shape == (3, 3))
expected_node_features = np.array(
[[
1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 0., 0.
],
[
0., 0., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 1., 0., 0.,
0., 0., 0., 0., 0., 1., 0., 0., 0., 0., 0., 0., 1., 0., 0., 0.,
0., 0., 0., 0.
],
[
0., 0., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 1., 0., 0.,
0., 0., 0., 0., 0., 1., 0., 0., 0., 0., 0., 0., 1., 0., 0., 0.,
0., 0., 0., 0.
]])
expected_adj_matrix = np.array([[0., 0., 0.], [0., 0., 1.],
[0., 1., 0.]])
expected_dist_matrix = np.array([[1.e+06, 1.e+06, 1.e+06],
[1.e+06, 0.e+00, 1.e+00],
[1.e+06, 1.e+00, 0.e+00]])
assert (np.array_equal(out[0].node_features, expected_node_features))
assert (np.array_equal(out[0].adjacency_matrix, expected_adj_matrix))
assert (np.array_equal(out[0].distance_matrix, expected_dist_matrix))
<file_sep>import unittest
import deepchem as dc
import numpy as np
class TestDummyFeaturizer(unittest.TestCase):
"""
Test for DummyFeaturizer.
"""
def test_featurize(self):
"""
Test the featurize method on an array of inputs.
"""
input_array = np.array(
[[
"N#C[S-].O=C(CBr)c1ccc(C(F)(F)F)cc1>CCO.[K+]",
"N#CSCC(=O)c1ccc(C(F)(F)F)cc1"
],
[
"C1COCCN1.FCC(Br)c1cccc(Br)n1>CCN(C(C)C)C(C)C.CN(C)C=O.O",
"FCC(c1cccc(Br)n1)N1CCOCC1"
]])
featurizer = dc.feat.DummyFeaturizer()
out = featurizer.featurize(input_array)
assert isinstance(out, np.ndarray)
assert (out.shape == input_array.shape)
<file_sep>import sys
import time
from setuptools import setup, find_packages
if '--release' in sys.argv:
IS_RELEASE = True
sys.argv.remove('--release')
else:
# Build a nightly package by default.
IS_RELEASE = False
# Environment-specific dependencies.
extras = {
'jax': ['jax', 'jaxlib', 'dm-haiku', 'optax'],
'torch': ['torch', 'torchvision', 'pytorch-lightning', 'dgl', 'dgllife'],
'tensorflow': ['tensorflow', 'tensorflow_probability', 'tensorflow_addons'],
'dqc': ['dqc', 'xitorch', 'torch', 'pylibxc2']
}
# get the version from deepchem/__init__.py
def _get_version():
with open('deepchem/__init__.py') as fp:
for line in fp:
if line.startswith('__version__'):
g = {}
exec(line, g)
base = g['__version__']
if IS_RELEASE:
return base
else:
                    # nightly version: .dev followed by a YYYYMMDDHHMMSS timestamp
if base.endswith('.dev') is False:
# Force to add `.dev` if `--release` option isn't passed when building
base += '.dev'
return base + time.strftime("%Y%m%d%H%M%S")
raise ValueError('`__version__` not defined in `deepchem/__init__.py`')
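# Illustrative note (not part of the original script): with IS_RELEASE False and,
# say, `__version__ = '2.7.1.dev'` in deepchem/__init__.py (a made-up base
# version), _get_version() returns a nightly string such as
# '2.7.1.dev20230101120000' (timestamp shown is only an example). With the
# `--release` flag it returns the declared base version string unchanged.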
setup(name='deepchem',
version=_get_version(),
url='https://github.com/deepchem/deepchem',
maintainer='DeepChem contributors',
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Console',
'Intended Audience :: Developers',
'Intended Audience :: Information Technology',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Programming Language :: Python :: 3.10',
],
license='MIT',
description='Deep learning models for drug discovery, \
quantum chemistry, and the life sciences.',
keywords=[
'deepchem',
'chemistry',
'biology',
'materials-science',
'life-science',
'drug-discovery',
],
packages=find_packages(exclude=["*.tests"]),
project_urls={
'Documentation': 'https://deepchem.readthedocs.io/en/latest/',
'Source': 'https://github.com/deepchem/deepchem',
},
install_requires=[
'joblib',
'numpy>=1.21',
'pandas',
'scikit-learn',
'scipy>=1.10.1',
'rdkit',
],
extras_require=extras,
python_requires='>=3.7,<3.11')
<file_sep>"""This module adds utilities for coordinate boxes"""
from typing import List, Sequence, Tuple
import numpy as np
from scipy.spatial import ConvexHull
class CoordinateBox(object):
"""A coordinate box that represents a block in space.
Molecular complexes are typically represented with atoms as
coordinate points. Each complex is naturally associated with a
number of different box regions. For example, the bounding box is a
box that contains all atoms in the molecular complex. A binding
pocket box is a box that focuses in on a binding region of a protein
    to a ligand. An interface box is the region in which two proteins
have a bulk interaction.
The `CoordinateBox` class is designed to represent such regions of
space. It consists of the coordinates of the box, and the collection
of atoms that live in this box alongside their coordinates.
"""
def __init__(self, x_range: Tuple[float, float],
y_range: Tuple[float, float], z_range: Tuple[float, float]):
"""Initialize this box.
Parameters
----------
x_range: Tuple[float, float]
A tuple of `(x_min, x_max)` with max and min x-coordinates.
y_range: Tuple[float, float]
A tuple of `(y_min, y_max)` with max and min y-coordinates.
z_range: Tuple[float, float]
A tuple of `(z_min, z_max)` with max and min z-coordinates.
Raises
------
`ValueError` if this interval is malformed
"""
if not isinstance(x_range, tuple) or not len(x_range) == 2:
raise ValueError("x_range must be a tuple of length 2")
else:
x_min, x_max = x_range
if not x_min <= x_max:
raise ValueError("x minimum must be <= x maximum")
if not isinstance(y_range, tuple) or not len(y_range) == 2:
raise ValueError("y_range must be a tuple of length 2")
else:
y_min, y_max = y_range
if not y_min <= y_max:
raise ValueError("y minimum must be <= y maximum")
if not isinstance(z_range, tuple) or not len(z_range) == 2:
raise ValueError("z_range must be a tuple of length 2")
else:
z_min, z_max = z_range
if not z_min <= z_max:
raise ValueError("z minimum must be <= z maximum")
self.x_range = x_range
self.y_range = y_range
self.z_range = z_range
def __repr__(self):
"""Create a string representation of this box"""
x_str = str(self.x_range)
y_str = str(self.y_range)
z_str = str(self.z_range)
return "Box[x_bounds=%s, y_bounds=%s, z_bounds=%s]" % (x_str, y_str,
z_str)
def __str__(self):
"""Create a string representation of this box."""
return self.__repr__()
def __contains__(self, point: Sequence[float]) -> bool:
"""Check whether a point is in this box.
Parameters
----------
point: Sequence[float]
3-tuple or list of length 3 or np.ndarray of shape `(3,)`.
The `(x, y, z)` coordinates of a point in space.
Returns
-------
bool
`True` if `other` is contained in this box.
"""
(x_min, x_max) = self.x_range
(y_min, y_max) = self.y_range
(z_min, z_max) = self.z_range
x_cont = (x_min <= point[0] and point[0] <= x_max)
y_cont = (y_min <= point[1] and point[1] <= y_max)
z_cont = (z_min <= point[2] and point[2] <= z_max)
return x_cont and y_cont and z_cont
# FIXME: Argument 1 of "__eq__" is incompatible with supertype "object"
def __eq__(self, other: "CoordinateBox") -> bool: # type: ignore
"""Compare two boxes to see if they're equal.
Parameters
----------
other: CoordinateBox
Compare this coordinate box to the other one.
Returns
-------
bool
            `True` if all bounds match.
Raises
------
`ValueError` if attempting to compare to something that isn't a
`CoordinateBox`.
"""
if not isinstance(other, CoordinateBox):
raise ValueError("Can only compare to another box.")
return (self.x_range == other.x_range and
self.y_range == other.y_range and self.z_range == other.z_range)
def __hash__(self) -> int:
"""Implement hashing function for this box.
Uses the default `hash` on `self.x_range, self.y_range,
self.z_range`.
Returns
-------
int
Unique integer
"""
return hash((self.x_range, self.y_range, self.z_range))
def center(self) -> Tuple[float, float, float]:
"""Computes the center of this box.
Returns
-------
Tuple[float, float, float]
`(x, y, z)` the coordinates of the center of the box.
Examples
--------
>>> box = CoordinateBox((0, 1), (0, 1), (0, 1))
>>> box.center()
(0.5, 0.5, 0.5)
"""
x_min, x_max = self.x_range
y_min, y_max = self.y_range
z_min, z_max = self.z_range
return (x_min + (x_max - x_min) / 2, y_min + (y_max - y_min) / 2,
z_min + (z_max - z_min) / 2)
def volume(self) -> float:
"""Computes and returns the volume of this box.
Returns
-------
float
The volume of this box. Can be 0 if box is empty
Examples
--------
>>> box = CoordinateBox((0, 1), (0, 1), (0, 1))
>>> box.volume()
1
"""
x_min, x_max = self.x_range
y_min, y_max = self.y_range
z_min, z_max = self.z_range
return (x_max - x_min) * (y_max - y_min) * (z_max - z_min)
def contains(self, other: "CoordinateBox") -> bool:
"""Test whether this box contains another.
This method checks whether `other` is contained in this box.
Parameters
----------
other: CoordinateBox
The box to check is contained in this box.
Returns
-------
bool
`True` if `other` is contained in this box.
Raises
------
`ValueError` if `not isinstance(other, CoordinateBox)`.
"""
if not isinstance(other, CoordinateBox):
raise ValueError("other must be a CoordinateBox")
other_x_min, other_x_max = other.x_range
other_y_min, other_y_max = other.y_range
other_z_min, other_z_max = other.z_range
self_x_min, self_x_max = self.x_range
self_y_min, self_y_max = self.y_range
self_z_min, self_z_max = self.z_range
return (self_x_min <= other_x_min and other_x_max <= self_x_max and
self_y_min <= other_y_min and other_y_max <= self_y_max and
self_z_min <= other_z_min and other_z_max <= self_z_max)
def intersect_interval(interval1: Tuple[float, float],
interval2: Tuple[float, float]) -> Tuple[float, float]:
"""Computes the intersection of two intervals.
Parameters
----------
interval1: Tuple[float, float]
Should be `(x1_min, x1_max)`
interval2: Tuple[float, float]
Should be `(x2_min, x2_max)`
Returns
-------
x_intersect: Tuple[float, float]
Should be the intersection. If the intersection is empty returns
`(0, 0)` to represent the empty set. Otherwise is `(max(x1_min,
x2_min), min(x1_max, x2_max))`.
"""
x1_min, x1_max = interval1
x2_min, x2_max = interval2
if x1_max < x2_min:
# If interval1 < interval2 entirely
return (0, 0)
elif x2_max < x1_min:
# If interval2 < interval1 entirely
return (0, 0)
x_min = max(x1_min, x2_min)
x_max = min(x1_max, x2_max)
return (x_min, x_max)
def intersection(box1: CoordinateBox, box2: CoordinateBox) -> CoordinateBox:
"""Computes the intersection box of provided boxes.
Parameters
----------
box1: CoordinateBox
First `CoordinateBox`
box2: CoordinateBox
Another `CoordinateBox` to intersect first one with.
Returns
-------
CoordinateBox
A `CoordinateBox` containing the intersection. If the intersection is empty,
returns the box with 0 bounds.
"""
x_intersection = intersect_interval(box1.x_range, box2.x_range)
y_intersection = intersect_interval(box1.y_range, box2.y_range)
z_intersection = intersect_interval(box1.z_range, box2.z_range)
return CoordinateBox(x_intersection, y_intersection, z_intersection)
def union(box1: CoordinateBox, box2: CoordinateBox) -> CoordinateBox:
"""Merges provided boxes to find the smallest union box.
This method merges the two provided boxes.
Parameters
----------
box1: CoordinateBox
First box to merge in
box2: CoordinateBox
Second box to merge into this box
Returns
-------
CoordinateBox
Smallest `CoordinateBox` that contains both `box1` and `box2`
"""
x_min = min(box1.x_range[0], box2.x_range[0])
y_min = min(box1.y_range[0], box2.y_range[0])
z_min = min(box1.z_range[0], box2.z_range[0])
x_max = max(box1.x_range[1], box2.x_range[1])
y_max = max(box1.y_range[1], box2.y_range[1])
z_max = max(box1.z_range[1], box2.z_range[1])
return CoordinateBox((x_min, x_max), (y_min, y_max), (z_min, z_max))
def merge_overlapping_boxes(boxes: List[CoordinateBox],
threshold: float = 0.8) -> List[CoordinateBox]:
"""Merge boxes which have an overlap greater than threshold.
Parameters
----------
boxes: list[CoordinateBox]
A list of `CoordinateBox` objects.
threshold: float, default 0.8
The volume fraction of the boxes that must overlap for them to be
merged together.
Returns
-------
List[CoordinateBox]
List[CoordinateBox] of merged boxes. This list will have length less
than or equal to the length of `boxes`.
"""
outputs: List[CoordinateBox] = []
for box in boxes:
for other in boxes:
if box == other:
continue
intersect_box = intersection(box, other)
if (intersect_box.volume() >= threshold * box.volume() or
intersect_box.volume() >= threshold * other.volume()):
box = union(box, other)
unique_box = True
for output in outputs:
if output.contains(box):
unique_box = False
if unique_box:
outputs.append(box)
return outputs
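# --- Illustrative sketch (not part of the original module) ---------------------
# Exercises the box algebra defined above on two heavily overlapping boxes:
# their intersection, their union, and the result of merging them with the
# default 0.8 volume-overlap threshold. The helper name is made up.
def _example_box_algebra():
    box1 = CoordinateBox((0.0, 1.0), (0.0, 1.0), (0.0, 1.0))
    box2 = CoordinateBox((0.0, 1.0), (0.0, 1.0), (0.1, 1.1))
    overlap = intersection(box1, box2)
    merged = union(box1, box2)
    reduced = merge_overlapping_boxes([box1, box2])
    # The overlap covers 90% of each box's volume, so the two boxes collapse
    # into a single merged box.
    print(overlap.volume(), merged.volume(), len(reduced))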
def get_face_boxes(coords: np.ndarray, pad: float = 5.0) -> List[CoordinateBox]:
"""For each face of the convex hull, compute a coordinate box around it.
The convex hull of a macromolecule will have a series of triangular
faces. For each such triangular face, we construct a bounding box
around this triangle. Think of this box as attempting to capture
some binding interaction region whose exterior is controlled by the
box. Note that this box will likely be a crude approximation, but
the advantage of this technique is that it only uses simple geometry
to provide some basic biological insight into the molecule at hand.
The `pad` parameter is used to control the amount of padding around
the face to be used for the coordinate box.
Parameters
----------
coords: np.ndarray
A numpy array of shape `(N, 3)`. The coordinates of a molecule.
pad: float, optional (default 5.0)
The number of angstroms to pad.
Returns
-------
boxes: List[CoordinateBox]
List of `CoordinateBox`
Examples
--------
>>> coords = np.array([[0, 0, 0], [1, 0, 0], [0, 1, 0], [0, 0, 1]])
>>> boxes = get_face_boxes(coords, pad=5)
"""
hull = ConvexHull(coords)
boxes = []
# Each triangle in the simplices is a set of 3 atoms from
# coordinates which forms the vertices of an exterior triangle on
# the convex hull of the macromolecule.
for triangle in hull.simplices:
# Points is the set of atom coordinates that make up this
# triangular face on the convex hull
points = np.array(
[coords[triangle[0]], coords[triangle[1]], coords[triangle[2]]])
# Let's extract x/y/z coords for this face
x_coords = points[:, 0]
y_coords = points[:, 1]
z_coords = points[:, 2]
# Let's compute min/max points
x_min, x_max = np.amin(x_coords), np.amax(x_coords)
x_min, x_max = int(np.floor(x_min)) - pad, int(np.ceil(x_max)) + pad
x_bounds = (x_min, x_max)
y_min, y_max = np.amin(y_coords), np.amax(y_coords)
y_min, y_max = int(np.floor(y_min)) - pad, int(np.ceil(y_max)) + pad
y_bounds = (y_min, y_max)
z_min, z_max = np.amin(z_coords), np.amax(z_coords)
z_min, z_max = int(np.floor(z_min)) - pad, int(np.ceil(z_max)) + pad
z_bounds = (z_min, z_max)
box = CoordinateBox(x_bounds, y_bounds, z_bounds)
boxes.append(box)
return boxes
<file_sep>import pytest
try:
import torch
except ModuleNotFoundError:
pass
@pytest.mark.torch
def testGroverEmbedding(grover_graph_attributes):
from deepchem.models.torch_models.grover_layers import GroverEmbedding
f_atoms, f_bonds, a2b, b2a, b2revb, a2a, a_scope, b_scope, fg_labels, additional_features = grover_graph_attributes
hidden_size = 8
n_atoms, n_bonds = f_atoms.shape[0], f_bonds.shape[0]
node_fdim, edge_fdim = f_atoms.shape[1], f_bonds.shape[1]
layer = GroverEmbedding(hidden_size=hidden_size,
edge_fdim=edge_fdim,
node_fdim=node_fdim)
output = layer([f_atoms, f_bonds, a2b, b2a, b2revb, a_scope, b_scope, a2a])
assert output['atom_from_atom'].shape == (n_atoms, hidden_size)
assert output['bond_from_atom'].shape == (n_bonds, hidden_size)
assert output['atom_from_bond'].shape == (n_atoms, hidden_size)
assert output['bond_from_bond'].shape == (n_bonds, hidden_size)
@pytest.mark.torch
def testGroverBondVocabPredictor():
from deepchem.models.torch_models.grover_layers import GroverBondVocabPredictor
num_bonds = 20
in_features, vocab_size = 16, 10
layer = GroverBondVocabPredictor(vocab_size, in_features)
embedding = torch.randn(num_bonds * 2, in_features)
result = layer(embedding)
assert result.shape == (num_bonds, vocab_size)
@pytest.mark.torch
def testGroverAtomVocabPredictor():
from deepchem.models.torch_models.grover_layers import GroverAtomVocabPredictor
num_atoms, in_features, vocab_size = 30, 16, 10
layer = GroverAtomVocabPredictor(vocab_size, in_features)
embedding = torch.randn(num_atoms, in_features)
result = layer(embedding)
assert result.shape == (num_atoms, vocab_size)
@pytest.mark.torch
def testGroverFunctionalGroupPredictor():
from deepchem.models.torch_models.grover_layers import GroverFunctionalGroupPredictor
in_features, functional_group_size = 8, 20
num_atoms, num_bonds = 10, 20
predictor = GroverFunctionalGroupPredictor(functional_group_size=20,
in_features=8)
# In a batched graph, atoms and bonds belonging to different graphs are differentiated
# via scopes. In the scenario below, we assume a batched mol graph of three molecules
# with 10 atoms and 20 bonds. Of the 10 atoms, the first 3 belong to mol1,
# the next 3 to mol2 and the remaining 4 to mol3.
# Hence, the atom scope is [(0, 3), (3, 3), (6, 4)]. Similarly, for bonds, the first 5 bonds belong to mol1, the next 4 to mol2 and the remaining 11 to mol3.
atom_scope, bond_scope = [(0, 3), (3, 3), (6, 4)], [(0, 5), (5, 4), (9, 11)]
embeddings = {}
embeddings['bond_from_atom'] = torch.randn(num_bonds, in_features)
embeddings['bond_from_bond'] = torch.randn(num_bonds, in_features)
embeddings['atom_from_atom'] = torch.randn(num_atoms, in_features)
embeddings['atom_from_bond'] = torch.randn(num_atoms, in_features)
result = predictor(embeddings, atom_scope, bond_scope)
assert result['bond_from_bond'].shape == (len(bond_scope),
functional_group_size)
assert result['bond_from_atom'].shape == (len(bond_scope),
functional_group_size)
assert result['atom_from_atom'].shape == (len(atom_scope),
functional_group_size)
assert result['atom_from_bond'].shape == (len(atom_scope),
functional_group_size)
@pytest.mark.torch
@pytest.mark.parametrize('dynamic_depth', ['none', 'uniform'])
@pytest.mark.parametrize('atom_messages', [False, True])
def testGroverMPNEncoder(grover_graph_attributes, dynamic_depth, atom_messages):
from deepchem.models.torch_models.grover_layers import GroverMPNEncoder
f_atoms, f_bonds, a2b, b2a, b2revb, a2a, _, _, _, _ = grover_graph_attributes
# TODO Write tests for undirected = True case, currently fails. for this case, we have
# to generate inputs (a2b, b2a, b2revb) for undirected graph.
hidden_size = 32
depth = 5
undirected = False
attach_feats = True
if not atom_messages:
init_message_dim = f_bonds.shape[1]
attached_feat_fdim = f_atoms.shape[1]
layer = GroverMPNEncoder(atom_messages=atom_messages,
init_message_dim=init_message_dim,
attached_feat_fdim=attached_feat_fdim,
hidden_size=hidden_size,
depth=depth,
dynamic_depth=dynamic_depth,
undirected=undirected,
attach_feats=attach_feats)
init_messages = f_bonds
init_attached_features = f_atoms
a2nei = a2b
a2attached = a2a
out = layer(init_messages, init_attached_features, a2nei, a2attached,
b2a, b2revb)
assert out.shape == (f_bonds.shape[0], hidden_size)
else:
init_message_dim = f_atoms.shape[1]
attached_feat_fdim = f_bonds.shape[1]
layer = GroverMPNEncoder(atom_messages=atom_messages,
init_message_dim=init_message_dim,
attached_feat_fdim=attached_feat_fdim,
hidden_size=hidden_size,
depth=depth,
dynamic_depth=dynamic_depth,
undirected=undirected,
attach_feats=attach_feats)
init_attached_features = f_bonds
init_messages = f_atoms
a2nei = a2a
a2attached = a2b
out = layer(init_messages, init_attached_features, a2nei, a2attached,
b2a, b2revb)
assert out.shape == (f_atoms.shape[0], hidden_size)
@pytest.mark.torch
def testGroverAttentionHead(grover_graph_attributes):
from deepchem.models.torch_models.grover_layers import GroverAttentionHead
f_atoms, f_bonds, a2b, b2a, b2revb, a2a, _, _, _, _ = grover_graph_attributes
hidden_size = 165
atom_messages = False
layer = GroverAttentionHead(hidden_size,
bias=True,
depth=4,
undirected=False,
atom_messages=atom_messages)
query, key, value = layer(f_atoms, f_bonds, a2b, a2a, b2a, b2revb)
assert query.size() == (f_bonds.shape[0], hidden_size)
assert key.size() == (f_bonds.shape[0], hidden_size)
assert value.size() == (f_bonds.shape[0], hidden_size)
@pytest.mark.torch
def testGroverMTBlock(grover_graph_attributes):
from deepchem.models.torch_models.grover_layers import GroverMTBlock
f_atoms, f_bonds, a2b, b2a, b2revb, a2a, a_scope, b_scope, _, _ = grover_graph_attributes
hidden_size = 16
layer = GroverMTBlock(atom_messages=True,
input_dim=f_atoms.shape[1],
num_heads=4,
depth=1,
hidden_size=hidden_size)
new_batch = layer(
[f_atoms, f_bonds, a2b, b2a, b2revb, a_scope, b_scope, a2a])
new_f_atoms, new_f_bonds, new_a2b, new_b2a, new_b2revb, new_a_scope, new_b_scope, new_a2a = new_batch
# Message passing only updates node features: the number of atoms is unchanged, the node feature dimension becomes hidden_size, and the bond features keep their original shape.
assert new_f_atoms.shape == (f_atoms.shape[0], hidden_size)
assert new_f_bonds.shape == f_bonds.shape
# The following are utility variables used during message passing to compute neighbors. Here we assert that the MTBlock layer does not modify them.
assert (new_a2b == a2b).all()
assert (new_b2a == b2a).all()
assert (new_b2revb == b2revb).all()
assert (new_a_scope == a_scope).all()
assert (new_b_scope == b_scope).all()
assert (new_a2a == a2a).all()
@pytest.mark.torch
def testGroverTransEncoder(grover_graph_attributes):
from deepchem.models.torch_models.grover_layers import GroverTransEncoder
f_atoms, f_bonds, a2b, b2a, b2revb, a2a, a_scope, b_scope, _, _ = grover_graph_attributes
hidden_size = 8
n_atoms, n_bonds = f_atoms.shape[0], f_bonds.shape[0]
node_fdim, edge_fdim = f_atoms.shape[1], f_bonds.shape[1]
layer = GroverTransEncoder(hidden_size=hidden_size,
edge_fdim=edge_fdim,
node_fdim=node_fdim)
output = layer([f_atoms, f_bonds, a2b, b2a, b2revb, a_scope, b_scope, a2a])
assert output[0][0].shape == (n_atoms, hidden_size)
assert output[0][1].shape == (n_bonds, hidden_size)
assert output[1][0].shape == (n_atoms, hidden_size)
assert output[1][1].shape == (n_bonds, hidden_size)
<file_sep>import pytest
try:
import torch
from deepchem.models.dft.nnxc import NNLDA, HybridXC
import torch.nn as nn
from dqc.utils.datastruct import ValGrad
has_dqc = True
except ModuleNotFoundError:
has_dqc = False
@pytest.mark.dqc
def dummymodel():
n = 2
class DummyModel(torch.nn.Module):
def __init__(self, n):
super(DummyModel, self).__init__()
self.linear = nn.Linear(n, 1)
def forward(self, x):
return self.linear(x)
return DummyModel(n)
@pytest.mark.dqc
def test_nnlda():
torch.manual_seed(42)
# https://github.com/diffqc/dqc/blob/742eb2576418464609f942def4fb7c3bbdc0cd82/dqc/test/test_xc.py#L15
n = 2
model = dummymodel()
k = NNLDA(model)
densinfo = ValGrad(
value=torch.rand((n,), dtype=torch.float32).requires_grad_())
output = k.get_edensityxc(densinfo).detach()
expected_output = torch.tensor([0.3386, 0.0177])
torch.testing.assert_close(output, expected_output, atol=1e-4, rtol=0)
@pytest.mark.dqc
def test_hybridxc():
torch.manual_seed(42)
n = 2
nnmodel = dummymodel()
k = HybridXC("lda_x", nnmodel, aweight0=0.0)
densinfo = ValGrad(
value=torch.rand((n,), dtype=torch.float32).requires_grad_())
output = k.get_edensityxc(densinfo).detach()
expected_output = torch.tensor([-0.6988, -0.2108], dtype=torch.float64)
torch.testing.assert_close(output, expected_output, atol=1e-4, rtol=0)
<file_sep>import os
import unittest
import numpy as np
from deepchem.utils import rdkit_utils
from deepchem.utils.fragment_utils import get_contact_atom_indices
from deepchem.utils.fragment_utils import merge_molecular_fragments
from deepchem.utils.fragment_utils import get_partial_charge
from deepchem.utils.fragment_utils import strip_hydrogens
from deepchem.utils.fragment_utils import MolecularFragment
from deepchem.utils.fragment_utils import AtomShim
class TestFragmentUtil(unittest.TestCase):
def setUp(self):
# TODO test more formats for ligand
current_dir = os.path.dirname(os.path.realpath(__file__))
self.protein_file = os.path.join(
current_dir, '../../feat/tests/data/3ws9_protein_fixer_rdkit.pdb')
self.ligand_file = os.path.join(
current_dir, '../../feat/tests/data/3ws9_ligand.sdf')
def test_get_contact_atom_indices(self):
complexes = rdkit_utils.load_complex(
[self.protein_file, self.ligand_file])
contact_indices = get_contact_atom_indices(complexes)
assert len(contact_indices) == 2
def test_create_molecular_fragment(self):
mol_xyz, mol_rdk = rdkit_utils.load_molecule(self.ligand_file)
fragment = MolecularFragment(mol_rdk.GetAtoms(), mol_xyz)
assert len(mol_rdk.GetAtoms()) == len(fragment.GetAtoms())
assert (fragment.GetCoords() == mol_xyz).all()
def test_strip_hydrogens(self):
mol_xyz, mol_rdk = rdkit_utils.load_molecule(self.ligand_file)
_ = MolecularFragment(mol_rdk.GetAtoms(), mol_xyz)
# Test on RDKit
_ = strip_hydrogens(mol_xyz, mol_rdk)
def test_merge_molecular_fragments(self):
mol_xyz, mol_rdk = rdkit_utils.load_molecule(self.ligand_file)
fragment1 = MolecularFragment(mol_rdk.GetAtoms(), mol_xyz)
fragment2 = MolecularFragment(mol_rdk.GetAtoms(), mol_xyz)
joint = merge_molecular_fragments([fragment1, fragment2])
assert len(mol_rdk.GetAtoms()) * 2 == len(joint.GetAtoms())
def test_get_partial_charge(self):
from rdkit import Chem
mol = Chem.MolFromSmiles("CC")
atom = mol.GetAtoms()[0]
partial_charge = get_partial_charge(atom)
assert partial_charge == 0
def test_atom_shim(self):
atomic_num = 5
partial_charge = 1
atom_coords = np.array([0., 1., 2.])
shim = AtomShim(atomic_num, partial_charge, atom_coords)
assert shim.GetAtomicNum() == atomic_num
assert shim.GetPartialCharge() == partial_charge
assert (shim.GetCoords() == atom_coords).all()
<file_sep>"""
Tests for Molecular Graph data structures.
"""
import unittest
import numpy as np
from deepchem.feat.mol_graphs import ConvMol
class TestMolGraphs(unittest.TestCase):
"""
Test mol graphs.
"""
def test_construct_conv_mol(self):
"""Tests that ConvMols can be constructed without crash."""
# Artificial feature array.
atom_features = np.array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]])
adj_list = [[1], [0, 2], [1]]
_ = ConvMol(atom_features, adj_list)
def test_conv_mol_deg_slice(self):
"""Tests that deg_slice works properly."""
atom_features = np.array([[20, 21, 22, 23], [24, 25, 26, 27],
[28, 29, 30, 31], [32, 33, 34, 35]])
adj_list = [[1, 2], [0, 3], [0, 3], [1, 2]]
mol = ConvMol(atom_features, adj_list)
assert np.array_equal(
mol.get_deg_slice(),
# 0 atoms of degree 0
# 0 atoms of degree 1
# 4 atoms of degree 2
# 0 atoms of degree 3
# 0 atoms of degree 4
# 0 atoms of degree 5
# 0 atoms of degree 6
# 0 atoms of degree 7
# 0 atoms of degree 8
# 0 atoms of degree 9
# 0 atoms of degree 10
np.array([[0, 0], [0, 0], [0, 4], [0, 0], [0, 0], [0, 0], [0, 0],
[0, 0], [0, 0], [0, 0], [0, 0]]))
def test_get_atom_features(self):
"""Test that the atom features are computed properly."""
atom_features = np.array([[40, 41, 42, 43], [44, 45, 46, 47],
[48, 49, 50, 51], [52, 53, 54, 55],
[56, 57, 58, 59]])
canon_adj_list = [[1, 2], [0, 3], [0, 3], [1, 2, 4], [3]]
mol = ConvMol(atom_features, canon_adj_list)
# atom 4 has 0 neighbors
# atom 0 has 2 neighbors
# atom 1 has 2 neighbors
# atom 2 has 2 neighbors
# atom 3 has 3 neighbors.
# Verify that atom features have been sorted by atom degree.
assert np.array_equal(
mol.get_atom_features(),
np.array([[56, 57, 58, 59], [40, 41, 42, 43], [44, 45, 46, 47],
[48, 49, 50, 51], [52, 53, 54, 55]]))
def test_get_adjacency_list(self):
"""Tests that adj-list is canonicalized properly."""
atom_features = np.array([[40, 41, 42, 43], [44, 45, 46, 47],
[48, 49, 50, 51], [52, 53, 54, 55],
[56, 57, 58, 59]])
canon_adj_list = [[1, 2], [0, 3], [0, 3], [1, 2, 4], [3]]
mol = ConvMol(atom_features, canon_adj_list)
# Sorting is done by atom degree as before. So the ordering goes
# 4, 0, 1, 2, 3 now in terms of the original ordering. The mapping
# from new position to old position is
# {(4, 0), (0, 1), (1, 2), (2, 3), (3, 4)}. Check that adjacency
# list respects this reordering and returns correct adjacency list.
assert (mol.get_adjacency_list() == [[4], [2, 3], [1, 4], [1, 4],
[2, 3, 0]])
def test_agglomerate_molecules(self):
"""Test AggrMol.agglomerate_mols."""
molecules = []
# First example molecule
# Artificial feature array.
atom_features = np.array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]])
adj_list = [[1], [0, 2], [1]]
molecules.append(ConvMol(atom_features, adj_list))
# Second example molecule
atom_features = np.array([[20, 21, 22, 23], [24, 25, 26, 27],
[28, 29, 30, 31], [32, 33, 34, 35]])
adj_list = [[1, 2], [0, 3], [0, 3], [1, 2]]
molecules.append(ConvMol(atom_features, adj_list))
# Third example molecule
atom_features = np.array([[40, 41, 42, 43], [44, 45, 46, 47],
[48, 49, 50, 51], [52, 53, 54, 55],
[56, 57, 58, 59]])
adj_list = [[1, 2], [0, 3], [0, 3], [1, 2, 4], [3]]
molecules.append(ConvMol(atom_features, adj_list))
# Test agglomerate molecule method
concat_mol = ConvMol.agglomerate_mols(molecules)
assert concat_mol.get_num_atoms() == 12
assert concat_mol.get_num_molecules() == 3
atom_features = concat_mol.get_atom_features()
assert np.array_equal(atom_features[0, :], [1, 2, 3, 4])
assert np.array_equal(atom_features[2, :], [56, 57, 58, 59])
assert np.array_equal(atom_features[11, :], [52, 53, 54, 55])
assert np.array_equal(atom_features[4, :], [20, 21, 22, 23])
deg_adj_lists = concat_mol.get_deg_adjacency_lists()
# No atoms of degree 0
assert np.array_equal(deg_adj_lists[0], np.zeros([0, 0]))
# 3 atoms of degree 1
assert np.array_equal(deg_adj_lists[1], [[3], [3], [11]])
# 8 atoms of degree 2
assert np.array_equal(
deg_adj_lists[2],
[[0, 1], [5, 6], [4, 7], [4, 7], [5, 6], [9, 10], [8, 11], [8, 11]])
# 1 atom of degree 3
assert np.array_equal(deg_adj_lists[3], [[9, 10, 2]])
# 0 atoms of degree 4
assert np.array_equal(deg_adj_lists[4], np.zeros([0, 4]))
# 0 atoms of degree 5
assert np.array_equal(deg_adj_lists[5], np.zeros([0, 5]))
def test_null_conv_mol(self):
"""Running Null AggrMol Test. Only works when max_deg=6 and min_deg=0"""
num_feat = 4
null_mol = ConvMol.get_null_mol(num_feat)
deg_adj_lists = null_mol.get_deg_adjacency_lists()
# Check that atoms are only connected to themselves.
assert np.array_equal(deg_adj_lists[10],
[[10, 10, 10, 10, 10, 10, 10, 10, 10, 10]])
assert np.array_equal(deg_adj_lists[1], [[1]])
# Check that there's one atom of each degree.
assert np.array_equal(null_mol.get_deg_slice(),
[[0, 1], [1, 1], [2, 1], [3, 1], [4, 1], [5, 1],
[6, 1], [7, 1], [8, 1], [9, 1], [10, 1]])
<file_sep>Multiclass Examples
-------------------
This directory contains examples of building multiclass models in DeepChem.
<file_sep>import hydra
from omegaconf import DictConfig
import deepchem as dc
from deepchem.models import GCNModel
from deepchem.models.lightning.dc_lightning_module import DCLightningModule
from deepchem.models.lightning.dc_lightning_dataset_module import DCLightningDatasetModule, collate_dataset_wrapper
from deepchem.feat import MolGraphConvFeaturizer
import pytorch_lightning as pl
@hydra.main(version_base=None, config_path="configs", config_name="zinc15")
def main(cfg: DictConfig) -> None:
featurizer = MolGraphConvFeaturizer()
tasks, datasets, _ = dc.molnet.load_zinc15(featurizer=featurizer)
_, valid_dataset, _ = datasets
n_tasks = len(tasks)
model = GCNModel(graph_conv_layers=cfg.graph_conv_layers,
mode=cfg.mode,
n_tasks=n_tasks,
number_atom_features=cfg.number_atom_features,
learning_rate=cfg.learning_rate)
gcnmodule = DCLightningModule(model)
smiles_datasetmodule = DCLightningDatasetModule(valid_dataset, cfg.batch_size,
collate_dataset_wrapper)
trainer = pl.Trainer(
max_epochs=cfg.max_epochs,
devices=cfg.num_device,
accelerator=cfg.device,
)
trainer.fit(gcnmodule, smiles_datasetmodule)
if __name__ == "__main__":
main()
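# The script above expects a Hydra config file at configs/zinc15.yaml. The
# sketch below is only an illustration of the fields accessed through `cfg`
# (graph_conv_layers, mode, number_atom_features, learning_rate, batch_size,
# max_epochs, num_device, device); the values are placeholders, not the
# project's actual configuration:
#
# graph_conv_layers: [64, 64]
# mode: regression
# number_atom_features: 30
# learning_rate: 0.001
# batch_size: 128
# max_epochs: 5
# num_device: 1
# device: cpu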
<file_sep>"""
Tox21 dataset loader.
"""
import os
import deepchem as dc
from deepchem.molnet.load_function.molnet_loader import TransformerGenerator, _MolnetLoader
from deepchem.data import Dataset
from typing import List, Optional, Tuple, Union
TOX21_URL = "https://deepchemdata.s3-us-west-1.amazonaws.com/datasets/tox21.csv.gz"
TOX21_TASKS = [
'NR-AR', 'NR-AR-LBD', 'NR-AhR', 'NR-Aromatase', 'NR-ER', 'NR-ER-LBD',
'NR-PPAR-gamma', 'SR-ARE', 'SR-ATAD5', 'SR-HSE', 'SR-MMP', 'SR-p53'
]
class _Tox21Loader(_MolnetLoader):
def create_dataset(self) -> Dataset:
dataset_file = os.path.join(self.data_dir, "tox21.csv.gz")
if not os.path.exists(dataset_file):
dc.utils.data_utils.download_url(url=TOX21_URL,
dest_dir=self.data_dir)
loader = dc.data.CSVLoader(tasks=self.tasks,
feature_field="smiles",
featurizer=self.featurizer)
return loader.create_dataset(dataset_file, shard_size=8192)
def load_tox21(
featurizer: Union[dc.feat.Featurizer, str] = 'ECFP',
splitter: Union[dc.splits.Splitter, str, None] = 'scaffold',
transformers: List[Union[TransformerGenerator, str]] = ['balancing'],
reload: bool = True,
data_dir: Optional[str] = None,
save_dir: Optional[str] = None,
tasks: List[str] = TOX21_TASKS,
**kwargs
) -> Tuple[List[str], Tuple[Dataset, ...], List[dc.trans.Transformer]]:
"""Load Tox21 dataset
The "Toxicology in the 21st Century" (Tox21) initiative created a public
database measuring toxicity of compounds, which has been used in the 2014
Tox21 Data Challenge. This dataset contains qualitative toxicity measurements
for 8k compounds on 12 different targets, including nuclear receptors and
stress response pathways.
Random splitting is recommended for this dataset.
The raw data csv file contains columns below:
- "smiles" - SMILES representation of the molecular structure
- "NR-XXX" - Nuclear receptor signaling bioassays results
- "SR-XXX" - Stress response bioassays results
please refer to https://tripod.nih.gov/tox21/challenge/data.jsp for details.
Parameters
----------
featurizer: Featurizer or str
the featurizer to use for processing the data. Alternatively you can pass
one of the names from dc.molnet.featurizers as a shortcut.
splitter: Splitter or str
the splitter to use for splitting the data into training, validation, and
test sets. Alternatively you can pass one of the names from
dc.molnet.splitters as a shortcut. If this is None, all the data
will be included in a single dataset.
transformers: list of TransformerGenerators or strings
the Transformers to apply to the data. Each one is specified by a
TransformerGenerator or, as a shortcut, one of the names from
dc.molnet.transformers.
reload: bool
if True, the first call for a particular featurizer and splitter will cache
the datasets to disk, and subsequent calls will reload the cached datasets.
data_dir: str
a directory to save the raw data in
save_dir: str
a directory to save the dataset in
tasks: List[str], (optional)
Specify the set of tasks to load. If no task is specified, then it loads
the default set of tasks which are NR-AR, NR-AR-LBD, NR-AhR, NR-Aromatase, NR-ER,
NR-ER-LBD, NR-PPAR-gamma, SR-ARE, SR-ATAD5, SR-HSE, SR-MMP, SR-p53.
References
----------
.. [1] Tox21 Challenge. https://tripod.nih.gov/tox21/challenge/
"""
loader = _Tox21Loader(featurizer, splitter, transformers, tasks, data_dir,
save_dir, **kwargs)
return loader.load_dataset('tox21', reload)
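# A minimal usage sketch (illustrative, not part of the loader). The first call
# downloads tox21.csv.gz into data_dir and featurizes it, so it may take a
# while; cached datasets are reused on subsequent calls when reload=True.
# >>> tasks, (train, valid, test), transformers = load_tox21(
# ... featurizer='ECFP', splitter='scaffold')
# >>> len(tasks)
# 12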
<file_sep>import unittest
import os
import tempfile
from deepchem.utils import rdkit_utils
from deepchem.utils import pdbqt_utils
class TestPDBQTUtils(unittest.TestCase):
def setUp(self):
current_dir = os.path.dirname(os.path.realpath(__file__))
self.protein_file = os.path.join(current_dir,
"../../dock/tests/1jld_protein.pdb")
self.ligand_file = os.path.join(current_dir,
"../../dock/tests/1jld_ligand.sdf")
def test_pdbqt_to_pdb(self):
"""Test that a PDBQT molecule can be converted back in to PDB."""
xyz, mol = rdkit_utils.load_molecule(self.protein_file,
calc_charges=False,
add_hydrogens=False)
with tempfile.TemporaryDirectory() as tmp:
out_pdb = os.path.join(tmp, "mol.pdb")
out_pdbqt = os.path.join(tmp, "mol.pdbqt")
rdkit_utils.write_molecule(mol, out_pdb, is_protein=True)
rdkit_utils.write_molecule(mol, out_pdbqt, is_protein=True)
pdb_block = pdbqt_utils.pdbqt_to_pdb(out_pdbqt)
from rdkit import Chem
pdb_mol = Chem.MolFromPDBBlock(pdb_block,
sanitize=False,
removeHs=False)
xyz, pdbqt_mol = rdkit_utils.load_molecule(out_pdbqt,
add_hydrogens=False,
calc_charges=False)
assert pdb_mol.GetNumAtoms() == pdbqt_mol.GetNumAtoms()
for atom_idx in range(pdb_mol.GetNumAtoms()):
atom1 = pdb_mol.GetAtoms()[atom_idx]
atom2 = pdbqt_mol.GetAtoms()[atom_idx]
assert atom1.GetAtomicNum() == atom2.GetAtomicNum()
def test_convert_mol_to_pdbqt(self):
"""Test that a ligand molecule can be coverted to PDBQT."""
from rdkit import Chem
xyz, mol = rdkit_utils.load_molecule(self.ligand_file,
calc_charges=False,
add_hydrogens=False)
with tempfile.TemporaryDirectory() as tmp:
outfile = os.path.join(tmp, "mol.pdbqt")
writer = Chem.PDBWriter(outfile)
writer.write(mol)
writer.close()
pdbqt_utils.convert_mol_to_pdbqt(mol, outfile)
pdbqt_xyz, pdbqt_mol = rdkit_utils.load_molecule(
outfile, add_hydrogens=False, calc_charges=False)
# Compare the original molecule with the round-tripped PDBQT molecule.
assert mol.GetNumAtoms() == pdbqt_mol.GetNumAtoms()
for atom_idx in range(pdbqt_mol.GetNumAtoms()):
atom1 = mol.GetAtoms()[atom_idx]
atom2 = pdbqt_mol.GetAtoms()[atom_idx]
assert atom1.GetAtomicNum() == atom2.GetAtomicNum()
def test_convert_protein_to_pdbqt(self):
"""Test a protein in a PDB can be converted to PDBQT."""
from rdkit import Chem
xyz, mol = rdkit_utils.load_molecule(self.protein_file,
calc_charges=False,
add_hydrogens=False)
with tempfile.TemporaryDirectory() as tmp:
outfile = os.path.join(tmp, "mol.pdbqt")
writer = Chem.PDBWriter(outfile)
writer.write(mol)
writer.close()
pdbqt_utils.convert_protein_to_pdbqt(mol, outfile)
pdbqt_xyz, pdbqt_mol = rdkit_utils.load_molecule(
outfile, add_hydrogens=False, calc_charges=False)
# Compare the original protein with the converted PDBQT molecule.
assert mol.GetNumAtoms() == pdbqt_mol.GetNumAtoms()
for atom_idx in range(pdbqt_mol.GetNumAtoms()):
atom1 = mol.GetAtoms()[atom_idx]
atom2 = pdbqt_mol.GetAtoms()[atom_idx]
assert atom1.GetAtomicNum() == atom2.GetAtomicNum()
<file_sep>"""
Tests for Atomic Convolutions.
"""
import os
import pytest
from flaky import flaky
import numpy as np
from deepchem.data import NumpyDataset
from deepchem.feat import AtomicConvFeaturizer
try:
import tensorflow as tf # noqa: F401
from deepchem.models import atomic_conv
has_tensorflow = True
except:
has_tensorflow = False
@pytest.mark.tensorflow
def test_atomic_conv_initialize():
"""Quick test of AtomicConv."""
acm = atomic_conv.AtomicConvModel(n_tasks=1,
batch_size=1,
layer_sizes=[
1,
],
frag1_num_atoms=5,
frag2_num_atoms=5,
complex_num_atoms=10)
assert acm.complex_num_atoms == 10
assert len(acm.atom_types) == 15
@flaky
@pytest.mark.slow
@pytest.mark.tensorflow
def test_atomic_conv():
"""A simple test that initializes and fits an AtomicConvModel."""
# For simplicity, let's assume both molecules have same number of
# atoms.
N_atoms = 5
batch_size = 1
atomic_convnet = atomic_conv.AtomicConvModel(n_tasks=1,
batch_size=batch_size,
layer_sizes=[10],
frag1_num_atoms=5,
frag2_num_atoms=5,
complex_num_atoms=10,
dropouts=0.0,
learning_rate=0.003)
# Creates a set of dummy features that contain the coordinate and
# neighbor-list features required by the AtomicConvModel.
features = []
frag1_coords = np.random.rand(N_atoms, 3)
frag1_nbr_list = {0: [], 1: [], 2: [], 3: [], 4: []}
frag1_z = np.random.randint(10, size=(N_atoms))
frag2_coords = np.random.rand(N_atoms, 3)
frag2_nbr_list = {0: [], 1: [], 2: [], 3: [], 4: []}
frag2_z = np.random.randint(10, size=(N_atoms))
system_coords = np.random.rand(2 * N_atoms, 3)
system_nbr_list = {
0: [],
1: [],
2: [],
3: [],
4: [],
5: [],
6: [],
7: [],
8: [],
9: []
}
system_z = np.random.randint(10, size=(2 * N_atoms))
features.append(
(frag1_coords, frag1_nbr_list, frag1_z, frag2_coords, frag2_nbr_list,
frag2_z, system_coords, system_nbr_list, system_z))
features = np.asarray(features)
labels = np.random.rand(batch_size)
train = NumpyDataset(features, labels)
atomic_convnet.fit(train, nb_epoch=150)
assert np.allclose(labels, atomic_convnet.predict(train), atol=0.01)
@pytest.mark.slow
@pytest.mark.tensorflow
def test_atomic_conv_variable():
"""A simple test that initializes and fits an AtomicConvModel on variable input size."""
frag1_num_atoms = 1000
frag2_num_atoms = 1200
complex_num_atoms = frag1_num_atoms + frag2_num_atoms
batch_size = 1
atomic_convnet = atomic_conv.AtomicConvModel(
n_tasks=1,
batch_size=batch_size,
layer_sizes=[
10,
],
frag1_num_atoms=frag1_num_atoms,
frag2_num_atoms=frag2_num_atoms,
complex_num_atoms=complex_num_atoms)
# Creates a set of dummy features that contain the coordinate and
# neighbor-list features required by the AtomicConvModel.
features = []
frag1_coords = np.random.rand(frag1_num_atoms, 3)
frag1_nbr_list = {i: [] for i in range(frag1_num_atoms)}
frag1_z = np.random.randint(10, size=(frag1_num_atoms))
frag2_coords = np.random.rand(frag2_num_atoms, 3)
frag2_nbr_list = {i: [] for i in range(frag2_num_atoms)}
frag2_z = np.random.randint(10, size=(frag2_num_atoms))
system_coords = np.random.rand(complex_num_atoms, 3)
system_nbr_list = {i: [] for i in range(complex_num_atoms)}
system_z = np.random.randint(10, size=(complex_num_atoms))
features.append(
(frag1_coords, frag1_nbr_list, frag1_z, frag2_coords, frag2_nbr_list,
frag2_z, system_coords, system_nbr_list, system_z))
features = np.asarray(features)
labels = np.zeros(batch_size)
train = NumpyDataset(features, labels)
atomic_convnet.fit(train, nb_epoch=1)
preds = atomic_convnet.predict(train)
assert preds.shape == (1, 1, 1)
assert np.count_nonzero(preds) > 0
@pytest.mark.slow
@pytest.mark.tensorflow
def test_atomic_conv_with_feat():
"""A simple test for running an atomic convolution on featurized data."""
dir_path = os.path.dirname(os.path.realpath(__file__))
ligand_file = os.path.join(dir_path,
"../../feat/tests/data/3zso_ligand_hyd.pdb")
protein_file = os.path.join(dir_path,
"../../feat/tests/data/3zso_protein_noH.pdb")
# Pulled from PDB files. For larger datasets with more PDBs, would use
# max num atoms instead of exact.
frag1_num_atoms = 44 # for ligand atoms
frag2_num_atoms = 2334 # for protein atoms
complex_num_atoms = 2378 # in total
max_num_neighbors = 4
# Cutoff in angstroms
neighbor_cutoff = 4
complex_featurizer = AtomicConvFeaturizer(frag1_num_atoms, frag2_num_atoms,
complex_num_atoms,
max_num_neighbors,
neighbor_cutoff)
# arbitrary label
labels = np.array([0])
features = complex_featurizer.featurize([(ligand_file, protein_file)])
dataset = NumpyDataset(features, labels)
batch_size = 1
print("Constructing Atomic Conv model")
atomic_convnet = atomic_conv.AtomicConvModel(
n_tasks=1,
batch_size=batch_size,
layer_sizes=[10],
frag1_num_atoms=frag1_num_atoms,
frag2_num_atoms=frag2_num_atoms,
complex_num_atoms=complex_num_atoms)
print("About to call fit")
# Run a fitting operation
atomic_convnet.fit(dataset)
preds = atomic_convnet.predict(dataset)
assert preds.shape == (1, 1, 1)
assert np.count_nonzero(preds) > 0
<file_sep>"""Optimizers and related classes for use with TensorGraph."""
import math
from typing import Dict, Union, Optional
class Optimizer(object):
"""An algorithm for optimizing a model.
This is an abstract class. Subclasses represent specific optimization algorithms.
"""
def __init__(self, learning_rate: "Union[float, LearningRateSchedule]"):
"""This constructor should only be called by subclasses.
Parameters
----------
learning_rate: float or LearningRateSchedule
the learning rate to use for optimization
"""
self.learning_rate = learning_rate
def _create_tf_optimizer(self, global_step):
"""Construct a TensorFlow optimizer.
Parameters
----------
global_step: tensor
a tensor containing the global step index during optimization, used for learning rate decay
Returns
-------
a new TensorFlow optimizer implementing the algorithm
"""
raise NotImplementedError("Subclasses must implement this")
def _create_pytorch_optimizer(self, params):
"""Construct a PyTorch optimizer.
Parameters
----------
params: Iterable
the model parameters to optimize
Returns
-------
a new PyTorch optimizer implementing the algorithm
"""
raise NotImplementedError("Subclasses must implement this")
def _create_jax_optimizer(self):
"""Construct a Jax optimizer.
Returns
-------
a new Optax optimizer optax.GradientTransformation implementing the algorithm
"""
raise NotImplementedError("Subclasses must implement this")
class LearningRateSchedule(object):
"""A schedule for changing the learning rate over the course of optimization.
This is an abstract class. Subclasses represent specific schedules.
"""
def _create_tf_tensor(self, global_step):
"""Construct a tensor that equals the learning rate.
Parameters
----------
global_step: tensor
a tensor containing the global step index during optimization
Returns
-------
a tensor that equals the learning rate
"""
raise NotImplementedError("Subclasses must implement this")
def _create_pytorch_schedule(self, optimizer):
"""Construct a PyTorch learning rate scheduler.
Parameters
----------
optimizer: torch.optim.Optimizer
the Optimizer whose learning rate will be modified
Returns
-------
a PyTorch scheduler implementing the schedule
"""
raise NotImplementedError("Subclasses must implement this")
def _create_jax_schedule(self, learning_rate):
"""Construct a Jax learning rate scheduler using optax.
Parameters
----------
learning_rate: float
the initial learning rate that will be modified
Returns
-------
a optax scheduler implementing the schedule
"""
raise NotImplementedError("Subclasses must implement this")
class AdaGrad(Optimizer):
"""The AdaGrad optimization algorithm.
Adagrad is an optimizer with parameter-specific learning rates, which are
adapted relative to how frequently a parameter gets updated during training.
The more updates a parameter receives, the smaller the updates. See [1]_ for
a full reference for the algorithm.
References
----------
.. [1] Duchi, John, Elad Hazan, and Yoram Singer. "Adaptive subgradient
methods for online learning and stochastic optimization." Journal of Machine
Learning Research 12.7 (2011).
"""
def __init__(self,
learning_rate: Union[float, LearningRateSchedule] = 0.001,
initial_accumulator_value: float = 0.1,
epsilon: float = 1e-07):
"""Construct an AdaGrad optimizer.
Parameters
----------
learning_rate: float or LearningRateSchedule
the learning rate to use for optimization
initial_accumulator_value: float
a parameter of the AdaGrad algorithm
epsilon: float
a parameter of the AdaGrad algorithm
"""
super(AdaGrad, self).__init__(learning_rate)
self.initial_accumulator_value = initial_accumulator_value
self.epsilon = epsilon
def _create_tf_optimizer(self, global_step):
import tensorflow as tf
if isinstance(self.learning_rate, LearningRateSchedule):
learning_rate = self.learning_rate._create_tf_tensor(global_step)
else:
learning_rate = self.learning_rate
return tf.keras.optimizers.Adagrad(
learning_rate=learning_rate,
initial_accumulator_value=self.initial_accumulator_value,
epsilon=self.epsilon)
def _create_pytorch_optimizer(self, params):
import torch
if isinstance(self.learning_rate, LearningRateSchedule):
lr = self.learning_rate.initial_rate
else:
lr = self.learning_rate
return torch.optim.Adagrad(
params,
lr,
initial_accumulator_value=self.initial_accumulator_value,
eps=self.epsilon)
def _create_jax_optimizer(self):
import optax
process = []
if isinstance(self.learning_rate, LearningRateSchedule):
lr = self.learning_rate.initial_rate
last_process = optax.scale(-1.0)
else:
lr = self.learning_rate
last_process = optax.scale(-1.0 * lr)
process.append(
optax.scale_by_rss(
initial_accumulator_value=self.initial_accumulator_value,
eps=self.epsilon))
process.append(last_process)
return optax.chain(*process)
class Adam(Optimizer):
"""The Adam optimization algorithm."""
def __init__(self,
learning_rate: Union[float, LearningRateSchedule] = 0.001,
beta1: float = 0.9,
beta2: float = 0.999,
epsilon: float = 1e-08):
"""Construct an Adam optimizer.
Parameters
----------
learning_rate: float or LearningRateSchedule
the learning rate to use for optimization
beta1: float
a parameter of the Adam algorithm
beta2: float
a parameter of the Adam algorithm
epsilon: float
a parameter of the Adam algorithm
"""
super(Adam, self).__init__(learning_rate)
self.beta1 = beta1
self.beta2 = beta2
self.epsilon = epsilon
def _create_tf_optimizer(self, global_step):
import tensorflow as tf
if isinstance(self.learning_rate, LearningRateSchedule):
learning_rate = self.learning_rate._create_tf_tensor(global_step)
else:
learning_rate = self.learning_rate
return tf.keras.optimizers.Adam(learning_rate=learning_rate,
beta_1=self.beta1,
beta_2=self.beta2,
epsilon=self.epsilon)
def _create_pytorch_optimizer(self, params):
import torch
if isinstance(self.learning_rate, LearningRateSchedule):
lr = self.learning_rate.initial_rate
else:
lr = self.learning_rate
return torch.optim.Adam(params, lr, (self.beta1, self.beta2),
self.epsilon)
def _create_jax_optimizer(self):
import optax
process = []
if isinstance(self.learning_rate, LearningRateSchedule):
scheduler = self.learning_rate._create_jax_schedule()
process.append(optax.scale_by_schedule(scheduler))
last_process = optax.scale(-1.0)
else:
lr = self.learning_rate
last_process = optax.scale(-1.0 * lr)
process.append(
optax.scale_by_adam(b1=self.beta1, b2=self.beta2, eps=self.epsilon))
process.append(last_process)
return optax.chain(*process)
class SparseAdam(Optimizer):
"""The Sparse Adam optimization algorithm, also known as Lazy Adam.
Sparse Adam is suitable for sparse tensors. It handles sparse updates more efficiently.
It only updates moving-average accumulators for sparse variable indices that appear in the current batch, rather than updating the accumulators for all indices.
"""
def __init__(self,
learning_rate: Union[float, LearningRateSchedule] = 0.001,
beta1: float = 0.9,
beta2: float = 0.999,
epsilon: float = 1e-08):
"""Construct an Adam optimizer.
Parameters
----------
learning_rate: float or LearningRateSchedule
the learning rate to use for optimization
beta1: float
a parameter of the SparseAdam algorithm
beta2: float
a parameter of the SparseAdam algorithm
epsilon: float
a parameter of the SparseAdam algorithm
"""
super(SparseAdam, self).__init__(learning_rate)
self.beta1 = beta1
self.beta2 = beta2
self.epsilon = epsilon
def _create_tf_optimizer(self, global_step):
import tensorflow_addons as tfa
if isinstance(self.learning_rate, LearningRateSchedule):
learning_rate = self.learning_rate._create_tf_tensor(global_step)
else:
learning_rate = self.learning_rate
return tfa.optimizers.LazyAdam(learning_rate=learning_rate,
beta_1=self.beta1,
beta_2=self.beta2,
epsilon=self.epsilon)
def _create_pytorch_optimizer(self, params):
import torch
if isinstance(self.learning_rate, LearningRateSchedule):
lr = self.learning_rate.initial_rate
else:
lr = self.learning_rate
return torch.optim.SparseAdam(params, lr, (self.beta1, self.beta2),
self.epsilon)
class AdamW(Optimizer):
"""The AdamW optimization algorithm.
AdamW is a variant of Adam with decoupled weight decay.
In Adam, weight decay is implemented as an L2 penalty added to the gradients (default: 0).
In AdamW, the weight decay coefficient (default: 1e-2) is decoupled from the gradient update and applied directly to the weights.
"""
def __init__(self,
learning_rate: Union[float, LearningRateSchedule] = 0.001,
weight_decay: Union[float, LearningRateSchedule] = 0.01,
beta1: float = 0.9,
beta2: float = 0.999,
epsilon: float = 1e-08,
amsgrad: bool = False):
"""Construct an AdamW optimizer.
Parameters
----------
learning_rate: float or LearningRateSchedule
the learning rate to use for optimization
weight_decay: float or LearningRateSchedule
weight decay coefficient for AdamW
beta1: float
a parameter of the Adam algorithm
beta2: float
a parameter of the Adam algorithm
epsilon: float
a parameter of the Adam algorithm
amsgrad: bool
If True, will use the AMSGrad variant of AdamW (from "On the Convergence of Adam and Beyond"), else will use the original algorithm.
"""
super(AdamW, self).__init__(learning_rate)
self.weight_decay = weight_decay
self.beta1 = beta1
self.beta2 = beta2
self.epsilon = epsilon
self.amsgrad = amsgrad
def _create_tf_optimizer(self, global_step):
import tensorflow_addons as tfa
if isinstance(self.learning_rate, LearningRateSchedule):
learning_rate = self.learning_rate._create_tf_tensor(global_step)
else:
learning_rate = self.learning_rate
return tfa.optimizers.AdamW(weight_decay=self.weight_decay,
learning_rate=learning_rate,
beta_1=self.beta1,
beta_2=self.beta2,
epsilon=self.epsilon,
amsgrad=self.amsgrad)
def _create_pytorch_optimizer(self, params):
import torch
if isinstance(self.learning_rate, LearningRateSchedule):
lr = self.learning_rate.initial_rate
else:
lr = self.learning_rate
return torch.optim.AdamW(params, lr, (self.beta1, self.beta2),
self.epsilon, self.weight_decay, self.amsgrad)
def _create_jax_optimizer(self):
import optax
process = []
if isinstance(self.learning_rate, LearningRateSchedule):
scheduler = self.learning_rate._create_jax_schedule()
process.append(optax.scale_by_schedule(scheduler))
last_process = optax.scale(-1.0)
else:
lr = self.learning_rate
last_process = optax.scale(-1.0 * lr)
process.append(
optax.scale_by_adam(b1=self.beta1,
b2=self.beta2,
eps=self.epsilon,
eps_root=0.0))
process.append(optax.add_decayed_weights(self.weight_decay, None))
process.append(last_process)
return optax.chain(*process)
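# A small illustrative sketch (not from the original source): constructing an
# AdamW optimizer whose decoupled weight decay coefficient is passed straight
# through to the backend optimizer (e.g. torch.optim.AdamW above).
# >>> opt = AdamW(learning_rate=1e-3, weight_decay=1e-2)
# >>> opt.weight_decay
# 0.01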
class RMSProp(Optimizer):
"""RMSProp Optimization algorithm."""
def __init__(self,
learning_rate: Union[float, LearningRateSchedule] = 0.001,
momentum: float = 0.0,
decay: float = 0.9,
epsilon: float = 1e-10):
"""Construct an RMSProp Optimizer.
Parameters
----------
learning_rate: float or LearningRateSchedule
the learning_rate used for optimization
momentum: float, default 0.0
a parameter of the RMSProp algorithm
decay: float, default 0.9
a parameter of the RMSProp algorithm
epsilon: float, default 1e-10
a parameter of the RMSProp algorithm
"""
super(RMSProp, self).__init__(learning_rate)
self.momentum = momentum
self.decay = decay
self.epsilon = epsilon
def _create_tf_optimizer(self, global_step):
import tensorflow as tf
if isinstance(self.learning_rate, LearningRateSchedule):
learning_rate = self.learning_rate._create_tf_tensor(global_step)
else:
learning_rate = self.learning_rate
return tf.keras.optimizers.RMSprop(learning_rate=learning_rate,
momentum=self.momentum,
rho=self.decay,
epsilon=self.epsilon)
def _create_pytorch_optimizer(self, params):
import torch
if isinstance(self.learning_rate, LearningRateSchedule):
lr = self.learning_rate.initial_rate
else:
lr = self.learning_rate
return torch.optim.RMSprop(params,
lr,
alpha=self.decay,
eps=self.epsilon,
momentum=self.momentum)
def _create_jax_optimizer(self):
import optax
process = []
if isinstance(self.learning_rate, LearningRateSchedule):
scheduler = self.learning_rate._create_jax_schedule()
process.append(optax.scale_by_schedule(scheduler))
last_process = optax.scale(-1.0)
else:
lr = self.learning_rate
last_process = optax.scale(-1.0 * lr)
process.append(
optax.scale_by_rms(decay=self.decay,
eps=self.epsilon,
initial_scale=0.0))
if self.momentum is not None and self.momentum != 0.0:
process.append(optax.trace(decay=self.momentum, nesterov=False))
process.append(last_process)
return optax.chain(*process)
class GradientDescent(Optimizer):
"""The gradient descent optimization algorithm."""
def __init__(self,
learning_rate: Union[float, LearningRateSchedule] = 0.001):
"""Construct a gradient descent optimizer.
Parameters
----------
learning_rate: float or LearningRateSchedule
the learning rate to use for optimization
"""
super(GradientDescent, self).__init__(learning_rate)
def _create_tf_optimizer(self, global_step):
import tensorflow as tf
if isinstance(self.learning_rate, LearningRateSchedule):
learning_rate = self.learning_rate._create_tf_tensor(global_step)
else:
learning_rate = self.learning_rate
return tf.keras.optimizers.SGD(learning_rate=learning_rate)
def _create_pytorch_optimizer(self, params):
import torch
if isinstance(self.learning_rate, LearningRateSchedule):
lr = self.learning_rate.initial_rate
else:
lr = self.learning_rate
return torch.optim.SGD(params, lr)
def _create_jax_optimizer(self):
import optax
process = []
if isinstance(self.learning_rate, LearningRateSchedule):
scheduler = self.learning_rate._create_jax_schedule()
process.append(optax.scale_by_schedule(scheduler))
last_process = optax.scale(-1.0)
else:
lr = self.learning_rate
last_process = optax.scale(-1.0 * lr)
process.append(last_process)
return optax.chain(*process)
class ExponentialDecay(LearningRateSchedule):
"""A learning rate that decreases exponentially with the number of training steps."""
def __init__(self,
initial_rate: float,
decay_rate: float,
decay_steps: int,
staircase: bool = True):
"""Create an exponentially decaying learning rate.
The learning rate starts as initial_rate. Every decay_steps training steps, it is multiplied by decay_rate.
Parameters
----------
initial_rate: float
the initial learning rate
decay_rate: float
the base of the exponential
decay_steps: int
the number of training steps over which the rate decreases by decay_rate
staircase: bool
if True, the learning rate decreases by discrete jumps every decay_steps.
if False, the learning rate decreases smoothly every step
"""
self.initial_rate = initial_rate
self.decay_rate = decay_rate
self.decay_steps = decay_steps
self.staircase = staircase
def _create_tf_tensor(self, global_step):
import tensorflow as tf
return tf.keras.optimizers.schedules.ExponentialDecay(
initial_learning_rate=self.initial_rate,
decay_rate=self.decay_rate,
decay_steps=self.decay_steps,
staircase=self.staircase)(global_step)
def _create_pytorch_schedule(self, optimizer):
import torch
if self.staircase:
return torch.optim.lr_scheduler.StepLR(optimizer, self.decay_steps,
self.decay_rate)
return torch.optim.lr_scheduler.ExponentialLR(
optimizer, math.pow(self.decay_rate, 1 / self.decay_steps))
def _create_jax_schedule(self):
import optax
return optax.exponential_decay(init_value=self.initial_rate,
transition_steps=self.decay_steps,
decay_rate=self.decay_rate,
staircase=self.staircase)
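# An illustrative sketch of pairing a schedule with an optimizer from this
# module. With staircase=True the rate is multiplied by decay_rate every
# decay_steps steps, so after 2000 steps below it is 0.001 * 0.9**2 = 0.00081.
# >>> schedule = ExponentialDecay(initial_rate=0.001, decay_rate=0.9,
# ... decay_steps=1000)
# >>> optimizer = Adam(learning_rate=schedule)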
class PolynomialDecay(LearningRateSchedule):
"""A learning rate that decreases from an initial value to a final value over a fixed number of training steps."""
def __init__(self,
initial_rate: float,
final_rate: float,
decay_steps: int,
power: float = 1.0):
"""Create a smoothly decaying learning rate.
The learning rate starts as initial_rate. It smoothly decreases to final_rate over decay_steps training steps.
It decays as a function of (1-step/decay_steps)**power. Once the final rate is reached, it remains there for
the rest of optimization.
Parameters
----------
initial_rate: float
the initial learning rate
final_rate: float
the final learning rate
decay_steps: int
the number of training steps over which the rate decreases from initial_rate to final_rate
power: float
the exponent controlling the shape of the decay
"""
self.initial_rate = initial_rate
self.final_rate = final_rate
self.decay_steps = decay_steps
self.power = power
def _create_tf_tensor(self, global_step):
import tensorflow as tf
return tf.keras.optimizers.schedules.PolynomialDecay(
initial_learning_rate=self.initial_rate,
end_learning_rate=self.final_rate,
decay_steps=self.decay_steps,
power=self.power)(global_step)
def _create_pytorch_schedule(self, optimizer):
def f(step):
t = min(step, self.decay_steps) / self.decay_steps
return ((self.initial_rate - self.final_rate) *
(1 - t)**self.power) + self.final_rate
import torch
return torch.optim.lr_scheduler.LambdaLR(optimizer, f)
def _create_jax_schedule(self):
import optax
return optax.polynomial_schedule(init_value=self.initial_rate,
end_value=self.final_rate,
power=self.power,
transition_steps=self.decay_steps)
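# A worked example of the decay above (illustrative): with initial_rate=1e-3,
# final_rate=1e-5, decay_steps=1000 and power=1.0, the rate at step 500 is
# (1e-3 - 1e-5) * (1 - 0.5)**1 + 1e-5 = 5.05e-4, and it stays at 1e-5 for any
# step >= 1000.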
class LinearCosineDecay(LearningRateSchedule):
"""Applies linear cosine decay to the learning rate"""
def __init__(self,
initial_rate: float,
decay_steps: int,
alpha: float = 0.0,
beta: float = 0.001,
num_periods: float = 0.5):
"""
Parameters
----------
initial_rate : float
initial learning rate
decay_steps : int
number of steps to decay over
alpha : float, default 0.0
additive offset in the linear term of the decay; the decayed rate is
initial_rate * ((alpha + linear_decay) * cosine_decay + beta)
beta : float, default 0.001
constant offset added to the decayed value
num_periods : float, default 0.5
number of periods in the cosine part of the decay
"""
self.initial_rate = initial_rate
self.decay_steps = decay_steps
self.alpha = alpha
self.beta = beta
self.num_periods = num_periods
def _create_tf_tensor(self, global_step):
import tensorflow as tf
return tf.compat.v1.train.linear_cosine_decay(
learning_rate=self.initial_rate,
global_step=global_step,
decay_steps=self.decay_steps,
alpha=self.alpha,
beta=self.beta,
num_periods=self.num_periods)
def _create_pytorch_schedule(self, optimizer):
def f(step):
t = min(step, self.decay_steps) / self.decay_steps
linear_decay = 1 - t
cosine_decay = 0.5 * (1 +
math.cos(math.pi * 2 * self.num_periods * t))
decayed = (self.alpha + linear_decay) * cosine_decay + self.beta
return self.initial_rate * decayed
import torch
return torch.optim.lr_scheduler.LambdaLR(optimizer, f)
def _create_jax_schedule(self):
import optax
return optax.cosine_decay_schedule(init_value=self.initial_rate,
decay_steps=self.decay_steps,
alpha=self.alpha)
class PiecewiseConstantSchedule(LearningRateSchedule):
"""Applies scheduler which multiplies by a constant factor on the boundaries"""
def __init__(self,
initial_rate: float,
boundaries_and_scales: Optional[Dict[int, float]] = None):
"""
Parameters
----------
initial_rate : float
initial learning rate
boundaries_and_scales:
A map from boundaries b_i to non-negative scaling factors f_i. For any step
count s, the schedule returns init_v scaled by the product of all factors f_i
such that b_i < s.
"""
self.initial_rate = initial_rate
self.boundaries_and_scales = boundaries_and_scales
def _create_jax_schedule(self):
import optax
return optax.piecewise_constant_schedule(
init_value=self.initial_rate,
boundaries_and_scales=self.boundaries_and_scales)
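# A small worked example of the boundaries_and_scales semantics described
# above (illustrative): with initial_rate=1e-3 and
# boundaries_and_scales={1000: 0.5, 2000: 0.1}, the schedule returns 1e-3 up
# to step 1000, 1e-3 * 0.5 = 5e-4 between steps 1000 and 2000, and
# 1e-3 * 0.5 * 0.1 = 5e-5 afterwards.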
class KFAC(Optimizer):
"""The Second order gradient optimiation algorithm which uses an approximation to calculate the inverse of the Fischer matrrix"""
def __init__(self, **kwargs):
"""
Parameters
----------
model: torch.nn.Module
The model to be optimized.
lr: float (default: 0.001)
Learning rate for the optimizer.
momentum: float (default: 0.9)
Momentum for the optimizer.
stat_decay: float (default: 0.95)
Decay rate for the update of covariance matrix with mean.
damping: float (default: 0.001)
damping factor for the update of covariance matrix.
kl_clip: float (default: 0.001)
Clipping value for the update of covariance matrix.
weight_decay: float (default: 0)
weight decay for the optimizer.
Tcov: int (default: 10)
The number of steps to update the covariance matrix.
Tinv: int (default: 100)
The number of steps to calculate the inverse of covariance matrix.
batch_averaged: bool (default: True)
States whether to use batch averaged covariance matrix.
mean: bool (default: False)
States whether to use mean centered covariance matrix.
"""
self.kwargs = kwargs
def _create_pytorch_optimizer(self):
from deepchem.models.torch_models.kfac_optimizer import KFACOptimizer
if isinstance(self.learning_rate, LearningRateSchedule):
self.kwargs['lr'] = self.learning_rate.initial_rate
else:
self.kwargs['lr'] = self.learning_rate
return KFACOptimizer([self.kwargs])
<file_sep>import deepchem as dc
try:
from deepchem.models.dft.dftxc import XCModel
from deepchem.data.data_loader import DFTYamlLoader
has_dqc = True
except ModuleNotFoundError:
has_dqc = False
import pytest
import tempfile
@pytest.mark.dqc
def test_dftxc_eval():
inputs = 'deepchem/models/tests/assets/test_dftxcdata.yaml'
data = DFTYamlLoader()
model_dir = tempfile.mkdtemp()
dataset = (data.create_dataset(inputs))
model = XCModel("lda_x",
batch_size=1,
log_frequency=1,
mode="classification",
model_dir=model_dir)
loss = model.fit(dataset, nb_epoch=2, checkpoint_interval=1)
assert loss < 0.001
reload_model = XCModel("lda_x",
batch_size=1,
log_frequency=1,
mode="classification",
model_dir=model_dir)
reload_model.restore()
inputs1 = 'deepchem/models/tests/assets/test_ieLi.yaml'
predict_dataset = data.create_dataset(inputs1)
predict = reload_model.predict(predict_dataset)
assert predict < 0.199
metric = dc.metrics.Metric(dc.metrics.mae_score)
scores = model.evaluate(dataset, [metric])
assert scores['mae_score'] < 0.3
# testing batch size > 1
model2 = XCModel("lda_x",
batch_size=2,
log_frequency=1,
mode="classification")
loss2 = model2.fit(dataset, nb_epoch=2, checkpoint_interval=2)
assert loss2 < 0.2
# testing true values
assert dataset.y[0] != dataset.y[1]
@pytest.mark.dqc
def test_dm():
inputs = 'deepchem/models/tests/assets/test_dm.yaml'
data = DFTYamlLoader()
dataset = (data.create_dataset(inputs))
model = XCModel("lda_x", batch_size=1)
loss = model.fit(dataset, nb_epoch=1, checkpoint_interval=1)
assert loss < 0.008
<file_sep>from deepchem.feat import MolecularFeaturizer
from rdkit import Chem
import numpy as np
from deepchem.feat.graph_data import GraphData
allowable_features = {
'possible_atomic_num_list':
list(range(0, 119)), # 0 represents a masked atom
'possible_formal_charge_list': [-5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5],
'possible_chirality_list': [
Chem.rdchem.ChiralType.CHI_UNSPECIFIED,
Chem.rdchem.ChiralType.CHI_TETRAHEDRAL_CW,
Chem.rdchem.ChiralType.CHI_TETRAHEDRAL_CCW,
Chem.rdchem.ChiralType.CHI_OTHER
], # noqa: E122
'possible_hybridization_list': [
Chem.rdchem.HybridizationType.S, Chem.rdchem.HybridizationType.SP,
Chem.rdchem.HybridizationType.SP2, Chem.rdchem.HybridizationType.SP3,
Chem.rdchem.HybridizationType.SP3D, Chem.rdchem.HybridizationType.SP3D2,
Chem.rdchem.HybridizationType.UNSPECIFIED
], # noqa: E122
'possible_numH_list': [0, 1, 2, 3, 4, 5, 6, 7, 8],
'possible_implicit_valence_list': [0, 1, 2, 3, 4, 5, 6],
'possible_degree_list': [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
# 0 represents a masked bond
'possible_bonds': [
0, Chem.rdchem.BondType.SINGLE, Chem.rdchem.BondType.DOUBLE,
Chem.rdchem.BondType.TRIPLE, Chem.rdchem.BondType.AROMATIC
], # noqa: E122
'possible_bond_dirs': [ # only for double bond stereo information
Chem.rdchem.BondDir.NONE, Chem.rdchem.BondDir.ENDUPRIGHT,
Chem.rdchem.BondDir.ENDDOWNRIGHT
] # noqa: E122
}
class SNAPFeaturizer(MolecularFeaturizer):
"""
This featurizer is based on the SNAP featurizer used in the paper [1].
Example
-------
>>> smiles = ["CC(=O)C"]
>>> featurizer = SNAPFeaturizer()
>>> print(featurizer.featurize(smiles))
[GraphData(node_features=[4, 2], edge_index=[2, 6], edge_features=[6, 2])]
References
----------
.. [1] <NAME> al. Strategies for Pre-training Graph Neural Networks. Preprint at https://doi.org/10.48550/arXiv.1905.12265 (2020).
"""
def _featurize(self, mol, **kwargs):
"""
Converts rdkit mol object to the deepchem Graph Data object. Uses
simplified atom and bond features, represented as indices.
Parameters
----------
mol: RDKitMol
RDKit molecule object
Returns
-------
data: GraphData
Graph data object with the attributes: x, edge_index, edge_features
"""
# atoms
atom_features_list = []
for atom in mol.GetAtoms():
atom_feature = [
allowable_features['possible_atomic_num_list'].index(
atom.GetAtomicNum())
] + [
allowable_features['possible_chirality_list'].index(
atom.GetChiralTag())
]
atom_features_list.append(atom_feature)
x = np.array(atom_features_list)
# bonds
num_bond_features = 2 # bond type, bond direction
if len(mol.GetBonds()) > 0: # mol has bonds
edges_list = []
edge_features_list = []
for bond in mol.GetBonds():
i = bond.GetBeginAtomIdx()
j = bond.GetEndAtomIdx()
edge_feature = [
allowable_features['possible_bonds'].index(
bond.GetBondType())
] + [
allowable_features['possible_bond_dirs'].index(
bond.GetBondDir())
]
edges_list.append((i, j))
edge_features_list.append(edge_feature)
edges_list.append((j, i))
edge_features_list.append(edge_feature)
# Graph connectivity in COO format with shape [2, num_edges]
edge_index = np.array(edges_list).T
# Edge feature matrix with shape [num_edges, num_edge_features]
edge_feats = np.array(edge_features_list)
else: # mol has no bonds
edge_index = np.empty((2, 0), dtype=np.int8)
edge_feats = np.empty((0, num_bond_features), dtype=np.int8)
data = GraphData(node_features=x,
edge_index=edge_index,
edge_features=edge_feats)
return data
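# An illustrative note on the COO edge layout produced above: every RDKit bond
# contributes two directed edges, so ethanol ("CCO", two bonds) would yield an
# edge_index of shape (2, 4) and edge_features of shape (4, 2).
# >>> feats = SNAPFeaturizer().featurize(["CCO"])
# >>> feats[0].edge_index.shape
# (2, 4)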
<file_sep>import nglview
import tempfile
import os
import mdtraj as md
import numpy as np
from rdkit import Chem
from rdkit.Chem import Draw
from itertools import islice
from IPython.display import Image, HTML, display
def combine_mdtraj(protein, ligand):
chain = protein.topology.add_chain()
residue = protein.topology.add_residue("LIG", chain, resSeq=1)
for atom in ligand.topology.atoms:
protein.topology.add_atom(atom.name, atom.element, residue)
protein.xyz = np.hstack([protein.xyz, ligand.xyz])
protein.topology.create_standard_bonds()
return protein
def visualize_complex(complex_mdtraj):
ligand_atoms = [a.index for a in complex_mdtraj.topology.atoms if "LIG" in str(a.residue)]
binding_pocket_atoms = md.compute_neighbors(complex_mdtraj, 0.5, ligand_atoms)[0]
binding_pocket_residues = list(set([complex_mdtraj.topology.atom(a).residue.resSeq for a in binding_pocket_atoms]))
binding_pocket_residues = [str(r) for r in binding_pocket_residues]
binding_pocket_residues = " or ".join(binding_pocket_residues)
traj = nglview.MDTrajTrajectory( complex_mdtraj ) # wrap the MDTraj complex for NGLView
ngltraj = nglview.NGLWidget( traj )
ngltraj.representations = [
{ "type": "cartoon", "params": {
"sele": "protein", "color": "residueindex"
} },
{ "type": "licorice", "params": {
"sele": "(not hydrogen) and (%s)" % binding_pocket_residues
} },
{ "type": "ball+stick", "params": {
"sele": "LIG"
} }
]
return ngltraj
def visualize_ligand(ligand_mdtraj):
traj = nglview.MDTrajTrajectory( ligand_mdtraj ) # wrap the MDTraj ligand for NGLView
ngltraj = nglview.NGLWidget( traj )
ngltraj.representations = [
{ "type": "ball+stick", "params": {"sele": "all" } } ]
return ngltraj
def convert_lines_to_mdtraj(molecule_lines):
tempdir = tempfile.mkdtemp()
molecule_file = os.path.join(tempdir, "molecule.pdb")
with open(molecule_file, "wb") as f:
f.writelines(molecule_lines)
molecule_mdtraj = md.load(molecule_file)
return molecule_mdtraj
def display_images(filenames):
"""Helper to pretty-print images."""
imagesList=''.join(
["<img style='width: 140px; margin: 0px; float: left; border: 1px solid black;' src='%s' />"
% str(s) for s in sorted(filenames)])
display(HTML(imagesList))
def mols_to_pngs(mols, basename="test"):
"""Helper to write RDKit mols to png files."""
filenames = []
for i, mol in enumerate(mols):
filename = "%s%d.png" % (basename, i)
Draw.MolToFile(mol, filename)
filenames.append(filename)
return filenames
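# A minimal usage sketch (illustrative; assumes RDKit can parse the SMILES and
# that the notebook can render HTML): draw a couple of molecules to PNG files
# and display them inline.
# >>> mols = [Chem.MolFromSmiles(s) for s in ("CCO", "c1ccccc1")]
# >>> display_images(mols_to_pngs(mols, basename="demo"))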
<file_sep># BACE Dataset Examples
The BACE dataset is from the following paper:
Subramanian, Govindan, et al. "Computational modeling of β-secretase 1 (BACE-1) inhibitors using ligand based approaches." Journal of chemical information and modeling 56.10 (2016): 1936-1949.
This study considers a small dataset of 205 compounds that is
used to train a model, which is then evaluated on a larger
external validation set of 1273 compounds.
The file `bace_datasets.py` loads the data as used in the
original paper. `bace_rf.py` demonstrates training a random
forest against this dataset.
<file_sep>import unittest
import deepchem as dc
from deepchem.splits.splitters import ScaffoldSplitter
class TestScaffoldSplitter(unittest.TestCase):
def test_scaffolds(self):
tox21_tasks, tox21_datasets, transformers = \
dc.molnet.load_tox21(featurizer='GraphConv')
train_dataset, valid_dataset, test_dataset = tox21_datasets
splitter = ScaffoldSplitter()
scaffolds_separate = splitter.generate_scaffolds(train_dataset)
scaffolds_train, scaffolds_valid, _ = splitter.split(train_dataset)
# The amount of datapoints has to be the same
data_cnt = sum([len(sfd) for sfd in scaffolds_separate])
self.assertTrue(data_cnt == train_dataset.X.shape[0])
# The number of scaffolds generated by the splitter
# has to be smaller or equal than number of total molecules
scaffolds_separate_cnt = len(scaffolds_separate)
self.assertTrue(scaffolds_separate_cnt <= train_dataset.X.shape[0])
def test_generate_scaffold(self):
from deepchem.splits.splitters import _generate_scaffold
valid_smiles = r's1cc(nc1\[N]=C(\N)N)C'
scaffold = _generate_scaffold(valid_smiles)
self.assertTrue(scaffold == 'c1cscn1')
# Invalid because valence for atom 5 N is greater than permitted (4)
invalid_smiles = r's1cc(nc1\[NH]=C(\N)N)C'
scaffold = _generate_scaffold(invalid_smiles)
self.assertIsNone(scaffold)
<file_sep>import pytest
@pytest.mark.dqc
def test_pyscf():
import pyscf
mol_h2o = pyscf.gto.M(atom='O 0 0 0; H 0 1 0; H 0 0 1',
parse_arg=False,
basis='ccpvdz')
assert mol_h2o.basis == 'ccpvdz'
<file_sep>from typing import Optional, Sequence
import numpy as np
class GraphData:
"""GraphData class
This data class is almost same as `torch_geometric.data.Data
<https://pytorch-geometric.readthedocs.io/en/latest/modules/data.html#torch_geometric.data.Data>`_.
Attributes
----------
node_features: np.ndarray
Node feature matrix with shape [num_nodes, num_node_features]
edge_index: np.ndarray, dtype int
Graph connectivity in COO format with shape [2, num_edges]
edge_features: np.ndarray, optional (default None)
Edge feature matrix with shape [num_edges, num_edge_features]
node_pos_features: np.ndarray, optional (default None)
Node position matrix with shape [num_nodes, num_dimensions].
num_nodes: int
The number of nodes in the graph
num_node_features: int
The number of features per node in the graph
num_edges: int
The number of edges in the graph
    num_edge_features: int, optional (default None)
The number of features per edge in the graph
Examples
--------
>>> import numpy as np
>>> node_features = np.random.rand(5, 10)
>>> edge_index = np.array([[0, 1, 2, 3, 4], [1, 2, 3, 4, 0]], dtype=np.int64)
>>> edge_features = np.random.rand(5, 5)
>>> global_features = np.random.random(5)
>>> graph = GraphData(node_features, edge_index, edge_features, z=global_features)
>>> graph
GraphData(node_features=[5, 10], edge_index=[2, 5], edge_features=[5, 5], z=[5])
"""
def __init__(self,
node_features: np.ndarray,
edge_index: np.ndarray,
edge_features: Optional[np.ndarray] = None,
node_pos_features: Optional[np.ndarray] = None,
**kwargs):
"""
Parameters
----------
node_features: np.ndarray
Node feature matrix with shape [num_nodes, num_node_features]
edge_index: np.ndarray, dtype int
Graph connectivity in COO format with shape [2, num_edges]
edge_features: np.ndarray, optional (default None)
Edge feature matrix with shape [num_edges, num_edge_features]
node_pos_features: np.ndarray, optional (default None)
Node position matrix with shape [num_nodes, num_dimensions].
kwargs: optional
Additional attributes and their values
"""
# validate params
if isinstance(node_features, np.ndarray) is False:
raise ValueError('node_features must be np.ndarray.')
if isinstance(edge_index, np.ndarray) is False:
raise ValueError('edge_index must be np.ndarray.')
elif issubclass(edge_index.dtype.type, np.integer) is False:
            raise ValueError('edge_index.dtype must contain integers.')
elif edge_index.shape[0] != 2:
raise ValueError('The shape of edge_index is [2, num_edges].')
# np.max() method works only for a non-empty array, so size of the array should be non-zero
elif (edge_index.size != 0) and (np.max(edge_index) >=
len(node_features)):
            raise ValueError('edge_index contains an invalid node number.')
if edge_features is not None:
if isinstance(edge_features, np.ndarray) is False:
raise ValueError('edge_features must be np.ndarray or None.')
elif edge_index.shape[1] != edge_features.shape[0]:
raise ValueError(
'The first dimension of edge_features must be the same as the second dimension of edge_index.'
)
if node_pos_features is not None:
if isinstance(node_pos_features, np.ndarray) is False:
raise ValueError(
'node_pos_features must be np.ndarray or None.')
elif node_pos_features.shape[0] != node_features.shape[0]:
raise ValueError(
'The length of node_pos_features must be the same as the length of node_features.'
)
self.node_features = node_features
self.edge_index = edge_index
self.edge_features = edge_features
self.node_pos_features = node_pos_features
self.kwargs = kwargs
self.num_nodes, self.num_node_features = self.node_features.shape
self.num_edges = edge_index.shape[1]
if self.edge_features is not None:
self.num_edge_features = self.edge_features.shape[1]
for key, value in self.kwargs.items():
setattr(self, key, value)
def __repr__(self) -> str:
"""Returns a string containing the printable representation of the object"""
cls = self.__class__.__name__
node_features_str = str(list(self.node_features.shape))
edge_index_str = str(list(self.edge_index.shape))
if self.edge_features is not None:
edge_features_str = str(list(self.edge_features.shape))
else:
edge_features_str = "None"
out = "%s(node_features=%s, edge_index=%s, edge_features=%s" % (
cls, node_features_str, edge_index_str, edge_features_str)
# Adding shapes of kwargs
for key, value in self.kwargs.items():
if isinstance(value, np.ndarray):
out += (', ' + key + '=' + str(list(value.shape)))
elif isinstance(value, str):
out += (', ' + key + '=' + value)
elif isinstance(value, int) or isinstance(value, float):
out += (', ' + key + '=' + str(value))
out += ')'
return out
def to_pyg_graph(self):
"""Convert to PyTorch Geometric graph data instance
Returns
-------
torch_geometric.data.Data
Graph data for PyTorch Geometric
Note
----
This method requires PyTorch Geometric to be installed.
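        Examples
        --------
        A small illustrative sketch; the conversion line is skipped in doctests
        because it needs PyTorch Geometric at runtime:
        >>> node_features = np.random.rand(5, 10)
        >>> edge_index = np.array([[0, 1, 2, 3, 4], [1, 2, 3, 4, 0]], dtype=np.int64)
        >>> graph = GraphData(node_features, edge_index)
        >>> pyg_graph = graph.to_pyg_graph()  # doctest: +SKIP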
"""
try:
import torch
from torch_geometric.data import Data
except ModuleNotFoundError:
raise ImportError(
"This function requires PyTorch Geometric to be installed.")
edge_features = self.edge_features
if edge_features is not None:
edge_features = torch.from_numpy(self.edge_features).float()
node_pos_features = self.node_pos_features
if node_pos_features is not None:
node_pos_features = torch.from_numpy(self.node_pos_features).float()
kwargs = {}
for key, value in self.kwargs.items():
kwargs[key] = torch.from_numpy(value).float()
return Data(x=torch.from_numpy(self.node_features).float(),
edge_index=torch.from_numpy(self.edge_index).long(),
edge_attr=edge_features,
pos=node_pos_features,
**kwargs)
def to_dgl_graph(self, self_loop: bool = False):
"""Convert to DGL graph data instance
Returns
-------
dgl.DGLGraph
Graph data for DGL
self_loop: bool
Whether to add self loops for the nodes, i.e. edges from nodes
to themselves. Default to False.
Note
----
This method requires DGL to be installed.
"""
try:
import dgl
import torch
except ModuleNotFoundError:
raise ImportError("This function requires DGL to be installed.")
src = self.edge_index[0]
dst = self.edge_index[1]
g = dgl.graph(
(torch.from_numpy(src).long(), torch.from_numpy(dst).long()),
num_nodes=self.num_nodes)
g.ndata['x'] = torch.from_numpy(self.node_features).float()
if self.node_pos_features is not None:
g.ndata['pos'] = torch.from_numpy(self.node_pos_features).float()
g.edata['d'] = torch.norm(g.ndata['pos'][g.edges()[0]] -
g.ndata['pos'][g.edges()[1]],
p=2,
dim=-1).unsqueeze(-1).detach()
if self.edge_features is not None:
g.edata['edge_attr'] = torch.from_numpy(self.edge_features).float()
if self_loop:
# This assumes that the edge features for self loops are full-zero tensors
# In the future we may want to support featurization for self loops
g.add_edges(np.arange(self.num_nodes), np.arange(self.num_nodes))
return g
def numpy_to_torch(self, device: str = 'cpu'):
"""Convert numpy arrays to torch tensors. This may be useful when you are using PyTorch Geometric with GraphData objects.
Parameters
----------
device : str
Device to store the tensors. Default to 'cpu'.
Example
-------
>>> num_nodes, num_node_features = 5, 32
>>> num_edges, num_edge_features = 6, 32
>>> node_features = np.random.random_sample((num_nodes, num_node_features))
>>> edge_features = np.random.random_sample((num_edges, num_edge_features))
>>> edge_index = np.random.randint(0, num_nodes, (2, num_edges))
>>> graph_data = GraphData(node_features, edge_index, edge_features)
>>> graph_data = graph_data.numpy_to_torch()
>>> print(type(graph_data.node_features))
<class 'torch.Tensor'>
"""
import copy
import torch
graph_copy = copy.deepcopy(self)
graph_copy.node_features = torch.from_numpy( # type: ignore
self.node_features).float().to(device)
graph_copy.edge_index = torch.from_numpy( # type: ignore
self.edge_index).long().to(device)
if self.edge_features is not None:
graph_copy.edge_features = torch.from_numpy( # type: ignore
self.edge_features).float().to(device)
else:
graph_copy.edge_features = None
if self.node_pos_features is not None:
graph_copy.node_pos_features = torch.from_numpy( # type: ignore
self.node_pos_features).float().to(device)
else:
graph_copy.node_pos_features = None
graph_copy.kwargs = {}
for key, value in self.kwargs.items():
if isinstance(value, np.ndarray):
value = torch.from_numpy(value).to(device)
graph_copy.kwargs[key] = value
setattr(graph_copy, key, value)
return graph_copy
def subgraph(self, nodes):
"""Returns a subgraph of `nodes` indicies.
Parameters
----------
nodes : list, iterable
A list of node indices to be included in the subgraph.
Returns
-------
        subgraph_data : GraphData
            A new GraphData object containing the subgraph induced on `nodes`.
        node_mapping : dict
            A dictionary mapping the original node indices to the new node indices.
Example
-------
>>> import numpy as np
>>> from deepchem.feat.graph_data import GraphData
>>> node_features = np.random.rand(5, 10)
>>> edge_index = np.array([[0, 1, 2, 3, 4], [1, 2, 3, 4, 0]], dtype=np.int64)
>>> edge_features = np.random.rand(5, 3)
>>> graph_data = GraphData(node_features, edge_index, edge_features)
>>> nodes = [0, 2, 4]
>>> subgraph_data, node_mapping = graph_data.subgraph(nodes)
"""
nodes = set(nodes)
if not nodes.issubset(range(self.num_nodes)):
raise ValueError("Some nodes are not in the original graph")
# Create a mapping from the original node indices to the new node indices
node_mapping = {
old_idx: new_idx for new_idx, old_idx in enumerate(nodes)
}
# Filter and reindex node features
subgraph_node_features = self.node_features[list(nodes)]
# Filter and reindex edge indices and edge features
subgraph_edge_indices = []
subgraph_edge_features = []
if self.edge_features is not None:
for i in range(self.num_edges):
src, dest = self.edge_index[:, i]
if src in nodes and dest in nodes:
subgraph_edge_indices.append(
(node_mapping[src], node_mapping[dest]))
subgraph_edge_features.append(self.edge_features[i])
subgraph_edge_index = np.array(subgraph_edge_indices, dtype=np.int64).T
subgraph_edge_features = np.array(subgraph_edge_features)
subgraph_data = GraphData(node_features=subgraph_node_features,
edge_index=subgraph_edge_index,
edge_features=subgraph_edge_features,
**self.kwargs)
return subgraph_data, node_mapping
class BatchGraphData(GraphData):
"""Batch GraphData class
Attributes
----------
node_features: np.ndarray
Concatenated node feature matrix with shape [num_nodes, num_node_features].
`num_nodes` is total number of nodes in the batch graph.
edge_index: np.ndarray, dtype int
Concatenated graph connectivity in COO format with shape [2, num_edges].
`num_edges` is total number of edges in the batch graph.
edge_features: np.ndarray, optional (default None)
Concatenated edge feature matrix with shape [num_edges, num_edge_features].
`num_edges` is total number of edges in the batch graph.
node_pos_features: np.ndarray, optional (default None)
Concatenated node position matrix with shape [num_nodes, num_dimensions].
        `num_nodes` is total number of nodes in the batch graph.
num_nodes: int
The number of nodes in the batch graph.
num_node_features: int
The number of features per node in the graph.
num_edges: int
The number of edges in the batch graph.
    num_edge_features: int, optional (default None)
The number of features per edge in the graph.
graph_index: np.ndarray, dtype int
This vector indicates which graph the node belongs with shape [num_nodes,].
Examples
--------
>>> import numpy as np
>>> from deepchem.feat.graph_data import GraphData
>>> node_features_list = np.random.rand(2, 5, 10)
>>> edge_index_list = np.array([
... [[0, 1, 2, 3, 4], [1, 2, 3, 4, 0]],
... [[0, 1, 2, 3, 4], [1, 2, 3, 4, 0]],
... ], dtype=int)
>>> user_defined_attribute = np.array([0, 1])
>>> graph_list = [GraphData(node_features, edge_index, attribute=user_defined_attribute)
... for node_features, edge_index in zip(node_features_list, edge_index_list)]
>>> batch_graph = BatchGraphData(graph_list=graph_list)
"""
def __init__(self, graph_list: Sequence[GraphData]):
"""
Parameters
----------
graph_list: Sequence[GraphData]
List of GraphData
"""
# stack features
batch_node_features = np.vstack(
[graph.node_features for graph in graph_list])
# before stacking edge_features or node_pos_features,
# we should check whether these are None or not
if graph_list[0].edge_features is not None:
batch_edge_features: Optional[np.ndarray] = np.vstack(
[graph.edge_features for graph in graph_list]) # type: ignore
else:
batch_edge_features = None
if graph_list[0].node_pos_features is not None:
batch_node_pos_features: Optional[np.ndarray] = np.vstack([
graph.node_pos_features for graph in graph_list # type: ignore
])
else:
batch_node_pos_features = None
# create new edge index
# number of nodes in each graph
num_nodes_list = [graph.num_nodes for graph in graph_list]
# cumulative number of nodes for each graph. This is necessary because the values in edge_index are node indices of all of the graphs in graph_list and so we need to offset the indices by the number of nodes in the previous graphs.
cum_num_nodes_list = np.cumsum([0] + num_nodes_list)[:-1]
# columns are the edge index, values are the node index
batch_edge_index = np.hstack([
graph.edge_index + cum_num_nodes
for cum_num_nodes, graph in zip(cum_num_nodes_list, graph_list)
])
# graph_index indicates which nodes belong to which graph
graph_index = []
for i, num_nodes in enumerate(num_nodes_list):
graph_index.extend([i] * num_nodes)
self.graph_index = np.array(graph_index)
# Batch user defined attributes
kwargs = {}
user_defined_attribute_names = self._get_user_defined_attributes(
graph_list[0])
for name in user_defined_attribute_names:
kwargs[name] = np.vstack(
[getattr(graph, name) for graph in graph_list])
super().__init__(node_features=batch_node_features,
edge_index=batch_edge_index,
edge_features=batch_edge_features,
node_pos_features=batch_node_pos_features,
**kwargs)
def _get_user_defined_attributes(self, graph_data: GraphData):
"""A GraphData object can have user defined attributes but the attribute name of those
are unknown since it can be arbitary. This method helps to find user defined attribute's
name by making a list of known graph data attributes and finding other user defined
attributes via `vars` method. The user defined attributes are attributes other than
`node_features`, `edge_index`, `edge_features`, `node_pos_features`, `kwargs`, `num_nodes`,
`num_node_features`, `num_edges`, `num_edge_features` as these are graph data attributes."""
graph_data_attributes = [
'node_features', 'edge_index', 'edge_features', 'node_pos_features',
'kwargs', 'num_nodes', 'num_node_features', 'num_edges',
'num_edge_features'
]
user_defined_attribute_names = []
for arg in vars(graph_data):
if arg not in graph_data_attributes:
user_defined_attribute_names.append(arg)
return user_defined_attribute_names
def numpy_to_torch(self, device: str = "cpu"):
"""
Convert numpy arrays to torch tensors for BatchGraphData. BatchGraphData is very similar to GraphData, but it combines all graphs into a single graph object and it has an additional attribute `graph_index` which indicates which nodes belong to which graph.
Parameters
----------
device : str
Device to store the tensors. Default to 'cpu'.
Example
-------
>>> num_nodes, num_node_features = 5, 32
>>> num_edges, num_edge_features = 6, 32
>>> node_features = np.random.random_sample((num_nodes, num_node_features))
>>> edge_features = np.random.random_sample((num_edges, num_edge_features))
>>> edge_index = np.random.randint(0, num_nodes, (2, num_edges))
>>> graph_data = GraphData(node_features, edge_index, edge_features)
>>> node_features2 = np.random.random_sample((num_nodes, num_node_features))
>>> edge_features2 = np.random.random_sample((num_edges, num_edge_features))
>>> edge_index2 = np.random.randint(0, num_nodes, (2, num_edges))
>>> graph_data2 = GraphData(node_features2, edge_index2, edge_features2)
>>> batch_graph_data = BatchGraphData([graph_data, graph_data2])
>>> batch_graph_data = batch_graph_data.numpy_to_torch()
>>> print(type(batch_graph_data.node_features))
<class 'torch.Tensor'>
"""
import torch
graph_copy = super().numpy_to_torch(device)
graph_index = torch.from_numpy(
self.graph_index).long().to( # type: ignore
device)
graph_copy.graph_index = graph_index
return graph_copy
def shortest_path_length(graph_data, source, cutoff=None):
"""Compute the shortest path lengths from source to all reachable nodes in a GraphData object.
This function only works with undirected graphs.
Parameters
----------
graph_data : GraphData
GraphData object containing the graph information
source : int
Starting node index for path
cutoff : int, optional
Depth to stop the search. Only paths of length <= cutoff are returned.
Returns
-------
lengths : dict
Dict of node index and shortest path length from source to that node.
Examples
--------
>>> import numpy as np
>>> node_features = np.random.rand(5, 10)
>>> edge_index = np.array([[0, 1, 2, 3, 4], [1, 2, 3, 4, 0]], dtype=np.int64)
>>> graph_data = GraphData(node_features, edge_index)
>>> shortest_path_length(graph_data, 0)
{0: 0, 1: 1, 2: 2, 3: 2, 4: 1}
>>> shortest_path_length(graph_data, 0, cutoff=1)
{0: 0, 1: 1, 4: 1}
"""
if source >= graph_data.num_nodes:
raise ValueError(f"Source {source} is not in graph_data")
if cutoff is None:
cutoff = float("inf")
# Convert edge_index to adjacency list
adj_list = [[] for _ in range(graph_data.num_nodes)]
for i in range(graph_data.num_edges):
src, dest = graph_data.edge_index[:, i]
adj_list[src].append(dest)
adj_list[dest].append(src) # Assuming undirected graph
# Breadth-first search
visited = np.full(graph_data.num_nodes, False)
distances = np.full(graph_data.num_nodes, np.inf)
queue = [source]
visited[source] = True
distances[source] = 0
while queue:
node = queue.pop(0)
for neighbor in adj_list[node]:
if not visited[neighbor]:
visited[neighbor] = True
distances[neighbor] = distances[node] + 1
if distances[neighbor] < cutoff:
queue.append(neighbor)
return {i: int(d) for i, d in enumerate(distances) if d <= cutoff}
<file_sep>"""
Train support-based models.
"""
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
import warnings
import numpy as np
import tensorflow as tf
import sys
import time
from deepchem.models import Model
from deepchem.data import pad_batch
from deepchem.data import NumpyDataset
from deepchem.metrics import to_one_hot
from deepchem.metrics import from_one_hot
from deepchem.models.tf_new_models.graph_topology import merge_dicts
from deepchem.nn import model_ops
from deepchem.data import SupportGenerator
from deepchem.data import EpisodeGenerator
from deepchem.data import get_task_dataset
from deepchem.data import get_single_task_test
from deepchem.data import get_task_dataset_minus_support
from deepchem.nn.copy import Input
class SupportGraphClassifier(Model):
def __init__(self,
model,
test_batch_size=10,
support_batch_size=10,
learning_rate=.001,
similarity="cosine",
**kwargs):
"""Builds a support-based classifier.
See https://arxiv.org/pdf/1606.04080v1.pdf for definition of support.
Parameters
----------
sess: tf.Session
Session for this model
model: SequentialSupportModel
Contains core layers in model.
n_pos: int
Number of positive examples in support.
n_neg: int
Number of negative examples in support.
"""
warnings.warn("SupportGraphClassifier is deprecated. "
"Will be removed in DeepChem 1.4.", DeprecationWarning)
self.similarity = similarity
self.model = model
self.sess = tf.Session(graph=self.model.graph)
self.test_batch_size = test_batch_size
self.support_batch_size = support_batch_size
self.learning_rate = learning_rate
self.epsilon = 1e-7
with self.model.graph.as_default():
self.add_placeholders()
self.pred_op, self.scores_op, self.loss_op = self.add_training_loss()
# Get train function
self.train_op = self.get_training_op(self.loss_op)
# Initialize
self.init_fn = tf.global_variables_initializer()
self.sess.run(self.init_fn)
def get_training_op(self, loss):
"""Attaches an optimizer to the graph."""
opt = tf.train.AdamOptimizer(self.learning_rate)
return opt.minimize(self.loss_op, name="train")
def add_placeholders(self):
"""Adds placeholders to graph."""
#################################################################### DEBUG
#self.test_label_placeholder = Input(
# tensor=tf.placeholder(dtype='float32', shape=(self.test_batch_size),
# name="label_placeholder"))
#self.test_weight_placeholder = Input(
# tensor=tf.placeholder(dtype='float32', shape=(self.test_batch_size),
# name="weight_placeholder"))
self.test_label_placeholder = tf.placeholder(
dtype='float32', shape=(self.test_batch_size), name="label_placeholder")
self.test_weight_placeholder = tf.placeholder(
dtype='float32',
shape=(self.test_batch_size),
name="weight_placeholder")
# TODO(rbharath): Should weights for the support be used?
# Support labels
#self.support_label_placeholder = Input(
# tensor=tf.placeholder(dtype='float32', shape=[self.support_batch_size],
# name="support_label_placeholder"))
self.support_label_placeholder = tf.placeholder(
dtype='float32',
shape=[self.support_batch_size],
name="support_label_placeholder")
self.phase = tf.placeholder(dtype='bool', name='keras_learning_phase')
#################################################################### DEBUG
def construct_feed_dict(self, test, support, training=True, add_phase=False):
"""Constructs tensorflow feed from test/support sets."""
# Generate dictionary elements for support
feed_dict = (
self.model.support_graph_topology.batch_to_feed_dict(support.X))
feed_dict[self.support_label_placeholder] = np.squeeze(support.y)
# Get graph information for test
batch_topo_dict = (
self.model.test_graph_topology.batch_to_feed_dict(test.X))
feed_dict = merge_dicts([batch_topo_dict, feed_dict])
# Generate dictionary elements for test
feed_dict[self.test_label_placeholder] = np.squeeze(test.y)
feed_dict[self.test_weight_placeholder] = np.squeeze(test.w)
if add_phase:
feed_dict[self.phase] = training
return feed_dict
def fit(self,
dataset,
n_episodes_per_epoch=1000,
nb_epochs=1,
n_pos=1,
n_neg=9,
log_every_n_samples=10,
**kwargs):
"""Fits model on dataset using cached supports.
    For each epoch, samples n_episodes_per_epoch (support, test) pairs and does
    gradient descent.
Parameters
----------
dataset: dc.data.Dataset
Dataset to fit model on.
nb_epochs: int, optional
number of epochs of training.
n_episodes_per_epoch: int, optional
Number of (support, test) pairs to sample and train on per epoch.
n_pos: int, optional
Number of positive examples per support.
n_neg: int, optional
Number of negative examples per support.
log_every_n_samples: int, optional
Displays info every this number of samples
"""
time_start = time.time()
# Perform the optimization
n_tasks = len(dataset.get_task_names())
n_test = self.test_batch_size
feed_total, run_total = 0, 0
for epoch in range(nb_epochs):
# Create different support sets
episode_generator = EpisodeGenerator(dataset, n_pos, n_neg, n_test,
n_episodes_per_epoch)
recent_losses = []
for ind, (task, support, test) in enumerate(episode_generator):
if ind % log_every_n_samples == 0:
print("Epoch %d, Sample %d from task %s" % (epoch, ind, str(task)))
# Get batch to try it out on
feed_start = time.time()
feed_dict = self.construct_feed_dict(test, support)
feed_end = time.time()
feed_total += (feed_end - feed_start)
# Train on support set, batch pair
run_start = time.time()
_, loss = self.sess.run(
[self.train_op, self.loss_op], feed_dict=feed_dict)
run_end = time.time()
run_total += (run_end - run_start)
if ind % log_every_n_samples == 0:
mean_loss = np.mean(np.array(recent_losses))
print("\tmean loss is %s" % str(mean_loss))
recent_losses = []
else:
recent_losses.append(loss)
time_end = time.time()
print("fit took %s seconds" % str(time_end - time_start))
print("feed_total: %s" % str(feed_total))
print("run_total: %s" % str(run_total))
def save(self):
"""Save all models
TODO(rbharath): Saving is not yet supported for this model.
"""
pass
def add_training_loss(self):
"""Adds training loss and scores for network."""
pred, scores = self.get_scores()
losses = tf.nn.sigmoid_cross_entropy_with_logits(
logits=scores, labels=self.test_label_placeholder)
weighted_losses = tf.multiply(losses, self.test_weight_placeholder)
loss = tf.reduce_sum(weighted_losses)
return pred, scores, loss
def get_scores(self):
"""Adds tensor operations for computing scores.
Computes prediction yhat (eqn (1) in Matching networks) of class for test
compounds.
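    Concretely, this computes eqn (1) of the Matching Networks paper:
    yhat = sum_i a(xhat, x_i) * y_i,
    where a(xhat, x_i) is a softmax over the cosine-similarity kernel between the
    test embedding and the support embeddings, and y_i are the support labels.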
"""
# Get featurization for test
# Shape (n_test, n_feat)
test_feat = self.model.get_test_output()
# Get featurization for support
# Shape (n_support, n_feat)
support_feat = self.model.get_support_output()
# Computes the inner part c() of the kernel
# (the inset equation in section 2.1.1 of Matching networks paper).
# Normalize
if self.similarity == 'cosine':
g = model_ops.cosine_distances(test_feat, support_feat)
else:
raise ValueError("Only cosine similarity is supported.")
# TODO(rbharath): euclidean kernel is broken!
#elif self.similarity == 'euclidean':
# g = model_ops.euclidean_distance(test_feat, support_feat)
# Note that gram matrix g has shape (n_test, n_support)
# soft corresponds to a(xhat, x_i) in eqn (1) of Matching Networks paper
# https://arxiv.org/pdf/1606.04080v1.pdf
# Computes softmax across axis 1, (so sums distances to support set for
# each test entry) to get attention vector
# Shape (n_test, n_support)
attention = tf.nn.softmax(g) # Renormalize
# Weighted sum of support labels
# Shape (n_support, 1)
support_labels = tf.expand_dims(self.support_label_placeholder, 1)
# pred is yhat in eqn (1) of Matching Networks.
# Shape squeeze((n_test, n_support) * (n_support, 1)) = (n_test,)
pred = tf.squeeze(tf.matmul(attention, support_labels), [1])
# Clip softmax probabilities to range [epsilon, 1-epsilon]
# Shape (n_test,)
pred = tf.clip_by_value(pred, 1e-7, 1. - 1e-7)
# Convert to logit space using inverse sigmoid (logit) function
# logit function: log(pred) - log(1-pred)
# Used to invoke tf.nn.sigmoid_cross_entropy_with_logits
# in Cross Entropy calculation.
# Shape (n_test,)
scores = tf.log(pred) - tf.log(tf.constant(1., dtype=tf.float32) - pred)
return pred, scores
def predict(self, support, test):
"""Makes predictions on test given support.
TODO(rbharath): Does not currently support any transforms.
TODO(rbharath): Only for 1 task at a time currently. Is there a better way?
"""
y_preds = []
for (X_batch, y_batch, w_batch, ids_batch) in test.iterbatches(
self.test_batch_size, deterministic=True):
test_batch = NumpyDataset(X_batch, y_batch, w_batch, ids_batch)
y_pred_batch = self.predict_on_batch(support, test_batch)
y_preds.append(y_pred_batch)
y_pred = np.concatenate(y_preds)
return y_pred
def predict_proba(self, support, test):
"""Makes predictions on test given support.
TODO(rbharath): Does not currently support any transforms.
TODO(rbharath): Only for 1 task at a time currently. Is there a better way?
Parameters
----------
support: dc.data.Dataset
The support dataset
test: dc.data.Dataset
The test dataset
"""
y_preds = []
for (X_batch, y_batch, w_batch, ids_batch) in test.iterbatches(
self.test_batch_size, deterministic=True):
test_batch = NumpyDataset(X_batch, y_batch, w_batch, ids_batch)
y_pred_batch = self.predict_proba_on_batch(support, test_batch)
y_preds.append(y_pred_batch)
y_pred = np.concatenate(y_preds)
return y_pred
def predict_on_batch(self, support, test_batch):
"""Make predictions on batch of data."""
n_samples = len(test_batch)
X, y, w, ids = pad_batch(self.test_batch_size, test_batch.X, test_batch.y,
test_batch.w, test_batch.ids)
padded_test_batch = NumpyDataset(X, y, w, ids)
feed_dict = self.construct_feed_dict(padded_test_batch, support)
# Get scores
pred, scores = self.sess.run(
[self.pred_op, self.scores_op], feed_dict=feed_dict)
y_pred_batch = np.round(pred)
# Remove padded elements
y_pred_batch = y_pred_batch[:n_samples]
return y_pred_batch
def predict_proba_on_batch(self, support, test_batch):
"""Make predictions on batch of data."""
n_samples = len(test_batch)
X, y, w, ids = pad_batch(self.test_batch_size, test_batch.X, test_batch.y,
test_batch.w, test_batch.ids)
padded_test_batch = NumpyDataset(X, y, w, ids)
feed_dict = self.construct_feed_dict(padded_test_batch, support)
# Get scores
pred, scores = self.sess.run(
[self.pred_op, self.scores_op], feed_dict=feed_dict)
# pred corresponds to prob(example == 1)
y_pred_batch = np.zeros((n_samples, 2))
# Remove padded elements
pred = pred[:n_samples]
y_pred_batch[:, 1] = pred
y_pred_batch[:, 0] = 1 - pred
return y_pred_batch
def evaluate(self,
dataset,
metric,
n_pos,
n_neg,
n_trials=1000,
exclude_support=True):
"""Evaluate performance on dataset according to metrics
Evaluates the performance of the trained model by sampling supports randomly
for each task in dataset. For each sampled support, the accuracy of the
model with support provided is computed on all data for that task. If
exclude_support is True (by default), the support set is excluded from this
    accuracy calculation. exclude_support should be set to False if you want to
    evaluate the model's memorization capacity.
Since the accuracy on a task is dependent on the choice of random support,
the evaluation experiment is repeated n_trials times for each task.
(Each task gets n_trials experiments). The computed accuracies
are averaged across trials.
TODO(rbharath): Currently does not support any transformers.
Parameters
----------
dataset: dc.data.Dataset
Dataset to test on.
metrics: dc.metrics.Metric
Evaluation metric.
n_pos: int, optional
Number of positive samples per support.
n_neg: int, optional
Number of negative samples per support.
exclude_support: bool, optional
Whether support set should be excluded when computing model accuracy.
"""
# Get batches
test_tasks = range(len(dataset.get_task_names()))
task_scores = {task: [] for task in test_tasks}
support_generator = SupportGenerator(dataset, n_pos, n_neg, n_trials)
for ind, (task, support) in enumerate(support_generator):
print("Eval sample %d from task %s" % (ind, str(task)))
# TODO(rbharath): Add test for get_task_dataset_minus_support for
# multitask case with missing data...
if exclude_support:
print("Removing support datapoints for eval.")
task_dataset = get_task_dataset_minus_support(dataset, support, task)
else:
print("Keeping support datapoints for eval.")
task_dataset = get_task_dataset(dataset, task)
y_pred = self.predict_proba(support, task_dataset)
task_scores[task].append(
metric.compute_metric(task_dataset.y, y_pred, task_dataset.w))
# Join information for all tasks.
mean_task_scores = {}
std_task_scores = {}
for task in test_tasks:
mean_task_scores[task] = np.mean(np.array(task_scores[task]))
std_task_scores[task] = np.std(np.array(task_scores[task]))
return mean_task_scores, std_task_scores
<file_sep>import tempfile
import unittest
import os
import numpy as np
from deepchem.utils import rdkit_utils
class TestRdkitUtil(unittest.TestCase):
def setUp(self):
# TODO test more formats for ligand
current_dir = os.path.dirname(os.path.realpath(__file__))
self.protein_file = os.path.join(
current_dir, '../../feat/tests/data/3ws9_protein_fixer_rdkit.pdb')
self.ligand_file = os.path.join(
current_dir, '../../feat/tests/data/3ws9_ligand.sdf')
def test_load_complex(self):
complexes = rdkit_utils.load_complex(
(self.protein_file, self.ligand_file),
add_hydrogens=False,
calc_charges=False)
assert len(complexes) == 2
def test_load_molecule(self):
# adding hydrogens and charges is tested in dc.utils
from rdkit.Chem.AllChem import Mol
for add_hydrogens in (True, False):
for calc_charges in (True, False):
mol_xyz, mol_rdk = rdkit_utils.load_molecule(
self.ligand_file, add_hydrogens, calc_charges)
num_atoms = mol_rdk.GetNumAtoms()
self.assertIsInstance(mol_xyz, np.ndarray)
self.assertIsInstance(mol_rdk, Mol)
self.assertEqual(mol_xyz.shape, (num_atoms, 3))
def test_get_xyz_from_mol(self):
current_dir = os.path.dirname(os.path.realpath(__file__))
ligand_file = os.path.join(current_dir,
"../../dock/tests/1jld_ligand.sdf")
xyz, mol = rdkit_utils.load_molecule(ligand_file,
calc_charges=False,
add_hydrogens=False)
xyz2 = rdkit_utils.get_xyz_from_mol(mol)
equal_array = np.all(xyz == xyz2)
assert equal_array
def test_add_hydrogens_to_mol(self):
current_dir = os.path.dirname(os.path.realpath(__file__))
ligand_file = os.path.join(current_dir,
"../../dock/tests/1jld_ligand.sdf")
xyz, mol = rdkit_utils.load_molecule(ligand_file,
calc_charges=False,
add_hydrogens=False)
original_hydrogen_count = 0
for atom_idx in range(mol.GetNumAtoms()):
atom = mol.GetAtoms()[atom_idx]
if atom.GetAtomicNum() == 1:
original_hydrogen_count += 1
assert mol is not None
mol = rdkit_utils.add_hydrogens_to_mol(mol, is_protein=False)
assert mol is not None
after_hydrogen_count = 0
for atom_idx in range(mol.GetNumAtoms()):
atom = mol.GetAtoms()[atom_idx]
if atom.GetAtomicNum() == 1:
after_hydrogen_count += 1
assert after_hydrogen_count >= original_hydrogen_count
def test_apply_pdbfixer(self):
current_dir = os.path.dirname(os.path.realpath(__file__))
ligand_file = os.path.join(current_dir,
"../../dock/tests/1jld_ligand.sdf")
xyz, mol = rdkit_utils.load_molecule(ligand_file,
calc_charges=False,
add_hydrogens=False)
original_hydrogen_count = 0
for atom_idx in range(mol.GetNumAtoms()):
atom = mol.GetAtoms()[atom_idx]
if atom.GetAtomicNum() == 1:
original_hydrogen_count += 1
assert mol is not None
mol = rdkit_utils.apply_pdbfixer(mol,
hydrogenate=True,
is_protein=False)
assert mol is not None
after_hydrogen_count = 0
for atom_idx in range(mol.GetNumAtoms()):
atom = mol.GetAtoms()[atom_idx]
if atom.GetAtomicNum() == 1:
after_hydrogen_count += 1
assert after_hydrogen_count >= original_hydrogen_count
def test_compute_charges(self):
current_dir = os.path.dirname(os.path.realpath(__file__))
ligand_file = os.path.join(current_dir,
"../../dock/tests/1jld_ligand.sdf")
xyz, mol = rdkit_utils.load_molecule(ligand_file,
calc_charges=False,
add_hydrogens=True)
rdkit_utils.compute_charges(mol)
has_a_charge = False
for atom_idx in range(mol.GetNumAtoms()):
atom = mol.GetAtoms()[atom_idx]
value = atom.GetProp(str("_GasteigerCharge"))
if value != 0:
has_a_charge = True
assert has_a_charge
def test_load_molecule2(self):
current_dir = os.path.dirname(os.path.realpath(__file__))
ligand_file = os.path.join(current_dir,
"../../dock/tests/1jld_ligand.sdf")
xyz, mol = rdkit_utils.load_molecule(ligand_file,
calc_charges=False,
add_hydrogens=False)
assert xyz is not None
assert mol is not None
def test_write_molecule(self):
current_dir = os.path.dirname(os.path.realpath(__file__))
ligand_file = os.path.join(current_dir,
"../../dock/tests/1jld_ligand.sdf")
xyz, mol = rdkit_utils.load_molecule(ligand_file,
calc_charges=False,
add_hydrogens=False)
with tempfile.TemporaryDirectory() as tmp:
outfile = os.path.join(tmp, "mol.sdf")
rdkit_utils.write_molecule(mol, outfile)
xyz, mol2 = rdkit_utils.load_molecule(outfile,
calc_charges=False,
add_hydrogens=False)
assert mol.GetNumAtoms() == mol2.GetNumAtoms()
for atom_idx in range(mol.GetNumAtoms()):
atom1 = mol.GetAtoms()[atom_idx]
                atom2 = mol2.GetAtoms()[atom_idx]
assert atom1.GetAtomicNum() == atom2.GetAtomicNum()
def test_merge_molecules_xyz(self):
current_dir = os.path.dirname(os.path.realpath(__file__))
ligand_file = os.path.join(current_dir,
"../../dock/tests/1jld_ligand.sdf")
xyz, mol = rdkit_utils.load_molecule(ligand_file,
calc_charges=False,
add_hydrogens=False)
merged = rdkit_utils.merge_molecules_xyz([xyz, xyz])
for i in range(len(xyz)):
first_atom_equal = np.all(xyz[i] == merged[i])
second_atom_equal = np.all(xyz[i] == merged[i + len(xyz)])
assert first_atom_equal
assert second_atom_equal
def test_merge_molecules(self):
current_dir = os.path.dirname(os.path.realpath(__file__))
ligand_file = os.path.join(current_dir,
"../../dock/tests/1jld_ligand.sdf")
xyz, mol = rdkit_utils.load_molecule(ligand_file,
calc_charges=False,
add_hydrogens=False)
num_mol_atoms = mol.GetNumAtoms()
# self.ligand_file is for 3ws9_ligand.sdf
oth_xyz, oth_mol = rdkit_utils.load_molecule(self.ligand_file,
calc_charges=False,
add_hydrogens=False)
num_oth_mol_atoms = oth_mol.GetNumAtoms()
merged = rdkit_utils.merge_molecules([mol, oth_mol])
merged_num_atoms = merged.GetNumAtoms()
assert merged_num_atoms == num_mol_atoms + num_oth_mol_atoms
def test_merge_molecular_fragments(self):
pass
def test_strip_hydrogens(self):
pass
def test_all_shortest_pairs(self):
from rdkit import Chem
mol = Chem.MolFromSmiles("CN=C=O")
valid_dict = {
(0, 1): (0, 1),
(0, 2): (0, 1, 2),
(0, 3): (0, 1, 2, 3),
(1, 2): (1, 2),
(1, 3): (1, 2, 3),
(2, 3): (2, 3)
}
assert rdkit_utils.compute_all_pairs_shortest_path(mol) == valid_dict
def test_pairwise_ring_info(self):
from rdkit import Chem
mol = Chem.MolFromSmiles("c1ccccc1")
predict_dict = rdkit_utils.compute_pairwise_ring_info(mol)
assert all(pair == [(6, True)] for pair in predict_dict.values())
mol = Chem.MolFromSmiles("c1c2ccccc2ccc1")
predict_dict = rdkit_utils.compute_pairwise_ring_info(mol)
assert all(pair == [(6, True)] for pair in predict_dict.values())
mol = Chem.MolFromSmiles("CN=C=O")
predict_dict = rdkit_utils.compute_pairwise_ring_info(mol)
assert not predict_dict
<file_sep>#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Thu Mar 30 14:02:04 2017
@author: michael
"""
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
import numpy as np
import tensorflow as tf
from deepchem.nn import activations
from deepchem.nn import initializations
from deepchem.nn import model_ops
# TODO(rbharath): This class does not yet have a
# TensorGraph equivalent, but one may not be required.
# Commented out for now, remove if OK.
#class AlternateWeaveLayer(WeaveLayer):
# """ Alternate implementation of weave module
# same variables, different graph structures
# """
#
# def call(self, x, mask=None):
# """Execute this layer on input tensors.
#
# x = [atom_features, pair_features, pair_split, atom_split, atom_to_pair]
#
# Parameters
# ----------
# x: list
# list of Tensors of form described above.
# mask: bool, optional
# Ignored. Present only to shadow superclass call() method.
#
# Returns
# -------
# A: Tensor
# Tensor of atom_features
# P: Tensor
# Tensor of pair_features
# """
# # Add trainable weights
# self.build()
#
# atom_features = x[0]
# pair_features = x[1]
#
# pair_split = x[2]
# atom_to_pair = x[4]
#
# AA = tf.matmul(atom_features, self.W_AA) + self.b_AA
# AA = self.activation(AA)
# PA = tf.matmul(pair_features, self.W_PA) + self.b_PA
# PA = self.activation(PA)
# PA = tf.segment_sum(PA, pair_split)
#
# A = tf.matmul(tf.concat([AA, PA], 1), self.W_A) + self.b_A
# A = self.activation(A)
#
# if self.update_pair:
# AP_ij = tf.matmul(
# tf.reshape(
# tf.gather(atom_features, atom_to_pair),
# [-1, 2 * self.n_atom_input_feat]), self.W_AP) + self.b_AP
# AP_ij = self.activation(AP_ij)
# AP_ji = tf.matmul(
# tf.reshape(
# tf.gather(atom_features, tf.reverse(atom_to_pair, [1])),
# [-1, 2 * self.n_atom_input_feat]), self.W_AP) + self.b_AP
# AP_ji = self.activation(AP_ji)
#
# PP = tf.matmul(pair_features, self.W_PP) + self.b_PP
# PP = self.activation(PP)
# P = tf.matmul(tf.concat([AP_ij + AP_ji, PP], 1), self.W_P) + self.b_P
# P = self.activation(P)
# else:
# P = pair_features
#
# return A, P
# TODO(rbharath): This class does not yet have a
# TensorGraph equivalent, but one may not be required.
# Commented out for now, remove if OK.
#class WeaveConcat(Layer):
# """" Concat a batch of molecules into a batch of atoms
# """
#
# def __init__(self,
# batch_size,
# n_atom_input_feat=50,
# n_output=128,
# init='glorot_uniform',
# activation='tanh',
# **kwargs):
# """
# Parameters
# ----------
# batch_size: int
# number of molecules in a batch
# n_atom_input_feat: int, optional
# Number of features for each atom in input.
# n_output: int, optional
# Number of output features for each atom(concatenated)
# init: str, optional
# Weight initialization for filters.
# activation: str, optional
# Activation function applied
#
# """
# self.batch_size = batch_size
# self.n_atom_input_feat = n_atom_input_feat
# self.n_output = n_output
# self.init = initializations.get(init) # Set weight initialization
# self.activation = activations.get(activation) # Get activations
# super(WeaveConcat, self).__init__(**kwargs)
#
# def build(self):
# """"Construct internal trainable weights.
# """
#
# self.W = self.init([self.n_atom_input_feat, self.n_output])
# self.b = model_ops.zeros(shape=[
# self.n_output,
# ])
#
# self.trainable_weights = self.W + self.b
#
# def call(self, x, mask=None):
# """Execute this layer on input tensors.
#
# x = [atom_features, atom_mask]
#
# Parameters
# ----------
# x: list
# Tensors as listed above
# mask: bool, optional
# Ignored. Present only to shadow superclass call() method.
#
# Returns
# -------
# outputs: Tensor
# Tensor of concatenated atom features
# """
# self.build()
# atom_features = x[0]
# atom_masks = x[1]
# A = tf.split(atom_features, self.batch_size, axis=0)
# A_mask = tf.split(
# tf.cast(atom_masks, dtype=tf.bool), self.batch_size, axis=0)
# outputs = tf.concat(
# [tf.boolean_mask(A[i], A_mask[i]) for i in range(len(A))], axis=0)
# outputs = tf.matmul(outputs, self.W) + self.b
# outputs = self.activation(outputs)
# return outputs
# TODO(rbharath): This class does not yet have a
# TensorGraph equivalent, but one may not be required.
# Commented out for now, remove if OK.
#class AlternateWeaveGather(WeaveGather):
# """Alternate implementation of weave gather layer
# corresponding to AlternateWeaveLayer
# """
#
# def call(self, x, mask=None):
# """Execute this layer on input tensors.
#
# x = [atom_features, atom_split]
#
# Parameters
# ----------
# x: list
# Tensors as listed above
# mask: bool, optional
# Ignored. Present only to shadow superclass call() method.
#
# Returns
# -------
# outputs: Tensor
# Tensor of molecular features
# """
# # Add trainable weights
# self.build()
# outputs = x[0]
# atom_split = x[1]
#
# if self.gaussian_expand:
# outputs = self.gaussian_histogram(outputs)
#
# output_molecules = tf.segment_sum(outputs, atom_split)
#
# if self.gaussian_expand:
# output_molecules = tf.matmul(output_molecules, self.W) + self.b
# output_molecules = self.activation(output_molecules)
# return output_molecules
<file_sep># Dataset Examples
This folder contains examples of using DeepChem datasets.
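As a minimal, illustrative sketch (not tied to any specific example script in
this folder), a DeepChem dataset can be built directly from NumPy arrays:

```python
import numpy as np
import deepchem as dc

# Ten samples with five features each and a single regression target.
X = np.random.rand(10, 5)
y = np.random.rand(10, 1)
dataset = dc.data.NumpyDataset(X, y)
print(dataset.X.shape, dataset.y.shape)
```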
<file_sep>"""
Simple utils to save and load from disk.
"""
import joblib
import gzip
import pickle
import os
import tempfile
import tarfile
import zipfile
import logging
from urllib.request import urlretrieve
from typing import Any, Iterator, List, Optional, Tuple, Union, cast, IO
import pandas as pd
import numpy as np
import deepchem as dc
logger = logging.getLogger(__name__)
def pad_array(x: np.ndarray,
shape: Union[Tuple, int],
fill: float = 0.0,
both: bool = False) -> np.ndarray:
"""
Pad an array with a fill value.
Parameters
----------
x: np.ndarray
A numpy array.
shape: Tuple or int
Desired shape. If int, all dimensions are padded to that size.
fill: float, optional (default 0.0)
The padded value.
both: bool, optional (default False)
If True, split the padding on both sides of each axis. If False,
padding is applied to the end of each axis.
Returns
-------
np.ndarray
A padded numpy array
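    Examples
    --------
    A minimal example padding a 1-D array to length 5 with the default fill value:
    >>> pad_array(np.array([1, 2, 3]), 5)
    array([1, 2, 3, 0, 0])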
"""
x = np.asarray(x)
if not isinstance(shape, tuple):
shape = tuple(shape for _ in range(x.ndim))
pad = []
for i in range(x.ndim):
diff = shape[i] - x.shape[i]
assert diff >= 0
if both:
a, b = divmod(diff, 2)
b += a
pad.append((a, b))
else:
pad.append((0, diff))
pad = tuple(pad) # type: ignore
x = np.pad(x, pad, mode='constant', constant_values=fill)
return x
def get_data_dir() -> str:
"""Get the DeepChem data directory.
Returns
-------
str
The default path to store DeepChem data. If you want to
change this path, please set your own path to `DEEPCHEM_DATA_DIR`
as an environment variable.
"""
if 'DEEPCHEM_DATA_DIR' in os.environ:
return os.environ['DEEPCHEM_DATA_DIR']
return tempfile.gettempdir()
def download_url(url: str,
dest_dir: str = get_data_dir(),
name: Optional[str] = None):
"""Download a file to disk.
Parameters
----------
url: str
The URL to download from
dest_dir: str
The directory to save the file in
name: str
The file name to save it as. If omitted, it will try to extract a file name from the URL
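    Examples
    --------
    A sketch with an illustrative URL (not fetched in doctests):
    >>> download_url('https://example.com/data.csv', dest_dir='/tmp/data')  # doctest: +SKIP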
"""
if name is None:
name = url
if '?' in name:
name = name[:name.find('?')]
if '/' in name:
name = name[name.rfind('/') + 1:]
if not os.path.exists(dest_dir):
os.makedirs(dest_dir)
urlretrieve(url, os.path.join(dest_dir, name))
def untargz_file(file: str,
dest_dir: str = get_data_dir(),
name: Optional[str] = None):
"""Untar and unzip a .tar.gz file to disk.
Parameters
----------
file: str
The filepath to decompress
dest_dir: str
The directory to save the file in
name: str
        The archive file name to open. If omitted, the value of `file` is used.
"""
if name is None:
name = file
tar = tarfile.open(name)
tar.extractall(path=dest_dir)
tar.close()
def unzip_file(file: str,
dest_dir: str = get_data_dir(),
name: Optional[str] = None):
"""Unzip a .zip file to disk.
Parameters
----------
file: str
The filepath to decompress
dest_dir: str
The directory to save the file in
name: str
The directory name to unzip it to. If omitted, it will use the file name
"""
if name is None:
name = file
if dest_dir is None:
        dest_dir = os.path.join(get_data_dir(), name)
with zipfile.ZipFile(file, "r") as zip_ref:
zip_ref.extractall(dest_dir)
class UniversalNamedTemporaryFile:
"""The implementation for cross platform NamedTemporaryFile.
`tempfile.NamedTemporaryFile` causes a permission error on Windows.
    This implementation avoids the error; please see the discussion on Stack Overflow [1]_.
References
----------
.. [1] https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file
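    Examples
    --------
    A minimal usage sketch; the temporary file is removed again on exit because
    `delete` defaults to True:
    >>> with UniversalNamedTemporaryFile(mode='w') as f:
    ...     _ = f.write('hello')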
"""
def __init__(self, mode='wb', delete=True):
self._mode = mode
self._delete = delete
def __enter__(self):
# Generate a random temporary file name
file_name = os.path.join(tempfile.gettempdir(), os.urandom(24).hex())
# Ensure the file is created
open(file_name, "x").close()
# Open the file in the given mode
self._tempFile = open(file_name, self._mode)
return self._tempFile
def __exit__(self, exc_type, exc_val, exc_tb):
self._tempFile.close()
if self._delete:
os.remove(self._tempFile.name)
def load_image_files(input_files: List[str]) -> np.ndarray:
"""Loads a set of images from disk.
Parameters
----------
input_files: List[str]
List of image filenames.
Returns
-------
np.ndarray
A numpy array that contains loaded images. The shape is, `(N,...)`.
Notes
-----
This method requires Pillow to be installed.
The supported file types are PNG and TIF.
"""
try:
from PIL import Image
except ModuleNotFoundError:
raise ImportError("This function requires Pillow to be installed.")
images = []
for input_file in input_files:
_, extension = os.path.splitext(input_file)
extension = extension.lower()
if extension == ".png":
image = np.array(Image.open(input_file))
images.append(image)
elif extension == ".tif":
im = Image.open(input_file)
imarray = np.array(im)
images.append(imarray)
else:
raise ValueError("Unsupported image filetype for %s" % input_file)
return np.array(images)
def load_sdf_files(input_files: List[str],
clean_mols: bool = True,
tasks: List[str] = [],
shard_size: Optional[int] = None) -> Iterator[pd.DataFrame]:
"""Load SDF file into dataframe.
Parameters
----------
input_files: List[str]
List of filenames
clean_mols: bool, default True
Whether to sanitize molecules.
tasks: List[str], default []
Each entry in `tasks` is treated as a property in the SDF file and is
retrieved with `mol.GetProp(str(task))` where `mol` is the RDKit mol
loaded from a given SDF entry.
shard_size: int, default None
The shard size to yield at one time.
Returns
-------
Iterator[pd.DataFrame]
Generator which yields the dataframe which is the same shard size.
Notes
-----
This function requires RDKit to be installed.
"""
try:
from rdkit import Chem
except ModuleNotFoundError:
raise ImportError("This function requires RDKit to be installed.")
df_rows = []
for input_file in input_files:
# Tasks are either in .sdf.csv file or in the .sdf file itself for QM9 dataset
has_csv = os.path.isfile(input_file + ".csv")
# Structures are stored in .sdf file
logger.info("Reading structures from %s." % input_file)
suppl = Chem.SDMolSupplier(str(input_file),
sanitize=clean_mols,
removeHs=False,
strictParsing=False)
for ind, mol in enumerate(suppl):
if mol is None:
continue
smiles = Chem.MolToSmiles(mol)
df_row = [ind, smiles, mol]
if not has_csv: # Get task targets from .sdf file
for task in tasks:
df_row.append(mol.GetProp(str(task)))
conf = mol.GetConformer()
positions = conf.GetPositions()
pos_x, pos_y, pos_z = zip(*positions)
df_row.append(str(pos_x))
df_row.append(str(pos_y))
df_row.append(str(pos_z))
df_rows.append(df_row)
if shard_size is not None and len(df_rows) == shard_size:
if has_csv:
mol_df = pd.DataFrame(df_rows,
columns=('mol_id', 'smiles', 'mol',
'pos_x', 'pos_y', 'pos_z'))
raw_df = next(
load_csv_files([input_file + ".csv"], shard_size=None))
yield pd.concat([mol_df, raw_df], axis=1, join='inner')
else:
# Note: Here, the order of columns is based on the order in which the values
# are appended to `df_row`. Since pos_x, pos_y, pos_z are appended after appending
# tasks above, they occur after `tasks` here.
# FIXME Ideally, we should use something like a dictionary here to keep it independent
# of column ordering.
mol_df = pd.DataFrame(df_rows,
columns=('mol_id', 'smiles', 'mol') +
tuple(tasks) +
('pos_x', 'pos_y', 'pos_z'))
yield mol_df
# Reset aggregator
df_rows = []
# Handle final leftovers for this file
if len(df_rows) > 0:
if has_csv:
mol_df = pd.DataFrame(df_rows,
columns=('mol_id', 'smiles', 'mol',
'pos_x', 'pos_y', 'pos_z'))
raw_df = next(
load_csv_files([input_file + ".csv"], shard_size=None))
yield pd.concat([mol_df, raw_df], axis=1, join='inner')
else:
mol_df = pd.DataFrame(df_rows,
columns=('mol_id', 'smiles', 'mol') +
tuple(tasks) +
('pos_x', 'pos_y', 'pos_z'))
yield mol_df
df_rows = []
def load_csv_files(input_files: List[str],
shard_size: Optional[int] = None) -> Iterator[pd.DataFrame]:
"""Load data as pandas dataframe from CSV files.
Parameters
----------
input_files: List[str]
List of filenames
shard_size: int, default None
The shard size to yield at one time.
Returns
-------
Iterator[pd.DataFrame]
Generator which yields the dataframe which is the same shard size.
"""
# First line of user-specified CSV *must* be header.
shard_num = 1
for input_file in input_files:
if shard_size is None:
yield pd.read_csv(input_file)
else:
logger.info("About to start loading CSV from %s" % input_file)
for df in pd.read_csv(input_file, chunksize=shard_size):
logger.info("Loading shard %d of size %s." %
(shard_num, str(shard_size)))
df = df.replace(np.nan, str(""), regex=True)
shard_num += 1
yield df
def load_json_files(input_files: List[str],
shard_size: Optional[int] = None) -> Iterator[pd.DataFrame]:
"""Load data as pandas dataframe.
Parameters
----------
input_files: List[str]
List of json filenames.
shard_size: int, default None
Chunksize for reading json files.
Returns
-------
Iterator[pd.DataFrame]
Generator which yields the dataframe which is the same shard size.
Notes
-----
To load shards from a json file into a Pandas dataframe, the file
must be originally saved with ``df.to_json('filename.json', orient='records', lines=True)``
"""
shard_num = 1
for input_file in input_files:
if shard_size is None:
yield pd.read_json(input_file, orient='records', lines=True)
else:
logger.info("About to start loading json from %s." % input_file)
for df in pd.read_json(input_file,
orient='records',
chunksize=shard_size,
lines=True):
logger.info("Loading shard %d of size %s." %
(shard_num, str(shard_size)))
df = df.replace(np.nan, str(""), regex=True)
shard_num += 1
yield df
def load_pickle_file(input_file: str) -> Any:
"""Load from single, possibly gzipped, pickle file.
Parameters
----------
input_file: str
The filename of pickle file. This function can load from
gzipped pickle file like `XXXX.pkl.gz`.
Returns
-------
Any
The object which is loaded from the pickle file.
"""
if ".gz" in input_file:
with gzip.open(input_file, "rb") as unzipped_file:
return pickle.load(cast(IO[bytes], unzipped_file))
else:
with open(input_file, "rb") as opened_file:
return pickle.load(opened_file)
def load_pickle_files(input_files: List[str]) -> Iterator[Any]:
"""Load dataset from pickle files.
Parameters
----------
input_files: List[str]
The list of filenames of pickle file. This function can load from
gzipped pickle file like `XXXX.pkl.gz`.
Returns
-------
Iterator[Any]
Generator which yields the objects which is loaded from each pickle file.
"""
for input_file in input_files:
yield load_pickle_file(input_file)
def load_data(input_files: List[str],
shard_size: Optional[int] = None) -> Iterator[Any]:
"""Loads data from files.
Parameters
----------
input_files: List[str]
List of filenames.
shard_size: int, default None
Size of shard to yield
Returns
-------
Iterator[Any]
Iterator which iterates over provided files.
Notes
-----
The supported file types are SDF, CSV and Pickle.
"""
if len(input_files) == 0:
raise ValueError("The length of `filenames` must be more than 1.")
file_type = _get_file_type(input_files[0])
if file_type == "sdf":
if shard_size is not None:
logger.info("Ignoring shard_size for sdf input.")
for value in load_sdf_files(input_files):
yield value
elif file_type == "csv":
for value in load_csv_files(input_files, shard_size):
yield value
elif file_type == "pickle":
if shard_size is not None:
logger.info("Ignoring shard_size for pickle input.")
for value in load_pickle_files(input_files):
yield value
def _get_file_type(input_file: str) -> str:
"""Get type of input file. Must be csv/pkl/sdf/joblib file."""
filename, file_extension = os.path.splitext(input_file)
# If gzipped, need to compute extension again
if file_extension == ".gz":
filename, file_extension = os.path.splitext(filename)
if file_extension == ".csv":
return "csv"
elif file_extension == ".pkl":
return "pickle"
elif file_extension == ".joblib":
return "joblib"
elif file_extension == ".sdf":
return "sdf"
else:
raise ValueError("Unrecognized extension %s" % file_extension)
def save_to_disk(dataset: Any, filename: str, compress: int = 3):
"""Save a dataset to file.
Parameters
----------
dataset: str
A data saved
filename: str
Path to save data.
compress: int, default 3
The compress option when dumping joblib file.
"""
if filename.endswith('.joblib'):
joblib.dump(dataset, filename, compress=compress)
elif filename.endswith('.npy'):
np.save(filename, dataset)
else:
raise ValueError("Filename with unsupported extension: %s" % filename)
def load_from_disk(filename: str) -> Any:
"""Load a dataset from file.
Parameters
----------
filename: str
A filename you want to load data.
Returns
-------
Any
A loaded object from file.
"""
name = filename
if os.path.splitext(name)[1] == ".gz":
name = os.path.splitext(name)[0]
extension = os.path.splitext(name)[1]
if extension == ".pkl":
return load_pickle_file(filename)
elif extension == ".joblib":
return joblib.load(filename)
elif extension == ".csv":
# First line of user-specified CSV *must* be header.
df = pd.read_csv(filename, header=0)
df = df.replace(np.nan, str(""), regex=True)
return df
elif extension == ".npy":
return np.load(filename, allow_pickle=True)
else:
raise ValueError("Unrecognized filetype for %s" % filename)
def load_dataset_from_disk(
save_dir: str
) -> Tuple[bool, Optional[Tuple["dc.data.DiskDataset", "dc.data.DiskDataset",
"dc.data.DiskDataset"]],
List["dc.trans.Transformer"]]:
"""Loads MoleculeNet train/valid/test/transformers from disk.
Expects that data was saved using `save_dataset_to_disk` below. Expects the
following directory structure for `save_dir`:
save_dir/
|
---> train_dir/
|
---> valid_dir/
|
---> test_dir/
|
---> transformers.pkl
Parameters
----------
save_dir: str
Directory name to load datasets.
Returns
-------
loaded: bool
Whether the load succeeded
all_dataset: Tuple[DiskDataset, DiskDataset, DiskDataset]
The train, valid, test datasets
transformers: Transformer
The transformers used for this dataset
See Also
--------
save_dataset_to_disk
"""
train_dir = os.path.join(save_dir, "train_dir")
valid_dir = os.path.join(save_dir, "valid_dir")
test_dir = os.path.join(save_dir, "test_dir")
if not os.path.exists(train_dir) or not os.path.exists(
valid_dir) or not os.path.exists(test_dir):
return False, None, list()
loaded = True
train = dc.data.DiskDataset(train_dir)
valid = dc.data.DiskDataset(valid_dir)
test = dc.data.DiskDataset(test_dir)
train.memory_cache_size = 40 * (1 << 20) # 40 MB
all_dataset = (train, valid, test)
transformers = load_transformers(save_dir)
return loaded, all_dataset, transformers
def save_dataset_to_disk(save_dir: str, train: "dc.data.DiskDataset",
valid: "dc.data.DiskDataset",
test: "dc.data.DiskDataset",
transformers: List["dc.trans.Transformer"]):
"""Utility used by MoleculeNet to save train/valid/test datasets.
This utility function saves a train/valid/test split of a dataset along
with transformers in the same directory. The saved datasets will take the
following structure:
save_dir/
|
---> train_dir/
|
---> valid_dir/
|
---> test_dir/
|
---> transformers.pkl
Parameters
----------
save_dir: str
Directory name to save datasets to.
train: DiskDataset
Training dataset to save.
valid: DiskDataset
Validation dataset to save.
test: DiskDataset
Test dataset to save.
transformers: List[Transformer]
List of transformers to save to disk.
See Also
--------
load_dataset_from_disk
"""
train_dir = os.path.join(save_dir, "train_dir")
valid_dir = os.path.join(save_dir, "valid_dir")
test_dir = os.path.join(save_dir, "test_dir")
train.move(train_dir)
valid.move(valid_dir)
test.move(test_dir)
save_transformers(save_dir, transformers)
def load_transformers(save_dir: str) -> List["dc.trans.Transformer"]:
"""Load the transformers for a MoleculeNet dataset from disk."""
with open(os.path.join(save_dir, "transformers.pkl"), 'rb') as f:
return pickle.load(f)
def save_transformers(save_dir: str,
transformers: List["dc.trans.Transformer"]):
"""Save the transformers for a MoleculeNet dataset to disk."""
with open(os.path.join(save_dir, "transformers.pkl"), 'wb') as f:
pickle.dump(transformers, f)
def remove_missing_entries(dataset):
"""Remove missing entries.
Some of the datasets have missing entries that sneak in as zero'd out
feature vectors. Get rid of them.
"""
for i, (X, y, w, ids) in enumerate(dataset.itershards()):
available_rows = X.any(axis=1)
logger.info("Shard %d has %d missing entries." %
(i, np.count_nonzero(~available_rows)))
X = X[available_rows]
y = y[available_rows]
w = w[available_rows]
ids = ids[available_rows]
dataset.set_shard(i, X, y, w, ids)
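# Hedged sketch (illustrative only): how `remove_missing_entries` detects
# zeroed-out rows -- `X.any(axis=1)` is False exactly for all-zero feature rows.
def _example_detect_missing_rows():
    X = np.array([[0.0, 0.0], [1.0, 2.0], [0.0, 0.0]])
    available_rows = X.any(axis=1)
    assert list(available_rows) == [False, True, False]
    return X[available_rows]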
<file_sep>import os
import deepchem as dc
import numpy as np
import pytest
try:
import torch
from deepchem.models.torch_models.hf_models import HuggingFaceModel
except ModuleNotFoundError:
pass
@pytest.fixture
def hf_tokenizer(tmpdir):
filepath = os.path.join(tmpdir, 'smiles.txt')
with open(filepath, 'w') as f:
f.write(
'CN(c1ccccc1)c1ccccc1C(=O)NCC1(O)CCOCC1\nCC[NH+](CC)C1CCC([NH2+]C2CC2)(C(=O)[O-])C1\n'
)
f.write(
'COCC(CNC(=O)c1ccc2c(c1)NC(=O)C2)OC\nOCCn1cc(CNc2cccc3c2CCCC3)nn1\n'
)
f.write(
'CCCCCCc1ccc(C#Cc2ccc(C#CC3=CC=C(CCC)CC3)c(C3CCCCC3)c2)c(F)c1\nO=C(NCc1ccc(F)cc1)N1CC=C(c2c[nH]c3ccccc23)CC1\n'
)
from tokenizers import ByteLevelBPETokenizer
from transformers.models.roberta import RobertaTokenizerFast
tokenizer = ByteLevelBPETokenizer()
tokenizer.train(files=filepath,
vocab_size=1_000,
min_frequency=2,
special_tokens=["<s>", "<pad>", "</s>", "<unk>", "<mask>"])
tokenizer_path = os.path.join(tmpdir, 'tokenizer')
os.makedirs(tokenizer_path)
tokenizer.save_model(tokenizer_path)
tokenizer = RobertaTokenizerFast.from_pretrained(tokenizer_path)
return tokenizer
@pytest.mark.torch
def test_pretraining(hf_tokenizer, smiles_regression_dataset):
from deepchem.models.torch_models.hf_models import HuggingFaceModel
from transformers.models.roberta import RobertaConfig, RobertaForMaskedLM
config = RobertaConfig(vocab_size=hf_tokenizer.vocab_size)
model = RobertaForMaskedLM(config)
hf_model = HuggingFaceModel(model=model,
tokenizer=hf_tokenizer,
task='mlm',
device=torch.device('cpu'))
loss = hf_model.fit(smiles_regression_dataset, nb_epoch=1)
assert loss
@pytest.mark.torch
def test_hf_model_regression(hf_tokenizer, smiles_regression_dataset):
from transformers.models.roberta import (RobertaConfig,
RobertaForSequenceClassification)
config = RobertaConfig(vocab_size=hf_tokenizer.vocab_size,
problem_type='regression',
num_labels=1)
model = RobertaForSequenceClassification(config)
hf_model = HuggingFaceModel(model=model,
tokenizer=hf_tokenizer,
task='regression',
device=torch.device('cpu'))
hf_model.fit(smiles_regression_dataset, nb_epoch=1)
result = hf_model.predict(smiles_regression_dataset)
assert result.all()
score = hf_model.evaluate(smiles_regression_dataset,
metrics=dc.metrics.Metric(dc.metrics.mae_score))
assert score
@pytest.mark.torch
def test_hf_model_classification(hf_tokenizer, smiles_regression_dataset):
y = np.random.choice([0, 1], size=smiles_regression_dataset.y.shape)
dataset = dc.data.NumpyDataset(X=smiles_regression_dataset.X,
y=y,
w=smiles_regression_dataset.w,
ids=smiles_regression_dataset.ids)
from transformers import RobertaConfig, RobertaForSequenceClassification
config = RobertaConfig(vocab_size=hf_tokenizer.vocab_size)
model = RobertaForSequenceClassification(config)
hf_model = HuggingFaceModel(model=model,
task='classification',
tokenizer=hf_tokenizer,
device=torch.device('cpu'))
hf_model.fit(dataset, nb_epoch=1)
result = hf_model.predict(dataset)
assert result.all()
score = hf_model.evaluate(dataset,
metrics=dc.metrics.Metric(dc.metrics.f1_score))
assert score
@pytest.mark.torch
def test_load_from_pretrained(tmpdir, hf_tokenizer):
# Create pretrained model
from transformers.models.roberta import (RobertaConfig, RobertaForMaskedLM,
RobertaForSequenceClassification)
config = RobertaConfig(vocab_size=hf_tokenizer.vocab_size)
model = RobertaForMaskedLM(config)
pretrained_model = HuggingFaceModel(model=model,
tokenizer=hf_tokenizer,
task='mlm',
model_dir=tmpdir,
device=torch.device('cpu'))
pretrained_model.save_checkpoint()
# Create finetuning model
config = RobertaConfig(vocab_size=hf_tokenizer.vocab_size,
problem_type='regression',
num_labels=1)
model = RobertaForSequenceClassification(config)
finetune_model = HuggingFaceModel(model=model,
tokenizer=hf_tokenizer,
task='regression',
model_dir=tmpdir,
device=torch.device('cpu'))
# Load pretrained model
finetune_model.load_from_pretrained()
# check weights match
pretrained_model_state_dict = pretrained_model.model.state_dict()
finetune_model_state_dict = finetune_model.model.state_dict()
pretrained_base_model_keys = [
key for key in pretrained_model_state_dict.keys() if 'roberta' in key
]
matches = [
torch.allclose(pretrained_model_state_dict[key],
finetune_model_state_dict[key])
for key in pretrained_base_model_keys
]
assert all(matches)
@pytest.mark.torch
def test_model_save_reload(tmpdir, hf_tokenizer):
from transformers.models.roberta import (RobertaConfig,
RobertaForSequenceClassification)
config = RobertaConfig(vocab_size=hf_tokenizer.vocab_size)
model = RobertaForSequenceClassification(config)
hf_model = HuggingFaceModel(model=model,
tokenizer=hf_tokenizer,
task='classification',
model_dir=tmpdir,
device=torch.device('cpu'))
hf_model._ensure_built()
hf_model.save_checkpoint()
model = RobertaForSequenceClassification(config)
hf_model2 = HuggingFaceModel(model=model,
tokenizer=hf_tokenizer,
task='classification',
model_dir=tmpdir,
device=torch.device('cpu'))
hf_model2.restore()
old_state = hf_model.model.state_dict()
new_state = hf_model2.model.state_dict()
matches = [
torch.allclose(old_state[key], new_state[key])
for key in old_state.keys()
]
# all keys should match
assert all(matches)
@pytest.mark.torch
def test_load_from_hf_checkpoint():
from transformers.models.t5 import T5Config, T5Model
config = T5Config()
model = T5Model(config)
hf_model = HuggingFaceModel(model=model,
tokenizer=None,
task=None,
device=torch.device('cpu'))
old_state_dict = hf_model.model.state_dict()
hf_model_checkpoint = 't5-small'
hf_model.load_from_pretrained(hf_model_checkpoint, from_hf_checkpoint=True)
new_state_dict = hf_model.model.state_dict()
not_matches = [
not torch.allclose(old_state_dict[key], new_state_dict[key])
for key in old_state_dict.keys()
]
# keys should not match
assert all(not_matches)
<file_sep>from typing import List
try:
import torch
import torch.nn as nn
except ModuleNotFoundError:
raise ImportError('The module requires PyTorch to be installed')
from deepchem.models.torch_models.attention import SelfAttention
class GroverReadout(nn.Module):
"""Performs readout on a batch of graph
The readout module is used for performing readouts on batched graphs to
convert node embeddings/edge embeddings into graph embeddings. It is used
in the Grover architecture to generate a graph embedding from node and edge
embeddings. The generated embedding can be used in downstream tasks like graph
classification or graph prediction problems.
Parameters
----------
rtype: str
Readout type, can be 'mean' or 'self_attention'
in_features: int
Size of input features
attn_hidden_size: int
If readout type is attention, size of hidden layer in attention network.
attn_out_size: int
If readout type is attention, size of attention out layer.
Example
-------
>>> import torch
>>> from deepchem.models.torch_models.readout import GroverReadout
>>> n_nodes, n_features = 6, 32
>>> readout = GroverReadout(rtype="mean")
>>> embedding = torch.ones(n_nodes, n_features)
>>> result = readout(embedding, scope=[(0, 6)])
>>> result.size()
torch.Size([1, 32])
"""
def __init__(self,
rtype: str = 'mean',
in_features: int = 128,
attn_hidden_size: int = 32,
attn_out_size: int = 32):
super(GroverReadout, self).__init__()
self.cached_zero_vector = nn.Parameter(torch.zeros(in_features),
requires_grad=False)
self.rtype = rtype
if rtype == "self_attention":
self.attn = SelfAttention(hidden_size=attn_hidden_size,
in_features=in_features,
out_features=attn_out_size)
def forward(self, graph_embeddings: torch.Tensor,
scope: List[List]) -> torch.Tensor:
"""Given a batch node/edge embedding and a scope list, produce the graph-level embedding by scope.
Parameters
----------
graph_embeddings: torch.Tensor
The embedding matrix of shape num_nodes x in_features or num_edges x in_features.
scope: List[List]
A list, in which the element is a list [start, range]. `start` is the index,
`range` is the length of scope. (start + range = end)
Returns
-------
graph_embeddings: torch.Tensor
A stacked tensor containing graph embeddings of shape len(scope) x in_features if readout type is mean or len(scope) x attn_out_size when readout type is self-attention.
"""
embeddings: List[torch.Tensor] = []
for _, (a_start, a_size) in enumerate(scope):
if a_size == 0:
embeddings.append(self.cached_zero_vector)
else:
embedding = graph_embeddings.narrow(0, a_start, a_size)
if self.rtype == "self_attention":
embedding, attn = self.attn(embedding)
embedding = embedding.flatten()
elif self.rtype == "mean":
embedding = embedding.sum(dim=0) / a_size
embeddings.append(embedding)
graph_embeddings = torch.stack(embeddings, dim=0)
return graph_embeddings
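# Hedged usage sketch (illustrative only): mean readout over a batch of two
# graphs whose node embeddings are stacked along dim 0. The scope
# [(0, 4), (4, 2)] marks graph boundaries: graph 0 owns rows 0..3 and graph 1
# owns rows 4..5.
def _example_grover_readout():
    node_embeddings = torch.randn(6, 128)
    readout = GroverReadout(rtype="mean", in_features=128)
    graph_embeddings = readout(node_embeddings, scope=[(0, 4), (4, 2)])
    assert graph_embeddings.shape == (2, 128)
    return graph_embeddings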
<file_sep>"""
Contains class for random hyperparameter optimizations.
"""
import numpy as np
import os
import tempfile
import collections
import logging
import itertools
from typing import Dict, List, Optional, Tuple, Any, Callable
from deepchem.data import Dataset
from deepchem.trans import Transformer
from deepchem.models import Model
from deepchem.metrics import Metric
from deepchem.hyper.base_classes import HyperparamOpt
from deepchem.hyper.base_classes import _convert_hyperparam_dict_to_filename
logger = logging.getLogger(__name__)
class RandomHyperparamOpt(HyperparamOpt):
"""
Provides simple random hyperparameter search capabilities.
This class performs a random hyperparameter search over the specified
hyperparameter space. This implementation is simple and samples
hyperparameters from the hyperparameter space passed. However it does not
use parallelization to speed up the search.
Examples
--------
This example shows the type of constructor function expects.
>>> import sklearn
>>> import deepchem as dc
>>> optimizer = dc.hyper.RandomHyperparamOpt(lambda **p: dc.models.GraphConvModel(**p), max_iter=5)
Here's a more sophisticated example that shows how to optimize only
some parameters of a model. In this case, we have some parameters we
want to optimize, and others which we don't. To handle this type of
search, we create a `model_builder` which hard codes some arguments.
>>> import deepchem as dc
>>> import numpy as np
>>> from sklearn.linear_model import LogisticRegression as LR
>>> # generating data
>>> X = np.arange(1, 11, 1).reshape(-1, 1)
>>> y = np.hstack((np.zeros(5), np.ones(5)))
>>> dataset = dc.data.NumpyDataset(X, y)
>>> # splitting dataset into train and test
>>> splitter = dc.splits.RandomSplitter()
>>> train_dataset, test_dataset = splitter.train_test_split(dataset)
>>> # metric to evaluate result of a set of parameters
>>> metric = dc.metrics.Metric(dc.metrics.accuracy_score)
>>> # defining `model_builder`
>>> def model_builder(**model_params):
... penalty = model_params['penalty']
... solver = model_params['solver']
... lr = LR(penalty=penalty, solver=solver, max_iter=100)
... return dc.models.SklearnModel(lr)
>>> # the parameters which are to be optimized
>>> params = {
... 'penalty': ['l1', 'l2'],
... 'solver': ['liblinear', 'saga']
... }
>>> # Creating optimizer and searching over hyperparameters
>>> optimizer = dc.hyper.RandomHyperparamOpt(model_builder, max_iter=100)
>>> best_model, best_hyperparams, all_results = optimizer.hyperparam_search(params, train_dataset, test_dataset, metric)
>>> best_hyperparams['penalty'], best_hyperparams['solver'] # doctest: +SKIP
('l2', 'saga')
Parameters
----------
model_builder: constructor function.
This parameter must be constructor function which returns an
object which is an instance of `dc.models.Model`. This function
must accept two arguments, `model_params` of type `dict` and
`model_dir`, a string specifying a path to a model directory.
max_iter: int
Maximum number of iterations to perform
"""
def __init__(self, model_builder: Callable[..., Model], max_iter: int):
super(RandomHyperparamOpt, self).__init__(model_builder=model_builder)
self.max_iter = max_iter
def hyperparam_search(
self,
params_dict: Dict,
train_dataset: Dataset,
valid_dataset: Dataset,
metric: Metric,
output_transformers: List[Transformer] = [],
nb_epoch: int = 10,
use_max: bool = True,
logfile: str = 'results.txt',
logdir: Optional[str] = None,
**kwargs,
) -> Tuple[Model, Dict[str, Any], Dict[str, Any]]:
"""Perform random hyperparams search according to `params_dict`.
Each key of the `params_dict` is a model_param. The
values should either be a list of potential values of that hyperparam
or a callable which can generate random samples.
Parameters
----------
params_dict: Dict
Maps each hyperparameter name (string) to either a list of possible
parameter values or a callable which can generate random samples.
train_dataset: Dataset
dataset used for training
valid_dataset: Dataset
dataset used for validation (optimization on valid scores)
metric: Metric
metric used for evaluation
output_transformers: list[Transformer]
Transformers for evaluation. This argument is needed since
`train_dataset` and `valid_dataset` may have been transformed
for learning and need the transform to be inverted before
the metric can be evaluated on a model.
nb_epoch: int, (default 10)
Specifies the number of training epochs during each iteration of optimization.
Not used by all model types.
use_max: bool, optional
If True, return the model with the highest score. Else return
model with the minimum score.
logdir: str, optional
The directory in which to store created models. If not set, will
use a temporary directory.
logfile: str, optional (default `results.txt`)
Name of logfile to write results to. If specified, this must
be a valid file name. If not specified, results of the hyperparameter
search will be written to `logdir/results.txt`.
Returns
-------
Tuple[`best_model`, `best_hyperparams`, `all_scores`]
`(best_model, best_hyperparams, all_scores)` where `best_model` is
an instance of `dc.model.Model`, `best_hyperparams` is a
dictionary of parameters, and `all_scores` is a dictionary mapping
string representations of hyperparameter sets to validation
scores.
"""
# each value in params_dict should either be an Iterable sequence or a callable (e.g. a random sampler's rvs method)
for hyperparam in params_dict.values():
assert isinstance(hyperparam,
collections.abc.Iterable) or callable(hyperparam)
if use_max:
best_validation_score = -np.inf
else:
best_validation_score = np.inf
best_model = None
all_scores = {}
if logdir is not None:
if not os.path.exists(logdir):
os.makedirs(logdir, exist_ok=True)
log_file = os.path.join(logdir, logfile)
hyperparameter_combs = RandomHyperparamOpt.generate_random_hyperparam_values(
params_dict, self.max_iter)
for ind, model_params in enumerate(hyperparameter_combs):
logger.info("Fitting model %d/%d" % (ind + 1, self.max_iter))
logger.info("hyperparameters: %s" % str(model_params))
hp_str = _convert_hyperparam_dict_to_filename(model_params)
if logdir is not None:
model_dir = os.path.join(logdir, hp_str)
logger.info("model_dir is %s" % model_dir)
try:
os.makedirs(model_dir)
except OSError:
if not os.path.isdir(model_dir):
logger.info(
"Error creating model_dir, using tempfile directory"
)
model_dir = tempfile.mkdtemp()
else:
model_dir = tempfile.mkdtemp()
model_params['model_dir'] = model_dir
model = self.model_builder(**model_params)
# mypy test throws error, so ignoring it in try
try:
model.fit(train_dataset, nb_epoch=nb_epoch) # type: ignore
# Not all models have nb_epoch
except TypeError:
model.fit(train_dataset)
try:
model.save()
# Some models autosave
except NotImplementedError:
pass
multitask_scores = model.evaluate(valid_dataset, [metric],
output_transformers)
valid_score = multitask_scores[metric.name]
# Update best validation score so far
if (use_max and valid_score >= best_validation_score) or (
not use_max and valid_score <= best_validation_score):
best_validation_score = valid_score
best_hyperparams = model_params
best_model = model
# if `hp_str` not in `all_scores`, store it in `all_scores`
if hp_str not in all_scores:
all_scores[hp_str] = valid_score
logger.info("Model %d/%d, Metric %s, Validation set %s: %f" %
(ind + 1, nb_epoch, metric.name, ind, valid_score))
logger.info("\tbest_validation_score so far: %f" %
best_validation_score)
if best_model is None:
logger.info("No models trained correctly.")
# arbitrarily return last model trained
if logdir is not None:
with open(log_file, 'w+') as f:
f.write(
"No model trained correctly. Arbitary models returned")
best_model, best_hyperparams = model, model_params
return best_model, best_hyperparams, all_scores
multitask_scores = best_model.evaluate(train_dataset, [metric],
output_transformers)
train_score = multitask_scores[metric.name]
logger.info("Best hyperparameters: %s" % str(best_hyperparams))
logger.info("best train_score: %f" % train_score)
logger.info("best validation_score: %f" % best_validation_score)
if logdir is not None:
with open(log_file, 'w+') as f:
f.write("Best Hyperparameters dictionary %s\n" %
str(best_hyperparams))
f.write("Best validation score %f\n" % best_validation_score)
f.write("Best train_score: %f\n" % train_score)
return best_model, best_hyperparams, all_scores
@classmethod
def generate_random_hyperparam_values(cls, params_dict: Dict,
n: int) -> List[Dict[str, Any]]:
"""Generates `n` random hyperparameter combinations of hyperparameter values
Parameters
----------
params_dict: Dict
A dictionary of hyperparameters where parameters which take discrete
values are specified as iterables and continuous parameters are
specified as callables.
n: int
Number of hyperparameter combinations to generate
Returns
-------
A list of generated hyperparameters
Example
-------
>>> from scipy.stats import uniform
>>> from deepchem.hyper import RandomHyperparamOpt
>>> n = 1
>>> params_dict = {'a': [1, 2, 3], 'b': [5, 7, 8], 'c': uniform(10, 5).rvs}
>>> RandomHyperparamOpt.generate_random_hyperparam_values(params_dict, n) # doctest: +SKIP
[{'a': 3, 'b': 7, 'c': 10.619700740985433}]
"""
hyperparam_keys, hyperparam_values = [], []
for key, values in params_dict.items():
if callable(values):
# If callable, sample it n times
values = [values() for i in range(n)]
hyperparam_keys.append(key)
hyperparam_values.append(values)
hyperparam_combs = []
for iterable_hyperparam_comb in itertools.product(*hyperparam_values):
hyperparam_comb = list(iterable_hyperparam_comb)
hyperparam_combs.append(hyperparam_comb)
indices = np.random.permutation(len(hyperparam_combs))[:n]
params_subset = []
for index in indices:
param = {}
for key, hyperparam_value in zip(hyperparam_keys,
hyperparam_combs[index]):
param[key] = hyperparam_value
params_subset.append(param)
return params_subset
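# Hedged sketch (illustrative only): drawing random hyperparameter
# combinations, mixing a discrete list with a callable sampler. The lambda
# below stands in for e.g. scipy.stats.uniform(...).rvs.
def _example_generate_random_hyperparams():
    import random
    params_dict = {
        "batch_size": [16, 32, 64],
        "learning_rate": lambda: 10**random.uniform(-4, -2),
    }
    combos = RandomHyperparamOpt.generate_random_hyperparam_values(params_dict,
                                                                   n=3)
    assert len(combos) == 3
    return combos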
<file_sep>import logging
try:
import jax.numpy as jnp
import haiku as hk
except ModuleNotFoundError:
raise ImportError('These classes require Jax and Haiku to be installed.')
logger = logging.getLogger(__name__)
class Linear(hk.Module):
"""Protein folding specific Linear Module.
This differs from the standard Haiku Linear in a few ways:
* It supports inputs of arbitrary rank
* Initializers are specified by strings
This code is adapted from DeepMind's AlphaFold code release
(https://github.com/deepmind/alphafold).
Examples
--------
>>> import deepchem as dc
>>> import haiku as hk
>>> import jax
>>> import deepchem.models.jax_models.layers
>>> def forward_model(x):
... layer = dc.models.jax_models.layers.Linear(2)
... return layer(x)
>>> f = hk.transform(forward_model)
>>> rng = jax.random.PRNGKey(42)
>>> import jax.numpy as jnp
>>> x = jnp.ones([8, 28 * 28])
>>> params = f.init(rng, x)
>>> output = f.apply(params, rng, x)
"""
def __init__(self,
num_output: int,
initializer: str = 'linear',
use_bias: bool = True,
bias_init: float = 0.,
name: str = 'linear'):
"""Constructs Linear Module.
Parameters
----------
num_output: int
number of output channels.
initializer: str (default 'linear')
What initializer to use, should be one of {'linear', 'relu', 'zeros'}
use_bias: bool (default True)
Whether to include trainable bias
bias_init: float (default 0)
Value used to initialize bias.
name: str (default 'linear')
name of module, used for name scopes.
"""
super().__init__(name=name)
self.num_output = num_output
self.initializer = initializer
self.use_bias = use_bias
self.bias_init = bias_init
def __call__(self, inputs: jnp.ndarray) -> jnp.ndarray:
"""Connects Module.
Parameters
----------
inputs: jnp.ndarray
Tensor of shape [..., num_channel]
Returns
-------
output of shape [..., num_output]
"""
n_channels = int(inputs.shape[-1])
weight_shape = [n_channels, self.num_output]
if self.initializer == 'linear':
weight_init = hk.initializers.VarianceScaling(mode='fan_in',
scale=1.)
elif self.initializer == 'relu':
weight_init = hk.initializers.VarianceScaling(mode='fan_in',
scale=2.)
elif self.initializer == 'zeros':
weight_init = hk.initializers.Constant(0.0)
weights = hk.get_parameter('weights', weight_shape, inputs.dtype,
weight_init)
# this is equivalent to einsum('...c,cd->...d', inputs, weights)
# but turns out to be slightly faster
inputs = jnp.swapaxes(inputs, -1, -2)
output = jnp.einsum('...cb,cd->...db', inputs, weights)
output = jnp.swapaxes(output, -1, -2)
if self.use_bias:
bias = hk.get_parameter('bias', [self.num_output], inputs.dtype,
hk.initializers.Constant(self.bias_init))
output += bias
return output
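# Hedged sketch (illustrative only): verifies that the swap-axes einsum used
# in Linear.__call__ matches the direct '...c,cd->...d' contraction it is
# documented to be equivalent to.
def _example_einsum_equivalence():
    import jax
    key_x, key_w = jax.random.split(jax.random.PRNGKey(0))
    inputs = jax.random.normal(key_x, (4, 7, 16))
    weights = jax.random.normal(key_w, (16, 8))
    direct = jnp.einsum('...c,cd->...d', inputs, weights)
    swapped_in = jnp.swapaxes(inputs, -1, -2)
    swapped_out = jnp.einsum('...cb,cd->...db', swapped_in, weights)
    assert jnp.allclose(direct, jnp.swapaxes(swapped_out, -1, -2), atol=1e-4)
    return direct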
<file_sep>import numpy as np
from deepchem.utils.typing import RDKitMol
from deepchem.utils.data_utils import pad_array
from deepchem.feat.base_classes import MolecularFeaturizer
from deepchem.feat.molecule_featurizers.atomic_coordinates import AtomicCoordinates
class BPSymmetryFunctionInput(MolecularFeaturizer):
"""Calculate symmetry function for each atom in the molecules
This method is described in [1]_.
Examples
--------
>>> import deepchem as dc
>>> smiles = ['C1C=CC=CC=1']
>>> featurizer = dc.feat.BPSymmetryFunctionInput(max_atoms=10)
>>> features = featurizer.featurize(smiles)
>>> type(features[0])
<class 'numpy.ndarray'>
>>> features[0].shape # (max_atoms, 4)
(10, 4)
References
----------
.. [1] Behler, Jörg, and <NAME>. "Generalized neural-network
representation of high-dimensional potential-energy surfaces." Physical
review letters 98.14 (2007): 146401.
Note
----
This class requires RDKit to be installed.
"""
def __init__(self, max_atoms: int):
"""Initialize this featurizer.
Parameters
----------
max_atoms: int
The maximum number of atoms expected for molecules this featurizer will
process.
"""
self.max_atoms = max_atoms
self.coordfeat = AtomicCoordinates(use_bohr=True)
def _featurize(self, datapoint: RDKitMol, **kwargs) -> np.ndarray:
"""Calculate symmetry function.
Parameters
----------
datapoint: rdkit.Chem.rdchem.Mol
RDKit Mol object
Returns
-------
np.ndarray
A numpy array of symmetry function. The shape is `(max_atoms, 4)`.
"""
if 'mol' in kwargs:
datapoint = kwargs.get("mol")
raise DeprecationWarning(
'Mol is being phased out as a parameter, please pass "datapoint" instead.'
)
coordinates = self.coordfeat._featurize(datapoint)
atom_numbers = np.array(
[atom.GetAtomicNum() for atom in datapoint.GetAtoms()])
atom_numbers = np.expand_dims(atom_numbers, axis=1)
assert atom_numbers.shape[0] == coordinates.shape[0]
features = np.concatenate([atom_numbers, coordinates], axis=1)
return pad_array(features, (self.max_atoms, 4))
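# Hedged sketch (illustrative only): the feature layout produced by
# `_featurize`, assuming `pad_array` zero-pads a (n_atoms, 4) matrix up to the
# requested (max_atoms, 4) shape.
def _example_symmetry_function_shape():
    atom_numbers = np.arange(1, 7).reshape(-1, 1)  # atomic numbers of 6 atoms
    coordinates = np.zeros((6, 3))  # placeholder Cartesian coordinates
    features = np.concatenate([atom_numbers, coordinates], axis=1)
    padded = pad_array(features, (10, 4))
    assert padded.shape == (10, 4)
    return padded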
<file_sep>import pytest
try:
import torch
import torch.nn as nn
except ModuleNotFoundError:
pass
@pytest.mark.torch
def testScaledDotProductAttention():
from deepchem.models import ScaledDotProductAttention as SDPA
attn = SDPA()
x = torch.ones(2, 5)
# Linear layers for making query, key, value
Q, K, V = nn.Parameter(torch.ones(5)), nn.Parameter(
torch.ones(5)), nn.Parameter(torch.ones(5))
query, key, value = Q * x, K * x, V * x
mask = torch.Tensor([1, 0])
x_out, attn_score = attn(query, key, value, mask=mask)
torch.testing.assert_close(x_out, torch.ones(2, 5))
torch.testing.assert_close(attn_score, torch.Tensor([[1, 0], [1, 0]]))
@pytest.mark.torch
def testSelfAttention():
from deepchem.models import SelfAttention as SA
n, in_feature, out_feature = 10, 4, 8
attn = SA(in_feature, out_feature, hidden_size=16)
x = torch.randn((n, in_feature))
x, attn = attn(x)
assert x.size() == (out_feature, in_feature)
<file_sep>from typing import Optional, List, Tuple, Any
import tensorflow as tf
from deepchem.feat.molecule_featurizers.molgan_featurizer import GraphMatrix
from deepchem.models import WGAN
from deepchem.models.layers import MolGANEncoderLayer
from tensorflow import keras
from tensorflow.keras import layers
class BasicMolGANModel(WGAN):
"""
Model for de-novo generation of small molecules based on work of <NAME> et al. [1]_.
It uses a GAN directly on graph data and a reinforcement learning objective to induce the network to generate molecules with certain chemical properties.
Utilizes WGAN infrastructure; uses adjacency matrix and node features as inputs.
Inputs need to be one-hot representation.
Examples
--------
>>>
>> import deepchem as dc
>> from deepchem.models import BasicMolGANModel as MolGAN
>> from deepchem.models.optimizers import ExponentialDecay
>> from tensorflow import one_hot
>> smiles = ['CCC', 'C1=CC=CC=C1', 'CNC' ]
>> # create featurizer
>> feat = dc.feat.MolGanFeaturizer()
>> # featurize molecules
>> features = feat.featurize(smiles)
>> # Remove empty objects
>> features = list(filter(lambda x: x is not None, features))
>> # create model
>> gan = MolGAN(learning_rate=ExponentialDecay(0.001, 0.9, 5000))
>> dataset = dc.data.NumpyDataset([x.adjacency_matrix for x in features],[x.node_features for x in features])
>> def iterbatches(epochs):
>> for i in range(epochs):
>> for batch in dataset.iterbatches(batch_size=gan.batch_size, pad_batches=True):
>> adjacency_tensor = one_hot(batch[0], gan.edges)
>> node_tensor = one_hot(batch[1], gan.nodes)
>> yield {gan.data_inputs[0]: adjacency_tensor, gan.data_inputs[1]:node_tensor}
>> gan.fit_gan(iterbatches(8), generator_steps=0.2, checkpoint_interval=5000)
>> generated_data = gan.predict_gan_generator(1000)
>> # convert graphs to RDKit molecules
>> nmols = feat.defeaturize(generated_data)
>> print("{} molecules generated".format(len(nmols)))
>> # remove invalid molecules
>> nmols = list(filter(lambda x: x is not None, nmols))
>> # currently training is unstable so 0 is a common outcome
>> print ("{} valid molecules".format(len(nmols)))
References
----------
.. [1] <NAME> et al. "MolGAN: An implicit generative model
for small molecular graphs", https://arxiv.org/abs/1805.11973
"""
def __init__(self,
edges: int = 5,
vertices: int = 9,
nodes: int = 5,
embedding_dim: int = 10,
dropout_rate: float = 0.0,
**kwargs):
"""
Initialize the model
Parameters
----------
edges: int, default 5
Number of bond types includes BondType.Zero
vertices: int, default 9
Max number of atoms in adjacency and node features matrices
nodes: int, default 5
Number of atom types in node features matrix
embedding_dim: int, default 10
Size of noise input array
dropout_rate: float, default = 0.
Rate of dropout used across whole model
name: str, default ''
Name of the model
"""
self.edges = edges
self.vertices = vertices
self.nodes = nodes
self.embedding_dim = embedding_dim
self.dropout_rate = dropout_rate
super(BasicMolGANModel, self).__init__(**kwargs)
def get_noise_input_shape(self) -> Tuple[int]:
"""
Return shape of the noise input used in generator
Returns
-------
Tuple
Shape of the noise input
"""
return (self.embedding_dim,)
def get_data_input_shapes(self) -> List:
"""
Return input shape of the discriminator
Returns
-------
List
List of shapes used as an input for the discriminator.
"""
return [
(self.vertices, self.vertices, self.edges),
(self.vertices, self.nodes),
]
def create_generator(self) -> keras.Model:
"""
Create generator model.
Takes noise data as an input and processes it through a number of
dense and dropout layers. Then the data is converted into two forms,
one used for training and the other for generation of compounds.
The model has two outputs:
1. edges
2. nodes
The format differs depending on the intended use (training or sample generation).
For sample generation, pass the flag sample_generation=True when calling the generator,
i.e. gan.generators[0](noise_input, training=False, sample_generation=True).
For training the model, set `sample_generation=False`.
"""
return BasicMolGANGenerator(vertices=self.vertices,
edges=self.edges,
nodes=self.nodes,
dropout_rate=self.dropout_rate,
embedding_dim=self.embedding_dim)
def create_discriminator(self) -> keras.Model:
"""
Create discriminator model based on MolGAN layers.
Takes two inputs:
1. adjacency tensor, containing bond information
2. nodes tensor, containing atom information
The input vectors need to be in one-hot encoding format.
Use MolGAN featurizer for that purpose. It will be simplified
in the future release.
"""
adjacency_tensor = layers.Input(shape=(self.vertices, self.vertices,
self.edges))
node_tensor = layers.Input(shape=(self.vertices, self.nodes))
graph = MolGANEncoderLayer(
units=[(128, 64), 128],
dropout_rate=self.dropout_rate,
edges=self.edges)([adjacency_tensor, node_tensor])
dense = layers.Dense(units=128, activation="tanh")(graph)
dense = layers.Dropout(self.dropout_rate)(dense)
dense = layers.Dense(units=64, activation="tanh")(dense)
dense = layers.Dropout(self.dropout_rate)(dense)
output = layers.Dense(units=1)(dense)
return keras.Model(inputs=[
adjacency_tensor,
node_tensor,
],
outputs=[output])
def predict_gan_generator(self,
batch_size: int = 1,
noise_input: Optional[List] = None,
conditional_inputs: List = [],
generator_index: int = 0) -> List[GraphMatrix]:
"""
Use the GAN to generate a batch of samples.
Parameters
----------
batch_size: int
the number of samples to generate. If either noise_input or
conditional_inputs is specified, this argument is ignored since the batch
size is then determined by the size of that argument.
noise_input: array
the value to use for the generator's noise input. If None (the default),
get_noise_batch() is called to generate a random input, so each call will
produce a new set of samples.
conditional_inputs: list of arrays
NOT USED.
the values to use for all conditional inputs. This must be specified if
the GAN has any conditional inputs.
generator_index: int
NOT USED.
the index of the generator (between 0 and n_generators-1) to use for
generating the samples.
Returns
-------
List[GraphMatrix]
Returns a list of GraphMatrix object that can be converted into
RDKit molecules using MolGANFeaturizer defeaturize function.
"""
if noise_input is not None:
batch_size = len(noise_input)
if noise_input is None:
noise_input = self.get_noise_batch(batch_size)
print(f"Generating {batch_size} samples")
adjacency_matrix, nodes_features = self.generators[0](
noise_input, training=False, sample_generation=True)
graphs = [
GraphMatrix(i, j)
for i, j in zip(adjacency_matrix.numpy(), nodes_features.numpy())
]
return graphs
class BasicMolGANGenerator(tf.keras.Model):
"""
Generator class for BasicMolGAN model.
Using subclassing rather than functional API due to requirement
to swap between two outputs depending on situation.
In order to get the output used for sample generation
(conversion to rdkit molecules), pass the sample_generation=True argument when
calling the model, i.e. adjacency_matrix, nodes_features = self.generators[0](
noise_input, training=False, sample_generation=True)
This is automatically done in predict_gan_generator().
"""
def __init__(self,
vertices: int = 9,
edges: int = 5,
nodes: int = 5,
dropout_rate: float = 0.,
embedding_dim: int = 10,
name: str = "SimpleMolGANGenerator",
**kwargs):
"""
Initialize model.
Parameters
----------
vertices : int, optional
maximum number of atoms in dataset molecules (incl. empty atom), by default 9
edges : int, optional
number of bond types in molecules, by default 5
nodes : int, optional
number of atom types in molecules, by default 5
dropout_rate : float, optional
rate of dropout, by default 0.
embedding_dim : int, optional
noise input dimensions, by default 10
name : str, optional
name of the model, by default "SimpleMolGANGenerator"
"""
super(BasicMolGANGenerator, self).__init__(name=name, **kwargs)
self.vertices = vertices
self.edges = edges
self.nodes = nodes
self.dropout_rate = dropout_rate
self.embedding_dim = embedding_dim
self.dense1 = layers.Dense(128,
activation="tanh",
input_shape=(self.embedding_dim,))
self.dropout1 = layers.Dropout(self.dropout_rate)
self.dense2 = layers.Dense(256, activation="tanh")
self.dropout2 = layers.Dropout(self.dropout_rate)
self.dense3 = layers.Dense(512, activation="tanh")
self.dropout3 = layers.Dropout(self.dropout_rate)
# edges logits used during training
self.edges_dense = layers.Dense(units=self.edges * self.vertices *
self.vertices,
activation=None)
self.edges_reshape = layers.Reshape(
(self.edges, self.vertices, self.vertices))
self.edges_matrix_transpose1 = layers.Permute((1, 3, 2))
self.edges_matrix_transpose2 = layers.Permute((2, 3, 1))
self.edges_dropout = layers.Dropout(self.dropout_rate)
# nodes logits used during training
self.nodes_dense = layers.Dense(units=(self.vertices * self.nodes),
activation=None)
self.nodes_reshape = layers.Reshape((self.vertices, self.nodes))
self.nodes_dropout = layers.Dropout(self.dropout_rate)
def call(self,
inputs: Any,
training: bool = False,
sample_generation: bool = False) -> List[Any]:
"""
Call generator model
Parameters
----------
inputs : Any
List of inputs, typically noise_batch
training : bool, optional
used by dropout layers, by default False
sample_generation : bool, optional
decide which output to use, by default False
Returns
-------
List[Any, Any]
Tensors containing either softmax values for training
or argmax for sample generation (used for creation of rdkit molecules).
"""
x = self.dense1(inputs)
x = self.dropout1(x)
x = self.dense2(x)
x = self.dropout2(x)
x = self.dense3(x)
x = self.dropout3(x)
# edges logits
edges_logits = self.edges_dense(x)
edges_logits = self.edges_reshape(edges_logits)
matrix_transpose = self.edges_matrix_transpose1(edges_logits)
edges_logits = (edges_logits + matrix_transpose) / 2
edges_logits = self.edges_matrix_transpose2(edges_logits)
edges_logits = self.edges_dropout(edges_logits)
# nodes logits
nodes_logits = self.nodes_dense(x)
nodes_logits = self.nodes_reshape(nodes_logits)
nodes_logits = self.nodes_dropout(nodes_logits)
if sample_generation is False:
# training of the model
edges = tf.nn.softmax(edges_logits)
nodes = tf.nn.softmax(nodes_logits)
else:
# generating compounds
e_gumbel_logits = edges_logits - tf.math.log(-tf.math.log(
tf.random.uniform(tf.shape(edges_logits),
dtype=edges_logits.dtype)))
e_gumbel_argmax = tf.one_hot(
tf.argmax(e_gumbel_logits, axis=-1),
depth=e_gumbel_logits.shape[-1],
dtype=e_gumbel_logits.dtype,
)
edges = tf.argmax(e_gumbel_argmax, axis=-1)
# nodes logits used during compound generation
n_gumbel_logits = nodes_logits - tf.math.log(-tf.math.log(
tf.random.uniform(tf.shape(nodes_logits),
dtype=nodes_logits.dtype)))
n_gumbel_argmax = tf.one_hot(
tf.argmax(n_gumbel_logits, axis=-1),
depth=n_gumbel_logits.shape[-1],
dtype=n_gumbel_logits.dtype,
)
nodes = tf.argmax(n_gumbel_argmax, axis=-1)
return [edges, nodes]
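# Hedged sketch (illustrative only): the Gumbel-max trick used in `call` for
# sample generation. Adding Gumbel noise to the logits and taking the argmax
# draws samples from the categorical distribution softmax(logits).
def _example_gumbel_max_sampling():
    logits = tf.constant([[2.0, 0.5, 0.1]])
    gumbel_noise = -tf.math.log(-tf.math.log(
        tf.random.uniform(tf.shape(logits), dtype=logits.dtype)))
    sample = tf.argmax(logits + gumbel_noise, axis=-1)
    return sample  # shape (1,), a sampled category index per row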
<file_sep>import random
import copy
import torch
import numpy as np
from torch_geometric.nn import GINEConv, GCNConv, GATConv, SAGEConv, global_add_pool, global_mean_pool, global_max_pool
from torch_geometric.nn.aggr import AttentionalAggregation, Set2Set
from torch_geometric.nn.inits import uniform
import torch.nn as nn
from torch.functional import F
from deepchem.data import Dataset
from deepchem.models.losses import SoftmaxCrossEntropy, EdgePredictionLoss, GraphNodeMaskingLoss, GraphEdgeMaskingLoss, DeepGraphInfomaxLoss, GraphContextPredLoss
from deepchem.models.torch_models import ModularTorchModel
from deepchem.feat.graph_data import BatchGraphData, GraphData, shortest_path_length
from typing import Iterable, List, Tuple
from deepchem.metrics import to_one_hot
num_node_type = 120 # including the extra mask tokens
num_chirality_tag = 3
num_edge_type = 6 # including aromatic and self-loop edge, and extra masked tokens
class GNN(torch.nn.Module):
"""
GNN module for the GNNModular model.
This module is responsible for the graph neural network layers in the GNNModular model.
Parameters
----------
node_type_embedding: torch.nn.Embedding
Embedding layer for node types.
chirality_embedding: torch.nn.Embedding
Embedding layer for chirality tags.
gconvs: torch.nn.ModuleList
ModuleList of graph convolutional layers.
batch_norms: torch.nn.ModuleList
ModuleList of batch normalization layers.
dropout: float
Dropout probability.
jump_knowledge: str
The type of jump knowledge to use. [1] Must be one of "last", "sum", "max", "concat" or "none".
"last": Use the node representation from the last GNN layer.
"concat": Concatenate the node representations from all GNN layers.
"max": Take the element-wise maximum of the node representations from all GNN layers.
"sum": Take the element-wise sum of the node representations from all GNN layers.
init_emb: bool
Whether to initialize the embedding layers with Xavier uniform initialization.
References
----------
.. [1] <NAME>. et al. Representation Learning on Graphs with Jumping Knowledge Networks. Preprint at https://doi.org/10.48550/arXiv.1806.03536 (2018).
Example
-------
>>> from deepchem.models.torch_models.gnn import GNNModular
>>> from deepchem.feat.graph_data import BatchGraphData
>>> from deepchem.feat.molecule_featurizers import SNAPFeaturizer
>>> featurizer = SNAPFeaturizer()
>>> smiles = ["C1=CC=CC=C1", "C1=CC=CC=C1C=O", "C1=CC=CC=C1C(=O)O"]
>>> features = featurizer.featurize(smiles)
>>> modular = GNNModular(emb_dim = 8, task = "edge_pred")
>>> batched_graph = BatchGraphData(features).numpy_to_torch(device=modular.device)
>>> gnnmodel = modular.gnn
>>> print(gnnmodel(batched_graph)[0].shape)
torch.Size([23, 8])
"""
def __init__(self,
node_type_embedding,
chirality_embedding,
gconvs,
batch_norms,
dropout,
jump_knowledge,
init_emb=False):
super(GNN, self).__init__()
self.node_type_embedding = node_type_embedding
self.chirality_embedding = chirality_embedding
self.gconv = gconvs
self.batch_norms = batch_norms
self.dropout = dropout
self.num_layer = len(gconvs)
self.jump_knowledge = jump_knowledge
# may mess with loading pretrained weights
if init_emb:
torch.nn.init.xavier_uniform_(self.node_type_embedding.weight.data)
torch.nn.init.xavier_uniform_(self.chirality_embedding.weight.data)
def forward(self, data: BatchGraphData):
"""
Forward pass for the GNN module.
Parameters
----------
data: BatchGraphData
Batched graph data.
"""
node_feats = data.node_features[:, 0].long() # type: ignore
chiral_feats = data.node_features[:, 1].long() # type: ignore
node_emb = self.node_type_embedding(node_feats)
chir_emb = self.chirality_embedding(chiral_feats)
x = node_emb + chir_emb
h_list = [x]
for i, conv_layer in enumerate(self.gconv):
if isinstance(conv_layer, (GINEConv, GATConv)):
h = conv_layer(h_list[i], data.edge_index, data.edge_features)
elif isinstance(conv_layer, (GCNConv, SAGEConv)):
h = conv_layer(h_list[i], data.edge_index)
h = self.batch_norms[i](h)
h = F.dropout(F.relu(h), self.dropout, training=self.training)
if i == self.num_layer - 1:
# remove relu for the last layer
h = F.dropout(h, self.dropout, training=self.training)
else:
h = F.dropout(F.relu(h), self.dropout, training=self.training)
h_list.append(h)
if self.jump_knowledge == "concat":
node_representation = torch.cat(h_list, dim=1)
# reshapes node_representation to (num_nodes, num_layers * emb_dim)
elif self.jump_knowledge == "last":
node_representation = h_list[-1]
elif self.jump_knowledge == "max":
h_list = [h.unsqueeze_(0) for h in h_list]
node_representation = torch.max(torch.cat(h_list, dim=0), dim=0)[0]
elif self.jump_knowledge == "sum":
h_list = [h.unsqueeze_(0) for h in h_list]
node_representation = torch.sum(torch.cat(h_list, dim=0), dim=0)
return (node_representation, data)
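# Hedged sketch (illustrative only): the jump-knowledge aggregations used in
# GNN.forward, applied to a toy list of per-layer node representations.
def _example_jump_knowledge():
    h_list = [torch.full((4, 8), float(i)) for i in range(3)]  # 3 layers
    concat = torch.cat(h_list, dim=1)       # (4, 24), concatenated layers
    last = h_list[-1]                       # (4, 8), last layer only
    stacked = torch.stack(h_list, dim=0)    # (3, 4, 8)
    h_max = torch.max(stacked, dim=0)[0]    # (4, 8), element-wise max
    h_sum = torch.sum(stacked, dim=0)       # (4, 8), element-wise sum
    return concat, last, h_max, h_sum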
class GNNHead(torch.nn.Module):
"""
Prediction head module for the GNNModular model.
Parameters
----------
pool: Union[function,torch.nn.Module]
Pooling function or nn.Module to use
head: torch.nn.Module
Prediction head to use
task: str
The type of task. Must be one of "regression", "classification".
num_tasks: int
Number of tasks.
num_classes: int
Number of classes for classification.
"""
def __init__(self, pool, head, task, num_tasks, num_classes):
super().__init__()
self.pool = pool
self.head = head
self.task = task
self.num_tasks = num_tasks
self.num_classes = num_classes
def forward(self, data):
"""
Forward pass for the GNN head module.
Parameters
----------
data: tuple
A tuple containing the node representations and the input graph data.
node_representation is a torch.Tensor created after passing input through the GNN layers.
input_batch is the original input BatchGraphData.
"""
node_representation, input_batch = data
pooled = self.pool(node_representation, input_batch.graph_index)
out = self.head(pooled)
if self.task == "classification":
out = torch.reshape(out, (-1, self.num_tasks, self.num_classes))
return out
class LocalGlobalDiscriminator(nn.Module):
"""
This discriminator module is a linear layer without bias, used to measure the similarity between local node representations (`x`) and global graph representations (`summary`).
The goal of the discriminator is to distinguish between positive and negative pairs of local and global representations.
Examples
--------
>>> import torch
>>> from deepchem.models.torch_models.gnn import LocalGlobalDiscriminator
>>> discriminator = LocalGlobalDiscriminator(hidden_dim=64)
>>> x = torch.randn(32, 64) # Local node representations
>>> summary = torch.randn(32, 64) # Global graph representations
>>> similarity_scores = discriminator(x, summary)
>>> print(similarity_scores.shape)
torch.Size([32])
"""
def __init__(self, hidden_dim):
"""
`self.weight` is a learnable weight matrix of shape `(hidden_dim, hidden_dim)`.
nn.Parameters are tensors that require gradients and are optimized during the training process.
Parameters
----------
hidden_dim : int
The size of the hidden dimension for the weight matrix.
"""
super().__init__()
self.weight = nn.Parameter(torch.Tensor(hidden_dim, hidden_dim))
self.reset_parameters()
def reset_parameters(self):
size = self.weight.size(0)
uniform(size, self.weight)
def forward(self, x, summary):
"""
Computes the product of `summary` and `self.weight`, and then calculates the element-wise product of `x` and the resulting matrix `h`.
It then sums over the `hidden_dim` dimension, resulting in a tensor of shape `(batch_size,)`, which represents the similarity scores between the local and global representations.
Parameters
----------
x : torch.Tensor
Local node representations of shape `(batch_size, hidden_dim)`.
summary : torch.Tensor
Global graph representations of shape `(batch_size, hidden_dim)`.
Returns
-------
torch.Tensor
A tensor of shape `(batch_size,)`, representing the similarity scores between the local and global representations.
"""
h = torch.matmul(summary, self.weight)
return torch.sum(x * h, dim=1)
class GNNModular(ModularTorchModel):
"""
Modular GNN which allows for easy swapping of GNN layers.
Parameters
----------
gnn_type: str
The type of GNN layer to use. Must be one of "gin", "gcn", "graphsage", or "gat".
num_layer: int
The number of GNN layers to use.
emb_dim: int
The dimensionality of the node embeddings.
num_tasks: int
The number of tasks.
graph_pooling: str
The type of graph pooling to use. Must be one of "sum", "mean", "max", "attention" or "set2set".
"sum" may cause issues with positive prediction loss.
dropout: float, optional (default 0)
The dropout probability.
jump_knowledge: str, optional (default "last")
The type of jump knowledge to use. [1] Must be one of "last", "sum", "max", or "concat".
"last": Use the node representation from the last GNN layer.
"concat": Concatenate the node representations from all GNN layers. This will increase the dimensionality of the node representations by a factor of `num_layer`.
"max": Take the element-wise maximum of the node representations from all GNN layers.
"sum": Take the element-wise sum of the node representations from all GNN layers. This may cause issues with positive prediction loss.
task: str, optional (default "regression")
The type of task.
Unsupervised tasks:
edge_pred: Edge prediction. Predicts whether an edge exists between two nodes.
mask_nodes: Masking nodes. Predicts the masked node.
mask_edges: Masking edges. Predicts the masked edge.
infomax: Infomax. Maximizes mutual information between local node representations and a pooled global graph representation.
context_pred: Context prediction. Predicts the surrounding context of a node.
Supervised tasks:
"regression" or "classification".
mask_rate: float, optional (default 0.1)
The rate at which to mask nodes or edges for mask_nodes and mask_edges tasks.
mask_edge: bool, optional (default True)
Whether to also mask connecting edges for mask_nodes tasks.
context_size: int, optional (default 1)
The size of the context to use for context prediction tasks.
neighborhood_size: int, optional (default 3)
The size of the neighborhood to use for context prediction tasks.
context_mode: str, optional (default "cbow")
The context mode to use for context prediction tasks. Must be one of "cbow" or "skipgram".
neg_samples: int, optional (default 1)
The number of negative samples to use for context prediction.
Examples
--------
>>> import numpy as np
>>> import deepchem as dc
>>> from deepchem.feat.molecule_featurizers import SNAPFeaturizer
>>> from deepchem.models.torch_models.gnn import GNNModular
>>> featurizer = SNAPFeaturizer()
>>> smiles = ["C1=CC=CC=C1", "C1=CC=CC=C1C=O", "C1=CC=CC=C1C(=O)O"]
>>> features = featurizer.featurize(smiles)
>>> dataset = dc.data.NumpyDataset(features, np.zeros(len(features)))
>>> model = GNNModular(task="edge_pred")
>>> loss = model.fit(dataset, nb_epoch=1)
References
----------
.. [1] <NAME>. et al. Representation Learning on Graphs with Jumping Knowledge Networks. Preprint at https://doi.org/10.48550/arXiv.1806.03536 (2018).
.. [2] <NAME>. et al. Strategies for Pre-training Graph Neural Networks. Preprint at https://doi.org/10.48550/arXiv.1905.12265 (2020).
"""
def __init__(self,
gnn_type: str = "gin",
num_layer: int = 3,
emb_dim: int = 64,
num_tasks: int = 1,
num_classes: int = 2,
graph_pooling: str = "mean",
dropout: float = 0.0,
jump_knowledge: str = "last",
task: str = "edge_pred",
mask_rate: float = .1,
mask_edge: bool = True,
context_size: int = 1,
neighborhood_size: int = 3,
context_mode: str = "cbow",
neg_samples: int = 1,
**kwargs):
self.gnn_type = gnn_type
self.num_layer = num_layer
self.emb_dim = emb_dim
self.num_tasks = num_tasks
self.num_classes = num_classes
if task == "classification":
self.output_dim = num_classes * num_tasks
self.criterion = SoftmaxCrossEntropy()._create_pytorch_loss()
elif task == "regression":
self.output_dim = num_tasks
self.criterion = F.mse_loss
elif task == "edge_pred":
self.output_dim = num_tasks
self.edge_pred_loss = EdgePredictionLoss()._create_pytorch_loss()
elif task == "mask_nodes":
self.mask_rate = mask_rate
self.mask_edge = mask_edge
self.node_mask_loss = GraphNodeMaskingLoss()._create_pytorch_loss(
self.mask_edge)
elif task == "mask_edges":
self.mask_rate = mask_rate
self.edge_mask_loss = GraphEdgeMaskingLoss()._create_pytorch_loss()
elif task == "infomax":
self.infomax_loss = DeepGraphInfomaxLoss()._create_pytorch_loss()
elif task == "context_pred":
self.context_size = context_size
self.neighborhood_size = neighborhood_size
self.neg_samples = neg_samples
self.context_mode = context_mode
self.context_pred_loss = GraphContextPredLoss(
)._create_pytorch_loss(context_mode, neg_samples)
self.graph_pooling = graph_pooling
self.dropout = dropout
self.jump_knowledge = jump_knowledge
self.task = task
self.components = self.build_components()
self.model = self.build_model()
super().__init__(self.model, self.components, **kwargs)
def build_components(self):
"""
Builds the components of the GNNModular model. It initializes the encoders, batch normalization layers, pooling layers, and head layers based on the provided configuration. The method returns a dictionary containing the following components:
Components list, type and description:
--------------------------------------
node_type_embedding: torch.nn.Embedding, an embedding layer for node types.
chirality_embedding: torch.nn.Embedding, an embedding layer for chirality tags.
gconvs: torch_geometric.nn.conv.MessagePassing, a list of graph convolutional layers (encoders) based on the specified GNN type (GIN, GCN, or GAT).
batch_norms: torch.nn.BatchNorm1d, a list of batch normalization layers corresponding to the encoders.
pool: Union[function,torch_geometric.nn.aggr.Aggregation], a pooling layer based on the specified graph pooling type (sum, mean, max, attention, or set2set).
head: nn.Linear, a linear layer for the head of the model.
These components are then used to construct the GNN and GNN_head modules for the GNNModular model.
"""
encoders, batch_norms = self.build_gnn(self.num_layer)
components = {
'node_type_embedding':
torch.nn.Embedding(num_node_type, self.emb_dim),
'chirality_embedding':
torch.nn.Embedding(num_chirality_tag, self.emb_dim),
'gconvs':
encoders,
'batch_norms':
batch_norms
}
self.gnn = GNN(components['node_type_embedding'],
components['chirality_embedding'], components['gconvs'],
components['batch_norms'], self.dropout,
self.jump_knowledge)
if self.task in ("mask_nodes", "mask_edges"):
linear_pred_nodes = torch.nn.Linear(self.emb_dim, num_node_type -
1) # -1 to remove mask token
linear_pred_edges = torch.nn.Linear(self.emb_dim, num_edge_type -
1) # -1 to remove mask token
components.update({
'linear_pred_nodes': linear_pred_nodes,
'linear_pred_edges': linear_pred_edges
})
# for supervised tasks, add prediction head
elif self.task in ("regression", "classification"):
if self.graph_pooling == "sum":
pool = global_add_pool
elif self.graph_pooling == "mean":
pool = global_mean_pool
elif self.graph_pooling == "max":
pool = global_max_pool
elif self.graph_pooling == "attention":
if self.jump_knowledge == "concat":
pool = AttentionalAggregation(
gate_nn=torch.nn.Linear((self.num_layer + 1) *
self.emb_dim, 1))
else:
pool = AttentionalAggregation(
gate_nn=torch.nn.Linear(self.emb_dim, 1))
elif self.graph_pooling == "set2set":
set2setiter = 3
if self.jump_knowledge == "concat":
pool = Set2Set((self.num_layer + 1) * self.emb_dim,
set2setiter)
else:
pool = Set2Set(self.emb_dim, processing_steps=set2setiter)
if self.graph_pooling == "set2set":
mult = 2
else:
mult = 1
if self.jump_knowledge == "concat":
head = torch.nn.Linear(
mult * (self.num_layer + 1) * self.emb_dim, self.output_dim)
else:
head = torch.nn.Linear(mult * self.emb_dim, self.output_dim)
components.update({'pool': pool, 'head': head})
self.gnn_head = GNNHead(components['pool'], components['head'],
self.task, self.num_tasks, self.num_classes)
elif self.task == 'infomax':
descrim = LocalGlobalDiscriminator(self.emb_dim)
components.update({
'discriminator': descrim,
'pool': global_mean_pool
})
elif self.task == 'context_pred':
if self.graph_pooling == "sum":
pool = global_add_pool
elif self.graph_pooling == "mean":
pool = global_mean_pool
elif self.graph_pooling == "max":
pool = global_max_pool
elif self.graph_pooling == "attention":
raise NotImplementedError(
"Attentional pooling not implemented for context prediction task."
)
elif self.graph_pooling == "set2set":
raise NotImplementedError(
"Set2set pooling not implemented for context prediction task."
)
if self.jump_knowledge == "concat": # concat changes the emb_dim
c_gconvs, c_batch_norms = self.build_gnn(self.num_layer)
else:
c_gconvs, c_batch_norms = self.build_gnn(
self.neighborhood_size - self.context_size)
context_gnn_components = {
'c_node_type_embedding':
torch.nn.Embedding(num_node_type, self.emb_dim),
'c_chirality_embedding':
torch.nn.Embedding(num_chirality_tag, self.emb_dim),
'c_gconvs':
c_gconvs,
'c_batch_norms':
c_batch_norms
}
self.context_gnn = GNN(
context_gnn_components['c_node_type_embedding'],
context_gnn_components['c_chirality_embedding'],
context_gnn_components['c_gconvs'],
context_gnn_components['c_batch_norms'], self.dropout,
self.jump_knowledge)
components.update({'pool': pool, **context_gnn_components})
return components
def build_gnn(self, num_layer):
"""
Build graph neural network encoding layers by specifying the number of GNN layers.
Parameters
----------
num_layer : int
The number of GNN layers to be created.
Returns
-------
tuple of (torch.nn.ModuleList, torch.nn.ModuleList)
A tuple containing two ModuleLists:
1. encoders: A ModuleList of GNN layers (GIN, GCN, GAT, or GraphSAGE, depending on `gnn_type`).
2. batch_norms: A ModuleList of batch normalization layers corresponding to each GNN layer.
"""
encoders = []
batch_norms = []
for layer in range(num_layer):
if self.gnn_type == "gin":
encoders.append(
GINEConv(
torch.nn.Linear(self.emb_dim, self.emb_dim),
edge_dim=2, # edge type, edge direction
aggr="add"))
elif self.gnn_type == "gcn":
encoders.append(GCNConv(self.emb_dim, self.emb_dim))
elif self.gnn_type == "gat":
encoders.append(GATConv(self.emb_dim, self.emb_dim))
elif self.gnn_type == "sage":
encoders.append(SAGEConv(self.emb_dim, self.emb_dim))
else:
raise ValueError("Unsuppported GNN type.")
batch_norms.append(torch.nn.BatchNorm1d(self.emb_dim))
encoders = torch.nn.ModuleList(encoders)
batch_norms = torch.nn.ModuleList(batch_norms)
return encoders, batch_norms
def build_model(self):
"""
Builds the appropriate model based on the specified task.
For unsupervised tasks (edge prediction, node/edge masking, infomax and context prediction), the model is simply the GNN module because these tasks do not require a prediction head.
Supervised tasks such as classification and regression require a prediction head, so the model is a sequential module consisting of the GNN module followed by the GNNHead module.
"""
# unsupervised tasks do not need a pred head
if self.task in ("edge_pred", "mask_nodes", "mask_edges", "infomax",
"context_pred"):
return self.gnn
elif self.task in ("regression", "classification"):
return torch.nn.Sequential(self.gnn, self.gnn_head)
else:
raise ValueError(f"Task {self.task} is not supported.")
def loss_func(self, inputs, labels, weights):
"""
The loss function executed in the training loop, which is based on the specified task.
"""
if self.task == "edge_pred":
node_emb, inputs = self.model(inputs)
loss = self.edge_pred_loss(node_emb, inputs)
elif self.task == "mask_nodes":
loss = self.masked_node_loss_loader(inputs)
elif self.task == "mask_edges":
loss = self.masked_edge_loss_loader(inputs)
elif self.task == "infomax":
loss = self.infomax_loss_loader(inputs)
elif self.task == "regression":
loss = self.regression_loss_loader(inputs, labels)
elif self.task == "classification":
loss = self.classification_loss_loader(inputs, labels)
elif self.task == "context_pred":
loss = self.context_pred_loss_loader(inputs)
return (loss * weights).mean()
def regression_loss_loader(self, inputs, labels):
out = self.model(inputs)
reg_loss = self.criterion(out, labels)
return reg_loss
def classification_loss_loader(self, inputs, labels):
out = self.model(inputs)
out = F.softmax(out, dim=2)
class_loss = self.criterion(out, labels)
return class_loss
def masked_node_loss_loader(self, inputs):
"""
Produces the loss between the predicted node features and the true node features for masked nodes. Set mask_edge to True to also predict the edge types for masked edges.
"""
node_emb, inputs = self.model(inputs)
pred_node = self.components['linear_pred_nodes'](
node_emb[inputs.masked_node_indices])
if self.mask_edge:
masked_edge_index = inputs.edge_index[:,
inputs.connected_edge_indices]
edge_rep = node_emb[masked_edge_index[0]] + node_emb[
masked_edge_index[1]]
pred_edge = self.components['linear_pred_edges'](edge_rep)
else:
pred_edge = None
return self.node_mask_loss(pred_node, pred_edge, inputs)
def masked_edge_loss_loader(self, inputs):
"""
Produces the loss between the predicted edge types and the true edge types for masked edges.
"""
node_emb, inputs = self.model(inputs)
masked_edge_index = inputs.edge_index[:, inputs.masked_edge_idx]
edge_emb = node_emb[masked_edge_index[0]] + node_emb[
masked_edge_index[1]]
pred_edge = self.components['linear_pred_edges'](edge_emb)
return self.edge_mask_loss(pred_edge, inputs)
def infomax_loss_loader(self, inputs):
"""
Loss that maximizes mutual information between local node representations and a pooled global graph representation. The positive and negative scores represent the similarity between local node representations and global graph representations of similar and dissimilar graphs, respectively.
Parameters
----------
inputs: BatchedGraphData
BatchedGraphData object containing the node features, edge indices, and graph indices for the batch of graphs.
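Note
----
Negative pairs are constructed by cyclically shifting the batch of graph summary embeddings by one position (via ``cycle_index``), so each node embedding is scored against the summary of a different graph in the same batch.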
"""
node_emb, inputs = self.model(inputs)
summary_emb = torch.sigmoid(self.components['pool'](node_emb,
inputs.graph_index))
positive_expanded_summary_emb = summary_emb[inputs.graph_index]
shifted_summary_emb = summary_emb[cycle_index(len(summary_emb), 1)]
negative_expanded_summary_emb = shifted_summary_emb[inputs.graph_index]
positive_score = self.components['discriminator'](
node_emb, positive_expanded_summary_emb)
negative_score = self.components['discriminator'](
node_emb, negative_expanded_summary_emb)
return self.infomax_loss(positive_score, negative_score)
def context_pred_loss_loader(self, inputs):
"""
Loads the context prediction loss for the given input by taking the batched subgraph and context graphs and computing the context prediction loss for each subgraph and context graph pair.
Parameters
----------
inputs : tuple
A tuple containing the following elements:
- substruct_batch (BatchedGraphData): Batched subgraph, or neighborhood, graphs.
- s_overlap (List[int]): List of overlapping subgraph node indices between the subgraph and context graphs.
- context_graphs (BatchedGraphData): Batched context graphs.
- c_overlap (List[int]): List of overlapping context node indices between the subgraph and context graphs.
- overlap_size (List[int]): List of the number of overlapping nodes between the subgraph and context graphs.
Returns
-------
context_pred_loss : torch.Tensor
The context prediction loss
"""
substruct_batch = inputs[0]
s_overlap = inputs[1]
context_graphs = inputs[2]
c_overlap = inputs[3]
overlap_size = inputs[4]
substruct_rep = self.gnn(substruct_batch)[0][
s_overlap] # 0 for node representation index
overlapped_node_rep = self.context_gnn(context_graphs)[0][
c_overlap] # 0 for node representation index
context_rep = self.components['pool'](overlapped_node_rep, c_overlap)
# negative contexts are obtained by shifting the indices of context embeddings
neg_context_rep = torch.cat([
context_rep[cycle_index(len(context_rep), i + 1)]
for i in range(self.neg_samples)
],
dim=0)
context_pred_loss = self.context_pred_loss(substruct_rep,
overlapped_node_rep,
context_rep, neg_context_rep,
overlap_size)
return context_pred_loss
def _overlap_batcher(self, substruct_graphs, s_overlap, context_graphs,
c_overlap):
"""
This method provides batching for the context prediction task.
It handles the batching of the overlapping indices between the subgraph and context graphs.
Parameters
----------
substruct_graphs: BatchedGraphData
Batched subgraph, or neighborhood, graphs.
s_overlap: List[List[int]]
List of lists of overlapping subgraph node indices between the subgraph and context graphs.
context_graphs: BatchedGraphData
Batched context graphs.
c_overlap: List[List[int]]
List of lists of overlapping context node indices between the subgraph and context graphs.
Returns
-------
flat_s_overlap: List[int]
List of overlapping subgraph node indices between the subgraph and context graphs.
flat_c_overlap: List[int]
List of overlapping context node indices between the subgraph and context graphs.
overlap_size: List[int]
List of the number of overlapping nodes between the subgraph and context graphs.
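Example
-------
Illustrative sketch (hypothetical values): for two subgraphs with 3 and 4 nodes and per-graph subgraph overlaps ``[[0, 2], [1]]``, the flattened result is ``[0, 2, 4]``, because the second graph's indices are shifted by the cumulative node count (3) of the graphs that precede it in the batch.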
"""
cumsum_substruct = 0
cumsum_context = 0
for i, (sub, context) in enumerate(zip(substruct_graphs,
context_graphs)):
num_nodes_substruct = len(sub.node_features)
num_nodes_context = len(context.node_features)
s_overlap[i] = [s + cumsum_substruct for s in s_overlap[i]]
c_overlap[i] = [c + cumsum_context for c in c_overlap[i]]
cumsum_substruct += num_nodes_substruct
cumsum_context += num_nodes_context
flat_s_overlap = [item for sublist in s_overlap for item in sublist]
flat_c_overlap = [item for sublist in c_overlap for item in sublist]
overlap_size = [len(s) for s in c_overlap]
return flat_s_overlap, flat_c_overlap, overlap_size
def _prepare_batch(self, batch):
"""
Prepares the batch for the model by converting the GraphData numpy arrays to BatchedGraphData torch tensors and moving them to the device, then transforming the input to the appropriate format for the task.
Parameters
----------
batch: tuple
A tuple containing the inputs, labels, and weights for the batch.
Returns
-------
inputs: BatchGraphData
The inputs for the batch, converted to a BatchGraphData object, moved to the device, and transformed to the appropriate format for the task.
labels: torch.Tensor
The labels for the batch, moved to the device.
weights: torch.Tensor
The weights for the batch, moved to the device.
"""
inputs, labels, weights = batch
if self.task in ("regression", "classification", "infomax"):
inputs = BatchGraphData(inputs[0]).numpy_to_torch(self.device)
if self.task == "edge_pred":
inputs = BatchGraphData(inputs[0]).numpy_to_torch(self.device)
inputs = negative_edge_sampler(inputs)
elif self.task == "mask_nodes":
inputs = BatchGraphData(inputs[0]).numpy_to_torch(self.device)
inputs = mask_nodes(inputs, self.mask_rate)
elif self.task == "mask_edges":
inputs = BatchGraphData(inputs[0]).numpy_to_torch(self.device)
inputs = mask_edges(inputs, self.mask_rate)
elif self.task == "context_pred":
sampled_g = [
context_pred_sampler(graph, self.context_size,
self.neighborhood_size)
for graph in inputs[0]
]
try:
subgraphs_list = [x[0] for x in sampled_g]
s_overlap_list = [x[1] for x in sampled_g]
context_list = [x[2] for x in sampled_g]
c_overlap_list = [x[3] for x in sampled_g]
except ValueError:
raise ValueError(
"Not enough nodes in graph to sample context, use a smaller context or a larger neighborhood size."
)
s_overlap, c_overlap, overlap_size = self._overlap_batcher(
subgraphs_list, s_overlap_list, context_list, c_overlap_list)
s_overlap = torch.tensor(s_overlap).to(self.device)
c_overlap = torch.tensor(c_overlap).to(self.device)
b_subgraphs = BatchGraphData(subgraphs_list).numpy_to_torch(
self.device)
b_context = BatchGraphData(context_list).numpy_to_torch(self.device)
inputs = (b_subgraphs, s_overlap, b_context, c_overlap,
overlap_size)
_, labels, weights = super()._prepare_batch(([], labels, weights))
if (len(labels) != 0) and (len(weights) != 0):
labels = labels[0]
weights = weights[0]
return inputs, labels, weights
def default_generator(
self,
dataset: Dataset,
epochs: int = 1,
mode: str = 'fit',
deterministic: bool = True,
pad_batches: bool = True) -> Iterable[Tuple[List, List, List]]:
"""
This default generator is modified from the default generator in dc.models.tensorgraph.tensor_graph.py to support multitask classification. If the task is classification, the labels y_b are converted to a one-hot encoding and reshaped according to the number of tasks and classes.
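For example (hypothetical shapes), with num_tasks=2 and num_classes=3, a label batch y_b of shape (batch_size, 2) holding integer class ids is flattened, one-hot encoded to shape (batch_size * 2, 3) and reshaped to (batch_size, 2, 3), so each task gets its own one-hot row per sample.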
"""
for epoch in range(epochs):
for (X_b, y_b, w_b,
ids_b) in dataset.iterbatches(batch_size=self.batch_size,
deterministic=deterministic,
pad_batches=pad_batches):
if self.task == 'classification' and y_b is not None:
y_b = to_one_hot(y_b.flatten(), self.num_classes).reshape(
-1, self.num_tasks, self.num_classes)
yield ([X_b], [y_b], [w_b])
def negative_edge_sampler(input_graph: BatchGraphData):
"""
NegativeEdge is a function that adds negative edges to the input graph data. It randomly samples negative edges (edges that do not exist in the original graph) and adds them to the input graph data.
The number of negative edges added is equal to half the number of edges in the original graph. This is useful for tasks like edge prediction, where the model needs to learn to differentiate between existing and non-existing edges.
Parameters
----------
input_graph: dc.feat.graph_data.BatchGraphData
The input graph data.
Returns
-------
BatchGraphData
A new BatchGraphData object with the additional attribute `negative_edge_index`.
Example
-------
>>> import numpy as np
>>> import torch
>>> from deepchem.feat.graph_data import BatchGraphData, GraphData
>>> from deepchem.models.torch_models.gnn import negative_edge_sampler
>>> num_nodes_list, num_edge_list = [3, 4, 5], [2, 4, 5]
>>> num_node_features, num_edge_features = 32, 32
>>> edge_index_list = [
... np.array([[0, 1], [1, 2]]),
... np.array([[0, 1, 2, 3], [1, 2, 0, 2]]),
... np.array([[0, 1, 2, 3, 4], [1, 2, 3, 4, 0]]),
... ]
>>> graph_list = [
... GraphData(node_features=np.random.random_sample(
... (num_nodes_list[i], num_node_features)),
... edge_index=edge_index_list[i],
... edge_features=np.random.random_sample(
... (num_edge_list[i], num_edge_features)),
... node_pos_features=None) for i in range(len(num_edge_list))
... ]
>>> batched_graph = BatchGraphData(graph_list)
>>> batched_graph = batched_graph.numpy_to_torch()
>>> neg_sampled = negative_edge_sampler(batched_graph)
"""
data = copy.deepcopy(input_graph)
num_nodes = data.num_nodes
num_edges = data.num_edges
edge_set = set([
str(data.edge_index[0, i].cpu().item()) + "," +
str(data.edge_index[1, i].cpu().item())
for i in range(data.edge_index.shape[1])
])
redundant_sample = torch.randint(0, num_nodes, (2, 5 * num_edges))
sampled_ind = []
sampled_edge_set = set([])
for i in range(5 * num_edges):
node1 = redundant_sample[0, i].cpu().item()
node2 = redundant_sample[1, i].cpu().item()
edge_str = str(node1) + "," + str(node2)
if edge_str not in edge_set and edge_str not in sampled_edge_set and not node1 == node2:
sampled_edge_set.add(edge_str)
sampled_ind.append(i)
if len(sampled_ind) == num_edges / 2:
break
data.negative_edge_index = redundant_sample[:, sampled_ind] # type: ignore
return data
def mask_nodes(input_graph: BatchGraphData,
mask_rate,
masked_node_indices=None,
mask_edge=True):
"""
Mask nodes and their connected edges in a BatchGraphData object.
This function assumes that the first node feature is the atomic number, for example with the SNAPFeaturizer. It will set masked nodes' features to 0.
Parameters
----------
input_graph: dc.feat.BatchGraphData
Assume that the edge ordering is the default PyTorch geometric ordering, where the two directions of a single edge occur in pairs.
Eg. data.edge_index = tensor([[0, 1, 1, 2, 2, 3],
[1, 0, 2, 1, 3, 2]])
masked_node_indices: list, optional
If None, then randomly samples num_nodes * mask rate number of node indices. Otherwise, a list of node indices that sets the nodes to be masked (for debugging only).
mask_edge: bool, optional
Will mask the edges connected to the masked nodes.
Returns
-------
data: dc.feat.BatchGraphData
Creates new attributes in the original data object:
- data.mask_node_idx
- data.mask_node_label
- data.mask_edge_idx
- data.mask_edge_label
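Example
-------
A minimal sketch (hypothetical batch, not a doctest)::

    batch = BatchGraphData(graph_list).numpy_to_torch()
    masked = mask_nodes(batch, mask_rate=0.25, mask_edge=True)
    masked.mask_node_label        # first feature of each masked node
    masked.masked_node_indices    # indices of the masked nodes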
"""
data = copy.deepcopy(input_graph)
if masked_node_indices is None:
# sample x distinct nodes to be masked, based on mask rate. But
# will sample at least 1 node
num_nodes = data.node_features.size()[0] # type: ignore
sample_size = int(num_nodes * mask_rate + 1)
masked_node_indices = random.sample(range(num_nodes), sample_size)
# create mask node label by copying node feature of mask node
mask_node_labels_list = []
for node_idx in masked_node_indices:
mask_node_labels_list.append(data.node_features[node_idx].view(1, -1))
data.mask_node_label = torch.cat( # type: ignore
mask_node_labels_list, dim=0)[:, 0].long()
data.masked_node_indices = torch.tensor(masked_node_indices) # type: ignore
# modify the original node feature of the masked node
num_node_feats = data.node_features.size()[1] # type: ignore
for node_idx in masked_node_indices:
data.node_features[node_idx] = torch.zeros((1, num_node_feats))
# zeros are meant to represent the masked features. This is distinct from the
# original implementation, where the masked features are represented by the
# last feature token 119.
# link to source: https://github.com/snap-stanford/pretrain-gnns/blob/08f126ac13623e551a396dd5e511d766f9d4f8ff/chem/util.py#L241
if mask_edge:
# create mask edge labels by copying edge features of edges that are connected to
# mask nodes
connected_edge_indices = []
for edge_idx, (u, v) in enumerate(
data.edge_index.cpu().numpy().T): # type: ignore
for node_idx in masked_node_indices:
if node_idx in set(
(u, v)) and edge_idx not in connected_edge_indices:
connected_edge_indices.append(edge_idx)
if len(connected_edge_indices) > 0:
# create mask edge labels by copying edge features of the edges connected to
# the mask nodes
mask_edge_labels_list = []
for edge_idx in connected_edge_indices[::2]: # because the
# edge ordering is such that two directions of a single
# edge occur in pairs, so to get the unique undirected
# edge indices, we take every 2nd edge index from list
mask_edge_labels_list.append(
data.edge_features[edge_idx].view( # type: ignore
1, -1))
data.mask_edge_label = torch.cat( # type: ignore
mask_edge_labels_list, dim=0)[:, 0].long() # type: ignore
# modify the original edge features of the edges connected to the mask nodes
num_edge_feat = data.edge_features.size()[1] # type: ignore
for edge_idx in connected_edge_indices:
data.edge_features[edge_idx] = torch.zeros( # type: ignore
(1, num_edge_feat)) # type: ignore
# zeros are meant to represent the masked features. This is distinct from the
# original implementation, where the masked features are represented by the
# last feature token 4.
# link to source: https://github.com/snap-stanford/pretrain-gnns/blob/08f126ac13623e551a396dd5e511d766f9d4f8ff/chem/util.py#L268
data.connected_edge_indices = torch.tensor( # type: ignore
connected_edge_indices[::2])
else:
data.mask_edge_label = torch.empty( # type: ignore
(0, 2)).to(torch.int64)
data.connected_edge_indices = torch.tensor( # type: ignore
connected_edge_indices).to(torch.int64)
return data
def mask_edges(input_graph: BatchGraphData,
mask_rate: float,
masked_edge_indices=None):
"""
Mask edges in a BatchGraphData object.
This is separate from the mask_nodes function because we want to be able to mask edges without masking any nodes.
Parameters
----------
input_graph: dc.feat.BatchGraphData
Assume that the edge ordering is the default PyTorch geometric ordering, where the two directions of a single edge occur in pairs.
Eg. data.edge_index = tensor([[0, 1, 1, 2, 2, 3],
[1, 0, 2, 1, 3, 2]])
masked_edge_indices : list, optional
If None, then randomly sample num_edges * mask_rate + 1 edge indices. Otherwise, the indices should correspond to the 1st direction of each edge pair, i.e. all indices should be even numbers.
Returns
-------
data: dc.feat.BatchGraphData
Creates new attributes in the original object:
- data.mask_edge_idx: indices of masked edges
- data.mask_edge_labels: corresponding ground truth edge feature for each masked edge
- data.edge_attr: modified in place: the edge features (both directions) that correspond to the masked edges have the masked edge feature
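Example
-------
A minimal sketch (hypothetical batch, not a doctest)::

    batch = BatchGraphData(graph_list).numpy_to_torch()
    masked = mask_edges(batch, mask_rate=0.2)
    masked.masked_edge_idx    # even indices of the masked undirected edges
    masked.mask_edge_label    # original features of those edges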
"""
data = copy.deepcopy(input_graph)
if masked_edge_indices is None:
# sample x distinct edges to be masked, based on mask rate. But
# will sample at least 1 edge
num_edges = int(data.edge_index.size()[1] / # type: ignore
2) # num unique edges
sample_size = int(num_edges * mask_rate + 1)
# during sampling, we only pick the 1st direction of a particular
# edge pair
masked_edge_indices = [
2 * i for i in random.sample(range(num_edges), sample_size)
]
data.masked_edge_idx = torch.tensor( # type: ignore
np.array(masked_edge_indices))
# create ground truth edge features for the edges that correspond to
# the masked indices
mask_edge_labels_list = []
for idx in masked_edge_indices:
mask_edge_labels_list.append( # yapf: disable
data.edge_features[idx].view( # type: ignore
1, -1))
data.mask_edge_label = torch.cat( # type: ignore
mask_edge_labels_list, dim=0)
# created new masked edge_attr, where both directions of the masked
# edges have masked edge type. For message passing in gcn
# append the 2nd direction of the masked edges
all_masked_edge_indices = masked_edge_indices + [
i + 1 for i in masked_edge_indices
]
num_edge_feat = data.edge_features.size()[1] # type: ignore
for idx in all_masked_edge_indices:
data.edge_features[idx] = torch.zeros( # type: ignore
(1, num_edge_feat))
# zeros are meant to represent the masked features. This is distinct from the
# original implementation, where the masked features are represented by 0s and
# an additional mask feature
# link to source: https://github.com/snap-stanford/pretrain-gnns/blob/08f126ac13623e551a396dd5e511d766f9d4f8ff/bio/util.py#L101
return data
def cycle_index(num, shift):
"""
Creates a 1-dimensional tensor of integers with a specified length (`num`) and a cyclic shift (`shift`). The tensor starts with integers from `shift` to `num - 1`, and then wraps around to include integers from `0` to `shift - 1` at the end.
Parameters
----------
num: int
Length of the tensor.
shift: int
Amount to shift the tensor by.
Example
-------
>>> num = 10
>>> shift = 3
>>> arr = cycle_index(num, shift)
>>> print(arr)
tensor([3, 4, 5, 6, 7, 8, 9, 0, 1, 2])
"""
arr = torch.arange(num) + shift
arr[-shift:] = torch.arange(shift)
return arr
def context_pred_sampler(input_graph: GraphData,
l1: int,
l2: int,
root_idx=None):
"""
Generate subgraph and context graph for context prediction.
This function takes an input graph and generates a subgraph and a context graph for context prediction. The subgraph is the entire input graph, while the context graph is a ring around the root node outside of l1 and inside of l2.
Parameters
----------
input_graph : GraphData
The input graph for which the subgraph and context graph are to be generated.
l1 : int
The inner diameter of the context graph.
l2 : int
The outer diameter of the context graph.
root_idx : int, optional
The index of the root node. If not provided, a random node will be selected as the root.
Returns
-------
subgraph : GraphData
The subgraph generated from the input graph.
s_overlap : list
The indices of overlapping nodes between substruct and context, with respect to the substruct ordering.
context_G : GraphData
The context graph generated from the input graph.
c_overlap : list
The indices of overlapping nodes between substruct and context, with respect to the context ordering.
Examples
--------
>>> import numpy as np
>>> from deepchem.feat.graph_data import GraphData
>>> from deepchem.models.torch_models.gnn import context_pred_sampler
>>> x = np.array([[1, 0], [0, 1], [1, 1], [0, 0], [1, 0], [0, 1], [1, 1], [0, 0]])
>>> edge_index = np.array([[0, 1, 2, 3, 4, 5, 6, 7], [1, 0, 3, 2, 5, 4, 7, 6]])
>>> edge_feats = np.array([[1, 0], [0, 1], [1, 1], [0, 0], [1, 0], [0, 1], [1, 1], [0, 0]])
>>> graph_index = np.array([0, 0, 1, 1, 2, 2, 3, 3])
>>> data = GraphData(node_features=x,
... edge_index=edge_index,
... edge_features=edge_feats,
... graph_index=graph_index)
>>> l1 = 1
>>> l2 = 2
>>> subgraph, s_overlap, context_G, c_overlap = context_pred_sampler(data, l1, l2)
"""
data = copy.deepcopy(input_graph)
if root_idx is None:
    root_idx = random.sample(range(data.num_nodes), 1)[0]
# Take the entire graph, but can be modified to take a subgraph of k-hops from the root node
x_substruct = data.node_features
edge_attr_substruct = data.edge_features
edge_index_substruct = data.edge_index
subgraph = GraphData(node_features=x_substruct,
edge_features=edge_attr_substruct,
edge_index=edge_index_substruct)
# Get node idx between root and the inner diameter l1
l1_node_idxes = shortest_path_length(data, root_idx, l1).keys()
# Get node idx between root and the outer diameter l2
l2_node_idxes = shortest_path_length(data, root_idx, l2).keys()
# takes a ring around the root node outside of l1 and inside of l2
context_node_idxes = set(l1_node_idxes).symmetric_difference(
set(l2_node_idxes))
if len(context_node_idxes) > 0:
context_G, context_node_map = data.subgraph(context_node_idxes)
# Get indices of overlapping nodes between substruct and context, WRT context ordering
s_overlap = list(context_node_idxes)
c_overlap = [context_node_map[old_idx] for old_idx in s_overlap]
return (subgraph, s_overlap, context_G, c_overlap)
else:
return (subgraph, [], None, [])
<file_sep>import numpy as np
from deepchem.feat.base_classes import Featurizer
from deepchem.utils import get_partial_charge
from typing import Sequence
class AtomicConformation:
"""This class represents a collection of atoms arranged in 3D space.
An instance of this class may represent any collection of atoms: a molecule,
a fragment of a molecule, multiple interacting molecules, a material, etc.
For each atom it stores a position and a list of scalar properties. Arbitrary
properties are supported, but convenience methods are provided for accessing
certain standard ones: atomic number, formal charge, and partial charge.
Instances of this class are most often created by AtomicConformationFeaturizer.
Attributes
----------
positions: ndarray
the positions of all atoms in Angstroms, stored in an array of shape (N, 3)
where N is the number of atoms
properties: ndarray
the property values for all atoms, stored in an array of shape (N, M) where
N is the number of atoms and M is the number of properties
property_names: ndarray
an array of length M with the names of the properties
"""
def __init__(self, positions: np.ndarray, properties: np.ndarray,
property_names: Sequence[str]):
"""Create an AtomicConformation for a set of atoms.
Parameters
----------
positions: ndarray
the positions of all atoms in Angstroms, stored in an array of shape (N, 3)
where N is the number of atoms
properties: ndarray
the property values for all atoms, stored in an array of shape (N, M) where
N is the number of atoms and M is the number of properties
property_names: Sequence[str]
the names of the properties
"""
self.positions = positions
self.properties = properties
self.property_names = np.array(property_names)
@property
def num_atoms(self) -> int:
"""Get the number of atoms in this object."""
return self.positions.shape[0]
def get_property(self, name: str) -> np.ndarray:
"""Get a column of the properties array corresponding to a particular property.
If there is no property with the specified name, this raises a ValueError.
Parameters
----------
name: str
the name of the property to get
Returns
-------
a numpy array containing the requested column of the properties array. This
is a 1D array of length num_atoms.
"""
indices = np.where(self.property_names == name)[0]
if len(indices) == 0:
raise ValueError("No property called '%s'" % name)
return self.properties[:, indices[0]]
@property
def atomic_number(self) -> np.ndarray:
"""Get the column of the properties array containing atomic numbers.
If there is no property with the name 'atomic number', this raises a ValueError.
Returns
-------
a numpy array containing the requested column of the properties array. This
is a 1D array of length num_atoms.
"""
return self.get_property('atomic number')
@property
def formal_charge(self) -> np.ndarray:
"""Get the column of the properties array containing formal charges.
If there is no property with the name 'formal charge', this raises a ValueError.
Returns
-------
a numpy array containing the requested column of the properties array. This
is a 1D array of length num_atoms.
"""
return self.get_property('formal charge')
@property
def partial_charge(self) -> np.ndarray:
"""Get the column of the properties array containing partial charges.
If there is no property with the name 'partial charge', this raises a ValueError.
Returns
-------
a numpy array containing the requested column of the properties array. This
is a 1D array of length num_atoms.
"""
return self.get_property('partial charge')
class AtomicConformationFeaturizer(Featurizer):
"""This featurizer represents each sample as an AtomicConformation object,
representing a 3D arrangement of atoms.
It expects each datapoint to be a string, which may be either a filename or a
SMILES string. It is processed as follows.
If the string ends in .pdb, .sdf, or .mol2, it is assumed to be a file in the
corresponding format. The positions and elements of all atoms contained in
the file are loaded. RDKit is used to compute formal and partial charges.
Otherwise, it is assumed to be a SMILES string. RDKit is used to generate a
3D conformation and to compute formal and partial charges.
Examples
--------
>>> import deepchem as dc
>>> smiles = ['CCC']
>>> featurizer = dc.feat.AtomicConformationFeaturizer()
>>> features = featurizer.featurize(smiles)
>>> features[0].num_atoms
11
>>> sum(features[0].atomic_number == 6)
3
>>> sum(features[0].atomic_number == 1)
8
>>> type(features[0].formal_charge)
<class 'numpy.ndarray'>
>>> features[0].formal_charge.shape
(11,)
>>> type(features[0].partial_charge)
<class 'numpy.ndarray'>
>>> features[0].partial_charge.shape
(11,)
"""
def _featurize(self, datapoint: str, **kwargs) -> AtomicConformation:
"""Calculate features for a single datapoint.
Parameters
----------
datapoint: str
This is expected to be either a filename or a SMILES string.
"""
from rdkit import Chem
from rdkit.Chem import AllChem
if datapoint.endswith('.pdb'):
mols = [Chem.MolFromPDBFile(datapoint, removeHs=False)]
elif datapoint.endswith('.sdf'):
supplier = Chem.SDMolSupplier(datapoint, removeHs=False)
mols = [mol for mol in supplier]
elif datapoint.endswith('.mol2'):
mols = [Chem.MolFromMol2File(datapoint, removeHs=False)]
else:
mol = Chem.MolFromSmiles(datapoint)
# SMILES is unique, so set a canonical order of atoms
new_order = Chem.rdmolfiles.CanonicalRankAtoms(mol)
mol = Chem.rdmolops.RenumberAtoms(mol, new_order)
# Add hydrogens and generate a conformation.
mol = Chem.AddHs(mol)
AllChem.EmbedMolecule(mol, AllChem.ETKDGv3())
mols = [mol]
# Record properties of the molecules.
positions = []
properties = []
for mol in mols:
positions.append(mol.GetConformer(0).GetPositions())
AllChem.ComputeGasteigerCharges(mol)
for atom in mol.GetAtoms():
atomic_num = atom.GetAtomicNum()
formal_charge = atom.GetFormalCharge()
partial_charge = get_partial_charge(atom)
properties.append([atomic_num, formal_charge, partial_charge])
# Create the output object.
names = ['atomic number', 'formal charge', 'partial charge']
return AtomicConformation(
np.concatenate(positions).astype(np.float32),
np.array(properties, dtype=np.float32), names)
<file_sep>"""Tensorflow Ops for Atomicnet."""
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
import numpy as np
import tensorflow as tf
__author__ = "<NAME>"
__copyright__ = "Copyright 2016, Stanford University"
__license__ = "MIT"
### AtomicNet fully-connected layer ops ###
def InitializeWeightsBiases(prev_layer_size,
size,
weights=None,
biases=None,
name=None):
"""Initializes weights and biases to be used in a fully-connected layer.
Parameters
----------
prev_layer_size: int
Number of features in previous layer.
size: int
Number of nodes in this layer.
weights: tf.Tensor, optional (Default None)
Weight tensor.
biases: tf.Tensor, optional (Default None)
Bias tensor.
name: str
Name for this op, optional (Defaults to 'fully_connected' if None)
Returns
-------
weights: tf.Variable
Initialized weights.
biases: tf.Variable
Initialized biases.
"""
if weights is None:
weights = tf.truncated_normal([prev_layer_size, size], stddev=0.01)
if biases is None:
biases = tf.zeros([size])
with tf.name_scope(name, 'fully_connected', [weights, biases]):
w = tf.Variable(weights, name='w')
b = tf.Variable(biases, name='b')
return w, b
def AtomicNNLayer(tensor, size, weights, biases, name=None):
"""Fully connected layer with pre-initialized weights and biases.
Parameters
----------
tensor: tf.Tensor
Input tensor.
size: int
Number of nodes in this layer.
weights: tf.Variable
Initialized weights.
biases: tf.Variable
Initialized biases.
name: str
Name for this op, optional (Defaults to 'fully_connected' if None)
Returns
-------
retval: tf.Tensor
A new tensor representing the output of the fully connected layer.
Raises
------
ValueError: If input tensor is not 2D.
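Example
-------
A minimal sketch (hypothetical sizes, TF1-style graph mode)::

    w, b = InitializeWeightsBiases(prev_layer_size=128, size=64)
    out = AtomicNNLayer(tensor, size=64, weights=w, biases=b)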
"""
if len(tensor.get_shape()) != 2:
raise ValueError(
'Dense layer input must be 2D, not %dD' % len(tensor.get_shape()))
with tf.name_scope(name, 'fully_connected', [tensor, weights, biases]):
return tf.nn.xw_plus_b(tensor, weights, biases)
### Atomicnet coordinate transform ops ###
def gather_neighbors(X, nbr_indices, B, N, M, d):
"""Gathers the neighbor subsets of the atoms in X.
B = batch_size, N = max_num_atoms, M = max_num_neighbors, d = num_features
Parameters
----------
X: tf.Tensor of shape (B, N, d)
Coordinates/features tensor.
nbr_indices: tf.Tensor of shape (B, M)
Neighbor list for single atom.
Returns
-------
neighbors: tf.Tensor of shape (B, M, d)
Neighbor coordinates/features tensor for single atom.
"""
example_tensors = tf.unstack(X, axis=0)
example_nbrs = tf.unstack(nbr_indices, axis=0)
all_nbr_coords = []
for example, (example_tensor, example_nbr) in enumerate(
zip(example_tensors, example_nbrs)):
nbr_coords = tf.gather(example_tensor, example_nbr)
all_nbr_coords.append(nbr_coords)
neighbors = tf.stack(all_nbr_coords)
return neighbors
def DistanceTensor(X, Nbrs, boxsize, B, N, M, d):
"""Calculates distance tensor for batch of molecules.
B = batch_size, N = max_num_atoms, M = max_num_neighbors, d = num_features
Parameters
----------
X: tf.Tensor of shape (B, N, d)
Coordinates/features tensor.
Nbrs: tf.Tensor of shape (B, N, M)
Neighbor list tensor.
boxsize: float or None
Simulation box length [Angstrom].
Returns
-------
D: tf.Tensor of shape (B, N, M, d)
Coordinates/features distance tensor.
"""
atom_tensors = tf.unstack(X, axis=1)
nbr_tensors = tf.unstack(Nbrs, axis=1)
D = []
if boxsize is not None:
for atom, atom_tensor in enumerate(atom_tensors):
nbrs = gather_neighbors(X, nbr_tensors[atom], B, N, M, d)
nbrs_tensors = tf.unstack(nbrs, axis=1)
for nbr, nbr_tensor in enumerate(nbrs_tensors):
_D = tf.subtract(nbr_tensor, atom_tensor)
_D = tf.subtract(_D, boxsize * tf.round(tf.math.divide(_D, boxsize)))
D.append(_D)
else:
for atom, atom_tensor in enumerate(atom_tensors):
nbrs = gather_neighbors(X, nbr_tensors[atom], B, N, M, d)
nbrs_tensors = tf.unstack(nbrs, axis=1)
for nbr, nbr_tensor in enumerate(nbrs_tensors):
_D = tf.subtract(nbr_tensor, atom_tensor)
D.append(_D)
D = tf.stack(D)
D = tf.transpose(D, perm=[1, 0, 2])
D = tf.reshape(D, [B, N, M, d])
return D
def DistanceMatrix(D):
"""Calcuates the distance matrix from the distance tensor
B = batch_size, N = max_num_atoms, M = max_num_neighbors, d = num_features
Parameters
----------
D: tf.Tensor of shape (B, N, M, d)
Distance tensor.
Returns
-------
R: tf.Tensor of shape (B, N, M)
Distance matrix.
"""
R = tf.reduce_sum(tf.multiply(D, D), 3)
R = tf.sqrt(R)
return R
### Atomicnet symmetry function kernel ops ###
def GaussianDistanceMatrix(R, rs, e):
"""Calculates gaussian distance matrix.
B = batch_size, N = max_num_atoms, M = max_num_neighbors
Parameters
----------
R [B, N, M]: tf.Tensor
Distance matrix.
rs: tf.Variable
Gaussian distance matrix mean.
e: tf.Variable
Gaussian distance matrix width (e = .5/std**2).
Returns
-------
retval [B, N, M]: tf.Tensor
Gaussian distance matrix.
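Note
----
Elementwise this computes ``exp(-e * (R - rs)**2)``, an unnormalized Gaussian kernel centered at ``rs`` whose width is controlled by ``e``.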
"""
return tf.exp(-e * (R - rs)**2)
def RadialCutoff(R, rc):
"""Calculates radial cutoff matrix.
B = batch_size, N = max_num_atoms, M = max_num_neighbors
Parameters
----------
R [B, N, M]: tf.Tensor
Distance matrix.
rc: tf.Variable
Interaction cutoff [Angstrom].
Returns
-------
FC [B, N, M]: tf.Tensor
Radial cutoff matrix.
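Note
----
Elementwise, ``FC = 0.5 * (cos(pi * R / rc) + 1)`` for ``R <= rc`` and ``0`` otherwise, so the cutoff decays smoothly from 1 at ``R = 0`` to 0 at ``R = rc``.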
"""
T = 0.5 * (tf.cos(np.pi * R / (rc)) + 1)
E = tf.zeros_like(T)
cond = tf.less_equal(R, rc)
FC = tf.where(cond, T, E)
return FC
### Atomicnet symmetry function ops ###
def RadialSymmetryFunction(R, rc, rs, e):
"""Calculates radial symmetry function.
B = batch_size, N = max_num_atoms, M = max_num_neighbors, d = num_filters
Parameters
----------
R: tf.Tensor of shape (B, N, M)
Distance matrix.
rc: float
Interaction cutoff [Angstrom].
rs: float
Gaussian distance matrix mean.
e: float
Gaussian distance matrix width.
Returns
-------
retval: tf.Tensor of shape (B, N, M)
Radial symmetry function (before summation)
"""
with tf.name_scope(None, "NbrRadialSymmetryFunction", [rc, rs, e]):
rc = tf.Variable(rc)
rs = tf.Variable(rs)
e = tf.Variable(e)
K = GaussianDistanceMatrix(R, rs, e)
FC = RadialCutoff(R, rc)
return tf.multiply(K, FC)
### Atomicnet symmetry function layer ops ###
def AtomicConvolutionLayer(X, Nbrs, Nbrs_Z, atom_types, radial_params, boxsize,
B, N, M, d):
"""Atomic convoluation layer
N = max_num_atoms, M = max_num_neighbors, B = batch_size, d = num_features
l = num_radial_filters * num_atom_types
Parameters
----------
X: tf.Tensor of shape (B, N, d)
Coordinates/features.
Nbrs: tf.Tensor of shape (B, N, M)
Neighbor list.
Nbrs_Z: tf.Tensor of shape (B, N, M)
Atomic numbers of neighbor atoms.
atom_types: list or None
Of length a, where a is number of atom types for filtering.
radial_params: list
Of length l, where l is number of radial filters learned.
boxsize: float or None
Simulation box length [Angstrom].
N: int
Maximum number of atoms
M: int
Maximum number of neighbors
d: int
Number of coordinates/features/filters
Returns
-------
layer: tf.Tensor of shape (l, B, N)
A new tensor representing the output of the atomic conv layer
"""
D = DistanceTensor(X, Nbrs, boxsize, B, N, M, d)
R = DistanceMatrix(D)
sym = []
rsf_zeros = tf.zeros((B, N, M))
for param in radial_params:
# We apply the radial pooling filter before atom type conv
# to reduce computation
rsf = RadialSymmetryFunction(R, *param)
if not atom_types:
cond = tf.not_equal(Nbrs_Z, 0.0)
sym.append(tf.reduce_sum(tf.where(cond, rsf, rsf_zeros), 2))
else:
for j in range(len(atom_types)):
cond = tf.equal(Nbrs_Z, atom_types[j])
sym.append(tf.reduce_sum(tf.where(cond, rsf, rsf_zeros), 2))
layer = tf.stack(sym)
layer = tf.transpose(layer, [1, 2, 0])
m, v = tf.nn.moments(layer, axes=[0])
layer = tf.nn.batch_normalization(layer, m, v, None, None, 1e-3)
return layer
### Misc convenience ops ###
def create_symmetry_parameters(radial):
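"""Build the list of ``[rc, rs, e]`` radial symmetry parameter triples as the cartesian product of the three parameter lists in ``radial``; each triple is consumed by ``RadialSymmetryFunction``."""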
rp = []
for _, r0 in enumerate(radial[0]):
for _, r1 in enumerate(radial[1]):
for _, r2 in enumerate(radial[2]):
rp.append([r0, r1, r2])
return rp
<file_sep>"""
Script to train DeepMHC model on the BD2013 dataset.
"""
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
from bd13_datasets import aa_charset, load_bd2013_human
from deepmhc import DeepMHC
import numpy as np
import deepchem as dc
from scipy.stats import spearmanr
# Args
pad_len = 13
num_amino_acids = len(aa_charset)
mhc_allele = "HLA-A*02:01"
dropout_p = 0.5
batch_size = 64
nb_epochs = 100
tasks, datasets, transformers = load_bd2013_human(
mhc_allele=mhc_allele, seq_len=9, pad_len=pad_len)
metric = dc.metrics.Metric(metric=dc.metrics.pearsonr, mode="regression")
train_dataset, _, test_dataset = datasets
model = DeepMHC(num_amino_acids=num_amino_acids, pad_length=pad_len)
model.fit(train_dataset, nb_epoch=nb_epochs)
print("Evaluating models...")
train_scores = model.evaluate(train_dataset, [metric], transformers)
test_scores = model.evaluate(test_dataset, [metric], transformers)
print("Train scores")
print(train_scores['pearsonr'])
print("Test scores")
print(test_scores['pearsonr'])
<file_sep>from deepchem.feat.base_classes import MolecularFeaturizer
from deepchem.utils.molecule_feature_utils import one_hot_encode
from deepchem.utils.typing import RDKitMol, RDKitAtom
import numpy as np
from typing import Tuple, Any
from dataclasses import dataclass
@dataclass
class MATEncoding:
"""
Dataclass specific to the Molecular Attention Transformer [1]_.
This dataclass wraps around three different matrices for a given molecule: Node Features, Adjacency Matrix, and the Distance Matrix.
Parameters
----------
node_features: np.ndarray
Node Features matrix for the molecule. For MAT, derived from the construct_node_features_matrix function.
adjacency_matrix: np.ndarray
Adjacency matrix for the molecule. Derived from rdkit.Chem.rdmolops.GetAdjacencyMatrix
distance_matrix: np.ndarray
Distance matrix for the molecule. Derived from rdkit.Chem.rdmolops.GetDistanceMatrix
"""
node_features: np.ndarray
adjacency_matrix: np.ndarray
distance_matrix: np.ndarray
class MATFeaturizer(MolecularFeaturizer):
"""
This class is a featurizer for the Molecule Attention Transformer [1]_.
The returned value for each molecule is a MATEncoding dataclass consisting of molecular graph descriptions:
- Node Features
- Adjacency Matrix
- Distance Matrix
References
----------
.. [1] <NAME> et al. "Molecule Attention Transformer" <https://arxiv.org/abs/2002.08264>
Examples
--------
>>> import deepchem as dc
>>> feat = dc.feat.MATFeaturizer()
>>> out = feat.featurize("CCC")
Note
----
This class requires RDKit to be installed.
"""
def __init__(self):
pass
def construct_mol(self, mol: RDKitMol) -> RDKitMol:
"""
Processes an input RDKitMol further to be able to extract id-specific Conformers from it using mol.GetConformer().
Parameters
----------
mol: RDKitMol
RDKit Mol object.
Returns
----------
mol: RDKitMol
A processed RDKitMol object which has been embedded, UFF-optimized, and had its hydrogen atoms removed. If embedding or optimization fails with a ValueError, 2D coordinates are computed instead.
"""
try:
from rdkit.Chem import AllChem
from rdkit import Chem
except ModuleNotFoundError:
    raise ImportError("This class requires RDKit to be installed.")
try:
mol = Chem.AddHs(mol)
AllChem.EmbedMolecule(mol, maxAttempts=5000)
AllChem.UFFOptimizeMolecule(mol)
mol = Chem.RemoveHs(mol)
except ValueError:
AllChem.Compute2DCoords(mol)
return mol
def atom_features(self, atom: RDKitAtom) -> np.ndarray:
"""Deepchem already contains an atom_features function, however we are defining a new one here due to the need to handle features specific to MAT.
Since we need new features like Atom GetNeighbors and IsInRing, and the number of features required for MAT is a fraction of what the Deepchem atom_features function computes, we can speed up computation by defining a custom function.
Parameters
----------
atom: RDKitAtom
RDKit Atom object.
Returns
----------
ndarray
Numpy array containing atom features.
"""
attrib = []
attrib += one_hot_encode(atom.GetAtomicNum(),
[5, 6, 7, 8, 9, 15, 16, 17, 35, 53, 999])
attrib += one_hot_encode(len(atom.GetNeighbors()), [0, 1, 2, 3, 4, 5])
attrib += one_hot_encode(atom.GetTotalNumHs(), [0, 1, 2, 3, 4])
attrib += one_hot_encode(atom.GetFormalCharge(),
[-5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5])
attrib.append(atom.IsInRing())
attrib.append(atom.GetIsAromatic())
return np.array(attrib, dtype=np.float32)
def construct_node_features_matrix(self, mol: RDKitMol) -> np.ndarray:
"""This function constructs a matrix of atom features for all atoms in a given molecule using the atom_features function.
Parameters
----------
mol: RDKitMol
RDKit Mol object.
Returns
----------
node_features: ndarray
Numpy array containing atom features.
"""
return np.array([self.atom_features(atom) for atom in mol.GetAtoms()])
def _add_dummy_node(
self, node_features: np.ndarray, adj_matrix: np.ndarray,
dist_matrix: np.ndarray
) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
"""Adds a single dummy node to the molecule, which is consequently reflected in the Node Features Matrix, Adjacency Matrix and the Distance Matrix.
Parameters
----------
node_features: np.ndarray
Node Features matrix for a given molecule.
adj_matrix: np.ndarray
Adjacency matrix for a given molecule.
dist_matrix: np.ndarray
Distance matrix for a given molecule.
Returns
----------
Tuple[np.ndarray, np.ndarray, np.ndarray]
A tuple containing three numpy arrays: node_features, adjacency_matrix, distance_matrix.
"""
if node_features is not None:
m = np.zeros(
(node_features.shape[0] + 1, node_features.shape[1] + 1))
m[1:, 1:] = node_features
m[0, 0] = 1.0
node_features = m
if adj_matrix is not None:
m = np.zeros((adj_matrix.shape[0] + 1, adj_matrix.shape[1] + 1))
m[1:, 1:] = adj_matrix
adj_matrix = m
if dist_matrix is not None:
m = np.full((dist_matrix.shape[0] + 1, dist_matrix.shape[1] + 1),
1e6)
m[1:, 1:] = dist_matrix
dist_matrix = m
return node_features, adj_matrix, dist_matrix
def _pad_array(self, array: np.ndarray, shape: Any) -> np.ndarray:
"""Pads an array to the desired shape.
Parameters
----------
array: np.ndarray
Array to be padded.
shape: int or Tuple
Shape the array is padded to.
Returns
----------
array: np.ndarray
Array padded to input shape.
"""
result = np.zeros(shape=shape)
slices = tuple(slice(s) for s in array.shape)
result[slices] = array
return result
def _pad_sequence(self, sequence: np.ndarray) -> np.ndarray:
"""Pads a given sequence using the pad_array function.
Parameters
----------
sequence: np.ndarray
Arrays in this sequence are padded to the largest shape in the sequence.
Returns
----------
array: np.ndarray
Sequence with padded arrays.
"""
shapes = np.stack([np.array(t.shape) for t in sequence])
max_shape = tuple(np.max(shapes, axis=0))
return np.stack([self._pad_array(t, shape=max_shape) for t in sequence])
def _featurize(self, datapoint: RDKitMol, **kwargs) -> MATEncoding:
"""
Featurize the molecule.
Parameters
----------
datapoint: RDKitMol
RDKit mol object.
Returns
-------
MATEncoding
A MATEncoding dataclass instance consisting of processed node_features, adjacency_matrix and distance_matrix.
"""
if 'mol' in kwargs:
datapoint = kwargs.get("mol")
raise DeprecationWarning(
'Mol is being phased out as a parameter, please pass "datapoint" instead.'
)
from rdkit import Chem
datapoint = self.construct_mol(datapoint)
node_features = self.construct_node_features_matrix(datapoint)
adjacency_matrix = Chem.GetAdjacencyMatrix(datapoint)
distance_matrix = Chem.GetDistanceMatrix(datapoint)
node_features, adjacency_matrix, distance_matrix = self._add_dummy_node(
node_features, adjacency_matrix, distance_matrix)
node_features = self._pad_sequence(node_features)
adjacency_matrix = self._pad_sequence(adjacency_matrix)
distance_matrix = self._pad_sequence(distance_matrix)
return MATEncoding(node_features, adjacency_matrix, distance_matrix)
<file_sep>import unittest
import pytest
import numpy as np
from flaky import flaky
try:
import tensorflow as tf
from tensorflow.keras.layers import Input, Dense, GRU, Reshape, Softmax
has_tensorflow = True
except:
has_tensorflow = False
import deepchem as dc
from deepchem.models.optimizers import Adam, PolynomialDecay
class TestA2C(unittest.TestCase):
@flaky
@pytest.mark.tensorflow
def test_roulette(self):
"""Test training a policy for the roulette environment."""
# This is modeled after the Roulette-v0 environment from OpenAI Gym.
# The player can bet on any number from 0 to 36, or walk away (which ends the
# game). The average reward for any bet is slightly negative, so the best
# strategy is to walk away.
class RouletteEnvironment(dc.rl.Environment):
def __init__(self):
super(RouletteEnvironment, self).__init__([(1,)], 38)
self._state = [np.array([0])]
def step(self, action):
if action == 37:
self._terminated = True # Walk away.
return 0.0
wheel = np.random.randint(37)
if wheel == 0:
if action == 0:
return 35.0
return -1.0
if action != 0 and wheel % 2 == action % 2:
return 1.0
return -1.0
def reset(self):
self._terminated = False
env = RouletteEnvironment()
# This policy just learns a constant probability for each action, and a constant for the value.
class TestPolicy(dc.rl.Policy):
def __init__(self):
super(TestPolicy, self).__init__(['action_prob', 'value'])
def create_model(self, **kwargs):
class TestModel(tf.keras.Model):
def __init__(self):
super(TestModel, self).__init__(**kwargs)
self.action = tf.Variable(
np.ones(env.n_actions, np.float32))
self.value = tf.Variable([0.0], tf.float32)
def call(self, inputs, **kwargs):
prob = tf.nn.softmax(
tf.reshape(self.action, (-1, env.n_actions)))
return (prob, self.value)
return TestModel()
# Optimize it.
a2c = dc.rl.A2C(env,
TestPolicy(),
max_rollout_length=20,
optimizer=Adam(learning_rate=0.001))
a2c.fit(100000)
# It should have learned that the expected value is very close to zero, and that the best
# action is to walk away. (To keep the test fast, we allow that to be either of the two
# top actions).
action_prob, value = a2c.predict([[0]])
assert -0.5 < value[0] < 0.5
assert 37 in np.argsort(action_prob.flatten())[-2:]
assert a2c.select_action([[0]],
deterministic=True) == action_prob.argmax()
# Verify that we can create a new A2C object, reload the parameters from the first one, and
# get the same result.
new_a2c = dc.rl.A2C(env, TestPolicy(), model_dir=a2c._model.model_dir)
new_a2c.restore()
action_prob2, value2 = new_a2c.predict([[0]])
assert value2 == value
# Do the same thing, only using the "restore" argument to fit().
new_a2c = dc.rl.A2C(env, TestPolicy(), model_dir=a2c._model.model_dir)
new_a2c.fit(0, restore=True)
action_prob2, value2 = new_a2c.predict([[0]])
assert value2 == value
@pytest.mark.tensorflow
def test_recurrent_states(self):
"""Test a policy that involves recurrent layers."""
# The environment just has a constant state.
class TestEnvironment(dc.rl.Environment):
def __init__(self):
super(TestEnvironment, self).__init__((10,), 10)
self._state = np.random.random(10).astype(np.float32)
def step(self, action):
self._state = np.random.random(10).astype(np.float32)
return 0.0
def reset(self):
pass
# The policy includes a single recurrent layer.
class TestPolicy(dc.rl.Policy):
def __init__(self):
super(TestPolicy,
self).__init__(['action_prob', 'value', 'rnn_state'],
[np.zeros(10)])
def create_model(self, **kwargs):
state = Input(shape=(10,))
rnn_state = Input(shape=(10,))
reshaped = Reshape((1, 10))(state)
gru, rnn_final_state = GRU(10,
return_state=True,
return_sequences=True,
time_major=True)(
reshaped,
initial_state=rnn_state)
output = Softmax()(Reshape((10,))(gru))
value = dc.models.layers.Variable([0.0])([state])
return tf.keras.Model(inputs=[state, rnn_state],
outputs=[output, value, rnn_final_state])
# We don't care about actually optimizing it, so just run a few rollouts to make
# sure fit() doesn't crash, then check the behavior of the GRU state.
env = TestEnvironment()
a2c = dc.rl.A2C(env, TestPolicy())
a2c.fit(100)
# On the first call, the initial state should be all zeros.
prob1, value1 = a2c.predict(env.state,
use_saved_states=True,
save_states=False)
# It should still be zeros since we didn't save it last time.
prob2, value2 = a2c.predict(env.state,
use_saved_states=True,
save_states=True)
# It should be different now.
prob3, value3 = a2c.predict(env.state,
use_saved_states=True,
save_states=False)
# This should be the same as the previous one.
prob4, value4 = a2c.predict(env.state,
use_saved_states=True,
save_states=False)
# Now we reset it, so we should get the same result as initially.
prob5, value5 = a2c.predict(env.state,
use_saved_states=False,
save_states=True)
assert np.array_equal(prob1, prob2)
assert np.array_equal(prob1, prob5)
assert np.array_equal(prob3, prob4)
assert not np.array_equal(prob2, prob3)
@flaky
@pytest.mark.slow
@pytest.mark.tensorflow
def test_hindsight(self):
"""Test Hindsight Experience Replay."""
# The environment is a plane in which the agent moves by steps until it reaches a randomly
# positioned goal. No reward is given until it reaches the goal. That makes it very hard
# to learn by standard methods, since it may take a very long time to receive any feedback
# at all. Using hindsight makes it much easier.
class TestEnvironment(dc.rl.Environment):
def __init__(self):
super(TestEnvironment, self).__init__((4,), 4)
self.moves = [(-1, 0), (1, 0), (0, -1), (0, 1)]
def reset(self):
self._state = np.concatenate([[0, 0],
np.random.randint(-50, 50, 2)])
self._terminated = False
self.count = 0
def step(self, action):
new_state = self._state.copy()
new_state[:2] += self.moves[action]
self._state = new_state
self.count += 1
reward = 0
if np.array_equal(new_state[:2], new_state[2:]):
self._terminated = True
reward = 1
elif self.count == 1000:
self._terminated = True
return reward
def apply_hindsight(self, states, actions, goal):
new_states = []
rewards = []
goal_pos = goal[:2]
for state, action in zip(states, actions):
new_state = state.copy()
new_state[2:] = goal_pos
new_states.append(new_state)
pos_after_action = new_state[:2] + self.moves[action]
if np.array_equal(pos_after_action, goal_pos):
rewards.append(1)
break
else:
rewards.append(0)
return new_states, rewards
# A simple policy with two hidden layers.
class TestPolicy(dc.rl.Policy):
def __init__(self):
super(TestPolicy, self).__init__(['action_prob', 'value'])
def create_model(self, **kwargs):
state = Input(shape=(4,))
dense1 = Dense(6, activation=tf.nn.relu)(state)
dense2 = Dense(6, activation=tf.nn.relu)(dense1)
output = Dense(4, activation=tf.nn.softmax,
use_bias=False)(dense2)
value = Dense(1)(dense2)
return tf.keras.Model(inputs=state, outputs=[output, value])
# Optimize it.
env = TestEnvironment()
a2c = dc.rl.A2C(env,
TestPolicy(),
use_hindsight=True,
optimizer=Adam(learning_rate=0.001))
a2c.fit(1000000)
# Try running it a few times and see if it succeeds.
pass_count = 0
for i in range(5):
env.reset()
while not env.terminated:
env.step(a2c.select_action(env.state))
if np.array_equal(env.state[:2], env.state[2:]):
pass_count += 1
assert pass_count >= 3
@pytest.mark.tensorflow
def test_continuous(self):
"""Test A2C on an environment with a continous action space."""
# The state consists of two numbers: a current value and a target value.
# The policy just needs to learn to output the target value (or at least
# move toward it).
class TestEnvironment(dc.rl.Environment):
def __init__(self):
super(TestEnvironment, self).__init__((2,), action_shape=(1,))
def reset(self):
target = np.random.uniform(-50, 50)
self._state = np.array([0, target], dtype=np.float32)
self._terminated = False
self.count = 0
def step(self, action):
target = self._state[1]
dist = np.abs(target - action[0])
old_dist = np.abs(target - self._state[0])
new_state = np.array([action[0], target], dtype=np.float32)
self._state = new_state
self.count += 1
reward = old_dist - dist
self._terminated = (self.count == 10)
return reward
# A simple policy with no hidden layers.
class TestPolicy(dc.rl.Policy):
def __init__(self):
super(TestPolicy,
self).__init__(['action_mean', 'action_std', 'value'])
def create_model(self, **kwargs):
class TestModel(tf.keras.Model):
def __init__(self):
super(TestModel, self).__init__(**kwargs)
self.mean = Dense(1, kernel_initializer='zeros')
self.std = tf.constant([10.0])
self.value = Dense(1)
def call(self, inputs, **kwargs):
return (self.mean(inputs[0]), self.std,
self.value(inputs[0]))
return TestModel()
# Optimize it.
env = TestEnvironment()
learning_rate = PolynomialDecay(initial_rate=0.005,
final_rate=0.0005,
decay_steps=25000)
a2c = dc.rl.A2C(env,
TestPolicy(),
discount_factor=0,
optimizer=Adam(learning_rate=learning_rate))
a2c.fit(25000)
# Try running it and see if it reaches the target
env.reset()
while not env.terminated:
env.step(a2c.select_action(env.state, deterministic=True))
distance = np.abs(env.state[0] - env.state[1])
tolerance = max(1.0, 0.1 * np.abs(env.state[1]))
assert distance < tolerance
<file_sep>from sklearn.neighbors import KNeighborsClassifier
from utils.base import get_offset_font
from utils.base import normalization
from utils.base import make_xml
from utils.base import get_labels_by_GlyphNames
from utils.base import get_labels_by_glyphID
from utils.base import matrix
class KnnFont:
def __init__(self, trainFontMode):
self.trainDataSetPath = './dataset/dataBase.json'
self.trainFontMode = trainFontMode
self.Y = list()
@classmethod
def viewFontFileCodeByGlyphsNames(cls, path, mode):
return get_labels_by_GlyphNames(f'{path}.{mode}')
@classmethod
def viewFontFileCodeByGlyphsId(cls, path, mode):
glyphName = cls.viewFontFileCodeByGlyphsNames(path, mode)
return [get_labels_by_glyphID(f'{path}.{mode}', glyphID) for glyphID in range(len(glyphName))]
def initFontXML(self, path):
make_xml(path, self.trainFontMode)
def read(self):
data = list()
label = list()
with open(self.trainDataSetPath, 'r') as f:
while True:
item = f.readline().strip()
if item:
item = eval(item)
try:
label.append(item[0])
data.append(item[1])
except TypeError:
length = item
else:
break
return data, label, length
def save(self):
with open(self.trainDataSetPath, 'w') as f:
length = 0
for item in self.Y:
if len(item[1]) > length:
length = len(item[1])
f.write(f'{item}\n')
else:
f.write(f'{length}')
def fit(self, pathList, labelList):
for path in pathList:
self.initFontXML(path)
for path, label in zip(pathList, labelList):
offset = get_offset_font(f'{path}.xml')
offset = normalization(offset)
for i in offset:
self.Y.append([label[i[0]], i[1]])
else:
self.save()
def predict(self, fontFile, n_neighbors):
trainData, trainLabel, trainLength = self.read()
self.initFontXML(fontFile)
offset = get_offset_font(f'{fontFile}.xml')
offset = normalization(offset)
testData, testLength = list(), 0
testLabel = list()
for item in offset:
if len(item[1]) > testLength:
testLength = len(item[1])
testData.append(item[1])
testLabel.append(item[0])
trainLabel, trainData = matrix(trainData, trainLabel, max(trainLength, testLength))
testLabel, testData = matrix(testData, testLabel, max(trainLength, testLength))
model = KNeighborsClassifier(n_neighbors=n_neighbors)
model.fit(trainData, trainLabel)
result = model.predict(testData)
return dict(zip(testLabel, result))
if __name__ == '__main__':
a = KnnFont(trainFontMode='ttf')
paths = [
'dataset/base1',
'dataset/base2',
'dataset/base3'
]
labels = [
{'u115D5': '5', 'u69921': '2', 'u6F365': '6', 'u9B7D8': '8', 'uA4E70': '3', 'uB194D': '7', 'uC2D3D': '.',
'uC511C': '9', 'uDB77F': '0', 'uDEE4A': '1', 'uF943D': '4', '.notdef': '', 'space': ' ', 'uni0000': '',
'uni0001': ''},
{'u15FA1': '9', 'u172A4': '.', 'u266DB': '0', 'u3CAF0': '2', 'u7CBCE': '1', 'u823F9': '8', 'uB0891': '5',
'uB6A1B': '3', 'uBD774': '6', 'uEA641': '4', 'uF2D43': '7', '.notdef': '', 'space': ' ', 'uni0000': '',
'uni0001': ''},
{'u155BA': '8', 'u1AD7B': '2', 'u2E093': '7', 'u4FF4F': '6', 'u6D990': '4', 'u86D44': '9', 'u8F418': '3',
'uA3D11': '0', 'uAB1B1': '1', 'uCF187': '5', 'uF1C20': '.', '.notdef': '', 'space': ' ', 'uni0000': '',
'uni0001': ''}
]
# a.fit(paths, labels)
a.predict('ickey', n_neighbors=3)
# print(KnnFont.viewFontFileCodeByGlyphsNames('dataset/base3', 'ttf'))
<file_sep>from Model import KnnFont
paths = [
'dataset/base1',
'dataset/base2',
'dataset/base3'
]
labels = [
{'u115D5': '5', 'u69921': '2', 'u6F365': '6', 'u9B7D8': '8', 'uA4E70': '3', 'uB194D': '7', 'uC2D3D': '.',
'uC511C': '9', 'uDB77F': '0', 'uDEE4A': '1', 'uF943D': '4', '.notdef': '', 'space': ' ', 'uni0000': '',
'uni0001': ''},
{'u15FA1': '9', 'u172A4': '.', 'u266DB': '0', 'u3CAF0': '2', 'u7CBCE': '1', 'u823F9': '8', 'uB0891': '5',
'uB6A1B': '3', 'uBD774': '6', 'uEA641': '4', 'uF2D43': '7', '.notdef': '', 'space': ' ', 'uni0000': '',
'uni0001': ''},
{'u155BA': '8', 'u1AD7B': '2', 'u2E093': '7', 'u4FF4F': '6', 'u6D990': '4', 'u86D44': '9', 'u8F418': '3',
'uA3D11': '0', 'uAB1B1': '1', 'uCF187': '5', 'uF1C20': '.', '.notdef': '', 'space': ' ', 'uni0000': '',
'uni0001': ''}
]
model = KnnFont(trainFontMode='ttf')
# model.fit(paths, labels)
result = model.predict('ickey', n_neighbors=3)
print(result)
# print(KnnFont.viewFontFileCodeByGlyphsNames('dataset/base3', 'ttf'))
# print(KnnFont.viewFontFileCodeByGlyphsId('ickey', 'ttf'))
<file_sep>from xml.dom.minidom import parse
from fontTools.ttLib import TTFont
from numpy import asarray
def get_labels_by_GlyphNames(file):
font = TTFont(file)
return font.getGlyphNames()
def get_labels_by_glyphID(file, glyphID):
font = TTFont(file)
return font.getGlyphName(glyphID=glyphID)
def make_xml(file, mode='woff'):
font = TTFont(f'{file}.{mode}')
font.saveXML(f'{file}.xml')
# Extract the coordinates of each glyph from the XML file
def get_offset_font(filename):
data = parse(filename)
collection = data.documentElement
labels = collection.getElementsByTagName("TTGlyph")
data_list = []
max_len = 0
for label in labels:
contour = label.getElementsByTagName("contour")
# Get the glyph's name (unicode) in the woff file together with yMax, yMin, xMax, xMin
offset = [[label.getAttribute("name"),
int(label.getAttribute("yMax")) if label.getAttribute("yMax") else 0,
int(label.getAttribute("yMin")) if label.getAttribute("yMin") else 0,
int(label.getAttribute("xMax")) if label.getAttribute("xMax") else 0,
int(label.getAttribute("xMin")) if label.getAttribute("xMin") else 0]]
for item in contour:
pt = item.getElementsByTagName("pt")
# Iterate over all x, y coordinates of this glyph
for xy in pt:
if xy.hasAttribute("x"):
offset.append(int(xy.getAttribute("x")))
if xy.hasAttribute("y"):
offset.append(int(xy.getAttribute("y")))
else:
data_list.append(offset)
max_len = max_len if max_len > len(offset) else len(offset)
return data_list
# Re-normalize the coordinates so every glyph starts at (0, 0), and track the maximum size
def normalization(offset_list):
new_offset = []
max_size = {'x': 0, 'y': 0}
for item in offset_list:
if len(item) > 1:
head, rear = item[0], item[1:]
y_min, x_min = head[2], head[4]
head[1] = head[1] - y_min
head[2] = head[2] - y_min
head[3] = head[3] - x_min
head[4] = head[4] - x_min
max_size['x'] = head[3] if head[3] > max_size['x'] else max_size['x']
max_size['y'] = head[1] if head[1] > max_size['y'] else max_size['y']
for i in range(len(rear)):
if i % 2 == 0:
rear[i] = rear[i] - x_min
else:
rear[i] = rear[i] - y_min
new_offset.append([head[0], rear])
else:
new_offset.append([item[0][0], []])
return new_offset
def matrix(offset, labels, length=0):
data = []
for item in offset:
offsetList = item
data.append(offsetList)
for i in range(len(data)):
data[i] = data[i] + [0] * (length - len(data[i]))
else:
labels = asarray(labels)
data = asarray(data)
return labels, data
<file_sep># DealFontCrawler
A simple, general-purpose model for defeating font-based anti-crawling obfuscation.
### Technical highlights
1. Automatically extracts the coordinates of every glyph
2. Automatically matches glyph coordinate matrices of different sizes
3. Uses relative-coordinate normalization to handle glyphs that sit at different positions in the coordinate space
4. Directly produces a key-value mapping that is easy to use
### Advantages
1. Only a small amount of manual labeling is needed to build the font-mapping model
2. Simply feed in the woff or ttf file to be decoded and you get the mapping
### Limitations
1. Building the glyph mappings for the training set can be tedious and cannot be fully automated
### Training code example and steps
Training steps:
1. Put the training font files (woff / ttf) into the dataset folder.
2. List the training-set paths in the code, one path per training file **(do not include the file extension)**.
3. Label the glyph-to-character mapping for each training file.
4. Specify the font file type of the training set, e.g. woff or ttf.
5. Run the training; the trained vectors are saved automatically in the dataset folder (after training, the font files and xml files inside the dataset folder can be deleted if desired).
To help build the mappings, the KnnFont class provides two helpers for inspecting glyph codes: KnnFont.viewFontFileCodeByGlyphsNames and
KnnFont.viewFontFileCodeByGlyphsId. They correspond to the two orderings shown in a font editor, Glyph Name and
Glyph Index, which makes it easier to label the training-set mappings as needed.
Note: the glyph names shown in a font editor may not match the names used by the program. For example, in the ickey.ttf file in this repository, the glyph shown
as **".null"** in the editor appears as **".notdef"** in the program; **"uni0001"** appears as **"uni0000"**;
**"uniFFFD"** appears as **"uni0001"**; and so on. It is therefore recommended to first use one of the two built-in
methods above to check the actual glyph names before building the mapping.
```python
from Model import KnnFont
paths = [
'dataset/base1',
'dataset/base2',
'dataset/base3'
]
labels = [
{'u115D5': '5', 'u69921': '2', 'u6F365': '6', 'u9B7D8': '8', 'uA4E70': '3', 'uB194D': '7', 'uC2D3D': '.',
'uC511C': '9', 'uDB77F': '0', 'uDEE4A': '1', 'uF943D': '4', '.notdef': '', 'space': ' ', 'uni0000': '',
'uni0001': ''},
{'u15FA1': '9', 'u172A4': '.', 'u266DB': '0', 'u3CAF0': '2', 'u7CBCE': '1', 'u823F9': '8', 'uB0891': '5',
'uB6A1B': '3', 'uBD774': '6', 'uEA641': '4', 'uF2D43': '7', '.notdef': '', 'space': ' ', 'uni0000': '',
'uni0001': ''},
{'u155BA': '8', 'u1AD7B': '2', 'u2E093': '7', 'u4FF4F': '6', 'u6D990': '4', 'u86D44': '9', 'u8F418': '3',
'uA3D11': '0', 'uAB1B1': '1', 'uCF187': '5', 'uF1C20': '.', '.notdef': '', 'space': ' ', 'uni0000': '',
'uni0001': ''}
]
print(KnnFont.viewFontFileCodeByGlyphsNames('dataset/base3', 'ttf'))
print(KnnFont.viewFontFileCodeByGlyphsId('dataset/base3', 'ttf'))
model = KnnFont(trainFontMode='ttf')
model.fit(paths, labels)
```
### Prediction code example
Prediction takes two steps:
1. Pass in the path of the font file to decode (without the file extension).
2. n_neighbors sets the number of neighbors, usually equal to the number of training font files.
```python
from Model import KnnFont
model = KnnFont(trainFontMode='ttf')
result = model.predict('ickey', n_neighbors=3)
print(result)
```
```
>> {'.notdef': '', 'space': '', 'u33C3E': '.', 'u630D5': '3', 'u71DEA': '6', 'u77B76': '2', 'u7BB58': '5', 'u8941E': '8', 'uA7174': '4', 'uB83C0': '0', 'uBF8F2': '1', 'uDD740': '9', 'uE8F4A': '7', 'uni0000': '', 'uni0001': ''}
```
### Dependencies
```
pip install fontTools numpy scikit-learn
```
|
ab659bd7aeb161cd7afde20466e0923760114209
|
[
"Markdown",
"Python"
] | 4
|
Python
|
Yakuho/DealFontCrawler
|
eee732d79e35b8947bfd35e3661fe0c96204aa03
|
ac6a6dc8f3fc1d7b68d777170623ac1a96c960b0
|
refs/heads/main
|
<file_sep>/* eslint-disable no-undef */
const assert = require('assert');
const validator = require('../validator');
const samples = require('../samples');
const common = require('../common');
describe('Validator test', () => {
it('No input test should return sample test', () => {
assert.equal(
validator.validate({ instructions: '' }).body,
samples.sampleInput
);
});
it('3 dimension map coordinates should return error', () => {
assert.equal(
validator.validate({ instructions: '10 10 10' }).message,
common.MSG_INVALID_COORDINATES_DIMENSIONS
);
});
it('no robot coordinates should return error', () => {
assert.equal(
validator.validate({ instructions: '10 10' }).message,
common.MSG_ROBOT_INITIAL_COORDINATES_MISSING
);
});
it('odd lines (robot, movements) should return error', () => {
assert.equal(
validator
.validate({ instructions: '10 10 \n 1 1 E' })
.message.includes(common.MSG_EVEN_LINES_NEEDED),
true
);
});
it('unexpected cardinal point should return error', () => {
assert.equal(
validator
.validate({ instructions: '10 10 \n 1 1 Q \n F' })
.message.includes(common.MSG_USE_VALID_CARDINAL_POINT),
true
);
});
it('robot initial position outside the map should return error', () => {
assert.equal(
validator.validate({ instructions: '10 10 \n 12 1 N \n F' }).message,
common.MSG_COORDINATES_OUTSIDE_THE_MAP
);
});
it('robot initial position negative numbers should return error', () => {
assert.equal(
validator.validate({ instructions: '10 10 \n -1 1 N \n F' }).message,
common.MSG_COORDINATES_NEED_TO_BE_POSITIVE
);
});
it('map coordinates not numbers should return error', () => {
assert.equal(
validator.validate({ instructions: 'x 10 \n x y N \n F' }).message,
common.MSG_COORDINATES_NEED_TO_BE_NUMBERS
);
});
it('invalid initial robot coordinates should return error', () => {
assert.equal(
validator.validate({ instructions: '10 10 \n 10 10 \n F' }).message,
common.MSG_INVALID_ROBOT_INITIAL_POSITION
);
});
});
<file_sep>const express = require('express');
const bodyParser = require('body-parser');
const path = require('path');
const validator = require('./validator');
const movement = require('./movement');
const common = require('./common');
const app = express();
const appPort = process.env.PORT || 3000;
app.use(bodyParser.urlencoded({ extended: true }));
app.get('/', (req, res) => {
res.sendFile(path.join(__dirname + '/index.html'));
});
app.post('/map', (req, res) => {
const validatorResponse = validator.validate({
instructions: req.body.instructions,
});
if (validatorResponse.statusCode !== 200) {
    return res.status(validatorResponse.statusCode).send(
`<p>There was an error with the provided instructions: <br>
<i>${validatorResponse.message}</i></p><br>
<button onclick='window.history.back()'>Try again!</button>`
);
}
const movementResponse = movement.moveRobots({
instructions: validatorResponse.body,
});
if (movementResponse.statusCode !== 200) {
    return res.send(
`<p>There was an error with the robot movement: <br>
<i>${movementResponse.message}</i></p><br>
<button onclick='window.history.back()'>Modify your input!</button>`
);
}
res.send(
`<p>The resulting robots coordinates are: <br>
<textarea>${common.buildRobotsOutput(
movementResponse.body
)}</textarea></p><br>
<button onclick='window.history.back()'>Try again with another input!</button>
<button onclick='window.location="/getPreviousMaps"'>Visit previous dead robots by map</button>`
);
});
app.get('/getPreviousMaps', (req, res) => {
const previousMaps = common.getMapHistory();
let rows = '';
for (let i = 0; i < previousMaps.length; i++) {
rows += `<tr>
<th>${previousMaps[i].marsMap}</th>
<th>${previousMaps[i].scentsOfDeath}</th>
</tr>`;
}
res.send(
`<p>Previous maps: <br>
<table>
<tr>
<th>Mars Map</th>
<th>Dead scents</th>
</tr>
${rows}
</table>
<button onClick='window.location="/"'> Home </button>
<form action="/clearHistory" method='post'><button type='submit' formmethod='post'> Clear map History </button></form>
`
);
});
app.post('/clearHistory', (req, res) => {
const resClearHistory = common.resetMapHistory();
let content = '';
if (resClearHistory.statusCode !== 200) {
content += `<p>There was an error when trying to clear the map history: <br> <i>${resClearHistory.message}</i></p><br>`;
} else {
content += 'Successfully cleared map history!';
}
res.send(
`${content} <button onClick='window.location="/"'>Go back to main page</button>`
);
});
app.listen(appPort, () => {
console.log(`App running at port ${appPort}...`);
});
<file_sep># robots-in-mars
Robots in Mars is a simple program that handles robot movements across a given two-dimensional map (see the example input below).
Requirements:
- Robot's initial orientation can be N, S, E, W (for north, south, east, and west).
- Maximum size of the map should be 50x50
- Each robot is processed sequentially
- If a robot falls off the edge of the grid the word "LOST" should be printed after the position and orientation.
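A hypothetical example of the expected input format (illustrative values only, not the repository's built-in sample): the first line gives the map size, followed by pairs of lines with a robot's starting position/orientation and its movement string (L = turn left, R = turn right, F = move forward).

```
5 3
1 1 E
RFRFRFRF
3 2 N
FRRFLLFFRRFLL
```

The program then reports each robot's final `x y orientation`, appending `LOST` when a robot fell off the grid; the exact output formatting is produced by `common.buildRobotsOutput`.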
Live example in [Heroku](https://robots-in-mars.herokuapp.com/)
Local execution:
- Run `npm install` and `npm start`
- Tests: `npm test`
<file_sep>/* eslint-disable no-undef */
const assert = require('assert');
const movement = require('../movement');
const samples = require('../samples');
const common = require('../common');
describe('Movement test', () => {
it('Sample input should return 200', () => {
assert.equal(
movement.moveRobots({ instructions: samples.sampleInput }).statusCode,
200
);
});
it('Moving 360 degrees should return same initial position', () => {
const initialPosition = [1, 1, 'E'];
assert.equal(
JSON.stringify(
movement.moveRobots({
instructions: {
marsMap: [3, 3],
robots: [
{
initialPosition: initialPosition,
movements: ['R', 'R', 'R', 'R'],
},
],
},
}).body['0']
),
JSON.stringify(initialPosition)
);
});
});
after(async () => {
await common.resetMapHistory();
});
<file_sep>const Robot = require('./robot');
const common = require('./common');
const fs = require('fs');
let scentsHistory;
function retrievePreviousScents({ mapCoordinates }) {
scentsHistory = JSON.parse(fs.readFileSync('scentsOfDeath.json', 'utf8'));
return scentsHistory.filter(
(item) => JSON.stringify(item.marsMap) === JSON.stringify(mapCoordinates)
);
}
function storeNewScents({ mapCoordinates, newScents }) {
const existingMap = scentsHistory.find(
(item) => JSON.stringify(item.marsMap) === JSON.stringify(mapCoordinates)
);
if (existingMap === undefined) {
scentsHistory.push({
marsMap: mapCoordinates,
scentsOfDeath: newScents,
});
} else {
scentsHistory.forEach((obj) => {
if (JSON.stringify(obj.marsMap) === JSON.stringify(mapCoordinates)) {
// Only update if the items are different.
if (JSON.stringify(obj.scentsOfDeath) !== JSON.stringify(newScents)) {
obj.scentsOfDeath = newScents;
}
}
});
}
// Only update if there are any dead scents.
if (newScents.length > 0) {
fs.writeFile(
'scentsOfDeath.json',
JSON.stringify(scentsHistory),
function (err) {
if (err) {
console.log(`Error writing json to file: ${err}`);
}
}
);
}
}
function moveRobots({ instructions }) {
try {
let deadRobotsScents = retrievePreviousScents({
mapCoordinates: instructions.marsMap,
});
deadRobotsScents =
deadRobotsScents.length > 0
? [...deadRobotsScents['0'].scentsOfDeath]
: [];
const robotsFinalPositions = [];
for (const robotObj of instructions.robots) {
const robot = new Robot({
marsMap: instructions.marsMap,
robot: robotObj,
deadScents: [...deadRobotsScents],
});
const { finalPosition, deadScentInstruction } = robot.move();
if (deadScentInstruction) {
deadRobotsScents.push(deadScentInstruction);
}
deadRobotsScents = Array.from(
[...deadRobotsScents.map(JSON.stringify)],
JSON.parse
);
robotsFinalPositions.push(finalPosition);
}
storeNewScents({
mapCoordinates: instructions.marsMap,
newScents: deadRobotsScents,
});
return common.buildApiResponse({
code: 200,
message: 'success',
body: robotsFinalPositions,
});
} catch (e) {
return common.buildApiResponse({
code: 500,
message: `Error moving robot: ${e}`,
});
}
}
module.exports = {
moveRobots,
};
<file_sep>const samples = require('./samples');
const common = require('./common');
let fullInstructions = new Object();
const validCardinalPoints = ['N', 'S', 'E', 'W'];
const validMovements = ['L', 'R', 'F']; // Left, Right, Forward at the moment, open to changes.
function coordinatesToIntList(coords) {
const coordsList = [];
for (const item of coords) {
const intCoordinate = Number(item);
if (Number.isNaN(intCoordinate)) {
return common.buildApiResponse({
code: 400,
message: common.MSG_COORDINATES_NEED_TO_BE_NUMBERS,
});
} else if (intCoordinate < 0) {
return common.buildApiResponse({
code: 400,
message: common.MSG_COORDINATES_NEED_TO_BE_POSITIVE,
});
} else if (intCoordinate > 50) {
return common.buildApiResponse({
code: 400,
message: common.MSG_COORDINATES_MAX_SIZE_EXCEEDED,
});
}
coordsList.push(intCoordinate);
}
return coordsList;
}
function getMovements(movements) {
  // use a plain loop so the early return actually propagates (a return inside forEach is discarded)
  for (const movement of movements) {
    if (!validMovements.includes(movement)) {
      return common.buildApiResponse({
        code: 400,
        message: `The movement ${movement} is not included in the system.
        Please only use one of the following: ${validMovements.join(',')}`,
      });
    }
  }
  return movements;
}
function getInitialPosition(initialPosition) {
if (initialPosition.length !== 3) {
return common.buildApiResponse({
code: 400,
message: common.MSG_INVALID_ROBOT_INITIAL_POSITION,
});
}
const coordX = Number(initialPosition[0]);
const coordY = Number(initialPosition[1]);
const orientation = initialPosition[2];
switch (true) {
case Number.isNaN(coordX):
case Number.isNaN(coordY):
return common.buildApiResponse({
code: 400,
message: common.MSG_COORDINATES_NEED_TO_BE_NUMBERS,
});
case coordX < 0:
case coordY < 0:
return common.buildApiResponse({
code: 400,
message: common.MSG_COORDINATES_NEED_TO_BE_POSITIVE,
});
case coordX > fullInstructions.marsMap[0]:
case coordY > fullInstructions.marsMap[1]:
return common.buildApiResponse({
code: 400,
message: common.MSG_COORDINATES_OUTSIDE_THE_MAP,
});
case !validCardinalPoints.includes(initialPosition[2]):
return common.buildApiResponse({
code: 400,
message:
`The cardinal point specified (${initialPosition[2]}) does not exist` +
common.MSG_USE_VALID_CARDINAL_POINT,
});
default:
break;
}
return [coordX, coordY, orientation];
}
function appendRobot(directions) {
const robotObj = new Object();
const initialPos = getInitialPosition(directions[0].split(' '));
if (initialPos.statusCode === 400) {
return initialPos;
}
robotObj.initialPosition = initialPos;
const movements = getMovements(directions[1].split(''));
  if (movements.statusCode !== undefined) {
    return movements;
  }
robotObj.movements = movements;
fullInstructions.robots.push(robotObj);
return;
}
function buildRobotsInstructions(data) {
// Let's clean the data first (empty lines and line breaks).
data = data.filter((item) => item !== '').map((item) => item.trim());
const dataLength = data.length;
if (dataLength === 0) {
return common.buildApiResponse({
code: 400,
message: common.MSG_ROBOT_INITIAL_COORDINATES_MISSING,
});
} else if (dataLength % 2 !== 0) {
return common.buildApiResponse({
code: 400,
message:
`${data.length} directions found.` + common.MSG_EVEN_LINES_NEEDED,
});
}
fullInstructions.robots = [];
for (let i = 0; i < dataLength; i += 2) {
let appendRes = appendRobot(data.slice(i, i + 2));
if (appendRes) {
return appendRes;
}
}
return;
}
function buildMapCoordinates(data) {
const coordinatesList = data.trim().split(' ');
if (coordinatesList.length != 2) {
return common.buildApiResponse({
code: 400,
message: common.MSG_INVALID_COORDINATES_DIMENSIONS,
});
}
const coordsParsed = coordinatesToIntList(coordinatesList);
  if (coordsParsed.statusCode !== undefined) {
return coordsParsed;
}
fullInstructions.marsMap = coordsParsed;
return;
}
function validate({ instructions }) {
try {
if (instructions === '') {
return common.buildApiResponse({
code: 200,
message: common.MSG_SUCCESS,
body: samples.sampleInput,
});
}
instructions = instructions.split('\n');
const mapCoordsRes = buildMapCoordinates(instructions[0]);
if (mapCoordsRes) {
return mapCoordsRes;
}
const robotInstructionsRes = buildRobotsInstructions(instructions.slice(1));
if (robotInstructionsRes) {
return robotInstructionsRes;
}
return common.buildApiResponse({
code: 200,
message: common.MSG_SUCCESS,
body: fullInstructions,
});
} catch (e) {
return common.buildApiResponse({
code: 500,
message: `internal server error: ${e}`,
});
}
}
module.exports = {
validate,
};
|
efe3f34b8cee9081c86d9048096e08f86ca296af
|
[
"JavaScript",
"Markdown"
] | 6
|
JavaScript
|
j-hernandez93/robots-in-mars
|
ee6f6e7dd7571f3c5575642231b84d3335826a86
|
6dbe9971ef543570635f1aa80babbbfd851cfa9d
|
refs/heads/master
|
<repo_name>lezhnev74/Eximporter<file_sep>/README.md
[](https://travis-ci.org/lezhnev74/Eximporter)
[](https://packagist.org/packages/lezhnev74/eximport)
[](https://packagist.org/packages/lezhnev74/eximport)
[](https://packagist.org/packages/lezhnev74/eximport)
[](https://packagist.org/packages/lezhnev74/eximport)
# Eximporter
Object oriented Excel importer with input validation
It lets you import any Excel file, validate each cell, and run your business logic on both the good rows and the ones that failed validation.
P.S. Rows whose cells are all null are skipped automatically.

## Example
```php
use Eximporter\Eximporter;
use Eximporter\Exceptions\BadFile;
$file = "./tests/resources/test_05.xlsx";
try {
$importer = new Eximporter($file);
$importer->setValidationRules([
        // you can set rules by name
        'description' => 'required',
        // you can set manual closures as rules (each closure wrapped in its own array)
        'amount' => [['custom_rule' => function ($cell_value) { return $cell_value > 100; }]],
        // you can add a few rules in a row
        'title' => [
            'required',
            'regexp:#^[0-9]+$#',
            [
                'custom_rule_2' => function ($cell_value) { return strlen($cell_value) < 10; }
            ]
        ],
]);
// set handlers (closures) to handle each good or bad (validation failed) row
$importer->setHandlerForBadRow(function ($row, $bad_cells) {
foreach ($bad_cells as $cell_title => $validation_result) {
echo $cell_title . " failed validators: " . implode(", ", $validation_result->getFailed());
echo "\n";
// example output:
// Amount failed validators: custom1
// Description failed validators: required
// ...
}
});
// set handlers for good rows
$importer->setHandlerForGoodRow(function ($row) {
// business logic with $row
});
// ok let's go
$importer->read();
// you can access counters
echo $importer->getGoodRowsCount();
echo $importer->getBadRowsCount();
} catch (BadFile $e) {
// unable to open this file
}
```
## Usage
This package is intended to be used in projects with Excel import functions.
It lets you easily add a validation layer and filtering options for your data.
It is based on PHPOffice/PHPExcel under the hood.
## Installation
```
composer require lezhnev74/eximport
```
Requirements:
* PHP7
* PHP extension php_zip enabled
* PHP extension php_xml enabled
* PHP extension php_gd2 enabled (if not compiled in)
* [Read more](https://github.com/PHPOffice/PHPExcel/wiki/Developer-Documentation-Prerequisites)
## Credits
<NAME>
<EMAIL><file_sep>/tests/TestHandlers.php
<?php
use Eximporter\Eximporter;
use PHPUnit\Framework\TestCase;
class TestHandlers extends TestCase
{
public function testRequiredValidator()
{
        // sample spreadsheet used for this test
$file = "./tests/resources/test_05.xlsx";
$importer = new Eximporter($file);
$importer->setValidationRules([
'description' => 'required',
'amount' => ['regexp:#^[0-9]+$#', ['custom1' => function ($value) { return $value > 100; }]],
]);
// set handler for each bad cell
$importer->setHandlerForBadRow(function ($row, $bad_cells) {
foreach ($bad_cells as $cell_title => $validation_result) {
echo $cell_title . " failed validators: " . implode(", ", $validation_result->getFailed());
echo "\n";
}
});
// set handlers for good rows
$importer->setHandlerForGoodRow(function ($row) {
// business logic with $row
});
$importer->read();
}
}<file_sep>/src/Eximporter.php
<?php
namespace Eximporter;
use Eximporter\Exceptions\BadFile;
use Eximporter\Validators\Manager;
/**
* Class Eximporter
* Allows to import spreadsheet and validate it's rows
* It allows easily assign custom validation via closures
*
* @package Eximporter
*/
class Eximporter
{
// source file with data
private $input_file;
// objects from underlying PHPExcel library
private $object_reader;
private $object_excel;
// object to handle validation
// tightly coupled, but works
private $validation_manager = null;
// closure to handle validated rows
private $good_row_handler = null;
// closure to handle rows which failed validation
private $bad_row_handler = null;
// rows counters
private $good_rows_counter = 0;
private $bad_rows_counter = 0;
private $skipped_rows_counter = 0;
function __construct($input_file)
{
$this->input_file = $input_file;
try {
$this->object_reader = \PHPExcel_IOFactory::createReaderForFile($this->input_file);
$this->object_reader->setReadDataOnly(true); // we won't write to a file
$this->object_reader->canRead($this->input_file); // check that file is readable
$this->object_excel = $this->object_reader->load($this->input_file);
} catch (\PHPExcel_Reader_Exception $e) {
throw new BadFile($this->input_file, $e->getMessage(), $e->getCode());
}
$this->initValidators();
}
/**
* Init the validation manager
*/
private function initValidators()
{
$this->validation_manager = new Manager();
}
/**
* Assign validation rules
*
* Rules can be a string - will be exploded by | character to array
* Or it can be an array
* Each element can be a string - validator name or a closure (f($cell_value){ return bool; }) with custom validation logic
     * You can address columns by their title or by their number from 0 to N
*
* @param $rules
*/
public function setValidationRules($rules)
{
$this->validation_manager->setRules($rules);
}
/**
* Set closure to be called for each good row
*
* @param $closure
*/
public function setHandlerForGoodRow($closure)
{
$this->good_row_handler = $closure;
}
/**
* Set closure to be called for each bad row, bad means failed validation
*
* @param $closure
*/
public function setHandlerForBadRow($closure)
{
$this->bad_row_handler = $closure;
}
/**
* Execute reading data from the source file
*/
public function read()
{
// detect sheets
$sheets = $this->object_excel->getAllSheets();
// work for each sheet
foreach ($sheets as $sheet) {
$column_titles = $this->detectColumnTitles($sheet);
// let's iterate starting from second row (skip header's row)
foreach ($sheet->getRowIterator(2) as $row) {
$this->handleRow($row, $column_titles);
}
}
}
/**
* Detect what titles sheet has
* Every title has index like ["A" => "string title"]
*
* @param $sheet
*
* @return array
*/
private function detectColumnTitles($sheet)
{
$titles = [];
// get title of each column
// expect first line to contain titles
foreach ($sheet->getRowIterator(1, 1) as $head_row) {
// ok let's iterate over cells
foreach ($head_row->getCellIterator() as $i => $cell) {
$titles[$i] = $cell->getValue();
}
}
return $titles;
}
/**
* Validate row and call a handler
*
* @param $row
* @param $column_titles
*
* @return void
*/
private function handleRow($row, $column_titles)
{
$cells = [];
// populate cells values
foreach ($row->getCellIterator() as $i => $cell) {
$cell_value = $cell->getCalculatedValue();
if (isset($column_titles[$i])) {
$cells[$column_titles[$i]] = $cell_value;
}
}
// if all of the cells are nulls then skip this row
if (!count(array_filter($cells, function ($cell) { return !is_null($cell); }))) {
$this->skipped_rows_counter++;
return;
}
// now validate cell values
$bad_cell_results = [];
foreach ($cells as $title => $value) {
$result = $this->validation_manager->validate($title, $value);
if ($result->isFailed()) {
$bad_cell_results[$title] = $result;
}
}
// call handlers for good or bad rows
if (!count($bad_cell_results)) {
$this->good_rows_counter++;
if (is_callable($this->good_row_handler)) {
$this->good_row_handler->call($this, $cells);
}
} else {
$this->bad_rows_counter++;
if (is_callable($this->bad_row_handler)) {
$this->bad_row_handler->call($this, $cells, $bad_cell_results);
}
}
}
/**
* Return the counter of rows which passed validation
*/
public function getGoodRowsCount() {
return $this->good_rows_counter;
}
/**
* Return the counter of rows which failed validation
*/
public function getBadRowsCount() {
return $this->bad_rows_counter;
}
}<file_sep>/tests/TestInputFile.php
<?php
use Eximporter\Eximporter;
use PHPUnit\Framework\TestCase;
use Eximporter\Exceptions\BadFile;
class TestInputFile extends TestCase
{
public function testTriggerBadFileException()
{
$this->expectException(BadFile::class);
        // this file does not exist
$file = "./tests/resources/missedfile";
new Eximporter($file);
}
public function testNormalLoadAnyRealFile() {
// this file is not a valid spreadsheet
$file = "./tests/resources/text.file.any";
new Eximporter($file);
}
}<file_sep>/src/Validators/ValidatorInterface.php
<?php
namespace Eximporter\Validators;
/**
* Interface Validator
* Describes how every validator should handle validation
*
* @package Eximporter\Validators
*/
interface ValidatorInterface {
/**
* ValidatorInterface constructor.
*
* @param $name
*/
function __construct($name);
/**
* The method to validate any cell-value
*
* @param $value
*
* @return bool
*/
public function validate($value);
/**
     * Attach an argument to this validator.
     * For example, a rule string could look like "length:10,20", meaning the validator "length" receives the argument "10,20".
     *
     * @param mixed $argument
*
* @return void
*/
public function attachArgument($argument);
/**
* Return the name
*
* @return string
*/
public function getName();
}<file_sep>/tests/TestValidateFields.php
<?php
use Eximporter\Eximporter;
use PHPUnit\Framework\TestCase;
use Eximporter\Exceptions\BadFile;
class TestValidateFields extends TestCase
{
public function testRequiredValidator()
{
        // sample spreadsheet used for this test
$file = "./tests/resources/test_required_03.xlsx";
$importer = new Eximporter($file);
$importer->setValidationRules([
'description' => 'required',
]);
$importer->read();
$this->assertEquals($importer->getGoodRowsCount(), 2);
$this->assertEquals($importer->getBadRowsCount(), 1);
}
public function testRegexpValidator()
{
        // sample spreadsheet used for this test
$file = "./tests/resources/test_required_03.xlsx";
$importer = new Eximporter($file);
$importer->setValidationRules([
'title' => 'required|regexp:#^[a-zA-Z]{4}$#',
'description' => 'required',
]);
$importer->read();
$this->assertEquals($importer->getGoodRowsCount(), 1);
$this->assertEquals($importer->getBadRowsCount(), 2);
}
public function testCustomClosureValidator()
{
        // sample spreadsheet used for this test
$file = "./tests/resources/test_required_03.xlsx";
$importer = new Eximporter($file);
$importer->setValidationRules([
'title' => [
[
'custom_rule' => function ($value) {
return mb_substr($value, -1, 1) != 'e';
},
],
],
'description' => [
[
'custom_rule_2' => function ($value) {
return mb_strlen($value) >= 6;
},
],
],
]);
$importer->read();
$this->assertEquals($importer->getGoodRowsCount(), 1);
}
public function testCombinedValidators()
{
        // sample spreadsheet used for this test
$file = "./tests/resources/test_05.xlsx";
$importer = new Eximporter($file);
$importer->setValidationRules([
'title' => 'required',
'description' => 'required',
'amount' => 'regexp:#^[0-9]+$#',
]);
$importer->read();
$this->assertEquals($importer->getGoodRowsCount(), 4);
}
public function testCombined2Validators()
{
        // sample spreadsheet used for this test
$file = "./tests/resources/test_05.xlsx";
$importer = new Eximporter($file);
$importer->setValidationRules([
'title' => 'required',
'description' => 'required',
'amount' => ['regexp:#^[0-9]+$#', ['custom1' => function ($value) { return $value > 100; }]],
]);
$importer->read();
$this->assertEquals($importer->getGoodRowsCount(), 2);
}
public function testDetectFailedValidators()
{
        // sample spreadsheet used for this test
$file = "./tests/resources/test_05.xlsx";
$importer = new Eximporter($file);
$importer->setValidationRules([
'title' => 'required',
'description' => 'required',
'amount' => ['regexp:#^[0-9]+$#', ['custom1' => function ($value) { return $value > 100; }]],
]);
$importer->read();
}
}<file_sep>/src/Validators/Closure.php
<?php
namespace Eximporter\Validators;
/**
* Class Closure
 * Custom validator that encapsulates a given closure and behaves like any normal validator
*
* @package Eximporter\Validators
*/
class Closure implements ValidatorInterface {
private $callable = null;
private $name;
function __construct($name) {
$this->name = $name;
}
/**
* Get name of this validator
*
* @return mixed
*/
function getName() {
return $this->name;
}
/**
* Set custom closure
*
* @param callable $callable
*/
public function setCallable($callable) {
$this->callable = $callable;
}
public function validate($value)
{
return $this->callable->call($this,$value);
}
public function attachArgument($argument)
{
// not in use for this validator
}
}<file_sep>/src/Exceptions/MissedValidator.php
<?php
namespace Eximporter\Exceptions;
use Exception;
/**
* Class MissedValidator
* Detects validation rule with missed implementation
*
* @package Eximport\Exceptions
*/
class MissedValidator extends Exception
{
}<file_sep>/src/Exceptions/BadFile.php
<?php
namespace Eximporter\Exceptions;
use Exception;
/**
* Class BadFile
* Detects bad input file
*
* @package Eximport\Exceptions
*/
class BadFile extends Exception
{
protected $input_file;
public function __construct($input_file, $message, $code, Exception $previous = null)
{
$this->input_file = $input_file;
parent::__construct($message, $code, $previous);
}
/**
* Get input file which is not good
*
* @return string
*/
public function file() { return $this->input_file; }
}<file_sep>/src/Validators/Required.php
<?php
namespace Eximporter\Validators;
/**
* Class Required
* Validates string has length > 0
*
* @package Eximporter\Validators
*/
class Required implements ValidatorInterface {
private $name;
function __construct($name) {
$this->name = $name;
}
/**
* Get name of this validator
*
* @return mixed
*/
function getName() {
return $this->name;
}
public function validate($value)
{
// make sure it is a string
if(!$value) {
return false;
}
// trim trailing spaces
$value = trim(strval($value));
if(!mb_strlen($value)) {
return false;
}
return true;
}
public function attachArgument($argument)
{
// not in use for this validator
}
}<file_sep>/src/Validators/Result.php
<?php
namespace Eximporter\Validators;
/**
* Class Result
* Contains resulting data after validation - which validators passed and which failed
*
* @package Eximporter\Validators
*/
class Result
{
private $passed_validators = [];
private $failed_validators = [];
/**
* Add passed validator
*
* @param $validator_title
*/
function addPassed($validator_title)
{
$this->passed_validators[] = $validator_title;
}
/**
* Add failed validator
*
* @param $validator_title
*/
function addFailed($validator_title)
{
$this->failed_validators[] = $validator_title;
}
/**
* Detect if result has failed validators
*
* @return bool
*/
public function isFailed()
{
return count($this->failed_validators) > 0;
}
/**
* Return failed validators
*
* @return array
*/
public function getFailed()
{
return $this->failed_validators;
}
/**
* Return passed validator
*
* @return array
*/
public function getPassed()
{
return $this->passed_validators;
}
}<file_sep>/src/Validators/Manager.php
<?php
namespace Eximporter\Validators;
use Eximporter\Exceptions\MissedValidator;
/**
* Class Manager
* Contains all validation logic, validates things
*
* @package Eximporter\Validators
*/
class Manager
{
// a list of available validators
private $validators = [];
// a list of rules for each column title
private $validation_rules = [];
function __construct()
{
// built in validators
$this->validators['required'] = new Required('required');
$this->validators['regexp'] = new Regexp('regexp');
}
/**
* Assign rules to field names
*
* Rules can be a string - will be exploded by | character to array
* Or it can be an array
* Each element can be a string - validator name or a closure (f($cell_value){ return bool; }) with custom validation logic
     * You can address columns by their title or by their number from 0 to N or A to ZN
*
* @param $rules
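     *
     * Illustrative example (mirroring the project's tests; $manager is just a placeholder variable):
     *   $manager->setRules([
     *       'title'  => 'required|regexp:#^[a-zA-Z]+$#',
     *       'amount' => [['custom' => function ($value) { return $value > 100; }]],
     *   ]);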
*/
public function setRules($rules)
{
if (!is_array($rules)) {
throw new \InvalidArgumentException("Rules must be an array of field_title=>validators");
}
foreach($rules as $field=>$validators) {
if (is_string($validators)) {
$rules[$field] = explode("|", $validators);
}
}
$this->validation_rules = $rules;
}
/**
* Validate given $value
*
* @param string $title
* @param mixed $value
*
* @return Result
*/
public function validate($title, $value) {
// find a rule
$validators = $this->getValidatorsForTitle($title);
$result = $this->validateWith($value, $validators);
return $result;
}
/**
* Get all loaded validation rules for a given field title
*
* @param $title
* @return array
*/
private function getValidatorsForTitle($title) {
foreach($this->validation_rules as $field_title=>$validators) {
// always cast titles to lower case to be consistent
if(mb_strtolower($field_title) == mb_strtolower($title)) {
return $validators;
}
}
return [];
}
/**
* Validate value against given validator
*
* @param $value
* @param $validators
*
* @return Result
*/
private function validateWith($value, $validators) {
$result = new Result();
foreach($validators as $validator) {
$object = $this->resolveValidator($validator);
if($object->validate($value)) {
$result->addPassed($object->getName());
} else {
$result->addFailed($object->getName());
}
}
return $result;
}
/**
* Find implementation for validation rule
*
* @param $validator
*
* @return mixed
*/
private function resolveValidator($validator) {
// in case this is a callable
if(is_array($validator)) {
$callable_title = array_keys($validator)[0];
$callable = array_values($validator)[0];
if(!is_callable($callable)) {
throw new MissedValidator("Validator [".$callable_title."] was not resolved");
}
// this is a closure - call it
$object = new Closure($callable_title);
$object->setCallable($callable);
return $object;
}
// string can contain arguments like "regex:#[a-z]+#"
$items = explode(":",$validator);
$validator_name = $items[0];
$argument = isset($items[1])?$items[1]:null;
foreach($this->validators as $exist_validator_name=>$object) {
if($validator_name == $exist_validator_name) {
$validator_object = clone $object;
$validator_object->attachArgument($argument);
return $validator_object;
}
        }
        throw new MissedValidator("Validator [".$validator_name."] was not resolved");
    }
}<file_sep>/src/Validators/Regexp.php
<?php
namespace Eximporter\Validators;
/**
* Class Regexp
* Validates string against a pattern
*
* @package Eximporter\Validators
*/
class Regexp implements ValidatorInterface
{
protected $pattern = null;
private $name;
function __construct($name) {
$this->name = $name;
}
/**
* Get name of this validator
*
* @return mixed
*/
function getName() {
return $this->name;
}
public function validate($value)
{
if (is_null($this->pattern)) {
return false; // @todo probably we can throw an Exception here
}
return preg_match($this->pattern, $value);
}
/**
* Assign arguments for this validator
*
* @param $argument string
*/
public function attachArgument($argument)
{
$this->pattern = $argument;
}
}
|
0559aa0d8461beea14151a845281718cd939a657
|
[
"Markdown",
"PHP"
] | 13
|
Markdown
|
lezhnev74/Eximporter
|
a20b2f323c6d4a3c6e92cdab3d9cd55047675780
|
e2a25cdd125584e8b5cf5abf0153cfb0a9a1b147
|
refs/heads/master
|
<file_sep>//
// main.cpp
// apc-ex-02-4
//
// Created by <NAME> on 23.10.13.
// Copyright (c) 2013 <NAME>. All rights reserved.
//
#include <iostream>
#include <fstream>
#include <set>
#include <cerrno>
#include <cstring>    /* strerror */
#include <cstdlib>    /* exit */
#include <algorithm>  /* remove_copy_if */
#include <functional> /* ptr_fun */
#include <cctype>     /* ispunct */
#include <iterator>   /* back_inserter */
std::set<std::string> dictionary;
void readDictionary(const char *pPath);
void check(const char *pPath);
int main(int argc, const char * argv[])
{
if (argc < 3) {
std::cerr << "Too few arguments to start program." << std::endl;
std::cerr << "Usage: dictionary.txt text.txt" << std::endl;
exit(1);
}
    // read dictionary from file
readDictionary(argv[1]);
check(argv[2]);
return 0;
}
// read dictionary words into the set
void readDictionary(const char *pPath) {
std::ifstream infile;
infile.open(pPath);
if (!infile.good()) {
std::cerr << "An error occured during file read. File: " << pPath << ", Error: " << strerror(errno) << std::endl;
infile.close();
exit(2);
}
    std::string tmp;
    while (infile >> tmp) {
        std::string result;
        // remove punctuation
        std::remove_copy_if( tmp.begin(),
                            tmp.end(),
                            std::back_inserter(result),
                            std::ptr_fun<int, int>(&std::ispunct));
        dictionary.insert(result);
    }
infile.close();
}
// check if word is stored in the set
void check(const char *pPath) {
std::ifstream infile;
infile.open(pPath);
if (!infile.good()) {
std::cerr << "An error occured during file read. File: " << pPath << ", Error: " << strerror(errno) << std::endl;
infile.close();
exit(2);
}
    std::string tmp;
    while (infile >> tmp) {
        std::string result;
        // remove punctuation
        std::remove_copy_if( tmp.begin(),
                            tmp.end(),
                            std::back_inserter(result),
                            std::ptr_fun<int, int>(&std::ispunct));
        if (dictionary.find(result) == dictionary.end()) {
            // word not found
            std::cout << tmp << std::endl;
        }
    }
std::cout << "All words checked." << std::endl;
}
<file_sep>//
// Util.h
// apcpp-ex2
//
// Created by <NAME> on 07.10.13.
// Copyright (c) 2013 <NAME>. All rights reserved.
//
#ifndef __apcpp_ex2__Util__
#define __apcpp_ex2__Util__
int gcf(int a, int b); //ggT
#endif /* defined(__apcpp_ex2__Util__) */
<file_sep>//
// player.cpp
// apc-ex-04-5
//
// Created by <NAME> on 07.11.13.
// Copyright (c) 2013 <NAME>. All rights reserved.
//
#include "player.h"
template<typename F>
int player<F>::play(const F &pField) {
int column;
std::cout << "Type in a column where your stone should be placed: " << std::endl;
std::cin >> column;
return column;
}<file_sep>//
// Fraction.h
// Fraction
//
// Created by <NAME> on 19.09.13.
// Copyright (c) 2013 <NAME>. All rights reserved.
//
#ifndef __Fraction__Fraction__
#define __Fraction__Fraction__
#include "Util.h"
#include <iostream>
class Fraction {
public:
Fraction(int pCounter=0, int pDenominator=1);
const int get_counter();
void set_counter(int cntr);
const int get_denominator();
void set_denominator(int pDenominator);
float to_float();
Fraction operator+(Fraction &pFraction);
Fraction operator-(Fraction &pFraction);
Fraction operator*(Fraction &pFraction);
Fraction operator/(Fraction &pFraction);
Fraction operator>(Fraction &pFraction);
Fraction operator<(Fraction &pFraction);
operator float() const;
friend std::ostream &operator<<(std::ostream &pOutput, const Fraction &pFraction);
friend std::istream &operator>>(std::istream &pInput, Fraction &pFraction);
void reduce();
void validateFraction(Fraction *pFraction);
private:
int counter;
int denominator;
};
#endif /* defined(__Fraction__Fraction__) */
<file_sep>//
// playfield.h
// apc-ex-06-3
//
// Created by <NAME> on 28/11/13.
// Copyright (c) 2013 <NAME>. All rights reserved.
//
#ifndef __apc_ex_06_3__playfield__
#define __apc_ex_06_3__playfield__
#include <iostream>
#endif /* defined(__apc_ex_06_3__playfield__) */
<file_sep>//
// main.cpp
// apc-ex-04-4
//
// Created by <NAME> on 31.10.13.
// Copyright (c) 2013 <NAME>. All rights reserved.
//
#include <iostream>
#include <cmath>
#include "combineops_t.h"
struct division: public std::binary_function<double, double, double> {
public:
double operator()(const float &a, const float &b) {
return (a/b);
}
};
struct f1: public std::unary_function<double, double> {
public:
double operator()(const int &x) {
return (pow(x, 3));
}
};
struct f2: public std::unary_function<double, double> {
public:
double operator()(const int &x) {
return (pow(x+3, 2));
}
};
int main() {
// calculating a division of two functions:
// function f1: x^3
// function f2: (x+3)^2
// class combineops combines two unary functions (functions with only one argument).
// resulttype is a binary function with f1(x) and f2(x) as arguments.
// Op1: f1
// Op2: f2
// BinOp: f1/f2
division divOb;
f1 function1;
f2 function2;
combineops_t<division, f1, f2> result(divOb, function1, function2);
for (double d = 0.0; d<9; d+=2) {
std::cout << "(" << d << "^3) / ([" << d << "+3]^2), result: " << result(d) << std::endl;
}
return 0;
}<file_sep>//
// main.cpp
// apc-ex-03-1
//
// Created by <NAME> on 23.10.13.
// Copyright (c) 2013 <NAME>. All rights reserved.
//
#include <iostream>
class A
{
public:
virtual int foo() { return 1; }
};
class B : public A
{
public:
virtual int foo() { return 2; }
};
/*
 Virtual means that the behaviour can be overridden in a child class.
 The program will print 2, not 1.
 If the compiler naively inlined the call it would return 1, because it would
 have to assume that the type of *a is A rather than B.
 So the compiler will not inline the call unless it can safely determine the dynamic type of *a.
 In this example the compiler may be able to prove that virtual dispatch is not needed;
 this really depends on the compiler and the optimization level.
 Only in the cases where the compiler can safely assume that virtual dispatch is not needed
 does the inline keyword make sense.
 Even if you declare a function with the inline keyword, the function may not be inlined.
 In general, adding the inline keyword manually is rarely a good idea:
 modern compilers are good at inlining functions automatically when necessary.
 Adding inline can in some cases even decrease performance, so it is good practice not to abuse it.
 Stackoverflow (23.10.13): http://stackoverflow.com/questions/14122637/why-that-pure-virtual-function-cannot-be-inline
*/
int main(void)
{
B b;
A *a = &b;
std::cout << a->foo() << std::endl;
return 0;
}
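// Illustrative addition (not part of the original exercise): when the exact static type
// of the object is known and no pointer or reference indirection is involved, the compiler
// can devirtualize the call and bind (and possibly inline) B::foo() directly.
int direct_call(void)
{
    B b2;
    return b2.foo(); // static and dynamic type are both B: no virtual dispatch is needed here
}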
<file_sep>//
// my_player.h
// apc-ex-06-4
//
// Created by <NAME> on 28/11/13.
// Copyright (c) 2013 <NAME>. All rights reserved.
//
#ifndef __apc_ex_06_4__my_player__
#define __apc_ex_06_4__my_player__
#include <iostream>
#include "player.h"
#include "my_playfield.h"
class my_player : public player {
public:
int play(const playfield &field) { return 0; } // override the virtual function of super class player
int play(const my_playfield &field);
~my_player() {}
};
#endif /* defined(__apc_ex_06_4__my_player__) */
<file_sep>all: clean connect4
connect4: main.o game.o ai.o my_player.o my_playfield.o
g++ main.o game.o ai.o my_player.o my_playfield.o -o connect4
main.o: main.cpp
g++ -c main.cpp
game.o: game.cpp
g++ -c game.cpp
ai.o: ai.cpp
g++ -c ai.cpp
my_player.o: my_player.cpp
g++ -c my_player.cpp
my_playfield.o: my_playfield.cpp
g++ -c my_playfield.cpp
clean:
rm -rf *.o connect4<file_sep>//
// my_plafield.cpp
// apc-ex-06-4
//
// Created by <NAME> on 28/11/13.
// Copyright (c) 2013 <NAME>. All rights reserved.
//
#include "my_playfield.h"
my_playfield::my_playfield()
{
// initalize vec with 0's
std::vector<int> vec;
vec.assign(playfield::width, 0);
rep.assign(playfield::height, vec);
}
my_playfield::~my_playfield(){ }
int my_playfield::stoneat(int pColumn, int pRow) const
{
return rep[pRow][pColumn];
}
void my_playfield::drawPlayfield() {
for (int i=0; i<rep.size(); i++) {
std::cout << " | ";
for (int j=0; j<rep.at(i).size(); j++) {
if (rep.at(i).at(j) == 0) {
std::cout << " ";
} else {
std::cout << rep.at(i).at(j) << " ";
}
}
std::cout << "|" << std::endl;
}
std::cout << " -----------------" << std::endl;
std::cout << " | 0|1|2|3|4|5|6 |" << std::endl;
std::cout << " -----------------" << std::endl;
}
void my_playfield::insertStone(int pPlayer, int pColumn) {
// check range
if (pColumn > playfield::width-1) {
std::cerr << "Column is out of the playfield! Stone could not be inserted." << std::endl;
exit(EXIT_FAILURE);
}
    // check whether the desired column is full or not
    if (isFull(pColumn)) {
        std::cerr << "This column is already full. Choose another one:" << std::endl;
        int input;
        std::cin >> input;
        insertStone(pPlayer, input);
        return; // the stone was placed by the recursive call; do not also drop one into the full column
    }
// start on the top of the playfield
int tmpPlace = 0;
// go through all rows
for (int i=0; i<rep.size(); i++) {
if (rep.at(i).at(pColumn) == 0) {
// set current row as free place
tmpPlace = i;
}
}
rep.at(tmpPlace).at(pColumn) = pPlayer;
}
bool my_playfield::isWinning(int pPlayer) {
// check horizontal places
for (int i=0; i<rep.size(); i++) { // height
for (int j=0; j<rep.at(i).size(); j++) { // width
// check horizontal places
if (j < rep.at(i).size()-3) {
if ((rep.at(i).at(j) == pPlayer)
&& (rep.at(i).at(j+1) == pPlayer)
&& (rep.at(i).at(j+2) == pPlayer)
&& (rep.at(i).at(j+3) == pPlayer)) {
// found 4 stones of pPlayer
return true;
}
}
// check vertical places
if (i < rep.size()-3) {
if ((rep.at(i).at(j) == pPlayer)
&& (rep.at(i+1).at(j) == pPlayer)
&& (rep.at(i+2).at(j) == pPlayer)
&& (rep.at(i+3).at(j) == pPlayer)) {
// found 4 stones of pPlayer
return true;
}
}
// check diagonal way, starting in top left and go to bottom rights
if ((i < rep.size()-3) && (j < rep.at(i).size()-3)) {
if ((rep.at(i).at(j) == pPlayer)
&& (rep.at(i+1).at(j+1) == pPlayer)
&& (rep.at(i+2).at(j+2) == pPlayer)
&& (rep.at(i+3).at(j+3) == pPlayer)) {
// found 4 stones of pPlayer
return true;
}
}
// check diagonal way, starting in top right and go to bottom left
if ((i < rep.size()-3) && (j < rep.at(i).size()-3)) {
if ((rep.at(i+3).at(j) == pPlayer)
&& (rep.at(i+2).at(j+1) == pPlayer)
&& (rep.at(i+1).at(j+2) == pPlayer)
&& (rep.at(i).at(j+3) == pPlayer)) {
// found 4 stones of pPlayer
return true;
}
}
}
}
// not winning
return false;
}
void my_playfield::clearPlayfield() {
for (int i=0; i<rep.size(); i++) {
        for (int j=0; j<rep.at(i).size(); j++) {
rep.at(i).at(j) = 0;
}
}
}
bool my_playfield::isFull() {
bool isFull = true;
// going through the whole playfield
for (int i=0; i<rep.size(); i++) {
for (int j=0; j<rep.at(i).size(); j++) {
if (!isFull) {
return isFull;
}
if (rep.at(i).at(j) != 0) {
isFull = true;
} else {
isFull = false;
}
}
}
return isFull;
}
bool my_playfield::isFull(int pColumn) {
bool isFull = true;
// going through the whole column
for (int i=0; i<rep.size(); i++) {
if (!isFull) {
return isFull;
}
if (rep.at(i).at(pColumn) != 0) {
isFull = true;
} else {
isFull = false;
}
}
return isFull;
}<file_sep>//
// main.cpp
// apc-ex-02-3
//
// Created by <NAME> on 12.10.13.
// Copyright (c) 2013 <NAME>. All rights reserved.
//
#include <iostream>
#include <vector>
#include <list>
#include <stdlib.h> /* atoi */
void printUsage();
void operate(const char* pOperation);
void printStack();
std::vector<float> stack;
std::string input;
int main(int argc, const char * argv[])
{
// rpn loop
while (input != "q") {
std::cout << "Type in your command:" << std::endl;
std::cin >> input;
if (input == "d") {
// erase number on the top of the stack
if (stack.size() > 0) {
                stack.pop_back();
} else {
std::cout << "Your stack is already empty." << std::endl;
}
} else if(input == "n") {
// adding new number to stack
std::cout << "Type your number to add to the stack: " << std::endl;
float number;
// check input
if (std::cin >> number) {
stack.push_back(number);
} else {
std::cerr << "Type in a valid number next time." << std::endl;
std::cin.clear();
exit(EXIT_FAILURE); // shortcoming...
}
} else if (((input == "+") || (input == "-") || (input == "/") || (input == "*")) && (stack.size() > 1)) {
if (input == "+") {
// addition
std::vector<float>::iterator it = stack.end();
--it;
float last = *it;
stack.pop_back();
--it;
float second_last = *it;
stack.pop_back();
float res = last + second_last;
std::cout << "the result is: " << res << std::endl;
stack.push_back(res);
} else if (input == "-") {
// subtraction
std::vector<float>::iterator it = stack.end();
--it;
float last = *it;
stack.pop_back();
--it;
float second_last = *it;
stack.pop_back();
float res = second_last - last;
std::cout << "the result is: " << res << std::endl;
stack.push_back(res);
} else if (input == "*") {
                // multiplication
std::vector<float>::iterator it = stack.end();
--it;
float last = *it;
stack.pop_back();
--it;
float second_last = *it;
stack.pop_back();
float res = second_last * last;
std::cout << "the result is: " << res << std::endl;
stack.push_back(res);
} else if (input == "/") {
// division
std::vector<float>::iterator it = stack.end();
--it;
float last = *it;
stack.pop_back();
--it;
float second_last = *it;
stack.pop_back();
float res = second_last / last;
std::cout << "the result is: " << res << std::endl;
stack.push_back(res);
} else {
// command not specified
std::cout << "Your command is not available. Choose an athoer one." << std::endl;
std::cout << "Available commands are: +,-,*,/." << std::endl;
}
} else if (input != "+" || input != "-" || input != "*" || input != "/") {
// command not specified
if (input != "q") {
std::cout << "Your command is not available. Choose an athoer one." << std::endl;
std::cout << "Available commands are: q, n, d." << std::endl;
}
} else {
std::cerr << "Stack has only one element. Could not operate." << std::endl;
}
printStack();
}
return 0;
}
void printStack() {
std::vector<float>::iterator pbegin = stack.begin();
std::vector<float>::iterator pend = stack.end();
// shortcoming: view of numbers with different number of digits is a bit messy...
for (; pbegin != pend; ++pbegin) {
std::cout << "| " << *pbegin << " |" << std::endl;
}
}
<file_sep>//
// main.cpp
// apc-ex-04-5
//
// Created by <NAME> on 07.11.13.
// Copyright (c) 2013 <NAME>. All rights reserved.
//
#include <iostream>
#include "playfield.h"
#include "player.h"
#include "player.cpp"
#include "game.h"
#include "game.cpp"
int main(int argc, const char * argv[])
{
player<playfield> pl1;
player<playfield> pl2;
playfield field;
game<player<playfield>, player<playfield> > g(pl1, pl2);
g.run(field);
return 0;
}
<file_sep>//
// main.cpp
// apc-ex-02-3
//
// Created by <NAME> on 12.10.13.
// Copyright (c) 2013 <NAME>. All rights reserved.
//
#include <iostream>
#include "RPN.h"
int main(int argc, const char * argv[])
{
// Note: Xcode is using absolute paths to files. If you are not using Xcode you may have to change
// this path...
RPN rpn = RPN("/Users/Raphael/Dropbox/uzh/uzh-13-fall/uzh-apc/uzh-apc-ex/apc-ex-03-3/apc-ex-03-3/stack.txt");
rpn.run();
return 0;
}
<file_sep>//
// PVector.h
// apc-ex-03-2
//
// Created by <NAME> on 23.10.13.
// Copyright (c) 2013 <NAME>. All rights reserved.
//
#ifndef __apc_ex_03_2__PVector__
#define __apc_ex_03_2__PVector__
#include <iostream>
#include <vector>
#include <fstream>
#include <cerrno>
template<class T> class PVector {
private:
std::string mFilename;
std::vector<T> mVector;
void readVector();
void writeVector();
public:
PVector(std::string pName) : mFilename(pName) {
readVector();
}
~PVector() {
writeVector();
}
void push_back(const T &pElement) {
mVector.push_back(pElement);
}
void pop_back() {
mVector.pop_back();
}
unsigned long size() {
return mVector.size();
}
T at(const int pPos) {
return mVector.at(pPos);
}
};
#endif /* defined(__apc_ex_03_2__PVector__) */
<file_sep>//
// RPN.h
// apc-ex-03-3
//
// Created by <NAME> on 28.10.13.
// Copyright (c) 2013 <NAME>. All rights reserved.
//
#ifndef __apc_ex_03_3__RPN__
#define __apc_ex_03_3__RPN__
#include <iostream>
#include <vector>
#include <list>
#include <stdlib.h> /* atoi */
#include "math.h"
#include "PVector.h"
#include "PVector.cpp"
template<typename T> class RPN {
private:
std::string input_;
typename std::vector<T>::iterator end_;
T first_;
T second_;
T my_min_val;
PVector<T> stack_ = PVector<T>();
public:
RPN(std::string pFilename) {
stack_.setFilename(pFilename);
end_ = stack_.end();
};
~RPN() {};
void add();
void remove();
void compute_min();
void mymin(const T &a);
void addition();
void subtraction();
void mulitplication();
void division();
void printRes(const T pRes);
void printStack();
inline bool isEmpty() { return stack_.empty(); }
inline bool tooShort() { return (stack_.size() < 2); }
void run();
};
#endif /* defined(__apc_ex_03_3__RPN__) */
<file_sep>all: clean pvector
pvector: main.o PVector.o PSet.o
g++ main.o PVector.o PSet.o -o pvector
main.o: main.cpp
g++ -c main.cpp
PVector.o: PVector.cpp
g++ -c PVector.cpp
PSet.o: PSet.cpp
g++ -c PSet.cpp
clean:
rm -rf *.o pvector<file_sep>//
// PVector.h
// apc-ex-03-2
//
// Created by <NAME> on 23.10.13.
// Copyright (c) 2013 <NAME>. All rights reserved.
//
#ifndef __apc_ex_03_2__PVector__
#define __apc_ex_03_2__PVector__
#include <iostream>
#include <vector>
#include <fstream>
#include <cerrno>
#include "Fraction.h"
template<typename T>
struct persister {
static void read(std::ifstream &i, T &elem) {
i >> elem;
}
static void write(std::ofstream &o, const T &elem) {
o << elem << " ";
}
};
template<>
struct persister<std::string> {
static void read(std::ifstream &i, std::string &elem) {
std::getline(i, elem);
}
static void write(std::ofstream &o, const std::string &elem) {
o << elem;
}
};
template<>
struct persister<Fraction> {
static void read(std::ifstream &i, Fraction &elem) {
i >> elem;
}
static void write(std::ofstream &o, const Fraction &elem) {
o << elem;
}
};
template<typename T, typename P=persister<T> >
class PVector {
typedef P persister;
typedef typename std::vector<T>::iterator iterator;
private:
std::string mFilename;
std::vector<T> mVector;
void readVector();
void writeVector();
public:
PVector() {
mFilename = "";
}
PVector(std::string pName) : mFilename(pName) {
readVector();
}
~PVector() {
writeVector();
}
void push_back(const T &pElement) {
mVector.push_back(pElement);
}
void pop_back() {
mVector.pop_back();
}
unsigned long size() {
return mVector.size();
}
T at(const int pPos) {
return mVector.at(pPos);
}
void erase(typename std::vector<T>::iterator pPosition) {
mVector.erase(pPosition);
}
typename std::vector<T>::iterator begin() {
return mVector.begin();
}
typename std::vector<T>::iterator end() {
return mVector.end();
}
typename std::vector<T>::reverse_iterator rbegin() {
return mVector.rbegin();
}
typename std::vector<T>::reverse_iterator rend() {
return mVector.rend();
}
};
#endif /* defined(__apc_ex_03_2__PVector__) */
<file_sep>//
// main.cpp
// apc-ex-06-4
//
// Created by <NAME> on 28/11/13.
// Copyright (c) 2013 <NAME>. All rights reserved.
//
#include <iostream>
#include "my_player.h"
#include "game.h"
#include "game.cpp"
#include "ai.h"
int main(int argc, const char * argv[])
{
my_player pl1;
//my_player pl2;
ai pl2;
ai pl3;
my_playfield field;
game<my_player, ai> game(pl1, pl2);
//game<ai, ai> game(pl2, pl3);
game.run(field);
return 0;
}
<file_sep>//
// RPN.cpp
// apc-ex-03-3
//
// Created by <NAME> on 28.10.13.
// Copyright (c) 2013 <NAME>. All rights reserved.
//
#include "RPN.h"
using namespace std;
void RPN::run() {
while (input_ != "q") {
cout << "Type in your command:" << endl;
cin >> input_;
// erase the number on the top of the stack
if (input_ == "d") {
if (pvector_stack_.size() > 0) {
pvector_stack_.erase(--pvector_stack_.end());
} else {
cout << "Your stack is already empty" << endl;
}
} else if(input_ == "n") {
cout << "Type your number to add to the stack: " << endl;
float number;
cin >> number;
pvector_stack_.push_back(number);
} else if (input_ == "+" || input_ == "-" || input_ == "/" || input_ == "*") {
if (pvector_stack_.size() < 2) {
cout << "Your stack is already empty" << endl;
} else {
if (input_ == "+") {
std::vector<float>::iterator it = pvector_stack_.end();
--it;
float last = *it;
pvector_stack_.pop_back();
--it;
float second_last = *it;
pvector_stack_.pop_back();
float res = last + second_last;
cout << "the result is: " << res << endl;
pvector_stack_.push_back(res);
} else if (input_ == "-") {
std::vector<float>::iterator it = pvector_stack_.end();
--it;
float last = *it;
pvector_stack_.pop_back();
--it;
float second_last = *it;
pvector_stack_.pop_back();
float res = second_last - last;
cout << "the result is: " << res << endl;
pvector_stack_.push_back(res);
} else if (input_ == "*") {
std::vector<float>::iterator it = pvector_stack_.end();
--it;
float last = *it;
pvector_stack_.pop_back();
--it;
float second_last = *it;
pvector_stack_.pop_back();
float res = second_last * last;
cout << "the result is: " << res << endl;
pvector_stack_.push_back(res);
} else if (input_ == "/") {
std::vector<float>::iterator it = pvector_stack_.end();
--it;
float last = *it;
pvector_stack_.pop_back();
--it;
float second_last = *it;
pvector_stack_.pop_back();
float res = second_last / last;
cout << "the result is: " << res << endl;
pvector_stack_.push_back(res);
} else {
cout << "Your command is not available. Choose another one." << endl;
}
}
}
this->printStack();
}
}
void RPN::printStack() {
std::vector<float>::reverse_iterator pbegin = pvector_stack_.rbegin();
std::vector<float>::reverse_iterator pend = pvector_stack_.rend();
for (; pbegin != pend; ++pbegin) {
std::cout << "| " << *pbegin << " |" << std::endl;
}
std::cout << " ---" << std::endl;
}
<file_sep>//
// variant2.cpp
// apc-ex-05-1
//
// Created by <NAME> on 12/11/13.
// Copyright (c) 2013 <NAME>. All rights reserved.
//
#include "variant2.h"
<file_sep>//
// game.h
// apc-ex-04-5
//
// Created by <NAME> on 07.11.13.
// Copyright (c) 2013 <NAME>. All rights reserved.
//
#ifndef __apc_ex_04_5__game__
#define __apc_ex_04_5__game__
#include <iostream>
#include "playfield.h"
#include "player.h"
template<typename P1, typename P2>
class game {
private:
playfield playField;
bool isRunning = false;
P1 player1;
P2 player2;
public:
game(P1 &pPlayer1, P2 &pPlayer2);
~game() {};
void run(playfield &pField);
};
#endif /* defined(__apc_ex_04_5__game__) */
<file_sep>all: clean exec
exec: fraction_exec
./fraction_exec
fraction_exec: fraction-test.o Fraction.o Util.o
g++ fraction-test.o Fraction.o Util.o -o fraction_exec
fraction-test.o: fraction-test.cpp
g++ -c fraction-test.cpp
Fraction.o: Fraction.cpp
g++ -c Fraction.cpp
Util.o: Util.cpp
g++ -c Util.cpp
clean:
rm -rf fraction-test.o Fraction.o Util.o fraction_exec<file_sep>//
// Fraction.cpp
// Fraction
//
// Created by <NAME> on 19.09.13.
// Copyright (c) 2013 <NAME>. All rights reserved.
//
#include "Fraction.h"
using namespace std;
Fraction::Fraction(int pCounter, int pDenominator) : counter(pCounter), denominator(pDenominator) {
validateFraction(this);
}
const int Fraction::get_counter() {
return counter;
}
void Fraction::set_counter(int cntr) {
counter = cntr;
}
const int Fraction::get_denominator() {
return denominator;
}
void Fraction::set_denominator(int pDenominator) {
denominator = pDenominator;
validateFraction(this);
}
float Fraction::to_float() {
return float(counter)/float(denominator);
}
Fraction Fraction::operator+(Fraction &pFraction) {
Fraction tmp;
// crosswise multiply
tmp.counter = (counter * pFraction.denominator) + (pFraction.counter * denominator);
tmp.denominator = (denominator * pFraction.denominator);
tmp.reduce();
return tmp;
}
Fraction Fraction::operator-(Fraction &pFraction) {
Fraction tmp;
// crosswise multiply
tmp.counter = (counter * pFraction.denominator) - (pFraction.counter * denominator);
tmp.denominator = (denominator * pFraction.denominator);
tmp.reduce();
return tmp;
}
Fraction Fraction::operator*(Fraction &pFraction) {
Fraction tmp;
// crosswise multiply
tmp.counter = (counter * pFraction.counter);
tmp.denominator = (denominator * pFraction.denominator);
tmp.reduce();
return tmp;
}
Fraction Fraction::operator/(Fraction &pFraction) {
Fraction tmp;
tmp.counter = (counter * pFraction.denominator);
tmp.denominator = (denominator * pFraction.counter);
tmp.reduce();
return tmp;
}
Fraction Fraction::operator>(Fraction &pFraction) {
if (this->to_float()>pFraction.to_float()) {
return pFraction;
}
return *this;
}
Fraction Fraction::operator<(Fraction &pFraction) {
if (this->to_float()<pFraction.to_float()) {
return *this;
}
return pFraction;
}
Fraction::operator float() const {
return float(counter)/float(denominator);
}
std::ostream &operator<<(std::ostream &pOutput, const Fraction &pFraction) {
pOutput << "(" << pFraction.counter << "/" << pFraction.denominator << ")";
return pOutput;
}
std::istream &operator>>(std::istream &pInput, Fraction &pFraction) {
char tmp0, tmp1, tmp2;
pInput >> tmp0;
pInput >> pFraction.counter;
pInput >> tmp1;
pInput >> pFraction.denominator;
pInput >> tmp2;
pFraction.reduce();
return pInput;
}
/*
* helper methods
*/
void Fraction::reduce() {
int i = counter;
// gcf is computed on absolute values; this has no effect on the result
if(i<0) {
i *= -1;
}
int j = denominator;
if(j<0) {
j *= -1;
}
int res = gcf(i, j);
// reduce fraction
counter /= res;
denominator /= res;
}
void Fraction::validateFraction(Fraction *pFraction) {
if(pFraction->get_denominator() == 0) {
throw "Divide by zero is not allowed according to general laws of math.";
}
}
<file_sep>//
// combineops_t.cpp
// apc-ex-04-4
//
// Created by <NAME> on 31.10.13.
// Copyright (c) 2013 <NAME>. All rights reserved.
//
#include "combineops_t.h"
<file_sep>//
// declaration-definition.cpp
// apc-ex-02-1
//
// Created by <NAME> on 13/11/13.
// Copyright (c) 2013 <NAME>. All rights reserved.
//
#include "declaration-definition.h"
int count = 1; // declaration and initialising of a new variable
char *name = "It's me."; // declaration and initialising of a new pointer. Compiler knows where it points.
char *prog[] = {"echo", "hello", "world!", NULL }; // declaration and initialising of a new array. Compiler knows where it points.
void swap(int &a, int &b) { // implementation of a method. Requires already defined integers as arguments
int c = a;
a = b;
b = c;
}
<file_sep>//
// PVector.cpp
// apc-ex-03-2
//
// Created by <NAME> on 23.10.13.
// Copyright (c) 2013 <NAME>. All rights reserved.
//
#include "PVector.h"
template<typename T>
void PVector<T>::readVector() {
// Note: there must be a new line at the end of stack.txt to work correctly
mVector.clear();
std::ifstream infile(mFilename.c_str());
if (!infile.good()) {
std::cerr << "An error occured during file read. File: " << mFilename << " Error: " << strerror(errno) << std::endl;
std::cerr << "Note: If you aren't using Xcode you may have to change the filepaths in 'main.cpp'. Thats because Xcode is using absolute paths to files..." << std::endl;
exit(1);
}
for (;;) {
T x;
infile >> x;
if (!infile.good()) {
break;
}
mVector.push_back(x);
}
}
template<typename T>
void PVector<T>::writeVector() {
std::ofstream outfile(mFilename.c_str(), std::ios::trunc);
typename std::vector<T>::iterator first = mVector.begin();
typename std::vector<T>::iterator last = mVector.end();
std::cout << "------" << std::endl;
while (first != last) {
std::cout << "writing to file " << *first << std::endl;
outfile << *first++ << " ";
}
}
<file_sep>//
// PVector.cpp
// apc-ex-03-2
//
// Created by <NAME> on 23.10.13.
// Copyright (c) 2013 <NAME>. All rights reserved.
//
#include "PVector.h"
template<typename T, typename P>
void PVector<T, P>::readVector() {
mVector.clear();
std::ifstream infile(mFilename.c_str());
if (!infile.good()) {
std::cerr << "An error occured during file read. File: " << mFilename << " Error: " << strerror(errno) << std::endl;
exit(1);
}
for (;;) {
T x;
persister::read(infile, x);
if (!infile.good()) {
break;
}
mVector.push_back(x);
}
}
template<typename T, typename P>
void PVector<T, P>::writeVector() {
std::ofstream outfile(mFilename.c_str(), std::ios::ate | std::ios::app);
iterator first = mVector.begin();
iterator last = mVector.end();
while (first != last) {
persister::write(outfile, *first++);
}
}
<file_sep>//
// declaration-definition.h
// apc-ex-02-1
//
// Created by <NAME> on 13/11/13.
// Copyright (c) 2013 <NAME>. All rights reserved.
//
#ifndef __apc_ex_02_1__declaration_definition__
#define __apc_ex_02_1__declaration_definition__
#include <iostream>
char ch; // declaration of a new variable (which is also a definition)
std::string s; // declaration of a new variable (which is also a definition)
extern int error_number; // extern tells the compiler that the variable is defined somewhere else, so it doesn't complain about it being undefined
static double sq(double); // static function declaration: the compiler knows the name and argument type; static gives it internal linkage
const double pi = 3.14159; // declaration and definition of a const variable (a const must be initialised)
struct fraction { // declaration of a new struct
int c;
int d;
};
extern "C" void c_swap(int *a, int *b); // c++ is using method name and information about the arguments -> using c linkage of compiler
// -> functions can not be overloaded in C ->
// C++ compiler does not add argument/paramter type information to the name used for linkage
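// Illustration (not part of the original exercise; the name c_print is made up):
// with C linkage the two declarations below would collide on the same unmangled
// symbol name, whereas with C++ linkage they would be distinct overloads.
//
//   extern "C" void c_print(int);
//   extern "C" void c_print(double); // error: conflicts with c_print(int)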
double sqrt(double); // compiler knows method name and argument type used for linkage
namespace NS { // declaring a integer variable only in the scope of the namespace NS
int a;
}
struct user; // declaring a new struct.
#endif /* defined(__apc_ex_02_1__declaration_definition__) */
<file_sep>//
// playfield.h
// apc-ex-04-5
//
// Created by <NAME> on 07.11.13.
// Copyright (c) 2013 <NAME>. All rights reserved.
//
#ifndef __apc_ex_04_5__playfield__
#define __apc_ex_04_5__playfield__
#include <iostream>
#include <vector>
class playfield {
public:
// the size of the field
const static int width = 7;
const static int height = 6;
// these elements are used to indicate whether a given position
// in the playing field is taken by a given player
const static int none = 0;
const static int player1 = 1;
const static int player2 = 2;
playfield();
~playfield();
// return the stone (none/player1/player2) at the position(x,y)
// 0 <= x <= width
// 0 <= y <= height
// stoneat(0,0) ................ top left
// stoneat(width-1,height-1) ... bottom right
// if we insert a stone in a new game in column i,
// it lands at (i,height-1)
// implementation may be changed, interface not
int stoneat(int pColumn, int pRow) const;
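// Illustration only: on an empty field, insertStone(player1, 3) places a stone
// so that stoneat(3, height-1) == player1, while stoneat(3, 0) is still none.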
// draws playfield to std::cout
void drawPlayfield();
// inserts a stone for player pPlayer at a desired column
void insertStone(int pPlayer, int pColumn);
// checks if player pPlayer is winning
bool isWinning(int pPlayer);
// clear playfield
void clearPlayfield();
// check if playfield is full
bool isFull();
// check if pColumn is full
bool isFull(int pColumn);
protected:
// the internal representation of the field.
std::vector<std::vector<int> > rep;
};
#endif /* defined(__apc_ex_04_5__playfield__) */
<file_sep>//
// PVector.h
// apc-ex-03-2
//
// Created by <NAME> on 23.10.13.
// Copyright (c) 2013 <NAME>. All rights reserved.
//
#ifndef __apc_ex_03_2__PVector__
#define __apc_ex_03_2__PVector__
#include <iostream>
#include <vector>
#include <fstream>
#include <cerrno>
template<class T> class PVector {
private:
std::string mFilename;
std::vector<T> mVector;
void readVector();
void writeVector();
public:
PVector() {
mFilename = "";
}
PVector(std::string pName) : mFilename(pName) {
readVector();
}
~PVector() {
writeVector();
}
void setFilename(std::string pFilename) {
mFilename = pFilename;
readVector();
}
void push_back(const T &pElement) {
mVector.push_back(pElement);
}
void pop_back() {
mVector.pop_back();
}
unsigned long size() {
return mVector.size();
}
T at(const int pPos) {
return mVector.at(pPos);
}
void erase(typename std::vector<T>::iterator pPosition) {
mVector.erase(pPosition);
}
typename std::vector<T>::iterator begin() {
return mVector.begin();
}
typename std::vector<T>::iterator end() {
return mVector.end();
}
typename std::vector<T>::reverse_iterator rbegin() {
return mVector.rbegin();
}
typename std::vector<T>::reverse_iterator rend() {
return mVector.rend();
}
};
#endif /* defined(__apc_ex_03_2__PVector__) */
<file_sep>//
// main.cpp
// apc-ex-03-4
//
// Created by <NAME> on 28.10.13.
// Copyright (c) 2013 <NAME>. All rights reserved.
//
#include <iostream>
#include "RPN.h"
#include "RPN.cpp"
int main(int argc, const char * argv[])
{
// int
RPN<int> rpn = RPN<int>("/Users/Raphael/Dropbox/uzh/uzh-13-fall/uzh-apc/uzh-apc-ex/apc-ex-04-3/apc-ex-04-3/stack-int.txt");
rpn.run();
// Fraction
std::cout << "------------------------------------" << std::endl;
std::cout << "now you are using rpn with Fractions" << std::endl;
RPN<Fraction> rpn2 = RPN<Fraction>("/Users/Raphael/Dropbox/uzh/uzh-13-fall/uzh-apc/uzh-apc-ex/apc-ex-04-3/apc-ex-04-3/stack.txt");
rpn2.run();
return 0;
}
<file_sep>all: clean dictionary
dictionary: main.o
g++ main.o -o dictionary
main.o: main.cpp
g++ -c main.cpp
clean:
rm -rf main.o dictionary<file_sep>//
// playfield.h
// apc-ex-06-4
//
// Created by <NAME> on 28/11/13.
// Copyright (c) 2013 <NAME>. All rights reserved.
//
#ifndef __apc_ex_06_4__playfield__
#define __apc_ex_06_4__playfield__
class playfield {
public:
// the size of the field
const static int width=7;
const static int height=6;
// the elements stored at individual field positions
const static int none=0;
const static int player1=1;
const static int player2=2;
// returns the stone (none/player1/player2) at the position
// 0 <= x <= width
// 0 <= y <= height
// stoneat(0,0) ................ top left
// stoneat(width-1,height-1) ... bottom right
// if we insert a stone in a new game in column i,
// it lands at (i,height-1)
virtual int stoneat(int x, int y) const = 0;
virtual ~playfield() {}
};
#endif /* defined(__apc_ex_06_4__playfield__) */
<file_sep>//
// main.cpp
// apc-ex-03-2
//
// Created by <NAME> on 23.10.13.
// Copyright (c) 2013 <NAME>. All rights reserved.
//
#include <iostream>
#include "PVector.h"
#include "PVector.cpp" // because of templates
int main(int argc, const char * argv[])
{
// Test Driver
// Note: Xcode is using absolute paths to files...
PVector<int> pvInt = PVector<int>("/Users/Raphael/Dropbox/uzh/uzh-13-fall/uzh-apc/uzh-apc-ex/apc-ex-03-2/apc-ex-03-2/pv-int.txt");
pvInt.push_back(2);
pvInt.push_back(22222223);
PVector<float> pvFloat = PVector<float>("/Users/Raphael/Dropbox/uzh/uzh-13-fall/uzh-apc/uzh-apc-ex/apc-ex-03-2/apc-ex-03-2/pv-float.txt");
pvFloat.push_back(4.5677);
pvFloat.push_back(3.1415);
PVector<std::string> pvString = PVector<std::string>("/Users/Raphael/Dropbox/uzh/uzh-13-fall/uzh-apc/uzh-apc-ex/apc-ex-03-2/apc-ex-03-2/pv-string.txt");
pvString.push_back("Test1");
pvString.push_back("Test2");
return 0;
}
<file_sep>//
// Fraction.cpp
// Fraction
//
// Created by <NAME> on 19.09.13.
// Copyright (c) 2013 <NAME>. All rights reserved.
//
#include "Fraction.h"
Fraction::Fraction(int pCounter, int pDenominator) {
counter = pCounter;
denominator = pDenominator;
validateFraction(*this);
}
Fraction::Fraction() {
counter = 0;
denominator = 1;
}
int Fraction::getCounter() {
return counter;
}
void Fraction::setCounter(const int cntr) {
counter = cntr;
}
int Fraction::getDenominator() {
return denominator;
}
void Fraction::setDenominator(const int pDenominator) {
denominator = pDenominator;
// validating denominator
try {
validateFraction(*this);
} catch (const char *e) {
std::cerr << e << std::endl;
std::cerr << "Denominator will be set to 1." << std::endl;
denominator = 1;
}
}
Fraction Fraction::operator+(Fraction &pFraction) {
Fraction tmp;
// crosswise multiply
tmp.counter = (counter * pFraction.denominator) + (pFraction.counter * denominator);
tmp.denominator = (denominator * pFraction.denominator);
try {
tmp.reduce();
} catch (const char *e) {
std::cerr << e << std::endl;
}
return tmp;
}
Fraction Fraction::operator-(Fraction &pFraction) {
Fraction tmp;
// crosswise multiply
tmp.counter = (counter * pFraction.denominator) - (pFraction.counter * denominator);
tmp.denominator = (denominator * pFraction.denominator);
try {
tmp.reduce();
} catch (const char *e) {
std::cerr << e << std::endl;
}
return tmp;
}
Fraction Fraction::operator*(Fraction &pFraction) {
Fraction tmp;
// crosswise multiply
tmp.counter = (counter * pFraction.counter);
tmp.denominator = (denominator * pFraction.denominator);
try {
tmp.reduce();
} catch (const char *e) {
std::cerr << e << std::endl;
}
return tmp;
}
Fraction Fraction::operator/(Fraction &pFraction) {
Fraction tmp;
// validation of fraction in constructor and in method setDenominator()
tmp.counter = (counter * pFraction.denominator);
tmp.denominator = (denominator * pFraction.counter);
try {
tmp.reduce();
} catch (const char *e) {
std::cerr << e << std::endl;
}
return tmp;
}
/*
* helper methods
*/
// neither a nor b should be 0
int Fraction::gcf(int a, int b) {
if (a<b) std::swap(a,b);
while (b!=0) {
a=a-b;
if (a<b) std::swap(a,b);
}
return a;
}
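// Worked example (illustration): gcf(12, 18)
//   swap -> (a,b) = (18,12) -> subtract -> (6,12) -> swap -> (12,6)
//   -> subtract -> (6,6) -> subtract -> (0,6) -> swap -> (6,0) -> returns 6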
void Fraction::reduce() {
int i = counter;
// gcf is computed on absolute values; this has no effect on the result
if(i<0) {
i *= -1;
}
int j = denominator;
if(j<0) {
j *= -1;
}
int result = gcf(i, j);
if (result == 0) {
throw "Dvide by zero in reduce-method. Reduce not possible";
}
// reduce fraction
counter /= result;
denominator /= result;
}
void Fraction::validateFraction(Fraction &pFraction) {
if(pFraction.getDenominator() == 0) {
throw "Divide by zero in validateFraction-method.";
}
}
<file_sep>//
// main.cpp
// apc-ex-05-1
//
// Created by <NAME> on 10.11.13.
// Copyright (c) 2013 <NAME>. All rights reserved.
//
#include <iostream>
#include "variant1.h"
#include "variant2.h"
template<typename T>
void calculate(T &ob) {
ob.setWidth(5);
ob.setHeight(4);
std::cout << "area: " << ob.getHeight()*ob.getWidth() << std::endl;
}
int main(int argc, const char * argv[])
{
// rectangle inherits from square
Square sq = Rectangle();
calculate(sq); // because a rectangle inherits from a square, the result will be incorrect.
// square inherits from rectangle
Rectangle2 r = Square2();
calculate(r); // result will be correct
return 0;
}
<file_sep>//
// PSet.h
// apc-ex-03-2
//
// Created by <NAME> on 23.10.13.
// Copyright (c) 2013 <NAME>. All rights reserved.
//
#ifndef __apc_ex_03_2__PSet__
#define __apc_ex_03_2__PSet__
#include <iostream>
#include <set>
#include <fstream>
#include <cerrno>
#include <iterator> // for std::advance, used by at()
#include "Fraction.h"
template<typename T>
struct persisterTwo {
static void read(std::ifstream &i, T &elem) {
i >> elem;
}
static void write(std::ofstream &o, const T &elem) {
o << elem << " ";
}
};
template<>
struct persisterTwo<std::string> {
static void read(std::ifstream &i, std::string &elem) {
std::getline(i, elem);
}
static void write(std::ofstream &o, const std::string &elem) {
if (elem != "") {
o << elem << "\n";
}
}
};
template<>
struct persisterTwo<Fraction> {
static void read(std::ifstream &i, Fraction &elem) {
i >> elem;
}
static void write(std::ofstream &o, const Fraction &elem) {
o << elem << "\n";
}
};
template<typename T, typename P=persisterTwo<T> >
class PSet {
typedef P persisterTwo;
typedef typename std::set<T>::iterator iterator;
private:
std::string mFilename;
std::set<T> mSet;
void read_set();
void write_set();
public:
PSet() {
mFilename = "";
}
PSet(std::string pName) : mFilename(pName) {
read_set();
}
~PSet() {
write_set();
}
unsigned long size() {
return mSet.size();
}
T at(const int pPos) {
    // std::set has no at(); advance an iterator to the requested position instead
    typename std::set<T>::iterator it = mSet.begin();
    std::advance(it, pPos);
    return *it;
}
void insert(typename std::set<T>::iterator &pPosition, const T &elem) {
mSet.insert(pPosition, elem);
}
void insert(const T &elem) {
mSet.insert(elem);
}
void erase(typename std::set<T>::iterator pPosition) {
mSet.erase(pPosition);
}
typename std::set<T>::iterator begin() {
return mSet.begin();
}
typename std::set<T>::iterator end() {
return mSet.end();
}
typename std::set<T>::reverse_iterator rbegin() {
return mSet.rbegin();
}
typename std::set<T>::reverse_iterator rend() {
return mSet.rend();
}
};
#endif /* defined(__apc_ex_03_2__PSet__) */
<file_sep>//
// Util.h
// apcpp-ex2
//
// Created by <NAME> on 07.10.13.
// Copyright (c) 2013 <NAME>. All rights reserved.
//
#ifndef __apcpp_ex2__Util__
#define __apcpp_ex2__Util__
#include <iostream>
int gcf(int a, int b); //ggT
#endif /* defined(__apcpp_ex2__Util__) */
<file_sep>//
// player.h
// apc-ex-06-4
//
// Created by <NAME> on 28/11/13.
// Copyright (c) 2013 <NAME>. All rights reserved.
//
#ifndef __apc_ex_06_4__player__
#define __apc_ex_06_4__player__
#include <iostream>
#include "playfield.h"
class player {
public:
virtual int play(const playfield &field) = 0;
virtual ~player() {}
};
#endif /* defined(__apc_ex_06_4__player__) */
<file_sep>//
// ai.cpp
// apc-ex-06-4
//
// Created by <NAME> on 28/11/13.
// Copyright (c) 2013 <NAME>. All rights reserved.
//
#include "ai.h"
int ai::play(const my_playfield &pField) {
my_playfield privateField = pField;
// either 1 (player1) or 2 (player2)
playerRole = determinePlayerRole(privateField);
if (playerRole == 2) {
// determine next move
return playNext(1, privateField);
}
// i am player 1 -> i play first
return playNext(2, privateField);
}
int ai::playNext(int enemyPlayer, my_playfield pField) {
// check enemy positions
// if enemy is winning, figure out where to play next stone
for (int x=0; x<pField.width; x++) {
my_playfield tmp = pField;
if (!tmp.isFull(x)) {
tmp.insertStone(enemyPlayer, x);
if (tmp.isWinning(enemyPlayer) && !isDiagonalAndLeftEmpty(tmp, x, enemyPlayer) && !isDiagonalAndRightEmpty(tmp, x, enemyPlayer)) {
return x;
}
}
}
// check own position for the next move
for (int x=0; x<pField.width; x++) {
my_playfield tmp = pField; // so no delete method is needed...
if (!tmp.isFull(x)) {
tmp.insertStone(playerRole, x);
if (tmp.isWinning(playerRole)) {
return x;
}
}
}
// if no direct move to win is found, check a move where i can win in the move after
for (int x=0; x<pField.width; x++) {
my_playfield tmp = pField;
if (!tmp.isFull(x)) {
tmp.insertStone(playerRole, x);
// check the second move that would follow this candidate first move
for (int x2=0; x2<pField.width; x2++) {
    my_playfield tmp2 = tmp; // fresh copy so each candidate second move starts from the same position
    if (!tmp2.isFull(x2)) {
        tmp2.insertStone(playerRole, x2);
        if (tmp2.isWinning(playerRole)) {
            return x2;
        }
}
}
}
}
// no good move found
// -> random move
bool isFull = false;
srand((int)time(NULL)); // initialise random with time as seed
int column = rand() % 7;
std::cout << "make randomly move..." << std::endl;
if (pField.isFull(column)) {
isFull = true;
}
while (isFull) {
std::cout << "while..." << std::endl;
column = rand() % 7;
if (!pField.isFull(column)) {
isFull = false;
}
}
return column;
}
bool ai::isDiagonalAndLeftEmpty(my_playfield pField, int pColumn, int pPlayer) {
// pColumn is the third stone in a diagonal way, so that to win, it is
// required to place a 4th stone in the column left to it
//
// example (the other way round)
// 1
// 1 2 X <- don't place it
// 1 2 2 2
if (pColumn-1 > 0 && pColumn < pField.width) {
// is in the boundary of the playfield
pField.insertStone(pPlayer, pColumn-1);
if (pField.isWinning(pPlayer)) {
// not an empty place left to pColumn
return false;
}
// is an empty place left to pColumn
return true;
}
return false;
}
bool ai::isDiagonalAndRightEmpty(my_playfield pField, int pColumn, int pPlayer) {
// pColumn is the third stone in a diagonal way, so that to win, it is
// required to place a 4th stone in the column left to it
//
// example
// 1
// 1 2 X <- don't place it
// 1 2 2 2
if (pColumn > 0 && pColumn+1 < pField.width) {
// is in the boundary of the playfield
pField.insertStone(pPlayer, pColumn+1);
if (pField.isWinning(pPlayer)) {
// not an empty place right to pColumn
return false;
}
// is an empty place right to pColumn
return true;
}
return false;
}
// returns 1 for player1, 2 for player2
int ai::determinePlayerRole(my_playfield pField) {
for (int x=0; x<pField.width; x++) {
for (int y=0; y<pField.height; y++) {
if (pField.stoneat(x, y) != pField.none) {
// playfield is not empty
// i am player 2
return 2;
}
}
}
return 1;
}
<file_sep>//
// my_plafield.h
// apc-ex-06-4
//
// Created by <NAME> on 28/11/13.
// Copyright (c) 2013 <NAME>. All rights reserved.
//
#ifndef __apc_ex_06_4__my_plafield__
#define __apc_ex_06_4__my_plafield__
#include <iostream>
#include <vector>
#include "playfield.h"
class my_playfield : public playfield {
public:
my_playfield();
virtual ~my_playfield();
// return the stone (none/player1/player2) at the position(x,y)
// 0 <= x <= width
// 0 <= y <= height
// stoneat(0,0) ................ top left
// stoneat(width-1,height-1) ... bottom right
// if we insert a stone in a new game in column i,
// it lands at (i,height-1)
// implementation may be changed, interface not
virtual int stoneat(int pColumn, int pRow) const;
// draws playfield to std::cout
void drawPlayfield();
// inserts a stone for player pPlayer at a desired column
void insertStone(int pPlayer, int pColumn);
// checks if player pPlayer is winning
bool isWinning(int pPlayer);
// clear playfield
void clearPlayfield();
// check if playfield is full
bool isFull();
// check if pColumn is full
bool isFull(int pColumn);
protected:
// the internal representation of the field.
std::vector<std::vector<int> > rep;
};
#endif /* defined(__apc_ex_06_4__my_plafield__) */
<file_sep>//
// my_plafield.cpp
// apc-ex-06-3
//
// Created by <NAME> on 28/11/13.
// Copyright (c) 2013 <NAME>. All rights reserved.
//
#include "my_plafield.h"
<file_sep>//
// player.h
// apc-ex-04-5
//
// Created by <NAME> on 07.11.13.
// Copyright (c) 2013 <NAME>. All rights reserved.
//
#ifndef __apc_ex_04_5__player__
#define __apc_ex_04_5__player__
#include "playfield.h"
#include <iostream>
template<typename F>
class player {
public:
// returns the column where the player decides to throw in his
// stone
// F is the playfield which may be any playfield implementing
// the stoneat method, if you expect a different class because
// you need methods to verify whether the opponent can win,
// copy the field into the class that you expect.
int play(const F &field);
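// Illustration only: an AI implementation would typically copy the field first,
// e.g. playfield copy = field;, and then test candidate moves on the copy.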
};
#endif /* defined(__apc_ex_04_5__player__) */
<file_sep>//
// RPN.cpp
// apc-ex-03-3
//
// Created by <NAME> on 28.10.13.
// Copyright (c) 2013 <NAME>. All rights reserved.
//
#include "RPN.h"
using namespace std;
template<typename T>
void RPN<T>::run() {
while (input_ != "q") {
cout << "Type in your command:" << endl;
cin >> input_;
try {
// erase the number on the top of the stack
if (input_ == "d") {
remove();
} else if(input_ == "n") {
add();
} else if (input_ == "m") {
compute_min();
} else if (input_ == "+" || input_ == "-" || input_ == "/" || input_ == "*") {
if (input_ == "+") {
addition();
} else if (input_ == "-") {
subtraction();
} else if (input_ == "*") {
mulitplication();
} else if (input_ == "/") {
division();
}
} else {
if (input_ != "q") {
cout << "Your command is not available. Choose another one." << endl;
}
}
this->printStack();
} catch (const char *&pException) {
cout << pException << endl;
}
}
}
template<typename T>
void RPN<T>::add() {
cout << "Type your number to add to the stack: " << endl;
T number;
cin >> number;
stack_.push_back(number);
end_ = --stack_.end();
}
//Note: the inline on the explicit specialization. It is required to avoid a linker error due to the method being defined more than once.
template<>
inline void RPN<Fraction>::add() {
cout << "Type your number to add to the stack: " << endl;
cout << "Note: User (x/y) to iput your Fraction " << endl;
Fraction number;
cin >> number;
stack_.push_back(number);
end_ = --stack_.end();
}
template<typename T>
void RPN<T>::remove() {
if (stack_.size() > 0) {
stack_.pop_back();
end_ = stack_.end();
} else {
throw "Note: Your stack is already empty.";
}
}
template<typename T>
void RPN<T>::compute_min() {
if (isEmpty()) {
throw "Error: Stack is empty.";
}
if (tooShort()) {
throw "Error: Stack has only one element.";
}
end_ = stack_.end();
--end_;
first_ = *end_;
stack_.pop_back();
--end_;
second_ = *end_;
stack_.pop_back();
T res = mymin(first_, second_);
stack_.push_back(res);
++end_;
}
template<typename T>
T RPN<T>::mymin(T a, T b) {
return a<b?a:b;
}
template<typename T>
void RPN<T>::addition() {
if (isEmpty()) {
throw "Error: Stack is empty.";
}
if (tooShort()) {
throw "Error: Stack has only one element.";
}
end_= stack_.end();
--end_;
first_ = *end_;
stack_.pop_back();
--end_;
second_ = *end_;
stack_.pop_back();
T res = first_ + second_;
printRes(res);
stack_.push_back(res);
++end_;
}
template<typename T>
void RPN<T>::subtraction() {
if (isEmpty()) {
throw "Error: Stack is empty.";
}
if (tooShort()) {
throw "Error: Stack has only one element.";
}
end_= stack_.end();
--end_;
first_ = *end_;
stack_.pop_back();
--end_;
second_ = *end_;
stack_.pop_back();
T res = second_ - first_; // RPN: the value pushed earlier is the minuend
printRes(res);
stack_.push_back(res);
++end_;
}
template<typename T>
void RPN<T>::mulitplication() {
if (isEmpty()) {
throw "Error: Stack is empty.";
}
if (tooShort()) {
throw "Error: Stack has only one element.";
}
end_= stack_.end();
--end_;
first_ = *end_;
stack_.pop_back();
--end_;
second_ = *end_;
stack_.pop_back();
T res = first_ * second_;
printRes(res);
stack_.push_back(res);
++end_;
}
template<typename T>
void RPN<T>::division() {
if (isEmpty()) {
throw "Error: Stack is empty.";
}
if (tooShort()) {
throw "Error: Stack has only one element.";
}
end_= stack_.end();
--end_;
// check the divisor (top of stack) before removing any elements
if ((float)*end_ == 0.0f) {
    throw "Error: Division by zero is not allowed...";
}
first_ = *end_;
stack_.pop_back();
--end_;
second_ = *end_;
stack_.pop_back();
T res = second_ / first_; // RPN: the value pushed earlier is the dividend
cout << "the result is: " << res << endl;
stack_.push_back(res);
}
// NOTE: arctan() is not wired into run() and its body currently mirrors
// addition(); it does not compute an arctangent yet.
template<typename T>
void RPN<T>::arctan() {
if (isEmpty()) {
throw "Error: Stack is empty.";
}
if (tooShort()) {
throw "Error: Stack has only one element.";
}
end_= stack_.end();
--end_;
first_ = *end_;
stack_.pop_back();
--end_;
second_ = *end_;
stack_.pop_back();
T res = first_ + second_;
printRes(res);
stack_.push_back(res);
++end_;
}
template<typename T>
void RPN<T>::printRes(const T pRes) {
cout << "the result is: " << pRes << endl;
}
template<typename T>
void RPN<T>::printStack() {
typename std::vector<T>::reverse_iterator pbegin = stack_.rbegin();
typename std::vector<T>::reverse_iterator pend = stack_.rend();
if (pbegin == pend) {
// empty stack
std::cout << "| |" << std::endl;
std::cout << " ---" << std::endl;
return;
}
unsigned long width = 0;
for (;pbegin != pend; ++pbegin) {
std::ostringstream oss; // std::to_string has no overload for user types such as Fraction
oss << *pbegin;
unsigned long tmp = oss.str().length();
if (tmp > width) {
width = tmp;
}
std::cout << "| " << *pbegin << " |" << std::endl;
}
std::cout << " ";
for (unsigned long i=0; i<width-1; i++) {
std::cout << "-";
}
std::cout << " " << std::endl;
}
<file_sep>//
// game.h
// apc-ex-04-5
//
// Created by <NAME> on 07.11.13.
// Copyright (c) 2013 <NAME>. All rights reserved.
//
#ifndef __apc_ex_06_4__game__
#define __apc_ex_06_4__game__
#include <iostream>
#include "my_playfield.h"
#include "my_player.h"
template<typename P1, typename P2>
class game {
public:
game(P1 &pPlayer1, P2 &pPlayer2);
~game() {};
void run(my_playfield &pField);
private:
my_playfield playField;
bool isRunning = false;
P1 player1;
P2 player2;
};
#endif /* defined(__apc_ex_06_4__game__) */
<file_sep>//
// PSet.cpp
// apc-ex-03-2
//
// Created by <NAME> on 23.10.13.
// Copyright (c) 2013 <NAME>. All rights reserved.
//
#include "PSet.h"
template<typename T, typename P>
void PSet<T, P>::read_set() {
mSet.clear();
std::ifstream infile(mFilename.c_str());
if (!infile.good()) {
std::cerr << "An error occured during file read. File: " << mFilename << " Error: " << strerror(errno) << std::endl;
exit(1);
}
bool isGood = true;
for (;;) {
T x;
persisterTwo::read(infile, x);
if (!infile.good()) {
isGood = false;
}
if (!isGood) {
return;
}
mSet.insert(x);
}
}
template<typename T, typename P>
void PSet<T, P>::write_set() {
std::ofstream outfile(mFilename.c_str(), std::ios::ate | std::ios::app);
iterator first = mSet.begin();
iterator last = mSet.end();
while (first != last) {
persisterTwo::write(outfile, *first++);
}
}<file_sep>//
// my_player.cpp
// apc-ex-06-3
//
// Created by <NAME> on 28/11/13.
// Copyright (c) 2013 <NAME>. All rights reserved.
//
#include "my_player.h"
<file_sep>//
// Util.cpp
// apcpp-ex2
//
// Created by <NAME> on 07.10.13.
// Copyright (c) 2013 <NAME>. All rights reserved.
//
#include <iostream>
#include "Util.h"
// neither a nor b should be 0
int gcf(int a, int b) {
if (a<b) std::swap(a,b);
while (b!=0) {
a=a-b;
if (a<b) std::swap(a,b);
}
return a;
}<file_sep>//
// myIterator.cpp
// apc-ex-06-2
//
// Created by <NAME> on 28/11/13.
// Copyright (c) 2013 <NAME>. All rights reserved.
//
#include "myIterator.h"
<file_sep>//
// my_player.h
// apc-ex-06-3
//
// Created by <NAME> on 28/11/13.
// Copyright (c) 2013 <NAME>. All rights reserved.
//
#ifndef __apc_ex_06_3__my_player__
#define __apc_ex_06_3__my_player__
#include <iostream>
#endif /* defined(__apc_ex_06_3__my_player__) */
<file_sep>//
// my_player.cpp
// apc-ex-06-4
//
// Created by <NAME> on 28/11/13.
// Copyright (c) 2013 <NAME>. All rights reserved.
//
#include "my_player.h"
int my_player::play(const my_playfield &pField) {
int column;
std::cout << "Type in a column where your stone should be placed: " << std::endl;
std::cin >> column;
return column;
}<file_sep>//
// main.cpp
// apc-ex-04-1
//
// Created by <NAME> on 30.10.13.
// Copyright (c) 2013 <NAME>. All rights reserved.
//
#include <iostream>
#include "PVector.h"
#include "PVector.cpp"
#include "PSet.h"
#include "PSet.cpp"
int main(int argc, const char * argv[])
{
// Vectors
PVector<int, persister<int> > pvInt = PVector<int, persister<int> >("/Users/Raphael/Dropbox/uzh/uzh-13-fall/uzh-apc/uzh-apc-ex/apc-ex-04-1/apc-ex-04-1/tmp.txt");
pvInt.push_back(12);
pvInt.push_back(13);
PVector<std::string, persister<std::string> > pv = PVector<std::string, persister<std::string> >("/Users/Raphael/Dropbox/uzh/uzh-13-fall/uzh-apc/uzh-apc-ex/apc-ex-04-1/apc-ex-04-1/tmp2.txt");
pv.push_back("Test1.");
pv.push_back("Test2.a Test2.b Test2.c");
PVector<Fraction, persister<Fraction> > pvFr = PVector<Fraction, persister<Fraction> >("/Users/Raphael/Dropbox/uzh/uzh-13-fall/uzh-apc/uzh-apc-ex/apc-ex-04-1/apc-ex-04-1/tmp5.txt");
Fraction fr1 = Fraction();
fr1.set_counter(4);
fr1.set_denominator(5);
Fraction fr2 = Fraction();
fr2.set_counter(5);
fr2.set_denominator(7);
pvFr.push_back(fr1);
pvFr.push_back(fr2);
// Sets: unique Values
PSet<int, persisterTwo<int> > psInt = PSet<int, persisterTwo<int> >("/Users/Raphael/Dropbox/uzh/uzh-13-fall/uzh-apc/uzh-apc-ex/apc-ex-04-1/apc-ex-04-1/tmp3.txt");
psInt.insert(2);
psInt.insert(4);
PSet<std::string, persisterTwo<std::string> > ps = PSet<std::string, persisterTwo<std::string> >("/Users/Raphael/Dropbox/uzh/uzh-13-fall/uzh-apc/uzh-apc-ex/apc-ex-04-1/apc-ex-04-1/tmp4.txt");
std::string a = "Test1";
std::string b = "Test2.a Test2.b Test2.c";
ps.insert(a);
ps.insert(b);
PSet<Fraction, persisterTwo<Fraction> > psFr = PSet<Fraction, persisterTwo<Fraction> >("/Users/Raphael/Dropbox/uzh/uzh-13-fall/uzh-apc/uzh-apc-ex/apc-ex-04-1/apc-ex-04-1/tmp6.txt");
psFr.insert(fr1);
psFr.insert(fr2);
return 0;
}
<file_sep>//
// my_plafield.h
// apc-ex-06-3
//
// Created by <NAME> on 28/11/13.
// Copyright (c) 2013 <NAME>. All rights reserved.
//
#ifndef __apc_ex_06_3__my_plafield__
#define __apc_ex_06_3__my_plafield__
#include <iostream>
#endif /* defined(__apc_ex_06_3__my_plafield__) */
<file_sep>//
// ai.h
// apc-ex-06-4
//
// Created by <NAME> on 28/11/13.
// Copyright (c) 2013 <NAME>. All rights reserved.
//
#ifndef __apc_ex_06_4__ai__
#define __apc_ex_06_4__ai__
#include <iostream>
#include "player.h"
#include "my_playfield.h"
#include <ctime>
class ai : player {
public:
int play(const my_playfield &field);
int play(const playfield &field) { return 0; } // override the virtual function of super class player
private:
int determinePlayerRole(my_playfield pField);
int playNext(int enemyPlayer, my_playfield pField);
int playerRole = 0;
bool isDiagonalAndLeftEmpty(my_playfield pField, int pColumn, int pPlayer);
bool isDiagonalAndRightEmpty(my_playfield pField, int pColumn, int pPlayer);
};
#endif /* defined(__apc_ex_06_4__ai__) */
<file_sep>//
// game.cpp
// apc-ex-04-5
//
// Created by <NAME> on 07.11.13.
// Copyright (c) 2013 <NAME>. All rights reserved.
//
#include "game.h"
template<typename P1, typename P2>
game<P1, P2>::game(P1 &pPlayer1, P2 &pPlayer2) {
player1 = pPlayer1;
player2 = pPlayer2;
isRunning = true;
}
template<typename P1, typename P2>
void game<P1, P2>::run(playfield &pField) {
std::cout << std::endl;
std::cout << "******************************" << std::endl;
std::cout << "******************************" << std::endl;
std::cout << " STARTING GAME" << std::endl;
std::cout << "******************************" << std::endl;
std::cout << "******************************" << std::endl;
std::cout << std::endl;
while (isRunning) {
if (isRunning) {
// Player 1
std::cout << "Player1's turn:" << std::endl;
int column = player1.play(pField);
pField.insertStone(playfield::player1, column);
// checks
if(pField.isWinning(playfield::player1)) {
std::cout << std::endl;
std::cout << "******************************" << std::endl;
std::cout << "******************************" << std::endl;
std::cout << " PLAYER 1 WINS" << std::endl;
std::cout << "******************************" << std::endl;
std::cout << "******************************" << std::endl;
std::cout << std::endl;
isRunning = false;
}
if (pField.isFull()) {
std::cout << "Playfield is full. Game ends." << std::endl;
isRunning = false;
}
pField.drawPlayfield();
}
if (isRunning) {
// Player 2
std::cout << "Player2's turn:" << std::endl;
int column = player2.play(pField);
pField.insertStone(playfield::player2, column);
// checks
if (pField.isWinning(playfield::player2)) {
std::cout << std::endl;
std::cout << "******************************" << std::endl;
std::cout << "******************************" << std::endl;
std::cout << " PLAYER 2 WINS" << std::endl;
std::cout << "******************************" << std::endl;
std::cout << "******************************" << std::endl;
std::cout << std::endl;
isRunning = false;
}
if (pField.isFull()) {
std::cout << "Playfield is full. Game ends." << std::endl;
isRunning = false;
}
pField.drawPlayfield();
}
}
std::cout << std::endl;
std::cout << "******************************" << std::endl;
std::cout << "******************************" << std::endl;
std::cout << " GAME ENDED" << std::endl;
std::cout << "******************************" << std::endl;
std::cout << "******************************" << std::endl;
std::cout << std::endl;
}
<file_sep>uzh-apc
=======
Assignments for the course Advanced Programming in C++ at the University of Zurich.
<file_sep>//
// main.cpp
// Fraction
//
// Created by <NAME> on 19.09.13.
// Copyright (c) 2013 <NAME>. All rights reserved.
//
#include <iostream>
#include "Fraction.h"
int main(int argc, const char * argv[])
{
Fraction f1(2,6);
Fraction f2(2,6);
// provoking an exception
try {
Fraction f7(1,0);
} catch (const char *pException) {
std::cerr << pException << std::endl;
}
// make some math...
Fraction f3, f4, f5, f6;
try {
f3 = f1*f2;
f4 = f1/f2;
f5 = f1+f2;
f6 = f1-f2;
} catch (const char *pException) {
    std::cerr << pException << std::endl;
}
std::cout << "original fractions: " << f1.getCounter() << ":" << f1.getDenominator() << " and " << f2.getCounter() << ":" << f2.getDenominator() << std::endl;
std::cout << "*: " << f3.getCounter() << ":" << f3.getDenominator() << std::endl;
std::cout << "/: " << f4.getCounter() << ":" << f4.getDenominator() << std::endl;
std::cout << "+: " << f5.getCounter() << ":" << f5.getDenominator() << std::endl;
std::cout << "-: " << f6.getCounter() << ":" << f6.getDenominator() << std::endl;
return 0;
}
<file_sep>//
// Fraction.h
// Fraction
//
// Created by <NAME> on 19.09.13.
// Copyright (c) 2013 <NAME>. All rights reserved.
//
#ifndef __Fraction__Fraction__
#define __Fraction__Fraction__
#include <iostream>
class Fraction {
public:
Fraction(int pCounter, int pDenominator);
Fraction();
int getCounter();
void setCounter(const int cntr);
int getDenominator();
void setDenominator(const int pDenominator);
Fraction operator+(Fraction &pFraction);
Fraction operator-(Fraction &pFraction);
Fraction operator*(Fraction &pFraction);
Fraction operator/(Fraction &pFraction);
void reduce();
int gcf(int a, int b); //ggT
void validateFraction(Fraction &pFraction);
private:
int counter;
int denominator;
};
#endif /* defined(__Fraction__Fraction__) */
<file_sep>//
// variant2.h
// apc-ex-05-1
//
// Created by <NAME> on 12/11/13.
// Copyright (c) 2013 <NAME>. All rights reserved.
//
#ifndef __apc_ex_05_1__variant2__
#define __apc_ex_05_1__variant2__
#include <iostream>
class Rectangle2 {
protected:
int width;
int height;
public:
virtual void setWidth(int pWidth) {
width = pWidth;
}
virtual void setHeight(int pHeight) {
height = pHeight;
}
int getHeight() {
return height;
}
int getWidth() {
return width;
}
};
class Square2 : public Rectangle2 {
public:
void setWidth(int pWidth) {
width = pWidth;
height = pWidth;
}
void setHeight(int pHeight) {
width = pHeight;
height = pHeight;
}
};
#endif /* defined(__apc_ex_05_1__variant2__) */
<file_sep>//
// combineops_t.h
// apc-ex-04-4
//
// Created by <NAME> on 31.10.13.
// Copyright (c) 2013 <NAME>. All rights reserved.
//
#ifndef __apc_ex_04_4__combineops_t__
#define __apc_ex_04_4__combineops_t__
#include <iostream>
/*
 * The combineops_t class first applies two different unary functions to a
 * value and then applies a binary operation to the results of those two
 * unary functions.
*/
template<typename BinOp, typename Op1, typename Op2>
class combineops_t: public std::unary_function<typename Op1::argument_type, typename BinOp::result_type> {
protected:
BinOp o;
Op1 o1;
Op2 o2;
public:
combineops_t(BinOp binop, Op1 op1, Op2 op2) :
o(binop), o1(op1), o2(op2) {
}
typename BinOp::result_type operator()(const typename Op1::argument_type &x) {
return o(o1(x), o2(x));
}
};
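// Usage sketch (not part of the original exercise; the helper name
// combined_example is illustrative only): combine two unary ops with a binary
// op so that combined_example(x) == (-x) * (1 + x). Requires <functional>;
// kept inline so the header stays ODR-safe.
#include <functional>
inline int combined_example(int x) {
    combineops_t<std::multiplies<int>,
                 std::negate<int>,
                 std::binder1st<std::plus<int> > >
        f((std::multiplies<int>()),
          (std::negate<int>()),
          std::bind1st(std::plus<int>(), 1));
    return f(x); // e.g. combined_example(3) == (-3) * (1 + 3) == -12
}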
#endif /* defined(__apc_ex_04_4__combineops_t__) */
<file_sep>//
// myIterator.h
// apc-ex-06-2
//
// Created by <NAME> on 28/11/13.
// Copyright (c) 2013 <NAME>. All rights reserved.
//
#ifndef __apc_ex_06_2__myIterator__
#define __apc_ex_06_2__myIterator__
#include <iostream>
#endif /* defined(__apc_ex_06_2__myIterator__) */
<file_sep>//
// player.h
// apc-ex-06-3
//
// Created by <NAME> on 28/11/13.
// Copyright (c) 2013 <NAME>. All rights reserved.
//
#ifndef __apc_ex_06_3__player__
#define __apc_ex_06_3__player__
#include <iostream>
#endif /* defined(__apc_ex_06_3__player__) */
<file_sep>//
// RPN.h
// apc-ex-03-3
//
// Created by <NAME> on 28.10.13.
// Copyright (c) 2013 <NAME>. All rights reserved.
//
#ifndef __apc_ex_03_3__RPN__
#define __apc_ex_03_3__RPN__
#include <iostream>
#include <vector>
#include <list>
#include <stdlib.h> /* atoi */
#include "PVector.cpp"
#include "PVector.h"
class RPN {
private:
std::string input_;
PVector<float> pvector_stack_ = PVector<float>();
public:
RPN(std::string pFilename) {
pvector_stack_.setFilename(pFilename);
};
~RPN() {};
void printUsage();
void operate(const char* pOperation);
void printStack();
void run();
};
#endif /* defined(__apc_ex_03_3__RPN__) */
|
81e47c96d9a6952f1e94a60968bac72822f6dba6
|
[
"Markdown",
"C",
"Makefile",
"C++"
] | 63
|
C++
|
rmatil/uzh-apc
|
44dd3bb6bd308e80a029e95f998e60e87d978adf
|
129a4b41d32bb8ece367ebbcc81b371185946229
|
refs/heads/master
|
<repo_name>vishu160196/Registration<file_sep>/README.md
# Registration
Registration page with a Node server and MongoDB backend. After repeated requests from the same IP address it challenges suspected bots with Google reCAPTCHA.
### Steps to run locally
Prerequisites: Node.js and NPM
1. Download and install MongoDB from here https://www.mongodb.com/download-center/community
2. `cd` to `bin` directory in installation folder
3. Run `mongod`
4. Clone this repo
5. `cd` to the repo, run `npm install`
6. Run `node server.js`
7. Head to `localhost` in a browser on the machine running the script, or use that machine's local IP to access the page from another machine's browser
<file_sep>/js/script.js
$(document).ready(function () {
jQuery.validator.addMethod("password", function(value, element) {
var result = this.optional(element) || value.length >= 6 && /\d/.test(value) && /[a-z]/i.test(value);
return result;
}, "Your password must be at least 6 characters long and contain at least one number and one character.");
$('#registration-form').validate({
rules:{
name: {
required: true,
minlength: 2
},
email: {
required: true,
email: true
}
},
messages: {
name: {
required: "Please enter a name",
minlength: "Name should be at least 2 characters long"
}
}
});
// $('#registration-form').submit(function() {
// if($(this).valid()){
// $(this).ajaxSubmit({
// error: function(xhr) {
// console.log('Error: ' + xhr.status);
// },
// success: function(response) {
// console.log(response);
// }
// });
// }
// //Very important line, it disable the page refresh.
// return true;
// });
});<file_sep>/server.js
const express = require('express');
const bodyParser = require('body-parser');
const app = express();
const request = require('request');
app.use(bodyParser.urlencoded({extended: true}));
const port = 80;
const connection = require('./models');
const mongoose = require('mongoose');
const User = mongoose.model('User');
const requestCount = new Map();
app.set('view engine', 'pug');
// reset the per-IP request counts every 24 hours (setInterval, so it repeats)
setInterval(() => {
    requestCount.clear();
}, 24*3600000);
app.get('/', (req, res) =>{
let recaptcha = false;
if(requestCount.get(req.connection.remoteAddress) >= 3)
recaptcha = true;
res.render('registration', {'recaptcha' : recaptcha});
});
app.get('/css/styles.css', (req, res) =>{
res.sendFile(__dirname + '/css/styles.css');
});
app.get('/js/script.js', (req, res) =>{
res.sendFile(__dirname + '/js/script.js');
});
function saveUserAndRespond(name, email, password, res){
var user = new User({
name: name,
email: email,
password: <PASSWORD>
});
user.save((err, user)=>{
if(err){
return res.json({'message': 'dberror saving user'});
}
return res.json({"responseCode" : 0,"responseDesc" : "Success", "message" : "registered successfully"});
});
}
app.post('/submit', (req, res) =>{
let ip = req.connection.remoteAddress;
if(requestCount.get(ip) === undefined){
requestCount.set(ip, 0);
}
let count = requestCount.get(ip);
let verifyCaptcha = count >= 3;
count = count < 4 ? count+1 : count;
requestCount.set(ip, count);
if(verifyCaptcha){
if(req.body['g-recaptcha-response'] === undefined || req.body['g-recaptcha-response'] === '' || req.body['g-recaptcha-response'] === null) {
return res.json({"responseCode" : 1,"responseDesc" : "Please select captcha"});
}
// Put your secret key here.
var secretKey = "<KEY>";
// req.connection.remoteAddress will provide IP address of connected user.
var verificationUrl = "https://www.google.com/recaptcha/api/siteverify?secret=" + secretKey + "&response=" + req.body['g-recaptcha-response'] + "&remoteip=" + req.connection.remoteAddress;
// Hitting GET request to the URL, Google will respond with success or error scenario.
request(verificationUrl,function(error,response,body) {
body = JSON.parse(body);
// Success will be true or false depending upon captcha validation.
if(body.success !== undefined && !body.success) {
return res.json({"responseCode" : 1,"responseDesc" : "Failed captcha verification"});
}
saveUserAndRespond(req.body.name, req.body.email, req.body.password, res);
});
}else{
saveUserAndRespond(req.body.name, req.body.email, req.body.password, res);
}
});
app.listen(port, () => console.log(`Example app listening at http://localhost:${port}`))
|
4140406eb0e4c51cb6acb01918db45076ad8cb6e
|
[
"Markdown",
"JavaScript"
] | 3
|
Markdown
|
vishu160196/Registration
|
1edce220e8bb7456f57161ca6fcdfa14b254ad4f
|
31cf669512d7346717c7a7718c89fb6a36843b1b
|
refs/heads/master
|
<file_sep>import { combineReducers } from "redux";
import loginReducer from './loginReducer'
import fetchReducer from './fetchReducer'
export default combineReducers({
loginReducer,
fetchReducer
});
<file_sep>import React, { Component } from 'react'
import { Text, View } from 'react-native'
import { MyButton, MyText } from './components/DefaultComponent'
export class Register extends Component {
render() {
return (
<View style={{ flex: 1, justifyContent: 'center', alignItems: 'center', backgroundColor: '#222' }}>
<MyText> Register </MyText>
<MyButton onPress={() => this.props.navigation.pop()}>
<MyText>Go back</MyText>
</MyButton>
</View>
)
}
}
export default Register<file_sep>import { fetchData } from './apiFetching'
export const setAction = (actionName = '', payload = []) => {
return {
type: actionName,
payload: payload
};
};
export const setLogin = (data) => {
return (dispatch) => {
dispatch(setAction("LOGIN_LOADING"));
getUserProfile(data)
.then(result => {
dispatch(setAction("LOGIN_SUCCESS", result));
})
.catch(error => {
dispatch(setAction("LOGIN_FAILURE"))
})
}
}
export const getFetchData = () => {
return (dispatch) => {
dispatch(setAction("FETCH_LOADING"));
fetchData()
.then(result => {
dispatch(setAction("FETCH_SUCCESS", result));
})
.catch(error => {
dispatch(setAction("FETCH_FAILURE"))
})
}
}
export const updateUser = (data) => {
return (dispatch) => {
dispatch(setAction("LOGIN_UPDATE", data));
}
}
export const getUserProfile = (data) => {
return new Promise((resolve, reject) => {
setTimeout(() => {
return resolve(getUserProfileApi(data))
}, 0)
});
};
function getUserProfileApi(data) {
if (data.username == "Auchaphon" && data.password == "<PASSWORD>") {
return { username: "Auchaphon", email: '<EMAIL>', telephone: "0982856576" }
} else {
return "";
}
}
<file_sep>export const fetchData = () => {
return new Promise((resolve, reject) => {
setTimeout(() => {
return resolve(fetchDataApi())
}, 2000)
});
}
async function fetchDataApi() {
let url = "https://api.github.com/repositories"
const result = fetch(url, {
method: 'GET',
headers: {
'Content-Type': 'application/json',
}
}).then(function (response) {
return response.json()
}).then(function (responeJson) {
return responeJson
})
.catch((error) => {
return error
});
return result
}<file_sep>import React, { Component } from 'react'
import { Text, View } from 'react-native'
import { Container, Header, Left, Body, Right,Icon } from 'native-base';
import { MyText } from './components/DefaultComponent'
export class HomeBase extends Component {
render() {
return (
<Container>
<Header>
<Left>
</Left>
<Body>
<MyText style={{color:'#000'}}>HomeBase</MyText>
</Body>
<Right>
</Right>
</Header>
</Container>
)
}
}
export default HomeBase<file_sep>import React, { Component } from 'react'
import { Text, View, TextInput } from 'react-native'
import { MyButton, MyText } from './components/DefaultComponent'
import { connect } from 'react-redux';
import { updateUser } from './redux/actions'
import Lib from './lib/util'
const mapStateToProps = (state) => {
return (
{
loginReducer: state.loginReducer
}
)
};
const mapDispatchToProps = {
    updateUser // must be mapped, otherwise this.props.updateUser is undefined in submit()
};
export class EditProfile extends Component {
constructor(props) {
super(props)
this.state = {
telephone: this.props.loginReducer.data.telephone,
email: this.props.loginReducer.data.email,
username: this.props.loginReducer.data.username
}
}
submit = () => {
let data = {
telephone: this.state.telephone,
email: this.state.email,
username: this.props.loginReducer.data.username
}
this.props.updateUser(data);
}
componentWillReceiveProps(nextProps) {
if (this.props.loginReducer !== nextProps.loginReducer) {
this.props.navigation.pop();
}
}
render() {
return (
<View style={{ flex: 1, justifyContent: 'center', alignItems: 'center', backgroundColor: '#222' }}>
<MyText style={{ fontWeight: 'bold', fontSize: 18 }}>{`Username : ${this.state.username}`}</MyText>
<View style={{ height: Lib.point(10) }} />
<TextInput
onChangeText={(text) => this.setState({ telephone: text })}
style={styles.textInput} value={this.state.telephone}></TextInput>
<View style={{ height: Lib.point(10) }} />
<TextInput
onChangeText={(text) => this.setState({ email: text })}
style={styles.textInput} value={this.state.email}></TextInput>
<View style={{ height: Lib.point(10) }} />
<View style={{ flexDirection: 'row' }}>
<MyButton style={{ marginRight: Lib.point(10) }} onPress={() => this.props.navigation.pop()}>
<MyText>Go back</MyText>
</MyButton>
<MyButton onPress={() => this.submit()}>
<MyText>Submit</MyText>
</MyButton>
</View>
</View>
)
}
}
export default connect(mapStateToProps, mapDispatchToProps)(EditProfile);
const styles = {
textInput: {
width: '80%', height: 50,
backgroundColor: '#333',
borderColor: 'lawngreen',
borderWidth: 1,
color: '#fff',
padding: 4,
borderRadius: 10
}
}
<file_sep>import React, { Component } from 'react';
import { createStackNavigator, createSwitchNavigator } from 'react-navigation';
import { Provider } from "react-redux";
import store from './src/redux/configureStore'
import Login from './src/Login';
import Home from './src/Home';
import Register from './src/Register'
import EditProfile from './src/EditProfile'
import Fetching from './src/Fetching-NoRedux';
import FetchingRedux from './src/Fetching-Redux';
import HomeBase from './src/HomeBase'
// import Login from './src/screens/Login';
console.disableYellowBox = true;
const ScreenStackNavigator = createStackNavigator({
Login: Login,
Home: Home,
FetchingRedux: FetchingRedux,
HomeBase: HomeBase
},
{
headerMode: "none",
initialRouteName: "HomeBase",
});
const AppStackNavigator = createStackNavigator({
MainPage: ScreenStackNavigator,
Register: Register,
EditProfile: EditProfile
}, {
mode: 'modal',
headerMode: 'none',
});
class App extends Component {
render() {
return (
<Provider store={store}>
<AppStackNavigator />
</Provider>
);
}
}
export default App;
|
e1205fa4c35502282b6eb9b897b1c6729abd638c
|
[
"JavaScript"
] | 7
|
JavaScript
|
auchaphon/workshop_120961
|
a1016b05e5c024baec00ffe04c7ffb80b00c58dc
|
2c07341b7cf99261398ef32b58432910639e19d3
|
refs/heads/master
|
<repo_name>diogogomeswww/Getting-and-Cleaning-Data-Course-Project<file_sep>/CodeBook.md
## Description
Additional information about the variables, data and transformations used in the course project for the Johns Hopkins Getting and Cleaning Data course.
### Source Data
A full description of the data used in this project can be found at [The UCI Machine Learning Repository](http://archive.ics.uci.edu/ml/datasets/Human+Activity+Recognition+Using+Smartphones)
[The source data for this project can be found here.](https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip)
#### Source Data: Description
The experiments have been carried out with a group of 30 volunteers within an age bracket of 19-48 years.
Each person performed six activities (WALKING, WALKING_UPSTAIRS, WALKING_DOWNSTAIRS, SITTING, STANDING, LAYING) wearing a smartphone (Samsung Galaxy S II) on the waist.
Using its embedded accelerometer and gyroscope, we captured 3-axial linear acceleration and 3-axial angular velocity at a constant rate of 50Hz.
The experiments have been video-recorded to label the data manually.
The obtained dataset has been randomly partitioned into two sets, where 70% of the volunteers was selected for generating the training data and 30% the test data.
The sensor signals (accelerometer and gyroscope) were pre-processed by applying noise filters and then sampled in fixed-width sliding windows of 2.56 sec and 50% overlap (128 readings/window). The sensor acceleration signal, which has gravitational and body motion components, was separated using a Butterworth low-pass filter into body acceleration and gravity. The gravitational force is assumed to have only low frequency components, therefore a filter with 0.3 Hz cutoff frequency was used. From each window, a vector of features was obtained by calculating variables from the time and frequency domain.
#### Source Data: Attribute Information
For each record in the dataset it is provided:
- Triaxial acceleration from the accelerometer (total acceleration) and the estimated body acceleration.
- Triaxial Angular velocity from the gyroscope.
- A 561-feature vector with time and frequency domain variables.
- Its activity label.
- An identifier of the subject who carried out the experiment.
### Getting and Cleaning Source Data
#### 1. Merge the training and the test sets to create one data set.
After setting the source directory for the files, read into tables the data located in
- features.txt
- activity_labels.txt
- subject_train.txt
- x_train.txt
- y_train.txt
- subject_test.txt
- x_test.txt
- y_test.txt
Assign column names and merge "train"" and "tests" sets to create one data set.
#### 2. Extract only the measurements on the mean and standard deviation for each measurement.
Create a logical vector that contains TRUE values for the ID, mean and stdev columns and FALSE values for the others.
Subset this data to keep only the necessary columns.
#### 3. Use descriptive activity names to name the activities in the data set
Convert the activity_id to activity_label using activity_labels.txt.
#### 4. Appropriately label the data set with descriptive activity names.
* Removing special characters _ () -
* convert to lower case
* full name instead of abbreviation
* ...
Wrote two or three helper functions ([helper_functions.R](helper_functions.R)) to perform multiple find-and-replace operations on the column names.
Example: "tBodyAcc-mean()-X" was converted to "time body accelerometer mean x"
#### 5. Create a second, independent tidy data set with the average of each variable for each activity and each subject.
Per the project instructions, we need to produce only a data set with the average of each variable for each activity and subject.
So the final tinydata table has:
* 180 rows: 30 subjects * 6 activities
* 68 columns:
+ 1st column: subject's ID
+ 2nd column: activity label
+ remaining columns: mean of each measurement (column name) by each subject and each activity
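A sketch of this step with `dplyr` (shown here with `summarise_all()`; the script itself uses the older `summarise_each()`), assuming `data` already holds the cleaned measurement columns plus `subject` and `activity`:

```r
library(dplyr)

tinydata <- data %>%
  group_by(subject, activity) %>%
  summarise_all(mean)                    # one mean per measurement column, per subject/activity

write.table(tinydata, "tinydata.txt", row.names = FALSE)
```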
<file_sep>/README.md
## Coursera - Getting and Cleaning Data - Course Project
Repo for the submission of the course project for the Johns Hopkins Getting and Cleaning Data course.
### Overview
This project serves to demonstrate the collection and cleaning of a tidy data set that can be used for subsequent
analysis.
A full description of the data used in this project can be found at [The UCI Machine Learning Repository](http://archive.ics.uci.edu/ml/datasets/Human+Activity+Recognition+Using+Smartphones)
[The source data for this project can be found here.](https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip)
### Using this script
You should download and unzip the file in the same folder as run_analysis.R.
There will then be a folder named "UCI HAR Dataset" in the project folder.
You can then run the script run_analysis.R to produce the output data: tinydata object and tinydata.txt file.
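For example, from an R session in the project folder (a sketch; the dimensions follow from the CodeBook):

```r
source("run_analysis.R")                            # runs the whole cleaning pipeline
tidy <- read.table("tinydata.txt", header = TRUE)   # read the produced file back in
dim(tidy)                                           # 180 rows, 68 columns
```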
### Project Summary
The following is a summary description of the project instructions
You should create one R script called run_analysis.R that does the following.
1. Merges the training and the test sets to create one data set.
2. Extracts only the measurements on the mean and standard deviation for each measurement.
3. Uses descriptive activity names to name the activities in the data set
4. Appropriately labels the data set with descriptive activity names.
5. Creates a second, independent tidy data set with the average of each variable for each activity and each subject.
### Additional Information
You can find additional information about the variables, data and transformations in the [CodeBook.MD](https://github.com/diogogomeswww/Getting-and-Cleaning-Data-Course-Project/blob/master/CodeBook.md) file.
<file_sep>/helper_functions.R
multi.gsub = function(pattern, replacement, x, ...) {
if (length(replacement) == 1) {
replacement = rep(replacement,length(pattern))
}
result <- x
for (i in 1:length(pattern)) {
result <- gsub(pattern[i], replacement[i], result, ...)
}
result
}
multi.sub = function(pattern, replacement, x, ...) {
if (length(replacement) == 1) {
replacement = rep(replacement,length(pattern))
}
result <- x
for (i in 1:length(pattern)) {
result <- sub(pattern[i], replacement[i], result, ...)
}
result
}
multi.replace.first.letter = function(letters, replacement, x) {
result <- x
for (i in 1:length(letters)) {
result = replace.first.letter(letters[i], replacement[i], result)
}
return(result)
}
replace.first.letter = function(letter, replacement, x) {
#x indexes that start with the letter
index = substr(x, 1, 1) == letter
x[index] = sub(letter, replacement, x[index], fixed = TRUE)
return(x)
}
<file_sep>/run_analysis.R
setwd("~/Downloads/Estudo/Coursera.org/Getting and Cleaning Data/#meu/CourseProject")
source("helper_functions.R")
library(stringr)
library(dplyr)
#
if( !exists("set") )
{
# Load Training and Test Sets into one table
set = read.table('UCI HAR Dataset/train/X_train.txt', stringsAsFactors=FALSE)
#adding to existing table to save memory
set = rbind(set, read.table('UCI HAR Dataset/test/X_test.txt'))
}
#load features.txt
#this will give us the column names of each measurement
if( !exists("features") )
{
features = read.table('UCI HAR Dataset/features.txt')
}
##################
#2 Extracts only the measurements on the mean and standard deviation for each measurement
#get all features mean and std
#finding by search in the name
meanstd.features.colindex = sort(c(
grep("mean()",features[,2],fixed = TRUE),
grep("std()",features[,2],fixed = TRUE)
))
meanstd.features.colnames = as.vector( features[meanstd.features.colindex,2] )
data = set[,meanstd.features.colindex]
##################
#3 Uses descriptive activity names to name the activities in the data set
#Change text
#
#to lowercase
meanstd.features.colnames =tolower(meanstd.features.colnames)
#change t and f to time and frequency
meanstd.features.colnames = multi.replace.first.letter(c("t","f"),c("time ","frequency "),meanstd.features.colnames)
#remove ()- and -
meanstd.features.colnames = multi.gsub(c("()","-"),c(""," "),meanstd.features.colnames, fixed = TRUE)
#expand abbreviations and separate words with a space: body, acc, gyro, jerk
meanstd.features.colnames = multi.gsub(
c("body","acc","gyro","jerk","gravity","mag"),
c("body ","accelerometer ","gyroscope ","jerk ","gravity ","magnitude "),
meanstd.features.colnames, fixed = TRUE
)
#remove duplicate spaces
meanstd.features.colnames = gsub("^ *|(?<= ) | *$", "", meanstd.features.colnames, perl=T)
#Assign these names to the colnames of data
colnames(data) = meanstd.features.colnames
#order columns by names so it groups each measure mean and std
data = data[,order(names(data))]
##################
# 4 Appropriately labels the data set with descriptive variable names.
# activity id
# add the column with subject id in the order that we pull the information: train + test
activity_ids = c(
readLines('UCI HAR Dataset/train/y_train.txt'),
readLines('UCI HAR Dataset/test/y_test.txt')
)
data = cbind(activity = activity_ids, data)
# subject id
# add the column with subject id in the order that we pull the information: train + test
subject_ids = c(
readLines('UCI HAR Dataset/train/subject_train.txt'),
readLines('UCI HAR Dataset/test/subject_test.txt')
)
data = cbind(subject = subject_ids, data)
#convert columns to numeric
data = transform(data, subject = as.numeric(subject), activity = as.numeric(activity))
#order table by subject + activity
data = arrange(data, subject, activity)
#name the activity instead of a number
activity_label = read.table('UCI HAR Dataset/activity_labels.txt')
data[,"activity"] = activity_label[data[,"activity"],2]
##################
# 5 From the data set in step 4, creates a second, independent tidy data set with the average of each variable for each activity and each subject.
tinydata = data %>%
group_by(subject, activity) %>%
summarise_each(funs(mean(.)))
#remove . from columns names
names(tinydata) = gsub("\\.", " ", names(tinydata))
#order columns by names so it groups each measure mean and std
#tinydata = tinydata[,order(names(tinydata))]
#write table to txt file
write.table(tinydata, "tinydata.txt",row.name=FALSE)
|
846f5557180429223559c0014c873ca96a1affd2
|
[
"Markdown",
"R"
] | 4
|
Markdown
|
diogogomeswww/Getting-and-Cleaning-Data-Course-Project
|
7a0a1593bc84c9ec7f14b633049b7f753442c9d1
|
3265f6ce5582e5ac70bf7170418e11b724aa4862
|
refs/heads/master
|
<repo_name>robertgz/Testbox<file_sep>/TestBox/src/TemperatureSensor.h
#ifndef TEMPERATURESENSOR_H
#define TEMPERATURESENSOR_H
#include "Arduino.h"
class TemperatureSensor {
public:
TemperatureSensor(int pin_SCK, int pin_CS, int pin_SO);
void readSensor(void);
int getTemperatureF(void);
void getFormatedOutput(char* charData);
void getFormatedSerialOutput(char* charData);
private:
int _temperature_F;
MAX6675 thermocouple;
};
#endif<file_sep>/README.md
Arduino project in C++ that reads from three sensors and outputs to an LCD.
## Dependencies
* MAX6675 library by Adafruit Version 1.0.0
* Install from Arduino's Library Manager
* New LiquidCrystal Library 1.5.0
* Download ZIP from https://bitbucket.org/fmalpartida/new-liquidcrystal/wiki/Home
* Place NewLiquidCrystal_lib folder from within ZIP into user folder \Documents\Arduino\libraries
## Components
* Arduino UNO
* SainSmart
* LCD 4 X 20
* LCD Module 2004A
* With i2c and SPI input/output expander
* [I2C search](http://www.google.com/search?q=I2C)
* [New LiquidCrystal](https://bitbucket.org/fmalpartida/new-liquidcrystal/wiki/Home) with I2C support by
<NAME>
* Others
* Sunfounder Wiki with LiquidCrystal_I2C library [I2C LCD2004](http://wiki.sunfounder.cc/index.php?title=I2C_LCD2004)
* [Adafruit_LiquidCrystal library](https://github.com/adafruit/Adafruit_LiquidCrystal)
* Pressure Sensor
* 300 PSI
* Kavlico P4000
* [Product page](http://www.kavlico.com/kavlico-pressure/pressure-products/piezo-resistive-pressure-sensors/p4000) with link to datasheet
* [Referenced forum discussion](https://forum.arduino.cc/index.php?topic=376384.0)
* Flow Sensor
* AICHI
* Model OF05ZAT
* Label on bag: STK0151012824
* [Product page and Data Sheet](https://www.aichitokei.net/products/microflow-sensor-of-z/)
* Thermocouple,
* MAX 6675 (V1.2)
* GND, VCC, SCK, CS, SO
* Datasheets
* https://datasheets.maximintegrated.com/en/ds/MAX6675.pdf
* https://www.sparkfun.com/datasheets/IC/MAX6675.pdf
* [MAX6675-library](https://github.com/adafruit/MAX6675-library)
* Note: MAX 6675 has been discontinued/retired. Adafruit has the [MAX31855](https://www.adafruit.com/product/269) as the upgrade.
<file_sep>/TestBox/src/FlowSensor.h
#ifndef FLOWSENSOR_H
#define FLOWSENSOR_H
#include "Arduino.h"
class FlowSensor {
public:
FlowSensor(int pin);
void readSensor();
float getFlowRate();
void getFormatedOutput(char* charData);
void getFormatedSerialOutput(char* charData);
private:
int _pin;
unsigned long _flowLastMeasuredTime; // time of last measured flow
float _flow;
static volatile unsigned long _pulseCount;
static volatile unsigned long _totalPulsesDetected;
static void _pulseCounter();
};
#endif<file_sep>/TestBox/src/PressureSensor.h
#ifndef PRESSURESENSOR_H
#define PRESSURESENSOR_H
#include "Arduino.h"
class PressureSensor {
public:
PressureSensor(int pin);
void readSensor(void);
int getPressure(void);
void getFormatedOutput(char* charData);
void getFormatedSerialOutput(char* charData);
private:
int _pin;
int _pressure_sensor_reading;
int _pressure_sensor_in_MV;
int _pressure_in_PSI;
};
#endif<file_sep>/TestBox/src/FlowSensor.cpp
#include "Arduino.h"
#include "FlowSensor.h"
FlowSensor::FlowSensor(int pin) {
pinMode(pin, INPUT);
_pin = pin;
_flowLastMeasuredTime = millis();
_flow = 0.0f;
attachInterrupt(digitalPinToInterrupt(_pin), FlowSensor::_pulseCounter, FALLING); // used
// attachInterrupt(digitalPinToInterrupt(_pin), _pulseCounter, RISING); // not used
}
volatile unsigned long FlowSensor::_pulseCount = 0;
volatile unsigned long FlowSensor::_totalPulsesDetected = 0;
void FlowSensor::_pulseCounter() {
FlowSensor::_pulseCount++;
FlowSensor::_totalPulsesDetected++;
}
void FlowSensor::readSensor() {
unsigned long flowMeasurementElapsedTime;
unsigned long pulses;
noInterrupts();
pulses = FlowSensor::_pulseCount;
FlowSensor::_pulseCount = 0;
flowMeasurementElapsedTime = millis() - _flowLastMeasuredTime;
_flowLastMeasuredTime = millis();
interrupts();
// Pulse constant: 0.46mL/Pulse = 0.00046L/Pulse
// compute flow rate in gallons/minute
float Liters = 0.00046 * pulses;
float LitersPerMs = Liters / flowMeasurementElapsedTime;
float LitersPerMinute = Liters / (flowMeasurementElapsedTime / 60000.0);
float GalllonsPerMinute = (Liters / 3.785412) / (flowMeasurementElapsedTime / 60000.0);
_flow = GalllonsPerMinute;
}
float FlowSensor::getFlowRate(){
FlowSensor::readSensor();
return _flow;
}
void FlowSensor::getFormatedOutput(char* charData) {
char flowStr[15];
dtostrf(_flow, 7, 2, flowStr);
sprintf(charData, "Flow: %-6d gal/min", flowStr);
}
void FlowSensor::getFormatedSerialOutput(char* charData) {
FlowSensor::getFormatedOutput(charData);
sprintf(charData, "%s, Total Pulses Detected: %d", charData, FlowSensor::_totalPulsesDetected);
}<file_sep>/TestBox/src/PressureSensor.cpp
#include "Arduino.h"
#include "PressureSensor.h"
PressureSensor::PressureSensor(int pin) {
_pin = pin;
_pressure_sensor_reading = 0;
_pressure_sensor_in_MV = 0;
_pressure_in_PSI = 0;
}
void PressureSensor::readSensor(void) {
// 300 PSI sensor outputs range from .5 Volts to 4.5 Volts
// .5 V = 0 PSI
// 4.5 V = 300 PSI
_pressure_sensor_reading = analogRead(_pin); // show about 100 at baseline
// Keep the sensor reading as an int and retain precision
_pressure_sensor_in_MV = map(_pressure_sensor_reading, 0, 1023, 0, 5000);
// 500 mV = 0 PSI
// 4500 mV = 300 PSI
_pressure_in_PSI = map(_pressure_sensor_in_MV, 500, 4500, 0, 300);
}
int PressureSensor::getPressure(void) {
PressureSensor::readSensor();
return _pressure_in_PSI;
}
void PressureSensor::getFormatedOutput(char* charData) {
sprintf(charData, "Pres: %d PSI ", _pressure_in_PSI);
}
void PressureSensor::getFormatedSerialOutput(char* charData) {
PressureSensor::getFormatedOutput(charData);
sprintf(charData, "%s, Raw sensor reading: %d, in MV: %d", charData, _pressure_sensor_reading, _pressure_sensor_in_MV);
}<file_sep>/TestBox/src/TemperatureSensor.cpp
#include "Arduino.h"
#include <max6675.h>
#include "TemperatureSensor.h"
TemperatureSensor::TemperatureSensor(int pin_SCK, int pin_CS, int pin_SO)
: thermocouple(pin_SCK, pin_CS, pin_SO) {
_temperature_F = 0;
}
void TemperatureSensor::readSensor(void) {
_temperature_F = thermocouple.readFahrenheit();
}
int TemperatureSensor::getTemperatureF(void) {
TemperatureSensor::readSensor();
return _temperature_F;
}
void TemperatureSensor::getFormatedOutput(char* charData) {
sprintf(charData, "Temp: %d F ", _temperature_F);
}
void TemperatureSensor::getFormatedSerialOutput(char* charData) {
TemperatureSensor::getFormatedOutput(charData);
}<file_sep>/TestBox/TestBox.ino
// include the library code
#include <Wire.h>
#include <LiquidCrystal_I2C.h>
#include <max6675.h>
#include "src\FlowSensor.h"
#include "src\PressureSensor.h"
#include "src\TemperatureSensor.h"
LiquidCrystal_I2C lcd(0x27, 2, 1, 0, 4, 5, 6, 7, 3, POSITIVE);
unsigned long previousUpdateTime;
const unsigned long updateRate = 1000; // in milliseconds
const int flowSensorPin = 2;
const int pressureSensorPin = A0;
const int tempSensorPinSCK = 9;
const int tempSensorPinCS = 13;
const int tempSensorPinSO = 10;
FlowSensor flowSensor(flowSensorPin);
PressureSensor pressureSensor(pressureSensorPin);
TemperatureSensor temperatureSensor(tempSensorPinSCK, tempSensorPinCS, tempSensorPinSO);
void setup() {
Serial.begin(9600);
Serial.println("Starting program!");
lcd.begin(20,4); // initialize the lcd
previousUpdateTime = millis();
}
void loop() {
unsigned long elapsedTime = millis() - previousUpdateTime;
elapsedTime = abs(elapsedTime);
// read sensors and display output every updateRate milliseconds
if (elapsedTime >= updateRate) {
flowSensor.readSensor();
pressureSensor.readSensor();
temperatureSensor.readSensor();
updateLCD();
outputToSerial(); // For debugging
previousUpdateTime = millis();
}
}
void updateLCD() {
lcd.setCursor( 0, 0 ); // go to the first row
lcd.print("Test Box Sensor Data");
char flowOutput[41];
flowSensor.getFormatedOutput(flowOutput);
lcd.setCursor( 0, 1 ); // go to the second row
lcd.print(flowOutput);
char pressureOutput[41];
pressureSensor.getFormatedOutput(pressureOutput);
lcd.setCursor( 0, 2 ); // go to the third row
lcd.print(pressureOutput);
char temperatureOutput[41];
temperatureSensor.getFormatedOutput(temperatureOutput);
lcd.setCursor( 0, 3 ); // go to the fourth row
lcd.print(temperatureOutput);
}
void outputToSerial(){
Serial.println("== Display updated ==");
  char flowOutput[81];   // larger buffer: the serial string is longer than the LCD one
flowSensor.getFormatedSerialOutput(flowOutput);
Serial.println(flowOutput);
  char pressureOutput[81];
pressureSensor.getFormatedSerialOutput(pressureOutput);
Serial.println(pressureOutput);
  char temperatureOutput[81];
temperatureSensor.getFormatedSerialOutput(temperatureOutput);
Serial.println(temperatureOutput);
temperatureSensor.readSensor(); // not sure why this is needed
}
|
f05ce4d0a7d124f317a8eb5fd5fe07f715310c13
|
[
"Markdown",
"C++"
] | 8
|
C++
|
robertgz/Testbox
|
e673b4b17155348588a56bc668de26b44fc1fa2f
|
cb623f52740fa9cc772958598b753322fa4caa28
|
refs/heads/master
|
<file_sep># feature 'User can create articles' do
# before do
# visit root_path
# click_on "New Article"
# end
# context "Succesfully create an article [HappyPath]" do
# before do
# fill_in "Title", with: 'Happy holidays'
# fill_in "Content", with: 'It is going to be fun!'
# click_on "Create Article"
# end
# it 'User should be on article show page' do
# article = Article.find_by(title: 'Happy holidays')
# expect(current_path).to eq article_path(article)
# end
# it 'User should see success message' do
# expect(page).to have_content 'Article successfully created.'
# end
# it 'User should see article title' do
# expect(page).to have_content 'Happy holidays'
# end
# it 'User should see article content' do
# expect(page).to have_content 'It is going to be fun!'
# end
# end
# context "User don't enter a title for the article [Sad Path]" do
# before do
# fill_in "Content", with: 'It is going to be fun!'
# click_on "Create Article"
# end
# it 'User should see error message' do
# expect(page). to have_content 'Title cannot be blank'
# end
# # more sad paths
# end
# end<file_sep>feature 'User can see specific article' do
before do
create(:article, title: 'This is going to be a long week', content: 'Because of Rails')
create(:article, title: 'Rails is awesome but confusing', content: 'because of too many files and folders')
visit root_path
click_on 'This is going to be a long week'
end
context 'Article displays' do
it 'title' do
expect(page).to have_content 'This is going to be a long week'
end
it 'content' do
expect(page).to have_content 'Because of Rails'
end
end
end<file_sep>require 'rails_helper'
feature 'List articles on index page' do
context 'with articles in db' do
before do
create(:article, title: 'This is going to be a long week')
create(:article, title: 'Rails is awesome but confusing')
visit root_path
end
it 'displays first article title' do
expect(page).to have_content 'This is going to be a long week'
end
it 'displays second article title' do
expect(page).to have_content 'Rails is awesome but confusing'
end
end
end<file_sep>feature 'visitor can write articles' do
before do
visit root_path
click_on "Write Article"
fill_in 'Title', with: "It is almost Friday"
fill_in 'Content', with: "Yes!!!! that makes me happy"
click_on 'Create Article'
end
describe 'Visitor can write an article' do
it 'visitor should see success message' do
expect(page).to have_content 'Article succesfully created'
end
end
end
|
94c94717590866722c7015348ca632bb9533e98f
|
[
"Ruby"
] | 4
|
Ruby
|
emiliano-ma/rails_demo
|
cac912e30eefd133a1067ef22527ebf0e5f4a587
|
a85cad5c365f688e2fcd294c0632ff0fa92f72d2
|
refs/heads/master
|
<repo_name>qstorey/dotfiles<file_sep>/README.md
# dotfiles
### A little place called home for my dotfiles.
This is a personal collection of dotfiles that is inspired by <NAME>'s
[dotfiles](https://github.com/holman/dotfiles) and thoughtbot's [dotfiles](https://github.com/thoughtbot/dotfiles).
Dotfiles should be available for people to view, get ideas from and share.
<NAME> wrote a nice
[post](https://zachholman.com/2010/08/dotfiles-are-meant-to-be-forked/) on this
topic. At the same time, dotfiles can contain personalised or private settings
that make forking a dotfiles repo a little trickier.
For that reason my dotfiles are broken up into two repos:
- dotfiles
> this repo
- dotfiles-private
> personalised or private includes like your git email address
or some server's IP address in your ssh config
### Requirements
- tmux (version >= 1.7)
- [oh-my-zsh](http://ohmyz.sh)
- [powerlevel10k](https://github.com/romkatv/powerlevel10k)
  - Clone the Oh-My-ZSH theme `git clone --depth=1 https://github.com/romkatv/powerlevel10k.git ${ZSH_CUSTOM:-~/.oh-my-zsh/custom}/themes/powerlevel10k`
  - Run `p10k configure` to install the recommended fonts
- [grip](https://github.com/joeyespo/grip) for GitHub flavoured Markdown Preview in VIM
### Download
```sh
git clone git@github.com:qstorey/dotfiles.git ~/.dotfiles
cd ~/.dotfiles
git submodule update --init --recursive
```
### Install
Symlink the components you want into your home directory.
git
```sh
ln -s ~/.dotfiles/git/gitconfig ~/.gitconfig
ln -s ~/.dotfiles/git/gitignore_global ~/.gitignore_global
```
neovim
```sh
ln -s ~/.dotfiles/nvim ~/.config/nvim
```
tmux
```sh
ln -s ~/.dotfiles/tmux/tmux.conf ~/.tmux.conf
```
zsh
```sh
ln -s ~/.dotfiles/zsh/zshrc ~/.zshrc
ln -s ~/.dotfiles/p10k/p10k.zsh ~/.p10k.zsh
```
<file_sep>/zsh/launch-tmux.sh
#!/bin/sh
# This script is a modified version of @Remz-Jay's tmux-launch script
# The original can be found here https://github.com/Remz-Jay/vim-config/blob/master/tmux-launch.sh
# abort if we're already inside a TMUX session
[ "$TMUX" == "" ] || exit 0
# present a menu for the user to choose which workspace to open
PS3="Please choose your session: "
options=("NEW SESSION" "Regular ZSH Shell" $(tmux list-sessions -F "#S"))
echo "Available sessions"
echo "------------------"
echo " "
select opt in "${options[@]}"
do
case $opt in
"NEW SESSION")
read -p "Enter new session name: " SESSION_NAME
tmux new -s "$SESSION_NAME"
break
;;
"Regular ZSH Shell")
break
;;
*)
tmux attach-session -t $opt
break
;;
esac
done
|
05c7da3995286e65089fec4d761bb966f533497d
|
[
"Markdown",
"Shell"
] | 2
|
Markdown
|
qstorey/dotfiles
|
44f825a6ad593e77e877f0e9b0b629a9ef53f348
|
a65a9b93fa2b0eb6491d79517ce2a193657b5705
|
refs/heads/master
|
<file_sep>#!/usr/bin/python
# Lab 13
# Physics 91SI
# Spring 2016
import unittest
import calc
class CalcTest(unittest.TestCase):
# TODO implement tests here to verify that your functions work!
def testAddition(self):
self.assertEqual(calc.calc('10+1'), 11)
def testSubtraction(self):
self.assertEqual(calc.calc('1-1'), 0)
    def testMultiplication(self):
self.assertEqual(calc.calc('1*2'), 2)
def testDivision(self):
self.assertEqual(calc.calc('10/5'), 2)
if __name__ == '__main__':
unittest.main()
<file_sep>#!/usr/bin/python
# Lab 13
# Physics 91SI
# Spring 2016
import sys
def main():
"""Join command-line arguments and pass them to unitcalc(), then print."""
calculation = ''.join(sys.argv[1:])
print calc(calculation)
def calc(s):
"""Parse a string describing an operation on quantities with units."""
# TODO make this robust for differently formatted inputs
operators = findOperators(s)
numbers = findNumbers(s)
num1 = numbers[0]
num2 = numbers[1]
operation = operators[0]
if operation=='+':
return int(num1)+int(num2)
elif operation=='-':
return int(num1)-int(num2)
elif operation=='*':
return int(num1)*int(num2)
elif operation=='/':
return int(num1)/int(num2)
def findNumbers(s):
    """Return the numeric substrings of s, split on the operator characters."""
    spaced = s
    for op in findOperators(s):
        spaced = spaced.replace(op, ' ')
    return spaced.split()

def findOperators(s):
    """Return the operator characters that appear in s, in order."""
    return [c for c in s if c in '+-*/']
if __name__ == "__main__": main()
|
37a3e6e2595d4066bada613aa19507e1cf47019d
|
[
"Python"
] | 2
|
Python
|
physics91si/s1dav1s-lab13
|
87ad650816cb1eaa44ea8c0d6f3b2957e42d6c75
|
403236f0bc043419a3bde4cbe8a2b72d946a11f5
|
refs/heads/master
|
<repo_name>fox-squirrels-2013/Thing-O-danielearthy<file_sep>/README.md
Thing-O-danielearthy
====================
Day 1 in the Big Brother house : Release 1
============================================
Deploying the Sinatra app with Heroku to display a meaningful and explanatory index.html
<file_sep>/myapp.rb
require 'rubygems'
require 'sinatra'
require 'active_record'
require_relative './app/models/airplane'
ActiveRecord::Base.establish_connection(adapter: 'postgresql', database:'fleetfarm')
get '/' do
erb :index
end
post '/' do
plane = Airplane.new
plane.name = params[:name]
plane.registration = params[:registration]
plane.year = params[:year]
plane.save
redirect '/thankyou'
end
get '/thankyou' do
erb :thankyou
end
|
3c0eda4ecc4cd051f0b6d611cb18c68130d38e41
|
[
"Markdown",
"Ruby"
] | 2
|
Markdown
|
fox-squirrels-2013/Thing-O-danielearthy
|
9fb2b7984a6d7c02f73b32e4deff7fe9c9a2895e
|
3eee4de3ead7d1d0d88364aa6b8af01dd5f4a3a5
|
refs/heads/master
|
<file_sep>const hands = [...document.querySelectorAll('img')];
const span = [...document.querySelectorAll('span')];
const playButton = document.querySelector('button');
let wins = 0;
let draws = 0;
let losses = 0;
let playerHand = "";
let aiHand = "";
let gResult = "";
let gPublish = "";
const choosenHand = (e) => {
playerHand = (e.currentTarget).className;
}
const aiDraws = function () {
return hands[Math.floor(Math.random() * 3)].className;
}
const result = function (choosenHand, aiDraws) {
if (choosenHand === aiDraws) {
return "draw";
} else if ((choosenHand === "paper" && aiDraws === "rock") || (choosenHand === "rock" && aiDraws === "scissors") || (choosenHand === "scissors" && aiDraws === "paper")) {
return "win";
} else {
return "loss";
}
}
const publish = () => {
document.querySelector('.results_player-choice').textContent = "Your choice:" + " " + playerHand;
document.querySelector('.results_ai-choice').textContent = "AI choice:" + " " + aiHand;
if (gResult === "win") {
document.querySelector('.results_result').textContent = "The result is: player won!";
wins++;
document.querySelector('.stats_wins').textContent = "Wins: " + wins;
} else if (gResult === "loss") {
document.querySelector('.results_result').textContent = "The result is: computer won!";
losses++;
document.querySelector('.stats_losses').textContent = "Losses: " + losses;
} else {
document.querySelector('.results_result').textContent = "The result is: draw!";
draws++;
document.querySelector('.stats_draws').textContent = "Draws: " + draws;
}
}
const start = () => {
if (playerHand === "") {
alert("First, choose a hand!");
}
aiHand = aiDraws();
gResult = result(playerHand, aiHand);
gPublish = publish();
}
hands.forEach(hand => hand.addEventListener('click', choosenHand));
playButton.addEventListener('click', start);
|
8979b860106569f94eac48b0d561b80a4501d34e
|
[
"JavaScript"
] | 1
|
JavaScript
|
uwuwuewue/RockPaperScissorsApp
|
24b5bb03b89f46b9a6f0479f3f61b90ffcbe3ebf
|
23aeee8f08426c61f3f4966c9fcf99aa5f274277
|
refs/heads/master
|
<file_sep>/** Convenience identifiers for HTTP Codes. */
enum HttpResponseCode {
ok = 200,
serverError = 500,
}
export default HttpResponseCode
<file_sep>/** Network responses are untyped. Disable some linting rules to accomodate. */
/* eslint-disable @typescript-eslint/no-unsafe-assignment */
import Candle from './candle'
import CandleProvider from './candle-provider'
import * as WebRequest from 'web-request'
import HttpResponseCode from './http-response-code'
import { Utils } from '@tacoinfra/harbinger-lib'
/** User agent for requests to the API. */
const USER_AGENT = 'harbinger-signer'
/** Granularity parameter for the Binance API. */
const GRANULARITY = '1m'
/** Binance REST API base URL */
const BINANCE_API_BASE_URL = 'https://api.binance.com'
/** Scale to report prices in. */
const SCALE = 6
/** Provides candles from the Binance API. */
export default class BinanceCandleProvider implements CandleProvider {
/**
* Get a description of the CandleProvider's backing service.
*
* @returns A string describing where the candles are pulled from.
*/
public getProviderName(): string {
return BINANCE_API_BASE_URL
}
/**
* Retrieves a candle from the Binance API.
*
* @param assetName The assetName to retrieve. For instance, "XTZ-USD".
*/
public async getCandle(assetName: string): Promise<Candle> {
// Binance ommits dashes in their API.
const normalizedAssetName = assetName.replace('-', '')
// Query the Binance API.
const requestPath = BinanceCandleProvider.makeRequestPath(
normalizedAssetName,
)
const apiURL = BINANCE_API_BASE_URL + requestPath
const response = await WebRequest.get(apiURL, {
headers: {
'User-Agent': USER_AGENT,
accept: 'json',
},
})
// Throw an error if API returned something other than a 200.
if (response.statusCode !== HttpResponseCode.ok) {
throw new Error(response.content)
}
// Binance returns an array of arrays. The outer array contains many candles and
// the inner array is the data for each candle.
const candles: Array<Array<number>> = JSON.parse(response.content)
// Grab and destructure the first candle, which is the most recent.
const [
startTimestamp,
open,
high,
low,
close,
volume,
endTimestamp,
] = candles[candles.length - 1]
// Return the data formatted as an {@link Candle}.
return {
assetName,
      // Binance reports timestamps in milliseconds; convert to seconds.
startTimestamp: Math.round(startTimestamp / 1000),
endTimestamp: Math.round(endTimestamp / 1000),
low: Utils.scale(low, SCALE),
high: Utils.scale(high, SCALE),
open: Utils.scale(open, SCALE),
close: Utils.scale(close, SCALE),
volume: Utils.scale(volume, SCALE),
}
}
/**
* Make an request path for the given asset in the Binance API.
*
* @param assetName The assetName to retrieve. For instance, "BATUSDC".
* @return The request path to hit.
*/
private static makeRequestPath(assetName: string): string {
return `/api/v3/klines?symbol=${assetName}&interval=${GRANULARITY}`
}
}
<file_sep>import CandleProvider from './candle-provider'
import Signer from './signer'
import { TezosParameterFormat } from 'conseiljs/dist/types/tezos/TezosChainTypes'
import { TezosMessageUtils } from '../node_modules/conseiljs/dist/chain/tezos/TezosMessageUtil'
import { Utils } from '@tacoinfra/harbinger-lib'
import Candle from './candle'
/**
* Schemas for Michelson messages.
*/
enum MichelsonSchema {
candle = 'pair string (pair timestamp (pair timestamp (pair nat (pair nat (pair nat (pair nat nat))))))',
revoke = 'option key',
}
/** Provides functionality for the oracle by composing a candle provider and signer. */
export default class OracleService {
/**
* Create a new Oracle Service.
*
* @param assetNames An array of asset names that this oracle service will serve.
* @param candleProvider A provider for Candles.
* @param signer A signer that will sign data.
*/
public constructor(
public readonly assetNames: Array<string>,
public readonly candleProvider: CandleProvider,
public readonly signer: Signer,
) {}
/**
* Handler for the oracle endpoint.
*/
public async oracle(): Promise<Record<string, unknown>> {
const candles: Array<Candle> = []
for (let i = 0; i < this.assetNames.length; i++) {
const assetName = this.assetNames[i]
try {
const candle = await this.candleProvider.getCandle(assetName)
candles.push(candle)
} catch (error) {
/* eslint-disable @typescript-eslint/no-unsafe-member-access */
/* eslint-disable @typescript-eslint/restrict-template-expressions */
console.log(
`Unable to produce a candle for ${assetName}: ${error.message}`,
)
/* eslint-enable @typescript-eslint/no-unsafe-member-access */
/* eslint-enable @typescript-eslint/restrict-template-expressions */
}
}
const michelsonCandles = candles.map((candle) => {
return `Pair "${candle.assetName}" (Pair ${candle.startTimestamp} (Pair ${candle.endTimestamp} (Pair ${candle.open} (Pair ${candle.high} (Pair ${candle.low} (Pair ${candle.close} ${candle.volume}))))))`
})
const packedCandles = michelsonCandles.map((michelsonCandle) => {
console.log('Packing ' + michelsonCandle)
const bytes = OracleService.pack(michelsonCandle, MichelsonSchema.candle)
return Utils.bytesToHex(bytes)
})
const byteCandles = michelsonCandles.map((michelsonCandles) => {
return OracleService.pack(michelsonCandles, MichelsonSchema.candle)
})
const signatures = await Promise.all(
byteCandles.map(async (byteCandle) => {
return await this.signer.sign(byteCandle)
}),
)
// The timestamp field is the most up to date timestamp.
const timestamp = candles.reduce(
(previous, current) => {
const currentTimestamp = current.startTimestamp
return previous > currentTimestamp ? previous : currentTimestamp
},
0
)
// Create an object that contains human readable prices.
// First map each candle to an object that constains a mapping of asset name to price.
const prices = candles.map((candle: Candle) => {
const assetName = candle.assetName
// TODO(keefertaylor): Refactor this to put Scale = 6 into harbinger-lib's constants and use
// harbinger-lib's Utils object to scale.
const priceNormalized = (candle.open + candle.close) / 2
const price = priceNormalized * Math.pow(10, -6)
// Typescript doesn't love objects.
const object: any = {}
object[`${assetName}`] = `${price}`
return object
// Then compress mappings to a single object.
}).reduce(
(previous, current) => {
return Object.assign(previous, current)
},
{}
)
return {
timestamp,
messages: packedCandles,
signatures: signatures,
prices
}
}
/**
* Handler for revoke endpoint.
*/
public async revoke(): Promise<string> {
const michelson = 'None'
const bytes = OracleService.pack(michelson, MichelsonSchema.revoke)
return await this.signer.sign(bytes)
}
/**
* Handler for info endpoint.
*/
public async info(): Promise<Record<string, unknown>> {
const candleProviderName = this.candleProvider.getProviderName()
const publicKey = await this.signer.getPublicKey()
return {
dataFeed: candleProviderName,
assetNames: this.assetNames,
publicKey: publicKey,
}
}
/**
* Pack the given Michelson to hex encoded bytes.
*
* @param michelson A Michelson string to pack.
* @param types A Michelson schema for the provided Michelson.
* @returns The inputs as packed bytes.
*/
private static pack(michelson: string, types: string): Uint8Array {
const packedHex = TezosMessageUtils.writePackedData(
michelson,
types,
TezosParameterFormat.Michelson,
)
return Utils.hexToBytes(packedHex)
}
}
<file_sep>import Signer from './signer'
import {
Prefixes,
Utils,
} from '@tacoinfra/harbinger-lib'
import { KmsKeyStore, KmsSigner } from '@tacoinfra/conseil-kms'
/** Provides signing capabilities from AWS KMS. */
export default class AwsSigner implements Signer {
/**
* Initialize a new AwsSigner.
*
* @param kmsKeyId The identifier of the KMS Key ID.
* @param region The region the KMS key is in.
*/
public static async from(
kmsKeyId: string,
region: string,
): Promise<AwsSigner> {
const keystore = await KmsKeyStore.from(kmsKeyId, region)
const signer = new KmsSigner(kmsKeyId, region)
return new AwsSigner(signer, keystore)
}
/** Private constructor. Please use the static `from` method. */
private constructor(
private readonly wrappedSigner: KmsSigner,
private readonly wrappedKeystore: KmsKeyStore,
) {}
/**
* Sign the given operation and produce a signature.
*
* @param bytes The bytes to sign.
* @returns A base58check encoded signature.
*/
public async sign(bytes: Uint8Array): Promise<string> {
const signedBytes = await this.wrappedSigner.signOperation(
Buffer.from(bytes),
)
return Utils.base58CheckEncode(signedBytes, Prefixes.secp256k1signature)
}
/**
* Returns the base58check encoded public key.
*/
public getPublicKey(): Promise<string> {
return new Promise((resolve, _reject) => {
resolve(this.wrappedKeystore.publicKey)
})
}
}
<file_sep>/**
* Candle data returned from an asset data feed.
*
* Note that by convention this data is expressed in natural numbers with six digits of
* precision. For instance, $123.42 USD would be expressed as 123_420_000.
*/
export default interface Candle {
/** Asset pair the candle identifies. Example: "XTZ-USD" */
assetName: string
/** Unix timestamp of the candle's start in seconds. */
startTimestamp: number
/** Unix timestamp of the candle's end in seconds. */
endTimestamp: number
/**
* Candle components expressed as natural numbers with 6 digits of precision.
* For instance, $123.42 USD would be expressed as 123_420_000.
*/
low: number
high: number
open: number
close: number
volume: number
}
<file_sep>/** Network responses are untyped. Disable some linting rules to accomodate. */
/* eslint-disable @typescript-eslint/no-unsafe-assignment */
import Candle from './candle'
import CandleProvider from './candle-provider'
import * as WebRequest from 'web-request'
import HttpResponseCode from './http-response-code'
import crypto = require('crypto')
import { Utils } from '@tacoinfra/harbinger-lib'
/** User agent for requests to the API. */
const USER_AGENT = 'harbinger-signer'
/** Granularity parameter for Coinbase API. */
const GRANULARITY_SECONDS = 60
/** Coinbase REST API base URL */
const COINBASE_PRO_API_BASE_URL = 'https://api.pro.coinbase.com'
/** Scale to report prices in. */
const SCALE = 6
/** The number of times to retry an attempt to the Coinbase API */
const COINBASE_API_RETRY_COUNT = 10
/** Provides candles from the Coinbase Pro API. */
export default class CoinbaseCandleProvider implements CandleProvider {
/**
* Construct a new CoinbaseCandleProvider.
*
* @param coinbaseApiKeyId The ID for a Coinbase Pro API Key.
* @param coinbaseApiKeySecret The secret for a Coinbase Pro API Key.
* @param coinbaseApiKeyPassphrase The passphrase for a Coinbase Pro API Key.
*/
public constructor(
public readonly coinbaseApiKeyId: string,
public readonly coinbaseApiKeySecret: string,
public readonly coinbaseApiKeyPassphrase: string,
) {}
/**
* Get a description of the CandleProvider's backing service.
*
* @returns A string describing where the candles are pulled from.
*/
public getProviderName(): string {
return COINBASE_PRO_API_BASE_URL
}
/**
* Retrieves a candle from the Coinbase Pro API.
*
* @param assetName The assetName to retrieve. For instance, "XTZ-USD".
*/
public async getCandle(assetName: string): Promise<Candle> {
for (let i = 0; i < COINBASE_API_RETRY_COUNT; i++) {
try {
return await this.queryCoinbaseForCandle(assetName)
} catch (err) {
await Utils.sleep(1)
}
}
throw new Error('Could not get candle')
}
public async queryCoinbaseForCandle(assetName: string): Promise<Candle> {
// Query the Coinbase Pro API.
const requestPath = CoinbaseCandleProvider.makeRequestPath(assetName)
const apiURL = COINBASE_PRO_API_BASE_URL + requestPath
const timestamp = Date.now() / 1000
const method = 'GET'
const what = `${timestamp}${method}${requestPath}`
const secretKey = Buffer.from(this.coinbaseApiKeySecret, 'base64')
const hmac = crypto.createHmac('sha256', secretKey)
const signature = hmac.update(what).digest('base64')
const response = await WebRequest.get(apiURL, {
headers: {
'User-Agent': USER_AGENT,
'CB-ACCESS-KEY': this.coinbaseApiKeyId,
'CB-ACCESS-SIGN': signature,
'CB-ACCESS-TIMESTAMP': timestamp,
'CB-ACCESS-PASSPHRASE': this.coinbaseApiKeyPassphrase,
accept: 'json',
},
})
// Throw an error if API returned something other than a 200.
if (response.statusCode !== HttpResponseCode.ok) {
throw new Error(response.content)
}
// Coinbase returns an array of arrays. The outer array contains many candles and
// the inner array is the data for each candle.
const candles: Array<Array<number>> = JSON.parse(response.content)
// Grab and destructure the first candle, which is the most recent.
const [startTimestamp, low, high, open, close, volume] = candles[0]
// Return the data formatted as an {@link Candle}.
return {
assetName,
startTimestamp,
endTimestamp: startTimestamp + GRANULARITY_SECONDS,
low: Utils.scale(low, SCALE),
high: Utils.scale(high, SCALE),
open: Utils.scale(open, SCALE),
close: Utils.scale(close, SCALE),
volume: Utils.scale(volume, SCALE),
}
}
/**
* Make an request path for the given asset in the Coinbase Pro API.
*
* @param assetName The assetName to retrieve. For instance, "XTZ-USD".
* @return The request path to hit.
*/
private static makeRequestPath(assetName: string): string {
return `/products/${assetName}/candles?granularity=${GRANULARITY_SECONDS}`
}
}
<file_sep># Harbinger Serverless Price Feed Signer
## About
`harbinger-signer` is a signer for the Harbinger oracle system. `harbinger-signer` is a [Serverless Framework](https://serverless.com/) application written in Typescript and deployed to [Amazon Web Services](https://aws.amazon.com). To get started with Harbinger, visit the [main documentation](https://github.com/tacoinfra/harbinger).
Cryptographic signatures are generated by [Amazon Key Management Service](https://aws.amazon.com/kms/), a highly secure service that uses hardware security modules (HSMs). The app is extensible to produce feeds from other APIs and to use other signing solutions besides Amazon KMS. See 'Customizing Functionality' below.
This library provides functionality for signing a Harbinger price feed. Users interested in posting prices might also be interested in [Harbinger CLI](https://github.com/tacoinfra/harbinger-cli) and [Harbinger Poster](https://github.com/tacoinfra/harbinger-poster) which provide both command line and Serverless methods of posting signed price data on-chain. Developers of new Harbinger components may be interested in [harbinger-lib](https://github.com/tacoinfra/harbinger-lib).
### Introduction
The Harbinger Tezos price oracle consists of two main components:
- A <em>**signer**</em> that retrieves price candles from an exchange and cryptographically signs them.
- A <em>**poster**</em> that retrieves the signed price feed from a signer and sends the signed data to a smart contract on the Tezos network.
This project is a reference price feed signer that demonstrates how to retrieve prices from multiple exchanges including Coinbase Pro, Binance, Gemini, and OKEx, and sign them with a private key that is securely stored inside [Amazon Key Management Service](https://aws.amazon.com/kms/). It is a [Serverless Framework](https://serverless.com) application. Once deployed, there is no need to spend any time thinking about or managing servers.
## Setup Instructions
In order to setup the Serverless application, you'll need to perform the following setup tasks first:
1. [Install the AWS CLI](https://docs.aws.amazon.com/cli/latest/userguide/cli-chap-install.html) on your system.
2. [Create an AWS access key](https://aws.amazon.com/premiumsupport/knowledge-center/create-access-key/) and configure the AWS CLI by running the `aws configure` command.
3. Login to the AWS console with an account that has the ability to create KMS keys and SSM parameters, and grant permissions to use them. An admin role will work best.
4. Be sure to select the correct region that you would like to deploy to. The [serverless.yml](serverless.yml) file in this repository is set to use the `eu-west-1` (Ireland) region, but you can easily edit this file and select a different region if you like. The important thing is to ensure that the region you select in the console is the same region that is specified in the Serverless configuration file.
5. In the AWS console, select the KMS service:

6. Click the "Create key" button:

7. Select <em>**Asymmetric**</em> under Key type, then select <em>**Sign and verify**</em> under Key usage, then select <em>**ECC_SECG_P256K1**</em> under Key spec, and click the <em>**Next**</em> button:

8. On the next page, input an <em>**Alias**</em> for the key and an optional <em>**Description**</em>, then click the <em>**Next**</em> button:

9. The next page is where you can define key administrators. There is no need to change any settings on this page, unless you would like to give additional IAM users or roles administrative permissions to the key. Click the <em>**Next**</em> button to continue:

10. This page is where you can define additional IAM users or roles that have key usage permissions. There is no need to change any settings on this page, unless you would like to give additional IAM users or roles usage permissions for the key. Click the <em>**Next**</em> button to continue:

11. Click <em>**Next**</em> to accept the default key policy, which only grants access to the root user. We'll edit this policy later to give the Serverless application rights to sign with the key.

12. Finally, click <em>**Finish**</em> and you should see a <em>**Success**</em> message similar to the following:

13. Copy the KMS key ID to your clipboard, or save it somewhere, then, launch <em>**Systems Manager**</em> from the <em>**Services**</em> section of the console (top left):

14. Select <em>**Parameter Store**</em> on the left navigation bar:

15. Click the <em>**Create Parameter**</em> button in the top left:

16. Name the parameter `/tezos/signer-kms-key-id` and give it an optional description. Leave the other settings at default (Standard tier, String type, Data type text), and paste or enter the KMS key ID you saved in step 13 as the value, without quotes or any surrounding characters, then click the <em>**Create parameter**</em> button:

## Coinbase Pro API Key Setup
The following steps are only required if you are planning on using the Coinbase Pro API as a price data provider. Other exchanges don't currently require creating an API key to view their price data. If you are using another exchange besides Coinbase Pro, skip to step 25.
17. Access your [Coinbase Pro API key settings](https://pro.coinbase.com/profile/api) either with the [link](https://pro.coinbase.com/profile/api) or by accessing your profile menu in the top right:

18. Click the <em>**New API Key**</em> button in the top right:

19. Give the API key a nickname, check the box for <em>**View**</em> permissions only, and either save the passphrase somewhere secure or replace the default random passphrase with a strong passphrase of your choice, then click the <em>**Create API Key**</em> button:

20. If you have 2-factor authentication enabled, you'll need to go through the 2-step verification, then click the <em>**Add an API Key**</em> button:

21. Store the API Secret in a secure place, then click the <em>**Done**</em> button:

22. Now you should see the View key that you just created, and you'll need to copy the API key itself and store it somewhere for the next steps. You can click the API key itself to copy it to your clipboard:

23. Create another parameter named `/tezos/coinbase-pro-api-key` and give it an optional description. This parameter should be of type `SecureString`, but you can leave the rest of the settings at their defaults, and input your Coinbase Pro API key (with view permissions) as the value, then click the <em>**Create parameter**</em> button:

24. Create two more parameters, one named `/tezos/coinbase-pro-api-passphrase` and the second one named `/tezos/coinbase-pro-api-secret` with the values that you saved previously in steps 19 and 21. These should both be of type `SecureString` as well.
## Deploying the Serverless Application
25. Clone this repository to your local system, install all NPM dependencies by typing `npm i` inside the repository directory, then type `sls deploy --stage {{ stage }}` (where stage is `coinbase`, `binance`, `gemini`, or `okex`) to deploy the application. If all goes well, you should see output similar to this. You'll want to save the two endpoints for use later.

26. Now, navigate back to <em>**KMS**</em> in the AWS console, and click on the <em>**Customer Managed Key**</em> you created earlier to modify the key policy. Click the button that says <em>**Switch to Policy View**</em>:

27. Now, click the button that says <em>**Edit**</em>:

28. Now we'll need to modify the key policy in order to enable the IAM role that the Serverless application will execute with to use the key for signing operations. You'll need to insert an additional JSON element or section into the <em>**Statement**</em> array. The section you'll need to insert is highlighted in the screenshot below. Don't forget to separate each statement with a comma. Here is the code you'll need to insert:
```JSON
,
{
"Sid": "Allow use of the key for digital signing",
"Effect": "Allow",
"Principal": {
"AWS": "arn:aws:iam::{{ AWS Account ID }}:role/harbinger-signer-{{ Exchange }}-{{ AWS Region }}-lambdaRole"
},
"Action": [
"kms:Sign",
"kms:Verify",
"kms:GetPublicKey"
],
"Resource": "*"
}
```
**Important Note:** You must replace the 3 sections of the JSON in each statement that have `{{ }}` (double curly braces) surrounding them with the appropriate information. This string should also have no spaces in it.
* **AWS Account ID** - This is your 12-digit numeric AWS account ID. If you're not sure what it is, look at the `root` policy above the one we are adding, and you should be able to copy it from that ARN
* **Exchange** - This is the string `coinbase`, `binance`, `gemini`, or `okex` (all lower case) depending on which signer you are deploying
* **Region** - This is the AWS region you are deploying to, such as `eu-west-1`

## Determining the Tezos address of the KMS key used to generate signatures
Congratulations, you've just deployed a Serverless application that will automatically sign Coinbase Pro or Binance prices for the markets/order books you choose. Here is how to determine the Tezos public key (`sppk...`) for the private key that is used to sign the prices:
1. Curl the `info` endpoint that is displayed when you ran the last step (`sls deploy`) and it should output the `sppk...` address. You will need to include an `x-api-key` header that is set to the API key that was output by the previous `sls deploy` command. Here is the full command:
```
curl --silent -H 'x-api-key: {{ your API key }}' https://{{ your API gateway }}.execute-api.eu-west-1.amazonaws.com/binance/info
```
If you get a `{"message": "Internal server error"}` instead, you should check your Lambda logs inside the AWS console to see what went wrong. Most likely you have either not created all of the Systems Manager parameters correctly or the KMS key policy is not 100% correct. You should see output like this:

## Customizing Functionality
[`OracleService`](https://github.com/tacoinfra/harbinger-signer/blob/master/src/oracle-service.ts) is a pluggable service that can handle all serverless requests for the signer. It is initialized with an object conforming to the [`Signer`](https://github.com/tacoinfra/harbinger-signer/blob/master/src/signer.ts) interface, an object conforming to the [`CandleProvider`](https://github.com/tacoinfra/harbinger-signer/blob/master/src/candle-provider.ts) interface and a list of assets to sign.
End users can customize this library with custom signers and candle providers.
### Custom Assets
An assets list is configured in [`serverless.yml`](https://github.com/tacoinfra/harbinger-signer/blob/master/serverless.yml#L60). This list can be customized to any set of assets.
### Custom Candle Providers
An object conforming to the [`CandleProvider`](https://github.com/tacoinfra/harbinger-signer/blob/master/src/candle-provider.ts) interface can retrieve `Candle` objects from an external feed. [`Candle Provider`s are injected into the `OracleService` via constructor](https://github.com/tacoinfra/harbinger-signer/blob/dfd677ec8724b03483e65ac156a2213e22d771a0/handler.ts#L89).
`Harbinger-Signer` has several `CandleProvider`s built in:
- [`BinanceCandleProvider`](https://github.com/tacoinfra/harbinger-signer/blob/master/src/binance-candle-provider.ts): Provides candles from the Binance API.
- [`CoinbaseCandleProvider`](https://github.com/tacoinfra/harbinger-signer/blob/master/src/coinbase-candle-provider.ts): Provides candles from the Coinbase Pro API.
- [`GeminiCandleProvider`](https://github.com/tacoinfra/harbinger-signer/blob/master/src/gemini-candle-provider.ts): Provides candles from the Gemini API.
- [`OkexCandleProvider`](https://github.com/tacoinfra/harbinger-signer/blob/master/src/okex-candle-provider.ts): Provides candles from the OKEx API.
### Custom Signers
An object conforming to the [`Signer`](https://github.com/tacoinfra/harbinger-signer/blob/master/src/signer.ts) interface can sign bytes and provide a public key. [`Signer`s are injected into `OracleService` via constructor](https://github.com/tacoinfra/harbinger-signer/blob/dfd677ec8724b03483e65ac156a2213e22d771a0/handler.ts#L89).
`Harbinger-Signer` has one signer built in, [`AwsSigner`](https://github.com/tacoinfra/harbinger-signer/blob/master/src/aws-signer.ts) which wraps calls to an [AWS KMS Service](https://aws.amazon.com/kms/).
## Credits
Harbinger is written and maintained by [<NAME>](https://github.com/lyoungblood) and [<NAME>](https://github.com/keefertaylor).
<file_sep>import Candle from './candle'
/**
* An interface for an object which provides candles for the Oracle.
*/
export default interface CandleProvider {
/**
* Get a description of the CandleProvider's backing service.
*
* @returns A string describing where the candles are pulled from.
*/
getProviderName(): string
/**
* Retrieve the latest candle to sign.
*
* @param assetName The asset name to retrieve. For instance "XTZ-USD".
* @returns The associated {@link Candle}.
*/
getCandle(assetName: string): Promise<Candle>
}
<file_sep>/** Disable to allow printing of error messages on arbitrarily thrown objects. */
/* eslint-disable @typescript-eslint/no-unsafe-member-access */
import { APIGatewayProxyHandler } from 'aws-lambda'
import OracleService from './src/oracle-service'
import { initOracleLib } from '@tacoinfra/harbinger-lib'
import AwsSigner from './src/aws-signer'
import HttpResponseCode from './src/http-response-code'
import BinanceCandleProvider from './src/binance-candle-provider'
import CoinbaseCandleProvider from './src/coinbase-candle-provider'
import GeminiCandleProvider from './src/gemini-candle-provider'
import OkexCandleProvider from './src/okex-candle-provider'
/** Handler for the Oracle Feed endpoint. */
export const oracle: APIGatewayProxyHandler = async (_event, _context) => {
try {
const oracleService = await getOracleService()
const body = await oracleService.oracle()
return {
statusCode: HttpResponseCode.ok,
body: JSON.stringify(body),
}
} catch (exception) {
console.log(exception.message)
console.log(exception.stack)
return {
statusCode: HttpResponseCode.serverError,
body: `Error: ${JSON.stringify(exception.message)}`,
}
}
}
/** Handler for the Revoke endpoint. */
export const revoke: APIGatewayProxyHandler = async (_event, _context) => {
try {
const oracleService = await getOracleService()
const body = await oracleService.revoke()
return {
statusCode: HttpResponseCode.ok,
body: body,
}
} catch (exception) {
console.log(exception.message)
console.log(exception.stack)
return {
statusCode: HttpResponseCode.serverError,
body: `Error: ${JSON.stringify(exception.message)}`,
}
}
}
/** Handler for the Info endpoint. */
export const info: APIGatewayProxyHandler = async (_event, _context) => {
try {
const oracleService = await getOracleService()
const body = await oracleService.info()
return {
statusCode: HttpResponseCode.ok,
body: JSON.stringify(body),
}
} catch (exception) {
console.log(exception.message)
console.log(exception.stack)
return {
statusCode: HttpResponseCode.serverError,
body: `Error: ${JSON.stringify(exception.message)}`,
}
}
}
/**
* Helper function to retrieve an {@link OracleService}.
*/
const getOracleService = async () => {
// Validate asset lists.
const assetList = process.env.ASSETS
if (assetList == undefined) {
throw new Error('No asset list defined. Please check your configuration')
}
const assets = assetList.split(',').sort()
// Initialize OracleLib.
initOracleLib()
const candleProvider = getCandleProvider()
const signer = await getSigner()
return new OracleService(assets, candleProvider, signer)
}
/**
* Helper function to validate inputs and create a {@link Signer}.
*/
const getSigner = () => {
const awsKmsKeyId = process.env.AWS_KMS_KEY_ID
const awsKmsKeyRegion = process.env.AWS_KMS_KEY_REGION
if (awsKmsKeyId === undefined || awsKmsKeyRegion === undefined) {
throw new Error(
'Fatal: Missing an input to create Signer. Please check your configuration.',
)
}
return AwsSigner.from(awsKmsKeyId, awsKmsKeyRegion)
}
/** Helper function to validate inputs and create a {@link CandleProvider}. */
const getCandleProvider = () => {
// Provide a candle provider based on the value of the CANDLE_PROVIDER env var.
const candleProvider = process.env.CANDLE_PROVIDER
if (candleProvider === 'COINBASE') {
return getCoinbaseCandleProvider()
} else if (candleProvider === 'BINANCE') {
return getBinanceCandleProvider()
} else if (candleProvider === 'GEMINI') {
return getGeminiCandleProvider()
} else if (candleProvider === 'OKEX') {
return getOkexCandleProvider()
} else {
throw new Error(
`Unknown CANDLE_PROVIDER passed in env var: ${JSON.stringify(
candleProvider,
)}`,
)
}
}
/** Helper function to return a `CoinbaseCandleProvider` */
const getCoinbaseCandleProvider = () => {
const coinbaseApiKeyId = process.env.COINBASE_API_KEY_ID
const coinbaseApiKeySecret = process.env.COINBASE_API_KEY_SECRET
const coinbaseApiKeyPassphrase = process.env.COINBASE_API_KEY_PASSPHRASE
if (
coinbaseApiKeyId === undefined ||
coinbaseApiKeySecret === undefined ||
coinbaseApiKeyPassphrase === undefined
) {
throw new Error(
'Fatal: Missing an input to create CandleProvider. Please check your configuration.',
)
}
return new CoinbaseCandleProvider(
coinbaseApiKeyId,
coinbaseApiKeySecret,
coinbaseApiKeyPassphrase,
)
}
/** Helper function to return a `BinanceCandleProvider` */
const getBinanceCandleProvider = () => {
return new BinanceCandleProvider()
}
/** Helper function to return a `GeminiCandleProvider` */
const getGeminiCandleProvider = () => {
return new GeminiCandleProvider()
}
/** Helper function to return an `OkexCandleProvider` */
const getOkexCandleProvider = () => {
return new OkexCandleProvider()
}
<file_sep>/**
* An interface for an object that can sign bytes.
*/
export default interface Signer {
/**
* Sign the given bytes.
*
* @param bytes The bytes to sign.
* @returns The signature encoded in base58check format.
*/
sign(bytes: Uint8Array): Promise<string>
/**
* Retrieve the public key used to verify signed messages.
*
* @returns The public key encoded in base58check format.
*/
getPublicKey(): Promise<string>
}
|
9a0780c89c71f0043e5b00d37e711af8043aa218
|
[
"Markdown",
"TypeScript"
] | 10
|
TypeScript
|
tacoinfra/harbinger-signer
|
0815e44dfcc2ca1222caacdd5b8be13485e9647b
|
60030099845770a808ad6eaf6a9b185e088ed6f9
|
refs/heads/main
|
<file_sep>document.getElementById("addBtn").addEventListener('click', (event)=>
{
const addList = document.getElementById('todoinput').value;
const list = document.getElementById('myUL');
const childList = document.createElement('li');
childList.innerText = addList;
list.appendChild(childList);
})
document.getElementById('myUL').addEventListener('click', (event) => {
    // only remove list items, not the list container itself
    if (event.target.tagName !== 'LI') return;
    event.target.parentNode.removeChild(event.target);
    alert(`Have you completed your work?
If the answer is "Yes",
click "OK"!
`);
})
{/* <i class="fas fa-times mx-5"></i><i class="fas fa-check "></i> */}
|
e36e147e21adeda3d81e49eb537ecc469a442f89
|
[
"JavaScript"
] | 1
|
JavaScript
|
dalwer001/To-do-app
|
a072e990be3019d20c93cc0129c6e7cdf863cf0d
|
1f9c3b1bf061e26d8a089ede8d8a29fb4c11d373
|
refs/heads/master
|
<file_sep>const gulp = require('gulp');
var browserSync = require('browser-sync').create();
var sass = require('gulp-sass');
//додаткові плагіни Gulp
const
browserSync = require('browser-sync').create(),
sass = require ('gulp-sass'), //конвертує SASS в CSS
cssnano = require ('gulp-cssnano'), //мінімізація CSS
autoprefixer = require ('gulp-autoprefixer'), //додавання префіксів в CSS для підтримки старих браузерів
imagemin = require ('gulp-imagemin'), //стиснення зображень
concat = require ('gulp-concat'), //об'єднання файлів - конкатенація
uglify = require ('gulp-uglify'), //мінімізація javascript
rename = require ('gulp-rename'); //перейменування файлів
// Compile sass into CSS & auto-inject into browsers
gulp.task('sass', function() {
return gulp.src(['node_modules/bootstrap/scss/bootstrap.scss', 'src/scss/*.scss'])
.pipe(sass())
.pipe(gulp.dest("src/css"))
.pipe(browserSync.stream());
});
// Move the javascript files into our /src/js folder
gulp.task('js', function() {
return gulp.src(['node_modules/bootstrap/dist/js/bootstrap.min.js', 'node_modules/jquery/dist/jquery.min.js', 'node_modules/popper.js/dist/umd/popper.min.js'])
.pipe(gulp.dest("src/js"))
.pipe(browserSync.stream());
});
// Static Server + watching scss/html files
gulp.task('serve', ['sass'], function() {
browserSync.init({
server: "./src"
});
gulp.watch(['node_modules/bootstrap/scss/bootstrap.scss', 'src/scss/*.scss'], ['sass']);
gulp.watch("src/*.html").on('change', browserSync.reload);
});
gulp.task('default', ['js','serve']);
//watch for changes in html and js files
gulp.task ('watch', function () {
gulp.watch ( ['app/*.html'], () => console.log('HTML was changed'));
gulp.watch ( ['app/js/*.js'], () => console.log('JS was changed'));
});
// as the default task, start the Browsersync server and watch for changes in html/css/js files
gulp.task('default', function () {
browserSync.init({
watch: true,
server: "./app"
});
});<file_sep>var ctx = document.getElementById('lastgraf').getContext('2d');
var chart = new Chart(ctx, {
// The type of chart we want to create
type: 'line',
// The data for our dataset
data: {
labels: ['S','M','T','W','T','F','S'],
datasets: [{
//label: 'My First dataset',
pointBorderColor: '#fc4c7a',
borderColor: '#fc4c7a',
borderWidth:4.5,
backgroundColor: "transparent",
lineTension: 0, pointBackgroundColor:"transparent",
data: [21, 26, 28, 22, 21, 12, 8]
}
]
},
// Configuration options go here
options: {
scales: {
xAxes: [{
gridLines: {
display:false
}
}],
yAxes: [{
ticks: {
max: 30,
min: 0,
stepSize: 10
}
}]
},
legend: {
display: false
},
tooltips:
{
onlyshowDatasetIndex:[0,1],
callbacks:
{
label: function (tooltipItems,data){
return tooltipItems.yLabel;
},
title: function(tooltipItem,data){
return;
}
},
displayColors:false,
caretSize:5,
xPadding: 5,
yPadding:10
}
}
});
|
9377b0a9760a7165bfba316832adb94ff23c0533
|
[
"JavaScript"
] | 2
|
JavaScript
|
Bohdashga/web-page
|
b73d665d363ff62b5b6ff14fabbaa9a0f81054e3
|
0d59e4f4b38ae648debbbf0507108d8c10fab7da
|
refs/heads/master
|
<file_sep>import React from 'react'
import style from "./AuthorForm.module.css"
import axios from 'axios';
import { Link } from "react-router-dom"
const AuthorList = (props) => {
const { removeFromDom } = props;
const deleteAuthor = (authorId) => {
axios.delete('http://localhost:8000/api/authors/' + authorId)
.then(res => {
removeFromDom(authorId)
})
.catch(err => console.error(err));
}
return (
<div>
{props.authors.map( (author, i) =>{
let url=`/authors/${author._id}`;
return (
<div className={style.items} key={i}>
<h2><a href={url}>{author.name}</a></h2> <p>~~~~~</p>
<Link to={"/authors/" + author._id + "/edit"}>
<button>Edit</button>
</Link>
<button onClick={(e)=>{deleteAuthor(author._id)}}>
Delete
</button>
</div>)
}
)}
</div>
)
}
export default AuthorList;<file_sep>import React, { useEffect, useState } from 'react'
import axios from 'axios';
import AuthorList from './AuthorList';
import AuthorForm from './AuthorForm'
import style from "./AuthorForm.module.css"
export default () => {
const [authors, setAuthors]=useState([]);
const [loaded, setLoaded]=useState(false);
    useEffect(()=>{
        axios.get('http://localhost:8000/api/authors')
            .then(res=>{
                setAuthors(res.data);
                setLoaded(true);
            })
            .catch(err => console.error(err));
    },[]); // fetch once on mount; depending on authors here would re-fetch in an endless loop
const removeFromDom = authorId => {
setAuthors(authors.filter(author => author._id != authorId));
}
return (
<div className={style.inventory}>
{/* <AuthorForm/>
<hr/> */}
<h2>CURRENT AUTHORS</h2>
{loaded && <AuthorList authors={authors} removeFromDom={removeFromDom}/>}
</div>
)
}<file_sep>
import React, { useEffect, useState } from 'react'
import axios from 'axios';
import { useParams,Link,BrowserRouter } from "react-router-dom";
import style from "./AuthorForm.module.css"
const Detail = (props) => {
const [author, setAuthor] = useState({})
const { id } = useParams();
const { removeFromDom } = props;
useEffect(() => {
axios.get('http://localhost:8000/api/authors/' +id)
.then(res => setAuthor(res.data))
.catch(err => console.error(err));
}, []);
const deleteAuthor = (authorId) => {
axios.delete('http://localhost:8000/api/authors/' + authorId)
.then(res => {
removeFromDom(authorId)
})
.catch(err => console.error(err));
}
return (
<div className={style.single}>
<h1>Title: {author.name}</h1>
<Link to={"/authors/" + author._id + "/edit"}>
Edit
</Link>
<Link to={"/authors/"}>
<button onClick={(e)=>{deleteAuthor(author._id)}}>
Delete
</button>
</Link>
</div>
)
}
export default Detail;<file_sep>import React, { useEffect, useState } from 'react'
import axios from 'axios';
import { useParams, useHistory } from "react-router-dom";
import style from "./AuthorForm.module.css"
const Update = (props) => {
const { id } = useParams();
const [name, setName] = useState('');
const history = useHistory();
const [nameError, setNameError] = useState('');
const handleName=e=>{
setName(e.target.value)
if(e.target.value.length>2){
setNameError('')
}
else{
setNameError('Name is too short!')
}
}
useEffect(() => {
console.log(id)
axios.get('http://localhost:8000/api/authors/' + id)
.then(res => {
console.log("UE", res.data)
setName(res.data.name);
})
}, []);
const updateAuthor = e => {
e.preventDefault();
axios.put('http://localhost:8000/api/authors/' + id, {
name,
})
.then(res => {
if(!res.data.errors){
history.push('/authors')
}
else{
setNameError(res.data.errors.name.message)
}
})
.catch(err => console.log(err));
}
return (
<div className={style.editor}>
<h1>Update {name}</h1>
<form onSubmit={updateAuthor}>
<p>
<label>Name:</label><br />
<input type="text" name="Name" value={name}
onChange={(e) => {handleName(e)}} />
</p>
<p>{nameError}</p>
<input type="submit" />
</form>
</div>
)
}
export default Update;
|
832478c8b98ae8e72431ade044925775be63e493
|
[
"JavaScript"
] | 4
|
JavaScript
|
toddaulwurm/authors
|
a7aa36c74b048649e3b82304a7bb01306a9e8328
|
951b1e2a68a72de5784b17cc4abb0d754df7c923
|
refs/heads/main
|
<repo_name>arsamnaqvi/chip-8-emulator<file_sep>/README.md
# chip-8-emulator
my try at a chip-8 emulator
<file_sep>/chip.h
#include <cstdint>
// macros for the class used below
#define NUM_REG 16
#define MEM_SIZE 4096
#define STACK_SIZE 16
#define KEYPAD_SIZE 16
#define VIDMEM_SIZE 2048 // 64*32
#define FONTSET_SIZE 80
/*
* class definition for the chip8 infrastructure
* the names are self explanatory and represent
* exactly what they say
*/
class chip8 {
public:
uint8_t registers[NUM_REG]{};
uint8_t memory[MEM_SIZE]{};
uint16_t index{};
uint16_t pc{};
uint16_t stack[STACK_SIZE]{};
uint8_t sp{};
uint8_t delaytimer{};
uint8_t soundtimer{};
uint8_t keypad[KEYPAD_SIZE]{};
uint32_t video[VIDMEM_SIZE]{};
uint16_t opcode;
chip8();
void loadROM(char const* filename);
    // instruction handlers - one per opcode
    // (prefixed with OP_ because C++ identifiers cannot start with a digit)
    void OP_00E0();
    void OP_00EE();
    void OP_1nnn();
    void OP_2nnn();
    void OP_3xkk();
    void OP_4xkk();
    void OP_5xy0();
    void OP_6xkk();
    void OP_7xkk();
    void OP_8xy0();
    void OP_8xy1();
    void OP_8xy2();
    void OP_8xy3();
    void OP_8xy4();
    void OP_8xy5();
    void OP_8xy6();
    void OP_8xy7();
    void OP_8xyE();
    void OP_9xy0();
    void OP_Annn();
    void OP_Bnnn();
    void OP_Cxkk();
    void OP_Dxyn();
    void OP_Ex9E();
    void OP_ExA1();
    void OP_Fx07();
    void OP_Fx0A();
    void OP_Fx15();
    void OP_Fx18();
    void OP_Fx1E();
    void OP_Fx29();
    void OP_Fx33();
    void OP_Fx55();
    void OP_Fx65();
};
/*
 * the fontset that represents the characters shown in the
 * comments on the right
 * each character is represented as 4 x 5 pixel art
* example: 0 is:
* 11110000
* 10010000
* 10010000
* 10010000
* 11110000
*/
uint8_t fontset[FONTSET_SIZE] = {
0xF0, 0x90, 0x90, 0x90, 0xF0, // 0
0x20, 0x60, 0x20, 0x20, 0x70, // 1
0xF0, 0x10, 0xF0, 0x80, 0xF0, // 2
0xF0, 0x10, 0xF0, 0x10, 0xF0, // 3
    0x90, 0x90, 0xF0, 0x10, 0x10, // 4
0xF0, 0x80, 0xF0, 0x10, 0xF0, // 5
    0xF0, 0x80, 0xF0, 0x90, 0xF0, // 6
0xF0, 0x10, 0x20, 0x40, 0x40, // 7
0xF0, 0x90, 0xF0, 0x90, 0xF0, // 8
0xF0, 0x90, 0xF0, 0x10, 0xF0, // 9
0xF0, 0x90, 0xF0, 0x90, 0x90, // A
0xE0, 0x90, 0xE0, 0x90, 0xE0, // B
    0xF0, 0x80, 0x80, 0x80, 0xF0, // C
0xE0, 0x90, 0x90, 0x90, 0xE0, // D
0xF0, 0x80, 0xF0, 0x80, 0xF0, // E
0xF0, 0x80, 0xF0, 0x80, 0x80 // F
};<file_sep>/chip.cpp
#include "chip.h"
#include <fstream>
#include <stdlib.h>
#include <time.h>
/* macro to define the address pointed to by the
* program counter upon boot time
*/
#define START_ADDRESS 0x200
#define FONT_START_ADDRESS 0x50
/*
* chip8 constructor
* initializes the system
* inputs: none
* outputs: settings initialized like the program counter
*/
chip8::chip8() {
// initialize the pc for the system
pc = START_ADDRESS;
// initialize the font set data into memory
for (int i = 0; i < FONTSET_SIZE; ++i) {
        memory[FONT_START_ADDRESS + i] = fontset[i];
}
}
/*
* loadROM function
 * this loads the program file into the chip8 memory
 * to be interpreted by the system afterwards
* inputs: name of the file which holds the program
* outputs: none
*/
void chip8::loadROM(char const* filename){
    // open the file and move the file pointer to the end
std::ifstream file(filename, std::ios::binary | std::ios::ate);
// if the file is open then proceed
if (file.is_open()) {
// get the size of the file
std::streampos size = file.tellg();
// make a buffer to hold the file's content
char* buffer = new char[size];
        // go to the start of the file and copy its content
        // into the temp buffer
file.seekg(0, std::ios::beg);
file.read(buffer, size);
// close the file
file.close();
// copy the file content from the temp buffer into
// the chip8 memory
for (long i = 0; i < size; i++) {
            memory[START_ADDRESS + i] = buffer[i];
}
// clean up local variables
delete[] buffer;
}
}
/*
* the following are the implementations of
* the opcode instructions for the chip-8
*/
/*
* OPcode 00E0
* clear the display
*/
void chip8::OP_00E0() {
// set the video memory to hold all 0s
    std::memset(video, 0, sizeof(video)); // video entries are 32-bit, so clear the full array size in bytes
}
/*
* OPcode 00EE
 * returns from the subroutine
*/
void chip8::OP_00EE() {
// decrement the stack pointer to point to the return
// address stored in the previous position
sp--;
// update the program counter to point to the correct
// instruction after the return
pc = stack[sp];
}
/*
* OPcode 1nnn
* jump to location nnn
*/
void chip8::OP_1nnn() {
// update pc to hold the instruction address that the
// OPcode gives
pc = opcode & 0x0FFF;
}
/*
* OPcode 2nnn
* call instruction - call subroutine at location nnn
*/
void chip8::OP_2nnn() {
// push the return address (pc) into the stack and increment stack pointer
stack[sp] = pc;
sp++;
// update pc to jump to the called subroutine
pc = opcode & 0x0FFF;
}
/*
* OPcode 3xkk
* skip the next instruction if register Vx holds the value kk
*/
void chip8::OP_3xkk() {
    // get the register number from the opcode
int regnum = (opcode & 0x0F00) >> 8;
// get the value kk
uint8_t val = opcode & 0x00FF;
// check if the register content matches the given value
if (registers[regnum] == val) {
// update the pc to skip the next instruction
// each instruction takes 2 bytes hence the +2
pc += 2;
}
}
/*
* OPcode 4xkk
* skip next instruction if the value in reg Vx is not equal to kk
*/
void chip8::OP_4xkk() {
// find the register number from the opcode
int regnum = (opcode & 0x0F00) >> 8;
// get the value kk
uint8_t val = opcode & 0x00FF;
// check if the reg Vx holds the same value as kk
if (registers[regnum] != val) {
// if not update pc to skip next instruction
pc += 2;
}
}
/*
* OPcode 5xy0
* skip the next instruction if registers Vx and Vy hold the same value
*/
void chip8::OP_5xy0() {
// get the register numbers from the opcode
int x = (opcode & 0x0F00) >> 8;
int y = (opcode & 0x00F0) >> 4;
// check if they're the same
if (registers[x] == registers[y]) {
// skip next instruction if so
pc += 2;
}
}
/*
* OPcode 6xkk
* load the value kk into register Vx
*/
void chip8::OP_6xkk() {
// get the reg number
int regnum = (opcode & 0x0F00) >> 8;
// update the value
registers[regnum] = opcode & 0x00FF;
}
/*
* OPcode 7xkk
* add the value kk to the value in register Vx
*/
void chip8::OP_7xkk() {
// get the reg num and value
int regnum = (opcode & 0x0F00) >> 8;
uint8_t val = opcode & 0x00FF;
// add the value into the register value and store in the register
registers[regnum] += val;
}
/*
* OPcode 8xy0
* load the value in register y into register x (Vx = Vy)
*/
void chip8::OP_8xy0() {
// get the reg numbers
int x = (opcode & 0x0F00) >> 8;
int y = (opcode & 0x00F0) >> 4;
    // update reg x's value
registers[x] = registers[y];
}
/*
* OPcode 8xy1
* value in reg x = value in reg x ORed with value in reg y
*/
void chip8::OP_8xy1() {
// get the reg numbers
int x = (opcode & 0x0F00) >> 8;
int y = (opcode & 0x00F0) >> 4;
// update reg x's value
registers[x] |= registers[y];
}
/*
* OPcode 8xy2
* bitwise AND of values in reg x and y - stored in reg x
*/
void chip8::OP_8xy2() {
// get the reg numbers
int x = (opcode & 0x0F00) >> 8;
int y = (opcode & 0x00F0) >> 4;
// update reg x's value
registers[x] &= registers[y];
}
/*
* OPcode 8xy3
* bitwise XOR of values in reg x and y - stored in reg x
*/
void chip8::OP_8xy3() {
// get the reg numbers
int x = (opcode & 0x0F00) >> 8;
int y = (opcode & 0x00F0) >> 4;
// update reg x's value
registers[x] ^= registers[y];
}
/*
* OPcode 8xy4
* reg x = reg x + reg y
* sum is only 8 bits long, if carry found reg 0xF = 1, else 0
*/
void chip8::OP_8xy4() {
// get the reg numbers
int x = (opcode & 0x0F00) >> 8;
int y = (opcode & 0x00F0) >> 4;
// get the sum
uint16_t sum = registers[x] + registers[y];
// check for overflow and give reg0xF value accordingly
registers[0xF] = sum > 255 ? 1 : 0;
// update reg x's value
registers[x] = sum & 0x00FF;
}
/*
* OPcode 8xy5
* reg x = reg x - reg y
* if reg x > reg y, reg0xF = 1, else 0
*/
void chip8::OP_8xy5() {
// get the reg numbers
int x = (opcode & 0x0F00) >> 8;
int y = (opcode & 0x00F0) >> 4;
// set register0xF's value
registers[0xF] = registers[x] > registers[y] ? 1 : 0;
// update regx
registers[x] -= registers[y];
}
/*
* OPcode 8xy6
* right shift reg x's value by 1 (divide by 2)
* if least significant bit of reg x is 1, then reg0xF = 1, else 0
*/
void chip8::OP_8xy6() {
// get the reg number
int x = (opcode & 0x0F00) >> 8;
// update register0xF
registers[0xF] = registers[x] & 0x01;
// update reg x
registers[x] >>= 1;
}
/*
* OPcode 8xy7
* reg x = reg y - reg x
* if reg y > reg x, reg0xF = 1, else 0
*/
void chip8::OP_8xy7() {
// get the reg numbers
int x = (opcode & 0x0F00) >> 8;
int y = (opcode & 0x00F0) >> 4;
// set register0xF's value
registers[0xF] = registers[y] > registers[x] ? 1 : 0;
// update regx
registers[x] = registers[y] - registers[x];
}
/*
* OPcode 8xyE
* left shift reg x's value by 1 (multiply by 2)
* if most significant bit of reg x is 1, then reg0xF = 1, else 0
*/
void chip8::OP_8xyE() {
// get the reg number
int x = (opcode & 0x0F00) >> 8;
// update register0xF
registers[0xF] = (registers[x] & 0x80) >> 7;
// update reg x
registers[x] <<= 1;
}
/*
* OPcode 9xy0
* skip the next instruction if reg x is not equal to reg y
*/
void chip8::OP_9xy0() {
// get the reg numbers
int x = (opcode & 0x0F00) >> 8;
int y = (opcode & 0x00F0) >> 4;
// check if regx != regy
if (registers[x] != registers[y]) {
// update pc to skip next instruction
pc += 2;
}
}
/*
* OPcode Annn
* load the address nnn into the index register
*/
void chip8::OP_Annn() {
index = opcode & 0x0FFF;
}
/*
* OPcode Bnnn
* jump to location nnn + value in reg 0
*/
void chip8::OP_Bnnn() {
// update pc to jump
//uint16_t adr = registers[0] + (opcode & 0x0FFF);
pc = registers[0] + (opcode & 0x0FFF);
}
/*
* OPcode Cxkk
* load reg x with the a random value ANDed with kk
*/
void chip8::OP_Cxkk() {
// get the reg number
int x = (opcode & 0x0F00) >> 8;
// generate a random number between 0 and 255 inclusive
srand(time(NULL));
uint8_t rnd = rand() % 256;
    // get the value kk from the opcode
uint8_t val = opcode & 0x00FF;
// update reg x
registers[x] = rnd & val;
}
/*
* OPcode Dxyn
*
*/
void chip8::OP_Dxyn() {}   // TODO: draw an n-byte sprite at (Vx, Vy)
/*
 * the remaining opcodes are not implemented yet;
 * empty stub definitions are provided so the file still compiles
 */
void chip8::OP_Ex9E() {}   // TODO: skip next instruction if key Vx is pressed
void chip8::OP_ExA1() {}   // TODO: skip next instruction if key Vx is not pressed
void chip8::OP_Fx07() {}   // TODO: set Vx to the delay timer value
void chip8::OP_Fx0A() {}   // TODO: wait for a key press and store it in Vx
void chip8::OP_Fx15() {}   // TODO: set the delay timer to Vx
void chip8::OP_Fx18() {}   // TODO: set the sound timer to Vx
void chip8::OP_Fx1E() {}   // TODO: add Vx to the index register
void chip8::OP_Fx29() {}   // TODO: set index to the sprite location for digit Vx
void chip8::OP_Fx33() {}   // TODO: store the BCD representation of Vx in memory
void chip8::OP_Fx55() {}   // TODO: store registers V0..Vx in memory starting at index
void chip8::OP_Fx65() {}   // TODO: read registers V0..Vx from memory starting at index
|
d69e440b1fcca7be52bf236eff1566243c76bb36
|
[
"Markdown",
"C++"
] | 3
|
Markdown
|
arsamnaqvi/chip-8-emulator
|
752f576930c7e189567fa0f4071eedb159079690
|
c25b656ebbe1d701f73137c3c60a3c82ffec4db3
|
refs/heads/master
|
<file_sep>// $(document).ready(function(){
// });
const SHA256 = require('crypto-js/sha256');
const level = require('level');
const chainDB = './chaindata';
const db = level(chainDB);
function c(txt){
console.log(txt);
}
//---------((((((((((((((((((((((((((()))))))))))))))))))))))))))
// Add data to levelDB with key/value pair
function addLevelDBData(key,value){
db.put(key, value, function(err) {
if (err) return console.log('Block ' + key + ' submission failed', err);
// getLevelDBData(key);
})
}
// Get data from levelDB with key
function getLevelDBData(key){
return new Promise(function (resolve,reject){
db.get(key, function(err, value) {
if (err) return console.log('Not found!', err);
resolve(value);
})
});
}
// Add data to levelDB with value
function addDataToLevelDB(value) {
let i = 0;
db.createReadStream().on('data', function(data) {
i++;
}).on('error', function(err) {
return console.log('Unable to read data stream!', err)
}).on('close', function() {
console.log('Block #' + i);
addLevelDBData(i, value);
});
}
//--------((((((((((((((((((((((((((()))))))))))))))))))))))))))
/* ===== Block Class ===================================
| Class with a constructor for block data model |
| ====================================================*/
class Block{
constructor(data){
this.hash = "",
this.height = 0,
this.body = data,
this.time = 0,
this.previousBlockHash = ""
}
}
/* ===== BlockChain Class ===================================
| Class with a constructor for BlockChain |
| ====================================================*/
class BlockChain{
constructor(){
this.chain = [];
// this.addBlock(new Block("First block in the chain - Genesis block"));
}
// Add new block
addBlockTest(newBlock){
}
/*################################################
################ Add block ######################
################################################*/
addBlock(newBlock){
// return new Promise(function(resolve,reject){
// // Add new block
// // UTC timestamp
// newBlock.time = new Date().getTime().toString().slice(0,-3);
// // previous block hash
// // Block hash with SHA256 using newBlock and converting to a string
// newBlock.hash = SHA256(JSON.stringify(newBlock)).toString();
// // Adding block object to chain
// //-----------addDataToLevelDB
// let i = 0;
// db.createReadStream().on('data', function(data) {
// i++;
// }).on('error', function(err) {
// return console.log('Unable to read data stream!', err)
// }).on('close', function() {
// console.log('Block #' + i);
// // Block height
// newBlock.height =i;
// //-----------addLevelDBData(key,value){
// db.put(i, newBlock, function(err) {
// if (err) return console.log('Block ' + key + ' submission failed', err);
// // getLevelDBData(key);
// })
// });
// })
return new Promise(function(resolve,reject){
// let h = 0;
bc.getBlockHeight().then((h) => {
if(h>0){
c('\n=============================')
c('\ncatch h '+ h)
}
c('fn addBlock ' + h)
/// Block height
newBlock.height = h;
let objBlock=[];
objBlock.push(newBlock)
objBlock.push(h)
return objBlock;
}).then((objBlock) => {
//*************** formating block *****************
/* objBlock:-
- objBlock[0]...........newBlock
- objBlock[1]...........h
- objBlock[2]...........previousBlock
*/
// UTC timestamp
c('objBlock\t'+objBlock)
let newBlock=objBlock[0];
newBlock.time = new Date().getTime().toString().slice(0,-3);
c('newBlock.time\t'+newBlock.time)
let h=objBlock[1]
// let bc=new BlockChain();
if(h>0)
{
c('\nblock height >0 !!!!!!!!!!!! = '+h);
// Block height
newBlock.height = h;
// previous block hash
bc.getBlock(h-1).then((previousBlock) => {
c('previousBlock,,,,,\t'+previousBlock)
newBlock.previousBlockHash = JSON.parse((previousBlock)).hash;
c('previousBlock.hash\t'+JSON.parse((previousBlock)).hash);
//check existance of newBlock
c(newBlock)
// Block hash with SHA256 using newBlock and converting to a string
newBlock.hash = SHA256(JSON.stringify(newBlock)).toString();
c('newBlock.hash\t'+newBlock.hash);
//finally VERY IMPORTANT - stringify block
newBlock=JSON.stringify(newBlock).toString();
// Adding block object to chain
//*************** adding block to DB *****************
addLevelDBData(h,newBlock)
})
}
else{
// Block hash with SHA256 using newBlock and converting to a string
newBlock.hash = SHA256(JSON.stringify(newBlock)).toString();
c('GENESIS newBlock.hash\t'+newBlock.hash);
//finally VERY IMPORTANT - stringify block
newBlock=JSON.stringify(newBlock).toString();
c(newBlock)
// Adding block object to chain
//*************** adding block to DB *****************
addLevelDBData(0,newBlock)
}
resolve('saved')
// return newBlock;
})
});
}
/*################################################
################ Get block height ################
################################################*/
getBlockHeight(){
return new Promise(function(resolve,reject){
let h = 0;
db.createReadStream().on('data', function(data) {
h++;
}).on('error', function(err) {
return console.log('Unable to read data stream!', err)
}).on('close', function() {
// console.log('p BlockHeight\t' + h);
resolve(h);
})
})
}
/*################################################
################ Get block ######################
################################################*/
getBlock(blockHeight){
// return object as a single string
return new Promise(function (resolve,reject){
db.get(blockHeight, function(err, block) {
if (err) return console.log('Not found!', err);
resolve(block);
})
});
}
/*################################################
################ validate block #################
################################################*/
validateBlock(blockHeight){
return new Promise(function (resolve,reject){
let result
// get block chain
let bc = new BlockChain();
// get block object
bc.getBlock(blockHeight).then((b) => {
// let block=JSON.parse(block);
let block=JSON.parse(b);
// get block hash
let blockHash = block.hash
// c('block hash\t'+blockHash);
// remove block hash to test block integrity
block.hash = '';
// generate block hash
let validBlockHash = SHA256(JSON.stringify(block)).toString();
// Compare
if (blockHash===validBlockHash) {
// c('*** Matched ***')
// c('Block #'+blockHeight+' hash:\n'+blockHash+' === '+validBlockHash);
result = true;
} else {
console.log('Block #'+blockHeight+' invalid hash:\n'+blockHash+'<>'+validBlockHash);
result = false;
}
resolve(result);
});
// c(r);
})
}
/*################################################
################ validate Chain #################
################################################*/
validateChain(){
// return new Promise(function(resolve,reject){
let errorLog = [];
let bc = new BlockChain();
//get blockHieght
bc.getBlockHeight().then((h) => {
let result
(function theLoop (i) {
setTimeout(function () {
//validate blocks
c(i)
// let i=0
var promise_validateBlock = bc.validateBlock(i).then((result) => {
let isValidateBlock = result;
c(i+' isValidateBlock\t'+result)
return(result)
})
var promise_getBlock = bc.getBlock(i).then((b) => {
let block=JSON.parse(b);
let blockHash = block.hash;
// c('blockHash\t'+blockHash)
return(blockHash)
}).catch(function(error) {
console.log('error'+error);
});
var promise_getNextBlock = bc.getBlock(i+1).then((b) => {
let nextblock=JSON.parse(b);
let previousHash = nextblock.previousBlockHash;
// c('previousHash\t'+previousHash)
return(previousHash)
}).catch(function(error) {
console.log('error'+error);
});
Promise.all([promise_validateBlock, promise_getBlock,promise_getNextBlock]).then((values) => {
console.log('\nPromise.all\n');
let isValidateBlock=values[0];
c('isValidateBlock\t'+isValidateBlock);
let blockHash=values[1];
c('blockHash\t'+blockHash);
let previousHash=values[2];
c('previousHash\t'+previousHash);
c('ticking..\t'+i);
if (blockHash!==previousHash) {
errorLog.push(i);
}
i++;
if (i < h -1){
theLoop(i);
}
else{
console.log('no more blocks to check');
if (errorLog.length>0) {
console.log('Block errors = ' + errorLog.length);
console.log('Blocks: '+errorLog);
} else {
console.log('No errors detected');
}
}
}).catch(function(error) {
console.log('all errors'+error);
});
}, 2000);
})(0);
})
// })
}
showBlockChain(){
return new Promise(function(resolve,reject){
let i = 0;
// for ( n=0 ; n<h ; n++ ){
// }
let blocks=[];
db.createReadStream().on('data', function(data) {
// c(JSON.parse(data.value))
// let objBlock = {};
// objBlock.JSON.parse(data.value);
// resolve(objBlock);
// let objBlock=JSON.parse(JSON.stringify(data));
// c(objBlock);
i++;
blocks.push(data)
// c('block#'+data.key+"\tvalue:\t"+data.value);
// c(' obj hash ='+ obj.hash)
}).on('error', function(err) {
return console.log('Unable to read data stream!', err)
}).on('close', function() {
// console.log('closing Block #' + data);
// c('blocks..... '+blocks+'\n')
resolve(blocks)
});
});
}
}//<-------end BlockChain
//testing:
let bc = new BlockChain();
function addTestBlock(){
// let bc = new BlockChain();
let i = 0;
let newBlock = new Block('---test block----')
c(newBlock);
// Block height
newBlock.height = i;
// UTC timestamp
newBlock.time = new Date().getTime().toString().slice(0,-3);
// previous block hash
// if(this.chain.length>0){
// newBlock.previousBlockHash = this.chain[this.chain.length-1].hash;
// }
// Block hash with SHA256 using newBlock and converting to a string
newBlock.hash = SHA256(JSON.stringify(newBlock)).toString();
// Adding block object to chain
c(newBlock);
// this.chain.push(newBlock);
newBlock=JSON.stringify(newBlock).toString();
c(newBlock);
db.put(i, newBlock, function(err) {
        if (err) return console.log('Block ' + i + ' submission failed', err); // note: "key" is not defined in this scope, use the height i
})
// });
}
function runTest2(){
let bc = new BlockChain();
//GenesisBlock
// let GenesisBlock = new Block("First block in chain -Genesis Block - " + 0);
// bc.addBlock(GenesisBlock).then((result) => {
// c("Block DB \t#" + i +"\tGenesis") ;
// });
(function theLoop (i) {
setTimeout(function () {
let blockTest = new Block("Test Block - " + (i + 1));
bc.addBlock(blockTest).then((result) => {
console.log(result);
i++;
if (i < 10) theLoop(i);
});
}, 1000);
})(0);
// i=0;
// (function theLoop (i) {
// setTimeout(function () {
// let blockTest = new Block("Test Block - " + (i + 1));
// bc.addBlock(blockTest).then((result) => {
// i++;
// if (i < 10) theLoop(i);
// else {
// //testing
// //c(bc.chain)
// //c("getBlockHeight \t array \t" + bc.getBlockHeight())
// // c(bc.getBlock(0))
// // bc.getBlock(0).then((result) => {
// // c("Block DB \t#" + result) ;
// // });
// //c("getDBblockHeight \t" + bc.getDBblockHeight())
// }
// });
// }, 1000);
// })(0);
}
//runTest2();
// addTestBlock();
bc.validateChain()
// bc.showBlockChain().then((result) => {
// c(result);
// })
// bc.validateBlock(1).then((result) => {
// c(result)
// })
// c(bc.getBlock(0).then((b) => {
// c(JSON.parse(b).hash)
// }));
//===========================
//===========================
//===========================
//===========================
// bc.validateBlock(1).then((result) => {
// c(result)
// })
// bc.validateChain().then((result) => {
// c(result)
// })
// c("validateChain \t array \t" + bc.validateChain())
// c(bc.getBlock(0).then((b) => {
// // c("Block DB \t#" + block) ;
// // var hash =jQuery.parseJSON(JSON.stringify(block));
// // let block={};
// // block=b;
// // block.previousHash='ffffffff';
// // var hash =JSON.stringify(block);
// // JSON.parse(block).hash ;
// // c("hash\t"+hash);
// // c("block.previousHash\t"+block.hash);
// //c(b)
// // c(JSON.parse(b))
// c(JSON.parse(b).hash)
// }));
// c(bc.getBlockHeight().then((block) => {
// c("getBlockHeight\t"+block);
// }));
// c(bc.chain)
// c(bc.updateChain());
// c(bc.chain)
// c(bc.getBlock(0).then(function(value) {
// //c(block) ;
// // c(value.hash) ;
// c(JSON.parse((value)).hash)
// }));
//testPromise();
// testPromiseAll();
//updateChain();
// bc.chain
//c(bc.chain)
//fill Chain
// function updateChain(){
// let bc = new BlockChain();
// let i = 0;
// bc.chain=[];
// db.createReadStream().on('data', function(data) {
// i++;
// c('current block#'+i+data.key+"\tvalue:\t"+data.value);
// //pupulate chain with existing blocks in db
// let _block=data.value;
// bc.chain.push(_block);
// }).on('error', function(err) {
// return console.log('Unable to read data stream!', err)
// }).on('close', function() {
// c('all:'+bc.chain)
// });
// }
function setDelay(i) {
setTimeout(function(){
console.log('ticking..\t'+i);
}, 1000);
}
function testPromiseAll(){
var promise1 = Promise.resolve(3);
var promise2 = 42;
var promise3 = new Promise(function(resolve, reject) {
setTimeout(resolve, 100, 'foo');
});
Promise.all([promise1, promise2, promise3]).then(function(values) {
console.log(values);
});
}
function testPromise(){
return new Promise(function(resolve, reject) {
setTimeout(() => resolve(100), 1000); // (*)
}).then(function( a ) { // (**)
c(a); // 1
let r=[];
r.push(a*2)
return r;
}).then(function(r) { // (***)
c("Hello");
c(r); // 2
r.push(333);
return r;
}).then(function(x) {
c(x); // 4
c(x[1]); // 4
});
}
<file_sep>// let promiceToDo = new Promise(function(resolve,reject){
// //clean room
// let isClean = true;
// if(isClean){
// resolve('clean');
// }else{
// reject('dirty');
// }
// });
// promiceToDo.then(function(fromResolve){
// console.log('the room is '+ fromResolve);
// }).catch(function(fromReject){
// console.log('the room is '+ fromReject);
// })
// return new Promise((resolve, reject) => {
// db.get(key, function(err, value) {
// if (err) return console.log('Not found!', err);
// resolve(value);
// });
// })
// getBlockHeight().then((height) => {
// newBlock.Height = height + 1;
// })
var promise1 = new Promise(function(resolve, reject) {
setTimeout(function() {
resolve('foo');
}, 300);
});
promise1.then(function(value) {
console.log(value);
// expected output: "foo"
});
console.log(promise1);
// expected output: [object Promise]
// function getBlocksCount() {
// /*
// let self = this;
// return new Promise(function(resolve, reject) {
// let count = 0;
// //open db stream
// db.createReadStream()
// .on('data', function (data) {
// // Count each object inserted
// count++;
// })
// .on('error', function (err) {
// //Error accessing db stream
// console.log('Oh my!', err);
// reject(err);
// })
// .on('end', function () {
// //resolve with the count value
// console.log("end strem");
// console.log("Count =" + count);
// resolve(count-1);
// });
// });
<file_sep>addBlock(newBlock)
MEETS SPECIFICATIONS
1. SimpleChain.js includes the Node.js level library and is configured to persist data within the project directory.
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
const SHA256 = require('crypto-js/sha256');
const level = require('level');
const chainDB = './chaindata';
const db = level(chainDB);
===========================================================================
2. addBlock(newBlock) includes a method to store newBlock within LevelDB
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
bc.addBlock(GenesisBlock).then((result) => {
c("Block DB \t#" + i +"\tGenesis") ;
});
===========================================================================
.
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
===========================================================================
validateBlock() function to validate a block stored within levelDB
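 - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
A minimal usage sketch (not from the original submission; it assumes the BlockChain
class and levelDB setup from simpleChain.js above, where validateBlock(height)
resolves to true or false):
    let bc = new BlockChain();
    bc.validateBlock(0).then((isValid) => {
        console.log('Block #0 valid: ' + isValid);
    });
===========================================================================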
|
97fcf80b3ecb1d6028581450e945c26ae78272e3
|
[
"JavaScript",
"Text"
] | 3
|
JavaScript
|
msharekh/Project2-Private-Blockchain
|
d592b78f1982672fe771a83d2b828ffffa56e471
|
969f248729ddf9171f166822357dff0d141666de
|
refs/heads/master
|
<repo_name>opsbears/obf-cli<file_sep>/src/OptionParser/CLIOptionParserException.php
<?php
namespace OBF\CLI\OptionParser;
use OBF\Lang\Exception;
class CLIOptionParserException extends Exception {
} <file_sep>/src/Terminal/TreeShellUpCommand.php
<?php
namespace OBF\CLI\Terminal;
use OBF\Lang\Object;
class TreeShellUpCommand extends Object implements iTreeCommandHandler {
public function handle(TreeShell $shell, TreeShellCommand $command, $parameters = array()) {
$shell->up();
}
}
|
d427ba79d1af999230a7c8d3adc39c202624c759
|
[
"PHP"
] | 2
|
PHP
|
opsbears/obf-cli
|
2e934b44b3520c92c2b5e848f975e2b064270791
|
57fe200b7c315dff2817a4a7cd330cd7c95c1b7c
|
refs/heads/master
|
<file_sep># Write a constructor function new Voter(options) for a voting widget.
It should receive an element in options.elem, with the following markup:
```
<div id="voter" class="voter">
<span class="down">—</span>
<span class="vote">0</span>
<span class="up">+</span>
</div>
```
The public method voter.setVote(vote) should set the current number – the vote value.
All other methods and properties should be private.
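A minimal sketch of such a constructor (illustrative only, not a reference solution; it assumes the #voter markup above, so the .down, .vote and .up class names come from it):
```
function Voter(options) {
  var elem = options.elem;
  var voteElem = elem.querySelector('.vote');
  var vote = parseInt(voteElem.innerHTML, 10) || 0;

  // private: show the current value
  function render() {
    voteElem.innerHTML = vote;
  }

  // public: set the current vote value
  this.setVote = function (newVote) {
    vote = newVote;
    render();
  };

  // private: event delegation for the +/- buttons
  var self = this;
  elem.onclick = function (e) {
    if (e.target.classList.contains('up')) self.setVote(vote + 1);
    if (e.target.classList.contains('down')) self.setVote(vote - 1);
  };
}

// usage: var voter = new Voter({ elem: document.getElementById('voter') });
```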
<file_sep># Create a "Clock" component.
Interface:
```
var clock = new Clock({
elem: элемент
});
clock.start(); // старт
clock.stop(); // стоп
```
Other methods, if needed, should be private.
While an alert is shown the clock should pause, and afterwards it should keep running with the correct time.
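A minimal sketch of such a component (illustrative only; it assumes the element simply shows the time as text, and it re-reads new Date() on every tick, so the displayed time is still correct after an alert pauses the page):
```
function Clock(options) {
  var elem = options.elem;
  var timer = null; // private

  // private: render the current time
  function render() {
    elem.innerHTML = new Date().toLocaleTimeString();
  }

  this.start = function () {
    if (timer) return;                 // already running
    render();                          // show the time immediately
    timer = setInterval(render, 1000);
  };

  this.stop = function () {
    clearInterval(timer);
    timer = null;
  };
}
```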
<file_sep>/**
* Created by Sirozhka on 25.02.2017.
*/
function Message(error, elem, text) {
elem.parentElement.className = (error) ? 'form-group has-danger' : 'form-group has-success';
msg = elem.parentElement.getElementsByClassName('form-control-feedback')[0];
msg.innerHTML = text;
}
function Form() {
var inputs = $('input:not(:submit)');
var data = {};
$(inputs).each(function () {
data[this.id] = this.value;
});
$.ajax({
url: 'validator.php',
type: 'POST',
data: data,
dataType: 'json',
}).done(function (res, textStatus) {
$(inputs).each(function () {
Message(0, this, '');
});
//console.log(res);
$.each(res.error, function (key, value) {
var id = '#' + key.toLowerCase().replace(' ', '_');
var elem = $(id)[0];
Message(1, elem, value);
});
return res.result
});
return false
}
<file_sep>// 1. Создаём новый объект XMLHttpRequest
var xhr = new XMLHttpRequest();
// 2. Configure it: a GET request to the URL 'phones.json'
xhr.open('GET', 'phones.json', false);
// 3. Send the request
xhr.send();
// 4. If the server response code is not 200, it is an error
if (xhr.status != 200) {
  // handle the error
  alert( xhr.status + ': ' + xhr.statusText ); // example output: 404: Not Found
} else {
  // output the result
var phones = JSON.parse(xhr.responseText);
console.log(phones);
var cont = document.getElementsByClassName('container');
var list = document.createElement('ul');
for (var i =0 ; i<phones.length;i++){
var li = document.createElement('li');
li.innerHTML= phones[i].name;
list.appendChild(li);
}
cont[0].appendChild(list);
}
<file_sep>/**
* Created by Sirozhka on 26.02.2017.
*/
function Clock(elem) {
var switcher = 1;
this.t = 0;
this.start = function () {
switcher = 1;
// alert('start');
paint();
};
this.stop = function () {
switcher = 0;
};
var clock = document.querySelector(elem.elem);
if (clock) {
var canvas = document.createElement('canvas');
canvas.setAttribute('height', '400');
canvas.setAttribute('width', '400 ');
canvas.id = elem.elem + 'canvas';
var h = document.createElement('h3');
clock.appendChild(h);
var button = document.createElement('button');
button.className = 'btn btn-success';
button.innerHTML = 'Start';
button.addEventListener('click',this.start);
clock.appendChild(button);
var button2 = document.createElement('button');
button2.className = 'btn btn-danger float-right';
button2.innerHTML = 'Stop';
button2.addEventListener('click',this.stop);
clock.appendChild(button2);
clock.appendChild(canvas);
}
else {
console.error('Не найдено елемента ' + elem.elem);
}
function paint() {
if (!switcher) {
return 0;
} else {
var date = new Date();
var hours = (date.getHours()<10)?'0'+date.getHours():date.getHours();
var minutes = (date.getMinutes()<10)?'0'+date.getMinutes():date.getMinutes();
var seconds = (date.getSeconds()<10)?'0'+date.getSeconds():date.getSeconds();
var time = hours + " : " + minutes + " : " + seconds;
var h = document.querySelector(elem.elem + ' > h3');
h.innerHTML = time;
var clock = document.querySelector(elem.elem + ' > canvas').getContext("2d");
clock.save();
clock.clearRect(0,0,400,400);
clock.translate(200, 200);
clock.scale(0.4,0.4);
clock.rotate(-Math.PI/2);
clock.strokeStyle = "black";
clock.fillStyle = "black";
clock.lineWidth = 6;
clock.lineCap = "round";
clock.save();
clock.beginPath();
for (var i = 0; i < 12; i++) {
clock.rotate(Math.PI/6);
clock.moveTo(200,0);
clock.lineTo(220,0);
}
            clock.stroke();// draw what was described above
            clock.restore();// pop the last saved context from the stack
clock.save();
            // draw the hour hand by rotating the canvas
clock.rotate((Math.PI/6)*hours +
(Math.PI/360)*minutes +
(Math.PI/21600)*seconds);
clock.lineWidth = 14;
clock.beginPath();
clock.moveTo(-20,0);
clock.lineTo(150,0);
clock.stroke();
clock.restore();
clock.save();
            // minute hand
clock.rotate((Math.PI/30*minutes) +
(Math.PI/1800)*seconds);
clock.lineWidth = 10;
clock.beginPath();
clock.moveTo(-28,0);
clock.lineTo(212,0);
clock.stroke();
clock.restore();
clock.save();
            // second hand
clock.rotate(seconds * Math.PI/30);
clock.strokeStyle = "#D40000";// цвет контура
clock.fillStyle = "#D40000";
clock.lineWidth = 6;
clock.beginPath();
clock.moveTo(-30,0);
clock.lineTo(183,0);
clock.stroke();
clock.restore();
clock.restore();
this.t = setTimeout(function () {
paint()
}, 100);
}
}
this.start();
}<file_sep>function makeDanger(elem, text) {
elem.parentElement.className = 'form-group has-danger';
msg = elem.parentElement.getElementsByClassName('form-control-feedback')[0];
msg.innerHTML = text;
}
function makeSucces(elem, text) {
elem.parentElement.className = 'form-group has-success';
msg = elem.parentElement.getElementsByClassName('form-control-feedback')[0];
msg.innerHTML = text;
}
function checkPhone(elem) {
if (/^\+7\(\d{3}\)-\d{3}-\d{2}-\d{2}$/.test(elem.value)) {
        makeSucces(elem, 'Looks good');
    } else {
        makeDanger(elem, 'Something is wrong! The phone number should look like: +7(123)-456-78-90')
}
}
function checkEmail(elem) {
if (/\w+@\w+.\w+/.test(elem.value)) {
        makeSucces(elem, 'Looks good');
    } else {
        makeDanger(elem, 'Something is wrong! The email should look like <EMAIL>')
}
}
function checkP(elem) {
if (/^[A-Z]{2} \d{7}$/.test(elem.value)) {
        makeSucces(elem, 'Looks good');
    } else {
        makeDanger(elem, 'Something is wrong! The code should look like BM 1234567')
}
}
function checkColor(elem) {
if (/^#?([a-f0-9]{6}|[a-f0-9]{3})$/i.test(elem.value)) {
        makeSucces(elem, 'Looks good');
    } else {
        makeDanger(elem, 'Something is wrong! The code should look like #123456 or #ABC')
}
}
function checkCell(elem) {
if (/^[A-H]{1}[1-8]{1}$/.test(elem.value)) {
        makeSucces(elem, 'Looks good');
    } else {
        makeDanger(elem, 'Something is wrong! The cell should look like A2')
}
}
function searchTime(elem) {
if (elem.value.match(/\d{2}[:-]\d{2}/g)) {
makeSucces(elem, elem.value.match(/\d{2}[:-]\d{2}/g));
} else {
        makeDanger(elem, 'Nothing found')
}
}
function searchNumbers(elem) {
if (elem.value.match(/\d+(\.\d+)?/g)) {
makeSucces(elem, elem.value.match(/\d+(\.\d+)?/g));
} else {
        makeDanger(elem, 'Nothing found')
}
}<file_sep>var CELL_CLASS = 'cell col-md-1 ';
var CELL_WHITE_CLASS = ' white ';
var CELL_BLACK_CLASS = ' black ';
var ROW_CLASS = 'row';
var CHESSBOARD_CLASS = 'chessboard';
function showMeaasge(m) {
msg = document.getElementById('msg');
m = "Выбрана ячейка: " + m;
msg.innerHTML = m;
}
function keyListener(key) {
if (prev = document.getElementsByClassName('active')[0]) {
id = prev.getAttribute("id");
var i = id.charCodeAt(0) - 64;
var j = parseInt(id[1], 10);
// alert(i, j);
switch (key) {
case 37:
chooseCell2((i - 1 > 0) ? i - 1 : 8, j);
break;
case 38:
chooseCell2(i, (j + 1 <= 8) ? j + 1 : 1);
break;
case 39:
chooseCell2((i + 1 <= 8) ? i + 1 : 1, j);
break;
case 40:
chooseCell2(i, (j - 1 > 0) ? j - 1 : 8);
break;
}
}
}
function chooseCell(a) {
// alert(a);
if (prev = document.getElementsByClassName('active')[0]) {
//console.log(prev);
prev.classList.remove('active');
}
cell = document.getElementById(a.trim());
cell.classList.add('active');
showMeaasge(a);
}
function chooseCell2(i, j) {
var ch = ' ' + toLetter(i) + j;
return chooseCell(ch);
}
function figure(e) {
console.log(e);
if (e.classList.contains('deleted')) {
e.classList.remove('deleted');
var d = document.getElementById(e.getAttribute("alt"));
d.appendChild(e);
} else {
e.classList.add('deleted');
e.setAttribute("alt", e.parentElement.getAttribute("id"));
if (e.getAttribute("color") == 'w') {
del = document.getElementById('delw');
del.appendChild(e);
} else {
del = document.getElementById('delb');
del.appendChild(e);
}
}
}
function toLetter(i) {
var A = 'A'.charCodeAt(0);
return String.fromCharCode(i + A - 1);
}
function Board(w, h) {
this.w = w;
this.h = h;
var board = generateChessBoard.call(this);
console.log(board);
function generateChessBoard() {
var chessboard = matrixArray(this.w + 1, this.h + 1);
for (var i = 1; i <= this.w; i++) {
for (var j = 1; j <= this.h; j++) {
chessboard[i][j] = {
color: (i + j) % 2 == 1 ? true : false,
figure: 0
}
}
}
return chessboard;
function matrixArray(columns, rows) {
var arr = new Array();
for (var i = 1; i <= columns; i++) {
arr[i] = new Array();
for (var j = 1; j <= rows; j++) {
arr[i][j] = i + j + 1;
}
}
return arr;
}
}
this.paint = function (idd) {
var cb = '';
for (var i = this.h; i >= 1; i--) {
cb += ' <div class=" ' + ROW_CLASS + '">';
cb += '<div class="' + CELL_CLASS + '">' + i + '</div>';
for (var j = 1; j <= this.w; j++) {
console.log(i, ' ', j);
var cellColor = (board[j][i].color == true) ? CELL_WHITE_CLASS : CELL_BLACK_CLASS;
var dclass = CELL_CLASS + cellColor;
cb += '<div class="' + dclass + '"' +
'id="' + toLetter(j) + i + '" onclick=chooseCell("' + toLetter(j) + i + '"); >';
cb = (board[j][i].figure) ? cb + figureToImg(board[j][i].figure) : cb;
cb += '</div>';
}
cb += '</div>';
}
cb += ' <div class=" ' + ROW_CLASS + '">';
cb += '<div class="' + CELL_CLASS + '">' + '</div>';
for (var i = 1; i <= this.w; i++) {
cb += '<div class="' + CELL_CLASS + '">' + toLetter(i) + '</div>';
}
cb += '</div>';
chess = document.querySelector(idd);
chess.innerHTML = cb;
}
this.putFigures = function () {
        if ((this.h < 8) || (this.w < 8)) {
            console.warn('not placing figures on boards smaller than 8x8')
} else {
for (var i = 1; i <= 8; i++) {
board[i][2].figure = 1;
}
for (var i = 1; i <= 8; i++) {
board[i][7].figure = 7;
}
for (var i = 1; i < 4; i++) {
board[i][1].figure = board[9-i][1].figure = i + 1;
}
board[4][1].figure = 5;
board[5][1].figure = 6;
for (var i = 1; i < 4; i++) {
board[i][8].figure = board[9-i][8].figure = i + 7;
}
board[4][8].figure = 11;
board[5][8].figure = 12;
}
console.log(board);
}
function figureToImg(i) {
switch (i) {
case 0:
return "";
case 1:
return "<img src='https://upload.wikimedia.org/wikipedia/commons/0/04/Chess_plt60.png' onclick='figure(this)' color='w'> "
case 2:
return "<img src='https://upload.wikimedia.org/wikipedia/commons/5/5c/Chess_rlt60.png'onclick='figure(this)' color='w'> "
case 3:
return "<img src='https://upload.wikimedia.org/wikipedia/commons/2/28/Chess_nlt60.png'onclick='figure(this)' color='w'>"
case 4:
return "<img src='https://upload.wikimedia.org/wikipedia/commons/9/9b/Chess_blt60.png'onclick='figure(this)' color='w'>"
case 5:
return "<img src='https://upload.wikimedia.org/wikipedia/commons/4/49/Chess_qlt60.png'onclick='figure(this)' color='w'>"
case 6:
return "<img src='https://upload.wikimedia.org/wikipedia/commons/3/3b/Chess_klt60.png'onclick='figure(this)' color='w'>"
case 7:
return "<img src='https://upload.wikimedia.org/wikipedia/commons/c/cd/Chess_pdt60.png'onclick='figure(this)' color='b'>"
case 8:
return "<img src='https://upload.wikimedia.org/wikipedia/commons/a/a0/Chess_rdt60.png'onclick='figure(this)' color='b'>"
case 9:
return "<img src='https://upload.wikimedia.org/wikipedia/commons/f/f1/Chess_ndt60.png'onclick='figure(this)' color='b'>"
case 10:
return "<img src='https://upload.wikimedia.org/wikipedia/commons/8/81/Chess_bdt60.png'onclick='figure(this)' color='b'>"
case 11:
return "<img src='https://upload.wikimedia.org/wikipedia/commons/a/af/Chess_qdt60.png'onclick='figure(this)' color='b'>"
case 12:
return "<img src='https://upload.wikimedia.org/wikipedia/commons/e/e3/Chess_kdt60.png'onclick='figure(this)' color='b'>"
}
}
}
function ChessBoard() {
Board.apply(this, [8, 8]);
}
var cb = new ChessBoard();
cb.putFigures();
cb.paint('#chessboard');
|
c67a86ec96dcc4434e6b2f28af7640a0c4fcabae
|
[
"Markdown",
"JavaScript"
] | 7
|
Markdown
|
4m9fk/GeekBrainsJS2
|
251214761014c5a6e9ef7ae919b8408a5f801055
|
9189647c93c43a82c9ff59a5513d7a816e04309a
|
refs/heads/master
|
<repo_name>NaMuSiGa/namunodeappTest<file_sep>/app.js
//asdfsdfdsfsf
///dddd
|
4335a34cdf14d377c943b2bd043fd932ba339e37
|
[
"JavaScript"
] | 1
|
JavaScript
|
NaMuSiGa/namunodeappTest
|
9fa8aa63d9c2f87bbeee9636235f3b8416bd2b93
|
f8b61691316b4bd1a1075aec0ffea4dee086c931
|
refs/heads/master
|
<repo_name>dennyqi/aei_dinner_vs_cs<file_sep>/README.md
# WpfApp2
This is a Windows application written in C#, developed in Visual Studio 2017.
<file_sep>/WpfApp2/MainWindow.xaml.cs
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
using System.Windows;
using System.Windows.Controls;
using System.Windows.Data;
using System.Windows.Documents;
using System.Windows.Input;
using System.Windows.Media;
using System.Windows.Media.Imaging;
using System.Windows.Navigation;
using System.Windows.Shapes;
using System.Drawing;
//using System.Drawing.Imaging;
using System.IO;
namespace WpfApp2
{
/// <summary>
/// Interaction logic for MainWindow.xaml
/// </summary>
public partial class MainWindow : Window
{
double count = 0;
public MainWindow()
{
InitializeComponent();
var date = DateTime.Now;
//String filePath = @"C:\dqi\tftpd32\fish.jpg";
//mydish.Source = new Uri(@"C:\dqi\tftpd32\fish.jpg", UriKind.Relative);
today_date.Content = date.ToString();
mycombox.SelectedIndex = 0;
//Uri uri = new Uri(@"C:\dqi\tftpd32\fish.jpg", UriKind.Relative);
//objImage.Source = new BitmapImage(new Uri("http://domain/images/myOwnImage.jpg", UriKind.Absolute));
//Uri uri = new Uri(@"C:\dqi\tftpd32\fish.bmp", UriKind.Relative);
//Uri uri = new Uri(@"C:/dqi/tftpd32/fish.jpg", UriKind.Absolute);
//Uri uri = new Uri(@"C:\download\tftpd32\fish.jpg", UriKind.Relative);
//Uri uri = new Uri(@"C:\dqi\tftpd32\fish.jpg");
/*
// Create source
BitmapImage myBitmapImage = new BitmapImage();
// BitmapImage.UriSource must be in a BeginInit/EndInit block
myBitmapImage.BeginInit();
*/
//ImageSource imgSource = new BitmapImage(uri);
/*
BitmapImage myBitmapImage = new BitmapImage();
myBitmapImage.BeginInit();
myBitmapImage.UriSource = new Uri(@"C:\dqi\tftpd32\fish.jpg");
myBitmapImage.EndInit();
mydish.Source = myBitmapImage;
*/
//BitmapImage imgSource = new BitmapImage(uri);
//mydish.Source = imgSource;
/*
// Create the image element.
Image simpleImage = new Image();
simpleImage.Width = 200;
simpleImage.Margin = new Thickness(5);
*/
// Create source.
BitmapImage bi = new BitmapImage();
// BitmapImage.UriSource must be in a BeginInit/EndInit block.
bi.BeginInit();
bi.UriSource = new Uri(@"C:\download\tftpd32\fishyellow.jpg", UriKind.RelativeOrAbsolute);
bi.EndInit();
// Set the image source.
//simpleImage.Source = bi;
mydish.Source = bi;
}
private void count_orders(object sender, RoutedEventArgs e)
{
var date = DateTime.Now;
double index = 0;
string dinner;
today_date.Content = date.ToString();
count = 0;
if (leon_zhang.IsChecked.Value)
count++;
if (dennyqi.IsChecked.Value)
count++;
if (yirun.IsChecked.Value)
count++;
index = mycombox.SelectedIndex;
dinner = mycombox.Text;
///total_orders.Content = $"Total Orders: {count}";
//total_orders.Content = $"Total Orders: {count} Dinner: {index} {dinner}";
//total_orders.Content = $"Total Orders: {count} \nDinner: {dinner}";
total_orders.Content = $"Total Orders: {count} \nDinner: {mycombox.Text}";
///MessageBox.Show(dinner);
}
private void leon_zhang_checked(object sender, RoutedEventArgs e)
{
/// total_orders.Content = "Total Orders: 1";
count++;
/// today_date.Content = $"{date: d} at {date: t}";
/// total_orders.Content = $"Total Orders: {count}";
}
private void leon_zhang_unchecked(object sender, RoutedEventArgs e)
{
count--;
/// today_date.Content = $"{date: d} at {date: t}";
///total_orders.Content = $"Total Orders: {count}";
}
private void dennyqi_unchecked(object sender, RoutedEventArgs e)
{
count--;
}
private void dennyqi_checked(object sender, RoutedEventArgs e)
{
count++;
}
private void myimage(object sender, EventArgs e)
{
/*
BitmapImage bitmap = new BitmapImage();
bitmap.BeginInit();
// bitmap.UriSource = new Uri(@"C:\download\tftpd32\aei-white-2.png");
bitmap.UriSource = new Uri(@"aei-white-2.png");
bitmap.EndInit();
*/
//actiontec.Source = bitmap;
}
private void myexit(object sender, RoutedEventArgs e)
{
Application.Current.Shutdown();
}
private void screenshot(object sender, RoutedEventArgs e)
{
/*
var fileName = String.Format("MyImage_{0:}.jpg", DateTime.Now.Ticks);
WriteableBitmap bmpCurrentScreenImage = new WriteableBitmap((int)this.ActualWidth, (int)this.ActualHeight);
bmpCurrentScreenImage.Render(LayoutRoot, new MatrixTransform());
bmpCurrentScreenImage.Invalidate();
SaveToMediaLibrary(bmpCurrentScreenImage, fileName, 100);
MessageBox.Show("Captured image " + fileName + " Saved Sucessfully", "WP Capture Screen", MessageBoxButton.OK);
currentFileName = fileName;
*/
/*
Bitmap bitmap = new Bitmap(Screen.PrimaryScreen.Bounds.Width, Screen.PrimaryScreen.Bounds.Height);
Graphics graphics = Graphics.FromImage(bitmap as Image);
graphics.CopyFromScreen(0, 0, 0, 0, bitmap.Size);
bitmap.Save("c:\\screenshot.jpeg", ImageFormat.Jpeg);
*/
/*
// Store the size of the map control
int Width = (int)MyMap.RenderSize.Width;
int Height = (int)MyMap.RenderSize.Height;
System.Windows.Point relativePoint = MyMap.TransformToAncestor(Application.Current.MainWindow).Transform(new System.Windows.Point(0, 0));
int X = (int)relativePoint.X;
int Y = (int)relativePoint.Y;
Bitmap Screenshot = new Bitmap(Width, Height);
Graphics G = Graphics.FromImage(Screenshot);
// snip wanted area
G.CopyFromScreen(X, Y, 0, 0, new System.Drawing.Size(Width, Height), CopyPixelOperation.SourceCopy);
string fileName = "C:\\myCapture.bmp";
System.IO.FileStream fs = System.IO.File.Open(fileName, System.IO.FileMode.OpenOrCreate);
Screenshot.Save(fs, System.Drawing.Imaging.ImageFormat.Bmp);
fs.Close();
*/
///Bitmap bm = (Bitmap)Bitmap.FromFile(@"D:\Temp\MyImage.bmp");
}
private void newdish(object sender, DependencyPropertyChangedEventArgs e)
{
MessageBox.Show("new dish ?");
}
private void focuschanged(object sender, DependencyPropertyChangedEventArgs e)
{
MessageBox.Show("new focus ?");
}
private void gotfocus(object sender, RoutedEventArgs e)
{
//MessageBox.Show("got focus ?");
}
private void selectchanged(object sender, SelectionChangedEventArgs e)
{
double index = 0;
string dinner;
String filePath = @"C:\download\tftpd32\fish.jpg";
index = mycombox.SelectedIndex;
dinner = mycombox.Text;
//MessageBox.Show($"select changed {index}:{dinner}");
if (index == 0)
{
filePath = @"C:\download\tftpd32\fish.jpg";
}
else if (index == 1)
{
filePath = @"C:\download\tftpd32\shrimp.jpg";
}
else if (index == 2)
{
filePath = @"C:\download\tftpd32\chicken.jpg";
}
else if (index == 3)
{
filePath = @"C:\download\tftpd32\pork.jpg";
}
else if (index == 4)
{
filePath = @"C:\download\tftpd32\vege.jpg";
}
BitmapImage bi = new BitmapImage();
bi.BeginInit();
bi.UriSource = new Uri(filePath, UriKind.RelativeOrAbsolute);
bi.EndInit();
// Set the image source.
//simpleImage.Source = bi;
mydish.Source = bi;
}
}
}
|
34629bec00dcedd49635678e7de48e5bc4875f64
|
[
"Markdown",
"C#"
] | 2
|
Markdown
|
dennyqi/aei_dinner_vs_cs
|
ed93a7f2368926fef079d9a4f74e62352105d50e
|
69b9b6b53dd810655393434cccc4dfae4843f001
|
refs/heads/master
|
<file_sep><?php
/**
* Created by PhpStorm.
* User: gerha
* Date: 05.04.2019
* Time: 20:37
*/
namespace App\Mails;
use Carbon\Carbon;
use Illuminate\Support\Facades\Config;
use Illuminate\Support\Facades\URL;

class VerificationTransactional
{
    public function toMail($notifiable)
    {
        dd($this->verificationUrl($notifiable));
}
protected function verificationUrl($notifiable)
{
return URL::temporarySignedRoute(
'verification.verify',
Carbon::now()->addMinutes(Config::get('auth.verification.expire', 60)),
['id' => $notifiable->getKey()]
);
}
}<file_sep><?php
/*
|--------------------------------------------------------------------------
| Web Routes
|--------------------------------------------------------------------------
|
| Here is where you can register web routes for your application. These
| routes are loaded by the RouteServiceProvider within a group which
| contains the "web" middleware group. Now create something great!
|
*/
Auth::routes(['verify' => true]);
Route::get('/', function () {
return view('welcome');
});
Route::get('nm', function (){
event( new App\Events\NewMessage('hello'));
});
Route::get('env', function (){
dd(env('APP_ENV'));
});
Route::get('chat', function (){
return view('chat');
});
Auth::routes();
Route::get('/home', 'HomeController@index')->name('home');
Route::get('/trans', function (){
dd(App\User::first()->notify(new App\Notifications\VerifyEmail));
});
<file_sep><?php
/**
* Created by PhpStorm.
* User: gerha
* Date: 06.04.2019
* Time: 07:59
*/
namespace App\Channels;
use CS_REST_Transactional_SmartEmail;
use Illuminate\Notifications\Notification;
class CampaignChannel
{
protected $email_id;
/**
* Send the given notification.
*
* @param mixed $notifiable
* @param \Illuminate\Notifications\Notification $notification
* @return void
*/
public function send($notifiable, Notification $notification)
{
$sender = new CS_REST_Transactional_SmartEmail($notification->email_id, ['api_key' => config('services.campaign_monitor.key')]);
$message = $notification->toCampaign($notifiable);
$consent_to_track = 'yes'; # Valid: 'yes', 'no', 'unchanged'
$result = $sender->send($message, $consent_to_track);
if( ! $result->was_successful()){
throw new \Exception("Verification Mail could not be sent. $result->Message");
}
}
}
|
d01f355c3d1dfef040fba62ec5e5ca20e2641218
|
[
"PHP"
] | 3
|
PHP
|
gepopp/visboo
|
67c6f70b5f262d25d20ec7fc43ac2281883ba605
|
738679e7b04a98b969b26ef0e5829fd7673c8661
|
refs/heads/master
|
<file_sep>// take input from the user
let maxNumber = parseInt(prompt("Kindly enter your maximum number"));
let chosenNumber = Math.floor(Math.random() * maxNumber) + 1;
//ask the user to input a guess
let guess = parseInt(prompt("Kindly enter a guess"));
let attempt = -1
// if guess is higher than original guess print guess is higher try again
while (parseInt(guess) !== chosenNumber) {
attempt++;
if ( guess === "q") {
console.log("your quitted");
break;
}
if (guess > chosenNumber){
guess = prompt("You guess higher try something lower");
} else{
guess = prompt("You guess lower try something higher");
}
}
if (guess === "q"){
// console.log(`you are such a sore LOSER!!!! you made ${attempt} attempts `);
alert(`you are such a sore LOSER!!!! you made ${attempt} attempts `);
} else {
console.log("You got it!, Thanks for playing. Dammie Appreciates!");
alert(`You got it!, Thanks for playing. Dammie Appreciates! and you made ${attemps} attemps. Yipee!`);
}
// const numbers = [1,2,3,4,5,6,7,8,9]; //DON'T CHANGE THIS LINE PLEASE!
// // WRITE YOUR LOOP BELOW THIS LINE:
// for ( let row1 of numbers) {
// console.log(row1 * row1);
// }
<file_sep># javascriptGuessingGame
This is a guessing game I implemented with JavaScript.
https://damso9.github.io/javascriptGuessingGame/
|
43d0da270fbf276cc779b9769d4f3a09d685c87b
|
[
"JavaScript",
"Markdown"
] | 2
|
JavaScript
|
damso9/javascriptGuessingGame
|
7ab8b652a3829f917ae9928f2d8e720f898c4100
|
57c57526c1cc99dd74a065a670f94e743a51710a
|
refs/heads/master
|
<file_sep># Blender Previewsettings Add-on
# Contributor(s): <NAME> (github.com/SimonStorlSchulke)
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
bl_info = {
"name": "Previewsettings",
"description": "When using Cycles, displays the most relevant Eevee settings for the Material Preview in the Viewport Shading Menu",
"author": "<NAME>",
"version": (1, 1, 0),
"blender": (3, 2, 2),
"location": "View3D → Header → Material Preview Shading Foldout-Menu",
"category": "Interface" }
import bpy
class PREVIEWSETTINGS_PT_panel(bpy.types.Panel):
bl_parent_id = "VIEW3D_PT_shading"
bl_space_type = 'VIEW_3D'
bl_region_type = 'HEADER'
bl_label = "Preview Rendersettings"
@classmethod
def poll(cls, context):
view = context.space_data
if view.type == 'VIEW_3D':
return view.shading.type == "MATERIAL" and context.scene.render.engine == "CYCLES"
else:
return context.scene.display.shading.type == "MATERIAL" and context.scene.render.engine == "CYCLES"
def draw(self, context):
layout: bpy.types.UILayout = self.layout
props: bpy.types.SceneEEVEE = context.scene.eevee
layout.prop(props, "taa_samples")
layout.prop(props, "use_bloom")
layout.prop(props, "bloom_intensity", text="Bloom Intensity")
layout.prop(props, "bloom_radius", text="Bloom Radius")
layout.prop(props, "use_gtao")
layout.prop(props, "gtao_distance", text="AO Distance")
layout.prop(props, "gtao_factor", text="AO Factor")
layout.prop(props, "use_ssr")
layout.prop(props, "use_ssr_refraction")
layout.prop(props, "volumetric_start", text="Volumetric Start")
layout.prop(props, "volumetric_end", text="Volumetric End")
layout.prop(props, "volumetric_tile_size", text="Tile Size")
layout.prop(props, "use_volumetric_lights")
layout.prop(props, "use_volumetric_shadows")
def draw_material_settings(self, context):
layout = self.layout
layout.use_property_split = True
layout.use_property_decorate = False
mat = context.material
layout.prop(mat, "use_backface_culling")
layout.prop(mat, "blend_method")
layout.prop(mat, "shadow_method")
row = layout.row()
row.active = ((mat.blend_method == 'CLIP') or (mat.shadow_method == 'CLIP'))
row.prop(mat, "alpha_threshold")
if mat.blend_method not in {'OPAQUE', 'CLIP', 'HASHED'}:
layout.prop(mat, "show_transparent_back")
layout.prop(mat, "use_screen_refraction")
layout.prop(mat, "refraction_depth")
layout.prop(mat, "use_sss_translucency")
layout.prop(mat, "pass_index")
class materialSettingsPanel(bpy.types.Panel):
bl_label = "Material Preview Settings"
bl_context = "material"
@classmethod
def poll(cls, context):
mat = context.material
return mat and context.engine == "CYCLES"
class EEVEE_MATERIAL_PT_viewport_settings(materialSettingsPanel):
bl_region_type = 'WINDOW'
bl_space_type = 'PROPERTIES'
bl_parent_id = "MATERIAL_PT_viewport"
def draw(self, context):
draw_material_settings(self, context)
class EEVEE_MATERIAL_PT_viewport_settings_Node_Editor(materialSettingsPanel):
bl_space_type = "NODE_EDITOR"
bl_region_type = "UI"
bl_category = "Options"
#bl_parent_id = "MATERIAL_PT_viewport" - throws an Error (parent not found) because somehow the Viewport Display Panel is only defined for the Properties space type(?)
def draw(self, context):
draw_material_settings(self, context)
def register():
bpy.utils.register_class(PREVIEWSETTINGS_PT_panel)
bpy.utils.register_class(EEVEE_MATERIAL_PT_viewport_settings)
bpy.utils.register_class(EEVEE_MATERIAL_PT_viewport_settings_Node_Editor)
def unregister():
bpy.utils.unregister_class(PREVIEWSETTINGS_PT_panel)
bpy.utils.unregister_class(EEVEE_MATERIAL_PT_viewport_settings)
bpy.utils.unregister_class(EEVEE_MATERIAL_PT_viewport_settings_Node_Editor)
if __name__ == "__main__":
register()
<file_sep># Blender Addons
Here I will upload small Blender Addons that are not worthy of their own repository.
## Add Lightblocker [(Download)](https://raw.githubusercontent.com/SimonStorlSchulke/blender-addons/master/add_lightblocker.py)
Creates a Lightblocker Object with a material preset from the Light Context Menu.

## Add Grass Shrubs [(Download)](https://raw.githubusercontent.com/SimonStorlSchulke/blender-addons/master/add_grass_shrubs.py)
Creates a collection of customizable grass shrubs for use in particle systems.

## Hardify [(Download)](https://raw.githubusercontent.com/SimonStorlSchulke/blender-addons/master/hardify.py)
Quickly shades an object smooth and turns on Autosmooth + an optional Weighted Normals modifier. Located in Object Mode → Select Mesh Object → Object Context Menu (Right-click or W)

## Cycles - Eevee-Previewsettings [(Download)](https://raw.githubusercontent.com/SimonStorlSchulke/blender-addons/master/cycles_eevee-previewsettings.py)
When using Cycles, displays the most relevant Eevee settings for the Material Preview in the Viewport Shading Menu, as well as Material preview Settings in the Materials-/ and Node Editor Area.


<file_sep># This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import bpy
import bmesh
import math
import mathutils
import random
bl_info = {
"name" : "Grass Shrub Generator",
"description" : "Creates a Collection of customizable grass shrubs for use in particle systems.",
"author" : "<NAME> ",
"version" : (1, 0, 0),
"blender" : (3, 10, 0),
"location": "View3D > Add > Mesh",
"doc_url": "https://github.com/SimonStorlSchulke/blender-addons",
"tracker_url": "https://github.com/SimonStorlSchulke/blender-addons/issues",
"category" : "Add Mesh",
}
def map_range(v, from_min, from_max, to_min, to_max):
"""Maps a value v from an old scale (from_min, from_max)to a new scale (to_min, to_max)"""
return to_min + (v - from_min) * (to_max - to_min) / (from_max - from_min)
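# Worked example (values are illustrative only): map_range(5, 0, 10, 0.0, 1.0) -> 0.5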
class GRASSSHRUBGENERATOR_OT_add_grass_shrubs(bpy.types.Operator):
bl_idname = "grassshrubgenerator.add_gras_shrubs"
bl_label = "Grass Shrubs"
bl_description = "Add a collection of random Grass Shrubs for use in particle systems"
bl_options = {"REGISTER", "UNDO"}
AMMOUNT: bpy.props.IntProperty(name="Shrubs Ammount", default=1, min=1, max=25)
BLADES: bpy.props.IntProperty(name="Blades", default=8, min=1, max=50)
SPREAD: bpy.props.FloatProperty(name="Spread", default=0.1, min=0)
WIDTH_EXPONENT: bpy.props.FloatProperty(name="Width Exponent", default=0.8, min=0)
RANDOM_BLADE_ROT: bpy.props.FloatProperty(name="Random Blade Rotation", default=25)
WIDTH_BASE: bpy.props.FloatProperty(name="Base Width", default=0.01, min=0)
WIDTH_TIP: bpy.props.FloatProperty(name="Tip Width", default=0, min=0)
HEIGHT_MIN: bpy.props.FloatProperty(name="Meight Min", default=0.05, min=0)
HEIGHT_MAX: bpy.props.FloatProperty(name="Height Max", default=0.15, min=0.01)
RESOLUTION: bpy.props.IntProperty(name="resolution", default = 10)
ROT_BASE_MIN: bpy.props.FloatProperty(name="Rotation Base Min", default=3)
ROT_BASE_MAX: bpy.props.FloatProperty(name="Rotation Base Max", default=25)
ROT_TIP_MIN: bpy.props.FloatProperty(name="Rotation Tip Min", default=30, min=0)
ROT_TIP_MAX: bpy.props.FloatProperty(name="Rotation Tip Max", default=90, min=0)
ROT_FALLOFF: bpy.props.FloatProperty(name="Rotation Falloff", default=5, min=0.01)
SEED: bpy.props.IntProperty(name="Seed")
def generate_shrub(self) -> bpy.types.Object:
grass_mesh = bpy.data.meshes.new("grass_shrub_mesh")
grass_object = bpy.data.objects.new("grass shrub", grass_mesh)
bm = bmesh.new()
bm.from_mesh(grass_mesh)
uv_layer = bm.loops.layers.uv.new()
for i in range(self.BLADES):
blade_height = random.uniform(self.HEIGHT_MIN, self.HEIGHT_MAX)
blade_res = int(self.RESOLUTION * 10 * blade_height)
c_blade = []
c_rot_base = random.uniform(self.ROT_BASE_MIN, self.ROT_BASE_MAX)
c_rot_tip = random.uniform(self.ROT_TIP_MIN, self.ROT_TIP_MAX)
last_vert_1 = None
last_vert_2 = None
for i in range(blade_res):
progress = i / (blade_res-1)
gradient = math.pow(progress, self.WIDTH_EXPONENT)
pos_x = map_range(gradient, 0, 1, self.WIDTH_BASE, self.WIDTH_TIP)
pos_y = progress * blade_height - self.HEIGHT_MAX * 0.15
vert_1: bmesh.types.BMVert = bm.verts.new((-pos_x, 0, pos_y))
vert_2: bmesh.types.BMVert = bm.verts.new((pos_x, 0, pos_y))
# Rotate blade verts more the further at the top they are
rot_angle = map_range(math.pow(progress, self.ROT_FALLOFF), 0, 1, c_rot_base, c_rot_tip)
rot_matrix = mathutils.Matrix.Rotation(math.radians(rot_angle), 4, 'X')
bmesh.ops.rotate(bm, cent=(0, 0, 0), matrix=rot_matrix, verts=[vert_1, vert_2])
# Don't generate Polygon at first iteration (only 2 verts exist then)
if i != 0:
face = bm.faces.new((last_vert_1,last_vert_2,vert_2,vert_1))
face.smooth = True
# Generate UVs per face depending on current vertex index
for i_vert, vert in enumerate(face.loops):
vert[uv_layer].uv = ((i_vert==0 or i_vert==3), (i-(i_vert<2)) / blade_res)
c_blade.append(vert_1)
c_blade.append(vert_2)
last_vert_1 = vert_1
last_vert_2 = vert_2
# random offset per blade
offset: mathutils.Vector = mathutils.Vector((random.uniform(-1,1), random.uniform(-1,1), 0))
offset = offset.normalized() * random.uniform(0, self.SPREAD)
# align rotation to offset + random
blade_rotation: mathutils.Quaternion = offset.normalized().to_track_quat("-Y", "Z")
random_z_angle = random.uniform(-self.RANDOM_BLADE_ROT, self.RANDOM_BLADE_ROT)
blade_rotation.rotate(mathutils.Euler((0, 0, -math.radians(random_z_angle))))
bmesh.ops.rotate(bm, cent=(0, 0, 0), matrix = blade_rotation.to_matrix(), verts=c_blade)
bmesh.ops.translate(bm, vec=offset, verts=c_blade)
bm.to_mesh(grass_mesh)
bm.free()
return grass_object
@classmethod
def poll(cls, context):
return context.mode == "OBJECT"
def execute(self, context):
random.seed(self.SEED)
grass_collection: bpy.types.Collection
if "Grass Shrubs" in bpy.data.collections:
grass_collection = bpy.data.collections["Grass Shrubs"]
else:
grass_collection = bpy.data.collections.new("Grass Shrubs")
try:
bpy.context.scene.collection.children.link(grass_collection)
except:
... # collection already linked
for i in range(self.AMMOUNT):
c_shrub: bpy.types.Object = self.generate_shrub()
grass_collection.objects.link(c_shrub)
c_shrub.location.x = (i-(self.AMMOUNT-1)/2) * self.SPREAD * 3
return {"FINISHED"}
def menu_func(self, context):
self.layout.operator(GRASSSHRUBGENERATOR_OT_add_grass_shrubs.bl_idname, icon='OUTLINER_OB_HAIR')
def register():
bpy.utils.register_class(GRASSSHRUBGENERATOR_OT_add_grass_shrubs)
bpy.types.VIEW3D_MT_mesh_add.append(menu_func)
def unregister():
bpy.utils.unregister_class(GRASSSHRUBGENERATOR_OT_add_grass_shrubs)
bpy.types.VIEW3D_MT_mesh_add.remove(menu_func)<file_sep># This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
bl_info = {
"name": "Add Lightblocker",
"author": "<NAME>",
"description": "Adds a simple Light Blocker with material setup for the selected Light.",
"version": (1, 0),
"blender": (3, 10, 0),
"location": "View3D > Light Context Menu > Add Lightblocker",
"warning": "",
"doc_url": "https://github.com/SimonStorlSchulke/blender-addons",
"tracker_url": "https://github.com/SimonStorlSchulke/blender-addons/issues",
"category": "Lighting",
}
import bpy
def main(context):
light = bpy.context.object
# create Plane facing the Light
bpy.ops.mesh.primitive_plane_add()
blocker = bpy.context.object
bpy.ops.object.constraint_add(type='TRACK_TO')
blocker.constraints["Track To"].track_axis = 'TRACK_Z'
blocker.constraints["Track To"].up_axis = 'UP_Y'
blocker.constraints["Track To"].target = light
blocker.name = "Lightblocker"
if "Lightblocker Addon" not in bpy.data.node_groups:
create_group()
create_material(context)
def create_material(context):
mat = bpy.data.materials.new("Lightblocker")
mat.use_nodes = True
mat.shadow_method = "CLIP"
mat.blend_method = "CLIP"
mat.diffuse_color[3] = 0.2
tree = mat.node_tree
tree.nodes.remove(tree.nodes["Principled BSDF"])
tree.nodes["Material Output"].location = (500, 0)
# Setup Nodes
n_group = tree.nodes.new(type="ShaderNodeGroup")
n_group.node_tree = bpy.data.node_groups["Lightblocker Addon"]
n_group.location = (300, 0)
n_ramp = tree.nodes.new("ShaderNodeValToRGB")
n_ramp.color_ramp.elements[0].position = 0.4
n_ramp.color_ramp.elements[1].position = 0.6
n_ramp.location = (0, 0)
n_noise = tree.nodes.new("ShaderNodeTexNoise")
n_noise.noise_dimensions = "2D"
n_noise.location = (-200, 0)
# Create Links
tree.links.new(n_noise.outputs[0], n_ramp.inputs[0])
tree.links.new(n_ramp.outputs[0], n_group.inputs[0])
tree.links.new(n_group.outputs[0], tree.nodes["Material Output"].inputs[0])
bpy.context.object.data.materials.append(mat)
def create_group():
group = bpy.data.node_groups.new("Lightblocker Addon", "ShaderNodeTree")
n_inputs = group.nodes.new('NodeGroupInput')
group.inputs.new('NodeSocketFloat','Transparency')
n_outputs = group.nodes.new('NodeGroupOutput')
group.outputs.new('NodeSocketShader','Shader')
# Create Nodes
n_lightpath = group.nodes.new('ShaderNodeLightPath')
n_add = group.nodes.new('ShaderNodeMath')
n_invert = group.nodes.new('ShaderNodeMath')
n_invert.operation = "SUBTRACT"
n_invert.inputs[0].default_value = 1
n_diffuse = group.nodes.new('ShaderNodeBsdfDiffuse')
n_transparent = group.nodes.new('ShaderNodeBsdfTransparent')
n_mix = group.nodes.new('ShaderNodeMixShader')
# Node Layout
n_outputs.location = (400,0)
n_mix.location = (200,0)
n_diffuse.location = (0,-20)
n_transparent.location = (0,-150)
n_add.location = (0,150)
n_invert.location = (-200,0)
n_lightpath.location = (-400,0)
n_inputs.location = (-400,100)
# Node Links
group.links.new(n_inputs.outputs[0], n_add.inputs[0])
group.links.new(n_lightpath.outputs[1], n_invert.inputs[1])
group.links.new(n_invert.outputs[0], n_add.inputs[1])
group.links.new(n_diffuse.outputs[0], n_mix.inputs[1])
group.links.new(n_transparent.outputs[0], n_mix.inputs[2])
group.links.new(n_add.outputs[0], n_mix.inputs[0])
group.links.new(n_mix.outputs[0], n_outputs.inputs[0])
class OT_add_lightblocker(bpy.types.Operator):
"""Add a simple Light Blocker for the selected Light"""
bl_idname = "light.addlighblocker"
bl_label = "Add Light Blocker"
@classmethod
def poll(cls, context):
return context.object is not None and context.object.type == 'LIGHT'
def execute(self, context):
main(context)
return {'FINISHED'}
def add_contextmenu_entry(self, context):
if context.object is not None and context.object.type == 'LIGHT':
layout = self.layout
layout.operator("light.addlighblocker", text="Add Lightblocker")
def register():
bpy.utils.register_class(OT_add_lightblocker)
bpy.types.VIEW3D_MT_object_context_menu.append(add_contextmenu_entry)
def unregister():
bpy.utils.unregister_class(OT_add_lightblocker)
bpy.types.VIEW3D_MT_object_context_menu.remove(add_contextmenu_entry)
if __name__ == "__main__":
register()
<file_sep># This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
bl_info = {
"name" : "Hardify",
"author" : "<NAME>",
"description" : "Shade Object smooth and turn on Autosmooth + Optional Weighted Normals Modifier",
"version" : (1, 0, 0),
"blender" : (3, 10, 0),
"location" : "Object Mode > Select Mesh Object > Object Context menu (Rightclick or W)",
"warning" : "",
"doc_url": "https://github.com/SimonStorlSchulke/blender-addons",
"tracker_url": "https://github.com/SimonStorlSchulke/blender-addons/issues",
"category" : "Mesh"
}
import bpy
import math
class OT_hardify(bpy.types.Operator):
bl_idname = "object.hardify"
bl_label = "Hardify Object"
bl_description = "Shade Object smooth and turn on Autosmooth + Optional Weighted Normals Modifier"
bl_options = {"REGISTER", "UNDO"}
autosmooth_angle: bpy.props.FloatProperty(
name = "Autosmooth Angle",
description = "Autosmooth Angle",
default = 30,
)
use_weighted_normal: bpy.props.BoolProperty(
name = "Use Weighted Normals",
description = "Use Weighted Normals",
default = False,
)
@classmethod
def poll(cls, context):
return bpy.context.object and context.object.type == 'MESH'
def execute(self, context):
bpy.ops.object.shade_smooth()
bpy.context.object.data.use_auto_smooth = True
bpy.context.object.data.auto_smooth_angle = math.radians(self.autosmooth_angle)
if self.use_weighted_normal:
for modifier in bpy.context.object.modifiers:
if modifier.type == "WEIGHTED_NORMAL":
return {"FINISHED"}
bpy.ops.object.modifier_add(type='WEIGHTED_NORMAL')
return {"FINISHED"}
def add_contextmenu_entry(self, context):
if context.object is not None and context.object.type == 'MESH':
layout = self.layout
layout.operator(OT_hardify.bl_idname, text="Hardify", icon="MOD_NORMALEDIT")
def register():
bpy.utils.register_class(OT_hardify)
bpy.types.VIEW3D_MT_object_context_menu.prepend(add_contextmenu_entry)
def unregister():
bpy.utils.unregister_class(OT_hardify)
bpy.types.VIEW3D_MT_object_context_menu.remove(add_contextmenu_entry)
|
c6e64ccc550a16ba1652613d4bd8cbc1365254e7
|
[
"Markdown",
"Python"
] | 5
|
Python
|
SimonStorlSchulke/blender-addons
|
1a12ac54b9c870d4562ae069a17676a091ff4b8e
|
46045f7016ec85d19782bcd0e7f20e7f8c619e60
|
refs/heads/master
|
<repo_name>steen919/middleman-foundation-haml-template<file_sep>/Gemfile
source :rubygems
gem "zurb-foundation", "~> 3.0.5"
gem 'middleman', "~> 3.0.0"
gem "redcarpet", "~> 2.1.1"
gem "sass", "~> 3.2.0.alpha.277"
<file_sep>/readme.md
# Middleman-Foundation-Haml-template
This is a simple template to get started with Middleman 3.0, Zurb Foundation 3.0 and Haml as the template language.
To get started you need middleman:
```bash
gem install middleman
```
Copy this template to ~/.middleman/foundation
```bash
mkdir ~/.middleman
cd ~/.middleman
git clone git://github.com/steen919/middleman-foundation-haml-template.git
```
Create a new middleman project in a folder of your choosing (I use ~/dev for my devs...)
```bash
cd ~/dev
middleman init <new_project_name> --template=middleman-foundation-haml-template
cd ~/dev/<new_project_name>
bundle install
```
Start server (open in web browser at localhost:4567)
```bash
bundle exec middleman server
```
Stop server with ctrl-c
When you are ready to deploy you can build your project (the result is in a new folder called 'build'):
```bash
bundle exec middleman build
```
|
ec296c5ba82e32ea6c663b69cbb7b3763fde4e51
|
[
"Markdown",
"Ruby"
] | 2
|
Ruby
|
steen919/middleman-foundation-haml-template
|
7941f91eef076a4913a6c16eba0dda213512e004
|
98a5a4b4c5f1888308158f24ef42c68411c2ae7c
|
refs/heads/master
|
<repo_name>AravindhanV/AddressBookJSONServer<file_sep>/js/contact_form.js
window.addEventListener("DOMContentLoaded", function (event) {
const name = document.getElementById("name");
const textError = document.querySelector(".text-error");
name.addEventListener("input", function () {
if (name.value.length == 0) {
textError.textContent = "";
return;
}
try {
new AddressBookData().name = name.value;
textError.textContent = "";
} catch (e) {
textError.textContent = e;
}
});
});
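// Note: AddressBookData (whose `name` setter is expected to validate the value and throw on
// bad input) is assumed to be defined in another script loaded on this page; it is not part
// of this file.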
|
172a3fcbabca8ab823b2c9faa7d861eccb80940d
|
[
"JavaScript"
] | 1
|
JavaScript
|
AravindhanV/AddressBookJSONServer
|
d16f0ad48cc296c7820320f1779820b44e468d3e
|
75716f049ac16156825e5c09dc9ce5949213d477
|
refs/heads/master
|
<file_sep>import { InMemoryDbService } from 'angular-in-memory-web-api';
export class TestData implements InMemoryDbService {
createDb() {
const bookDetails = [
{id: 100, name: 'Angular 2', category: 'Web Technology'},
{id: 101, name: 'C++', category: 'Programming'},
{id: 102, name: 'Java', category: 'Server Side'},
];
return {books: bookDetails};
}
}
<file_sep>import { EmployeeService } from './employee.service';
import { Observable } from 'rxjs/internal/Observable';
import { BookService } from './book.service';
import { Component } from '@angular/core';
import { Book } from './book';
import 'rxjs/add/operator/map';
import { Emp } from './emp';
import { FormGroup, FormBuilder, Validators } from '@angular/forms';
@Component({
selector: 'app-root',
templateUrl: './app.component.html',
styleUrls: ['./app.component.css']
})
export class AppComponent {
title = 'Angular Observable';
softBook: Book[];
// softBook: Observable<Book[]>; /* Observable with async pipe and ngFor */
// softBook: Observable<Book>; /* Observable with async pipe and ngIf */
// softBook: Observable<string>; /* Observable Map */
// softBook: string; /* Observable Map with subscribe */
empInfo: Emp[];
datasaved = false;
bookForm: FormGroup;
allbook: Observable<Book[]>;
bookidToUpdate = null;
constructor(private formbuilder: FormBuilder,
private bookservices: BookService,
private empservice: EmployeeService) {}
// tslint:disable-next-line:use-life-cycle-interface
ngOnInit() {
this.bookForm = this.formbuilder.group({
name: ['', [Validators.required]],
category: ['', [Validators.required]],
writer: ['', [Validators.required]]
});
this.getSoftBook();
this.getEmpInfo();
}
booktoEdit(bookid: string) {
this.bookservices.getbookbyid(bookid).subscribe(book => {
this.bookidToUpdate = bookid;
this.bookForm.controls['name'].setValue(book.name);
this.bookForm.controls['category'].setValue(book.category);
this.bookForm.controls['writer'].setValue(book.writer);
});
}
onSubmit() {
this.datasaved = false;
let book = this.bookForm.value;
this.createbooks(book);
this.bookForm.reset();
}
createbooks(book: Book) {
if(this.bookidToUpdate == null) {
this.bookservices.createBook(book).subscribe(book => {
this.datasaved = true;
this.getSoftBook();
this.bookidToUpdate = null;
});
} else {
book.id = this.bookidToUpdate;
this.bookservices.updatebook(book).subscribe(book => {
this.datasaved = true;
this.getSoftBook();
this.bookidToUpdate = null;
})
}
}
bookDelete(bookid: string) {
this.bookservices.deleteBook(bookid).subscribe(book => {
this.getSoftBook();
});
}
getSoftBook() {
this.allbook = this.bookservices.getBookFromStore();
}
getEmpInfo() {
this.empservice.getEmployeeDetails().subscribe(employees => this.empInfo = employees);
}
// getSoftBook() {
// this.bookservices.getBookFromStore().subscribe(books => this.softBook = books);
// }
/* Observable with async pipe and ngFor and map */
// getSoftBook() {
// this.softBook = this.bookservices.getBookFromStore(100).map(book => 'Name: ' + book.name);
// }
/* Observable Map with subscribe */
// getSoftBook() {
// this.bookservices.getBookFromStore(100).map(book => 'Name: ' + book.name).subscribe(name => {
// this.softBook = name;
// });
// }
}
<file_sep>import { InMemoryDbService } from 'angular-in-memory-web-api';
export class TestDb implements InMemoryDbService {
createDb() {
const empDetails = [
{id: 101, name: 'Hemant', position: 'UI Developer'},
{id: 102, name: 'Raj', position: 'Java Developer'},
{id: 103, name: 'Kunal', position: 'Python Developer'},
];
const bookDetails = [
{id: 100, name: 'Angular 2', category: 'Web Technology', writer: 'Jon'},
];
return {emp: empDetails, books: bookDetails}
}
}
<file_sep>import { Injectable } from '@angular/core';
import { Observable, of } from 'rxjs';
import { HttpClient } from '@angular/common/http';
import { Emp } from './emp';
@Injectable({
providedIn: 'root'
})
export class EmployeeService {
empUrl = '/api/emp';
constructor(private http: HttpClient) { }
getEmployeeDetails(): Observable<Emp[]> {
return this.http.get<Emp[]>(this.empUrl);
}
}
|
b378090f59d1b613a51f16121f693f1b8bd4f17a
|
[
"TypeScript"
] | 4
|
TypeScript
|
hpwani/angularObservable
|
e07fd007199330cbe59741f74d9f777257227636
|
19d8e91f5064e95238df741361b948be751fc489
|
refs/heads/master
|
<file_sep>package PageOject;
import java.io.IOException;
import java.util.Properties;
import java.util.concurrent.TimeUnit;
import org.openqa.selenium.WebDriver;
import org.openqa.selenium.support.ui.ExpectedConditions;
import org.openqa.selenium.support.ui.WebDriverWait;
import org.testng.Assert;
import org.testng.annotations.AfterClass;
import org.testng.annotations.BeforeClass;
import org.testng.annotations.Test;
import resources.BaseClass;
import resources.TestUtils;
public class LandingPageTest extends BaseClass
{
BaseClass bassClass;
LandingPage landingPage;
WebDriver driver;
Properties prop;
TestUtils testUtils;
WebDriverWait wait;
String title ;
@BeforeClass
public void init() throws IOException
{
bassClass = new BaseClass();
prop = bassClass.initProp();
driver = bassClass.initBrowser(prop);
String url = prop.getProperty("url");
driver.get(url);
landingPage = new LandingPage(driver);
driver.manage().timeouts().implicitlyWait(TestUtils.TIMETOLOAD , TimeUnit.SECONDS);
}
@Test(priority=1)
public void validateTheTitle()
{
title = landingPage.landingPageTitle();
System.out.println("Title of landing page " + title);
Assert.assertEquals(title , testUtils.homePageTitle);
}
/* @Test(priority=2)
public void validatehomeLink()
{
wait = new WebDriverWait(driver , 10);
System.out.println(landingPage.homeLink().getText());
wait.until(ExpectedConditions.elementToBeClickable(landingPage.homeLink));
landingPage.homeLink.click();
}*/
/* @Test(priority=3)
public void validatehomeLinkTitle()
{
title = driver.getTitle();
Assert.assertEquals(title , testUtils.homePageTitle);
}*/
/* @Test(priority=4)
public void verfySignUpClickTest()
{
landingPage.verfySignUpClick();
}
@Test(priority=5)
public void validateSignUpLinkTitle()
{
title = driver.getTitle();
Assert.assertEquals(title , testUtils.homePageTitle);
}
*/
@AfterClass
public void afterClasssetup()
{
System.out.println("All test methods of this test class executed");
driver.quit();
}
}
<file_sep>package PageOject;
import org.openqa.selenium.WebDriver;
import org.openqa.selenium.WebElement;
import org.openqa.selenium.support.FindBy;
import org.openqa.selenium.support.PageFactory;
import resources.BaseClass;
public class LandingPage extends BaseClass {
WebDriver driver;
LandingPage(WebDriver driver)
{
this.driver = driver;
PageFactory.initElements(driver, this);
}
@FindBy(xpath="//input[@name='username']")
WebElement userName;
@FindBy(xpath="//input[@name='password']")
WebElement passWord;
@FindBy(xpath = "//input[@class='btn btn-small']")
WebElement loginButton;
// @FindBy(linkText = "Home")
// @FindBy(partialLinkText = "index.html")
@FindBy(xpath = "//div[@id='navbar-collapse']/ul/li/a")
WebElement homeLink;
// @FindBy(linkText = "Sign Up")
// @FindBy(partialLinkText = "index.html")
@FindBy(xpath = "//*[@id='navbar-collapse']/ul/li[2]/a")
WebElement SingUpClick;
public WebElement verfySignUpClick()
{
SingUpClick.click();
return SingUpClick;
}
public WebElement homeLink()
{
return homeLink;
}
public String landingPageTitle()
{
return driver.getTitle();
}
public HomePage loginIntoCRMLandingPage(String user1 , String pass1) throws InterruptedException
{
userName.sendKeys(user1);
passWord.sendKeys(<PASSWORD>);
// WebDriverWait waitDriver = new WebDriverWait(driver , 5);
// waitDriver.until(ExpectedConditions.elementToBeClickable(loginButton));
Thread.sleep(2000);
loginButton.click();
return new HomePage(driver);
}
}
<file_sep>package PageOject;
import java.io.IOException;
import java.time.LocalDate;
import java.time.format.DateTimeFormatter;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import org.openqa.selenium.By;
import org.openqa.selenium.WebDriver;
import org.openqa.selenium.WebElement;
import org.openqa.selenium.support.FindAll;
import org.openqa.selenium.support.FindBy;
import org.openqa.selenium.support.PageFactory;
import org.openqa.selenium.support.ui.Select;
import resources.BaseClass;
import resources.ReuseMethod;
import resources.TestUtils;
public class NewContactPage extends BaseClass {
Select selectTitle;
ArrayList<String> selectTitleList;
TestUtils testUtils;
HashMap<String , String> map;
ReuseMethod reuseMethod;
int len;
String KeyVal;
DateTimeFormatter dtf = DateTimeFormatter.ofPattern("dd-MM-yyyy");
int currentMonthInt;
NewContactPage(WebDriver driver)
{
this.driver = driver;
PageFactory.initElements(driver , this);
}
@FindBy(xpath="//select[@name='title']")
WebElement titleDD;
@FindBy(xpath="//*[@id='first_name']")
WebElement firstName;
@FindBy(xpath="//*[@id='f_trigger_c_birthday']")
WebElement dob;
@FindBy(xpath="//div[@class='calendar']/table/thead/tr/td[@class='title']")
WebElement monthYearVAlue;
@FindBy(xpath="//div[@class='calendar']/table/thead/tr[2]/td[5]")
WebElement nextYear;
@FindBy(xpath="//div[@class='calendar']/table/thead/tr[2]/td")
WebElement prevYear;
@FindBy(xpath="//div[@class='calendar']/table/thead/tr[2]/td[4]")
WebElement nextMonth;
@FindBy(xpath="//div[@class='calendar']/table/thead/tr[2]/td[2]")
WebElement prevMonth;
/*
* @FindAll({@FindBy(tagName = "frame")})
List<WebElement> frames ;
* *
*/
@FindAll({@FindBy(xpath="//div[@class='calendar']/table/tbody/tr")})
List<WebElement> totalRow;
@FindBy(xpath="//div[@class='calendar']/table/tbody/tr[1]/td")
List<WebElement> totalCol;
String xpath1="//div[@class='calendar']/table/tbody/tr[";
String xpath2="]/td[";
String xpath3 = "]";
public void verifydob()
{
KeyVal = getDatafromMap("DOB"); //get data from excel
dob.click();
LocalDate varDate = LocalDate.parse(KeyVal , dtf) ; //fetch day , month and year from the fetched data from excel
int excelDate = varDate.getDayOfMonth();
int excelMonth = varDate.getMonthValue();
int excelYear = varDate.getYear();
String[] splitIt = monthYearVAlue.getText().split(","); //read the header of the calender in the web page
String calenderMonth = splitIt[0].trim();
String calenderYear = splitIt[1].trim();
int currentYearInt = Integer.parseInt(calenderYear); //convert the string year to integer year.
switch(calenderMonth)
{ //convert the string month into integer month value.
case "January" :
currentMonthInt = 1;
break;
case "February" :
currentMonthInt = 2;
break;
case "March" :
currentMonthInt = 3;
break;
case "April" :
currentMonthInt = 4;
break;
case "May" :
currentMonthInt = 5;
break;
case "June" :
currentMonthInt = 6;
break;
case "July" :
currentMonthInt = 7;
break;
case "August" :
currentMonthInt = 8;
break;
case "September" :
currentMonthInt = 9;
break;
case "October" :
currentMonthInt = 10;
break;
case "November" :
currentMonthInt = 11;
break;
case "December" :
currentMonthInt = 12;
break;
}
while (excelYear>currentYearInt) // if input year is > than present year click next year
{
nextYear.click();
currentYearInt++;
}
while (excelYear<currentYearInt) // if input year is > than present year click prev year
{
prevYear.click();
currentYearInt--;
}
while(excelMonth>currentMonthInt) // if input month is > than present month click next month
{
nextMonth.click();
currentMonthInt++;
}
while(excelMonth<currentMonthInt) // if input month is > than present month click next month
{
prevMonth.click();
currentMonthInt--;
}
int calenderRowCount = totalRow.size();
int calenderColCount = totalCol.size();
String excelDateString = Integer.toString(excelDate);
for(int i=1 ; i<=calenderRowCount ; i++)
{
for(int j=2;j<=calenderColCount ; j++)
{
String dateString = xpath1 + i + xpath2 + j + xpath3 ;
String dateDataString = driver.findElement(By.xpath(dateString)).getText();
// int dateCell = Integer.parseInt(dateDataString);
// if(dateCell ==excelDate)
if(excelDateString.equalsIgnoreCase(dateDataString))
{
driver.findElement(By.xpath(dateString)).click();
}
}
}
KeyVal=null;
}
public void verifyTitleList()
{
selectTitle = new Select(titleDD);
List<WebElement> s = selectTitle.getOptions();
for (int i=0; i< s.size() ; i++)
{
System.out.println("list :"+ s.get(i).getText());
}
}
public void setFirstName()
{
KeyVal = getDatafromMap("<NAME>");
firstName.sendKeys(KeyVal);
KeyVal=null;
}
public void setTitleValue()
{
KeyVal = getDatafromMap("Title");
selectTitle.selectByValue(KeyVal);
KeyVal=null;
}
public void getDataFromExcel() throws IOException
{
map = new HashMap<String, String>();
map = ReuseMethod.getTestCaseData("TestData", "Add New Contact");
System.out.println(map);
}
public String getDatafromMap(String keyValue)
{
String valueForKey = null;
len = map.size();
for(int i =0 ; i <len ; i++)
{
if(map.containsKey(keyValue))
{
valueForKey = map.get(keyValue);
}
}
return valueForKey;
}
}
<file_sep>browser=chrome
url=https://classic.crmpro.com/
username=naveenautomation
pwd=<PASSWORD>
excel=C:\\Users\\Ojha\\CRMSite\\testData.xlsx<file_sep>package PageOject;
import java.io.IOException;
import java.util.Properties;
import java.util.concurrent.TimeUnit;
import org.openqa.selenium.WebDriver;
import org.testng.annotations.AfterClass;
import org.testng.annotations.AfterMethod;
import org.testng.annotations.BeforeClass;
import org.testng.annotations.BeforeMethod;
import org.testng.annotations.Test;
import extendsReportPackage.ExtentReporterNG;
import resources.BaseClass;
import resources.TestUtils;
public class HomePageTest extends ExtentReporterNG {
BaseClass baseClass;
Properties prop;
WebDriver driver;
LandingPage landingPage;
HomePage homePage;
LandingPageTest landingPageTest;
@BeforeClass
public void init() throws IOException, InterruptedException {
//landingPageTest.init();
baseClass = new BaseClass();
prop= baseClass.initProp();
driver = baseClass.initBrowser(prop);
driver.get(prop.getProperty("url"));
landingPage = new LandingPage(driver);
driver.manage().timeouts().implicitlyWait(TestUtils.TIMETOLOAD , TimeUnit.SECONDS);
homePage = landingPage.loginIntoCRMLandingPage(prop.getProperty("username"), prop.getProperty("pwd"));
//driver.manage().timeouts().implicitlyWait(TestUtils.TIMETOLOAD , TimeUnit.SECONDS);
}
@Test(priority=1)
public void verifyHomePageTitle()
{
String title = homePage.verifyHomePageTitle();
System.out.println(title);
}
@Test(priority=2)
public void verifyFrameInfo()
{
homePage.verifyFrameInfo();
}
@Test(priority=3)
public void verifyNewContact()
{
homePage.verifyNewContact();
driver.manage().timeouts().implicitlyWait(TestUtils.TIMETOLOAD , TimeUnit.SECONDS);
}
@AfterClass
public void endHomePAge()
{
driver.quit();
}
}
<file_sep>package PageOject;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.Properties;
import java.util.concurrent.TimeUnit;
import org.openqa.selenium.WebDriver;
import org.openqa.selenium.WebElement;
import org.openqa.selenium.support.ui.Select;
import org.testng.annotations.AfterClass;
import org.testng.annotations.BeforeClass;
import org.testng.annotations.Test;
import resources.BaseClass;
import resources.ReuseMethod;
import resources.TestUtils;
public class NewContactPageTest
{
BaseClass baseClass;
Properties prop;
WebDriver driver;
LandingPage landingPage;
HomePage homePage;
ReuseMethod reuseMethod;
LandingPageTest landingPageTest;
NewContactPage newContactPage;
Select selectTitle;
ArrayList<String> s;
TestUtils testUtils;
@BeforeClass
public void init() throws IOException, InterruptedException {
baseClass = new BaseClass();
prop= baseClass.initProp();
driver = baseClass.initBrowser(prop);
driver.get(prop.getProperty("url"));
landingPage = new LandingPage(driver);
driver.manage().timeouts().implicitlyWait(TestUtils.TIMETOLOAD , TimeUnit.SECONDS);
homePage = landingPage.loginIntoCRMLandingPage(prop.getProperty("username"), prop.getProperty("pwd"));
//driver.manage().timeouts().implicitlyWait(TestUtils.TIMETOLOAD , TimeUnit.SECONDS);
newContactPage = homePage.verifyNewContact();
newContactPage.getDataFromExcel();
}
@Test(priority=1)
public void verifyTitle()
{
System.out.println(" title of new contact page is :"+driver.getTitle());
}
@Test(priority=2)
public void verifyTitleList() throws IOException
{
newContactPage.verifyTitleList();
newContactPage.setTitleValue();
}
@Test(priority=3)
public void verifyFirstName() throws IOException
{
newContactPage.setFirstName();
newContactPage.verifydob();
}
/*@AfterClass
public void closeTab()
{
driver.quit();
}*/
}
|
f8c09cb72c7ce8ffebf3b191357eb93eaa4a46cc
|
[
"Java",
"INI"
] | 6
|
Java
|
garimaOjha21/CRMSite
|
b084f9162a195bee9ceabaa89692a0d998cee3a2
|
71ef354fdcf6d1b3376d69b9a6c7005ddfd69726
|
refs/heads/master
|
<repo_name>giladsegal/AbstractNotifications<file_sep>/src/AbstractFeedTree.ts
import * as vscode from 'vscode';
import {FeedProvider} from './FeedProvider';
export function showAbstractFeedTree(
context: vscode.ExtensionContext,
accessToken: string
) {
const treeDataProvider = new FeedProvider(context, accessToken);
vscode.window.createTreeView('notiFeed', {
treeDataProvider
});
return treeDataProvider;
}
<file_sep>/README.md
# NOTIFEED (Beta)
View Abstract's commit log and get notified of new commits as they arrive, without leaving your IDE!
## Features
- A sidebar that display all the commits to your Abstract project master branch.
- Click on a commit to view it in Abstract web.
- Your UX/UI designer changed the design you're working on? No problerm! You will be notified immediatly 😎.
## Requirements
1. An access token to Abstract is required. Get it from [here](https://app.goabstract.com/account/tokens)
1. Make sure you're invited to your designer's Abstract Project.
## Known Issues
This extension is in beta and so does Abstract's SDK, **EXPECT THINGS TO BREAK**.
## Release Notes
This plugin is the result of [Design Tools Hackathon 2018](https://www.designtoolstlv.com/).
Powered by [Abstract SDK](https://sdk.goabstract.com/).
### 0.1.0
Initial release:
- View all of your organization's project's commits.
- Click on a commit to open it in Abstract Web.
- A notification will pop every time a commit was added to master.
**Enjoy!**
<file_sep>/types.d.ts
declare module 'abstract-sdk' {
namespace AbstractSdk {
export enum TRANSPORTS {
API,
CLI
}
export function Client(params: {
accessToken: string;
transport: TRANSPORTS;
}): AbstractClient;
export type AbstractClient = {
branches: {
list: (
description: ProjectDescriptor,
filter: {filter?: 'active' | 'archived' | 'mine'}
) => Promise<Branch[]>;
info: (descripiton: BranchDescriptor) => Promise<Branch>;
};
commits: {
list: (descriptor: BranchDescriptor) => Promise<Commit[]>;
info(descriptor: CommitDescriptor): Promise<Commit>;
};
projects: {
list: (descriptor?: string) => Promise<Project[]>;
};
organizations: {
list: () => Promise<Organization[]>;
};
};
export type Branch = {
createdAt: string;
description: string;
divergedFromBranchId: string;
head: string;
id: string;
mergeSha: string;
mergedIntoBranchId: string;
name: string;
parent: string;
projectId: string;
startedAtSha: string;
status: BranchStatus;
updatedAt: string;
userId: string;
userName: string;
};
export type BranchStatus =
| 'active'
| 'wip'
| 'feedback'
| 'review'
| 'merged'
| 'archived'
| 'deleted'
| 'diverged';
export type Commit = {
description: string;
destinationBranchId: string;
destinationBranchName: string;
fileIds: string[];
parents: string[];
projectId: string;
sha: string;
sourceBranchId: string;
sourceBranchName: string;
time: string;
title: string;
type: CommitType;
userId: string;
userName: string;
};
export type CommitType =
| 'NORMAL'
| 'PROJECT_CREATED'
| 'FILE_ADDED'
| 'FILE_RENAMED'
| 'FILE_DELETED'
| 'FILE_REPLACED'
| 'LIBRARY_ADDED'
| 'LIBRARY_REMOVED'
| 'RESTORE'
| 'UPDATE'
| 'MERGE';
export type Project = {
about: string;
archivedAt: string;
color: string;
createdAt: string;
createdByUser: any;
description: string;
firstPushedAt: string;
id: string;
name: string;
organizationId: string;
pushedAt: string;
repoCreatedAt: string;
sizeInBytes: number;
updatedAt: string;
visibility: 'organization' | 'specific';
};
export type ProjectDescriptor = {
projectId: string;
};
export type BranchDescriptor = {
projectId: string;
branchId: string;
};
export type CommitDescriptor = {
projectId: string;
branchId: string | 'master';
sha?: string;
};
export type Organization = {
createdAt: string;
hasBillingInfo: boolean;
id: string;
isUsernameOrganization: boolean;
isWithinSubscriptionTerm: boolean;
logoUrl: string;
name: string;
restrictedToDomains: string[];
trialEndsAt: string;
updatedAt: string;
userId: string;
};
}
export = AbstractSdk;
}
/*
export = WixEventually;
declare function WixEventually(fn: Function, opts?: WixEventually.Opts): Promise<void>;
declare namespace WixEventually {
export interface Opts {
timeout?: number;
interval?: number;
}
function _with(overrides: Opts): (fn: Function, opts?: WixEventually.Opts) => Promise<void>;
export { _with as with }
}
*/
<file_sep>/src/Timer.ts
export class Timer {
private isRunning = false;
private intervalHandle?: NodeJS.Timer;
constructor(private readonly duration: number) {}
stop = () => {
this.isRunning = false;
this.intervalHandle && clearInterval(this.intervalHandle);
};
run = (task: () => Promise<void>) => {
if (this.isRunning) {
this.stop();
}
const executeTask = async () => {
if (this.isRunning) {
return;
}
this.isRunning = true;
await task();
this.isRunning = false;
};
this.intervalHandle = setInterval(executeTask, this.duration);
executeTask();
};
}
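// Usage sketch (illustrative only; names are not taken from the extension code):
// const poll = new Timer(5 * 60 * 1000);
// poll.run(async () => { /* fetch data and compare with the previous snapshot */ });
// poll.stop();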
<file_sep>/src/dataService.ts
import * as Abstract from 'abstract-sdk';
export class DataService {
abstract: Abstract.AbstractClient;
constructor(accessToken: string) {
this.abstract = Abstract.Client({
accessToken,
transport: Abstract.TRANSPORTS.API
});
}
getAllProjects = (): Promise<Abstract.Project[]> => {
return this.abstract.projects.list();
};
getAllCommits = ({
projectId,
branchId
}: Abstract.CommitDescriptor): Promise<Abstract.Commit[]> => {
return this.abstract.commits.list({
projectId,
branchId
});
};
getAllOrganizations = (): Promise<Abstract.Organization[]> => {
return this.abstract.organizations.list();
};
getProject = (
projects: Abstract.Project[],
{name}: {name: string}
): Abstract.Project | undefined => {
return projects.find(project => project.name === name);
};
getBranch = ({
projectId,
branchId
}: Abstract.BranchDescriptor): Promise<Abstract.Branch> => {
return this.abstract.branches.info({
projectId,
branchId
});
};
getCommitUrl = (commit: Abstract.Commit) => {
return `https://app.goabstract.com/projects/${
commit.projectId
}/branches/master/commits/${commit.sha}`;
};
}
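// Usage sketch (illustrative only):
// const ds = new DataService(accessToken);
// const projects = await ds.getAllProjects();
// const commits = await ds.getAllCommits({projectId: projects[0].id, branchId: 'master'});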
<file_sep>/src/commands.ts
import * as vscode from 'vscode';
// import {GlobalStateManager} from './globalState';
import {
showAccessTokenInputBox,
showTokenDeletedMessage,
showTokenSavedMessage
} from './messages';
export type RegisterCommandFn = (
registerCommand: string,
callback: (...args: any[]) => any,
thisArg?: any
) => vscode.Disposable;
let commands: vscode.Disposable[];
export function registerCommands(
registerCommand: RegisterCommandFn,
globalState: any
): vscode.Disposable[] {
if (commands) {
return commands;
}
const commandDisposer1 = registerCommand(
'notifeed.saveAccessToken',
async () => {
const accessToken = await showAccessTokenInputBox();
if (accessToken) {
await globalState.setAccessToken(accessToken);
showTokenSavedMessage();
}
}
);
const commandDisposer2 = registerCommand(
'notifeed.deleteAccessToken',
async () => {
await globalState.setAccessToken(undefined);
showTokenDeletedMessage();
}
);
commands = [commandDisposer1, commandDisposer2];
return commands;
}
<file_sep>/src/globalState.ts
import * as vscode from 'vscode';
const ABSTRACT_TOKEN_KEY = 'abstractApi';
export const GlobalStateManager = (globalState: vscode.Memento) => {
return {
setAccessToken: (accessToken: string) => {
return globalState.update(ABSTRACT_TOKEN_KEY, accessToken);
},
getAccessToken: () => {
return globalState.get<string>(ABSTRACT_TOKEN_KEY);
}
};
};
<file_sep>/src/messages.ts
import * as vscode from 'vscode';
export const showAccessTokenInputBox = () => {
return vscode.window.showInputBox({
prompt: 'Enter your Abstract access token',
ignoreFocusOut: true,
password: true,
placeHolder: 'Your access token...'
});
};
export const showMissingTokenError = () => {
vscode.window.showErrorMessage(
'Cannot use Abstract Notifications without an access token'
);
};
export const showTokenSavedMessage = () => {
vscode.window.showInformationMessage('Token saved successfully');
};
export const showTokenDeletedMessage = () => {
vscode.window.showInformationMessage('Token deleted successfully');
};
export const showProjectChangedWarning = (projectName: string) => {
vscode.window.showWarningMessage(`${projectName} was updated!`);
};
<file_sep>/src/extension.ts
'use strict';
// The module 'vscode' contains the VS Code extensibility API
// Import the module and reference it with the alias vscode in your code below
import * as vscode from 'vscode';
import {showAbstractFeedTree} from './AbstractFeedTree';
import {FeedProvider} from './FeedProvider';
import {registerCommands} from './commands';
import {GlobalStateManager} from './globalState';
import {
showAccessTokenInputBox,
showMissingTokenError,
showProjectChangedWarning
} from './messages';
let treeDataProvider: FeedProvider;
let disposables: vscode.Disposable[] = [];
// this method is called when your extension is activated
// your extension is activated the very first time the command is executed
export async function activate(context: vscode.ExtensionContext) {
// retrieve existing api key
let globalState = GlobalStateManager(context.globalState);
disposables = registerCommands(vscode.commands.registerCommand, globalState);
let accessToken = globalState.getAccessToken();
if (!accessToken) {
// request the user to enter their API key
accessToken = await showAccessTokenInputBox();
if (accessToken) {
// save the user input into the global state
await globalState.setAccessToken(accessToken);
} else {
// user canceled the input box
showMissingTokenError();
return;
}
}
treeDataProvider = showAbstractFeedTree(context, accessToken!);
treeDataProvider.beginUpdating();
treeDataProvider.onProjectChanged(({projectName}) => {
showProjectChangedWarning(projectName);
});
}
// this method is called when your extension is deactivated
export function deactivate() {
treeDataProvider.stopUpdating();
disposables.forEach(disposable => disposable.dispose());
}
<file_sep>/src/FeedProvider.ts
import * as vscode from 'vscode';
import * as path from 'path';
import {DataService} from './dataService';
import {Timer} from './Timer';
import {Organization, Project, Commit} from 'abstract-sdk';
import deepEqual = require('deep-equal');
export interface Entry {
uri?: vscode.Uri;
id: string;
title: string;
type: string;
obj?: any;
}
export interface TreeData {
organizations: Entry[];
projects: Entry[];
commits: Entry[];
}
export interface ProjectChange {
projectName: string;
}
export class FeedProvider implements vscode.TreeDataProvider<Entry> {
extensionPath: string;
dataService: DataService;
timer = new Timer(5 * 60 * 1000);
currentTreeData?: TreeData;
private _onDidChangeTreeData: vscode.EventEmitter<
Entry
> = new vscode.EventEmitter<Entry>();
readonly onDidChangeTreeData: vscode.Event<Entry> = this._onDidChangeTreeData
.event;
private _onProjectChanged: vscode.EventEmitter<
ProjectChange
> = new vscode.EventEmitter<ProjectChange>();
onProjectChanged = this._onProjectChanged.event;
constructor(context: vscode.ExtensionContext, accessToken: string) {
this.extensionPath = context.extensionPath;
this.dataService = new DataService(accessToken);
}
getTreeItem(element: Entry): vscode.TreeItem {
const treeItem = new vscode.TreeItem(
element.uri || (element.title as any),
vscode.TreeItemCollapsibleState.Collapsed
);
treeItem.label = element.title;
treeItem.iconPath = vscode.Uri.file(
path.join(this.extensionPath, 'resources', element.type + '.svg')
);
if (element.type === 'merge') {
treeItem.collapsibleState = vscode.TreeItemCollapsibleState.None;
// opens the commit in Abstract web in the browser
treeItem.command = {
command: 'vscode.open',
title: 'Open',
arguments: [
vscode.Uri.parse(this.dataService.getCommitUrl(element.obj))
]
};
}
return treeItem;
}
async getChildren(element?: Entry | undefined): Promise<Entry[]> {
if (!this.currentTreeData) {
this.currentTreeData = await this.getTreeData();
}
if (!element) {
return this.currentTreeData.organizations;
} else if (element.type === 'organization') {
return this.currentTreeData.projects;
} else if (element.type === 'project') {
return this.currentTreeData.commits.filter(
commit => commit.obj.projectId === element.id
);
}
return [];
}
beginUpdating(): void {
this.timer.run(async () => {
console.log('Checking for changes....');
const newTreeData = await this.getTreeData();
if (!deepEqual(this.currentTreeData, newTreeData, {strict: true})) {
// change that is not the initial fetch
if (this.currentTreeData && newTreeData) {
const currentProjects = this.currentTreeData.projects;
const newProjects = newTreeData.projects;
const currentProjectsWithChanges = currentProjects.filter(
currentProject =>
newProjects.some(
newProject =>
newProject.id === currentProject.id &&
currentProject.obj.pushedAt !== newProject.obj.pushedAt &&
this.getProjectCommitCount(
this.currentTreeData!,
currentProject.id
) !== this.getProjectCommitCount(newTreeData, newProject.id)
)
);
console.log(`${currentProjectsWithChanges.length} projects changed!`);
currentProjectsWithChanges.forEach(p => {
this._onProjectChanged.fire({projectName: p.title});
});
}
this.currentTreeData = newTreeData;
this._onDidChangeTreeData.fire();
}
console.log('going to sleep...');
});
}
stopUpdating(): void {
this.timer.stop();
}
private getTreeData = async (): Promise<TreeData> => {
const organizations = await this.dataService.getAllOrganizations();
const projects = await this.dataService.getAllProjects();
const commitsPerProject = await Promise.all(
projects
.map(p => p.id)
.map(pid =>
this.dataService.getAllCommits({projectId: pid, branchId: 'master'})
)
);
const allCommits: Commit[] = Array.prototype.concat.apply(
[],
commitsPerProject
);
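// Flattens the per-project commit arrays into a single list (equivalent to
// commitsPerProject.flat() on ES2019+ targets).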
const commits = allCommits.filter(commit => commit.type === 'MERGE');
return {
organizations: organizations.map(this.oraganizationToEntry),
projects: projects.map(this.projectToEntry),
commits: commits.map(this.commitToEntry)
};
};
private getProjectCommitCount = (treeData: TreeData, projectId: string) => {
return treeData.commits.filter(c => c.obj!.projectId === projectId).length;
};
private oraganizationToEntry = (organization: Organization): Entry => ({
uri: vscode.Uri.parse('abstract://org/' + organization.id),
id: organization.id,
title: organization.name,
type: 'organization',
obj: organization
});
private projectToEntry = (project: Project): Entry => ({
uri: vscode.Uri.parse('abstract://project/' + project.id),
id: project.id,
title: project.name,
type: 'project',
obj: project
});
private commitToEntry = (commit: Commit): Entry => ({
type: 'merge',
id: commit.sha,
title: commit.title,
obj: commit
});
}
|
5952201d557651b8039e4748574d0859e24bea6c
|
[
"Markdown",
"TypeScript"
] | 10
|
TypeScript
|
giladsegal/AbstractNotifications
|
bf4da7f36b6c97b9e583dce7317caa5fbbc920e0
|
5001108248d6132dcc647a6ad39aa02f0f086d3e
|
refs/heads/master
|
<repo_name>tyn520215/reboxUpdate<file_sep>/jquery-rebox.js
/*
* jQuery Rebox [http://trentrichardson.com/examples/jQuery-Rebox]
* By: <NAME> [http://trentrichardson.com]
*
* Copyright 2014 <NAME>
* Dual licensed under the MIT license.
* http://trentrichardson.com/Impromptu/MIT-LICENSE.txt
*/
(function($){
$.rebox = function($this, options){
this.settings = $.extend(true, {}, $.rebox.defaults, options);
this.$el = $this; // parent container holding items
this.$box = null; // the lightbox modal
this.$items = null; // recomputed each time its opened
this.idx = 0; // of the $items which index are we on
this.enable();
};
$.rebox.defaults = {
theme: 'rebox', // class name parent gets (for your css)
selector: null, // the selector to delegate to, should be to the <a> which contains an <img>
prev: '←', // use an image, text, whatever for the previous button
next: '→', // use an image, text, whatever for the next button
deg:0,
changeDeg:90,
scale:1,
changeScale:0.2,
blowup:'+',
shrink:'-',
param:{ x:0,y:0,left:100,top:100,flags:false},
leftRote:'<img style="width:32px" src="'+basePath+'/js/picxc/leftRotate.png"/>',
rightRote:'<img style="width:32px" src="'+basePath+'/js/picxc/rightRotate.png"/>',
loading: '%', // use an image, text, whatever for the loading notification
close: '×', // use an image, text, whatever for the close button
speed: 400, // speed to fade in or out
zIndex: 9999, // zIndex to apply to the outer container
cycle: true, // whether to cycle through galleries or stop at ends
captionAttr: 'title', // name of the attribute to grab the caption from
template: 'image', // the default template to be used (see templates below)
templates: { // define templates to create the elements you need function($item, settings)
image: function($item, settings, callback){
return $('<img src="'+ $item.attr('href') +'" id="contentImg" style="position: absolute;cursor: move;" class="'+ settings.theme +'-content" />').load(callback);
}
}
};
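// Note (descriptive, added for clarity): `param` above holds the drag state used by move() and
// the mousedown handler in goto() (x/y = pointer start, left/top = image offset, flags = dragging),
// while `deg`/`scale` track the rotation and zoom applied to the image via CSS transforms.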
$.rebox.setDefaults = function(options){
$.rebox.defaults = $.extend(true, {}, $.rebox.defaults, options);
};
$.rebox.lookup = { i: 0 };
$.extend($.rebox.prototype, {
enable: function(){
var t = this;
return t.$el.on('click.rebox', t.settings.selector, function(e){
e.preventDefault();
t.open(this);
});
},
open: function(i){
var t = this;
// figure out where to start
t.$items = t.settings.selector === null? t.$el : t.$el.find(t.settings.selector);
if(isNaN(i)){
i = t.$items.index(i);
}
// build the rebox
t.$box = $('<div class="'+ t.settings.theme +'" style="display:none;">'+
'<a href="#" class="'+ t.settings.theme +'-close '+ t.settings.theme +'-button">'+ t.settings.close +'</a>' +
'<a href="#" class="'+ t.settings.theme +'-prev '+ t.settings.theme +'-button">'+ t.settings.prev +'</a>' +
'<a href="#" class="'+ t.settings.theme +'-next '+ t.settings.theme +'-button">'+ t.settings.next +'</a>' +
'<a href="#" class="'+ t.settings.theme +'-blowup '+ t.settings.theme +'-button">'+ t.settings.blowup +'</a>' +
'<a href="#" class="'+ t.settings.theme +'-shrink '+ t.settings.theme +'-button">'+ t.settings.shrink +'</a>' +
'<a href="#" class="'+ t.settings.theme +'-leftRote '+ t.settings.theme +'-button">'+ t.settings.leftRote +'</a>' +
'<a href="#" class="'+ t.settings.theme +'-rightRote '+ t.settings.theme +'-button">'+ t.settings.rightRote +'</a>' +
'<div class="'+ t.settings.theme +'-contents"></div>'+
'<div class="'+ t.settings.theme +'-caption"><p></p></div>' +
'</div>').appendTo('body').css('zIndex',t.settings.zIndex).fadeIn(t.settings.speed)
.on('click.rebox','.'+t.settings.theme +'-close', function(e){ e.preventDefault(); t.close(); })
.on('click.rebox','.'+t.settings.theme +'-next', function(e){ e.preventDefault(); t.next(); })
.on('click.rebox','.'+t.settings.theme +'-prev', function(e){ e.preventDefault(); t.prev(); })
.on('click.rebox','.'+t.settings.theme +'-blowup', function(e){ e.preventDefault(); t.blowup(); })
.on('click.rebox','.'+t.settings.theme +'-shrink', function(e){ e.preventDefault(); t.shrink(); })
.on('click.rebox','.'+t.settings.theme +'-leftRote', function(e){ e.preventDefault(); t.leftRote(); })
.on('click.rebox','.'+t.settings.theme +'-rightRote', function(e){ e.preventDefault(); t.rightRote(); });
// add some key hooks
$(document).on('swipeLeft.rebox', function(e){ t.next(); })
.on('swipeRight.rebox', function(e){ t.prev(); })
.on('keydown.rebox', function(e){
e.preventDefault();
var key = (window.event) ? event.keyCode : e.keyCode;
switch(key){
case 27: t.close(); break; // escape key closes
case 37: t.prev(); break; // left arrow to prev
case 39: t.next(); break; // right arrow to next
}
});
t.$el.trigger('rebox:open',[t]);
t.goto(i);
return t.$el;
},
close: function(){
var t = this;
if(t.$box && t.$box.length){
t.$box.fadeOut(t.settings.speed, function(e){
t.$box.remove();
t.$box = null;
t.$el.trigger('rebox:close',[t]);
});
}
$(document).off('.rebox');
return t.$el;
},
move:function(){
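// Drag support: goto()'s mousedown handler sets param.flags and records the
// cursor position; while the flag is set, shift the image by the cursor delta.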
var t =this;
var contImg = document.getElementById('contentImg');
var param = t.settings.param;
document.onmousemove = MyMouseMove;
function MyMouseMove(event){
if(param.flags){
var nowX = event.clientX, nowY = event.clientY;
var disX = nowX - param.x, disY = nowY - param.y;
contImg.style.left = parseInt(param.left) + disX + "px";
contImg.style.top = parseInt(param.top) + disY + "px";
}
}
document.onmouseup= MyMouseUp;
function MyMouseUp(event){
param.flags=false;
param.left=t.getCss(contImg,'left');
param.top=t.getCss(contImg,'top');
}
},
getCss:function(o,key){
return o.currentStyle? o.currentStyle[key] : document.defaultView.getComputedStyle(o,false)[key];
},
blowup:function(){
var t = this;
var scale = t.settings.scale;
scale+=t.settings.changeScale;
var reImg = t.$box.find('.rebox-content');
if(reImg){
reImg.css('transform','scale('+scale+','+scale+')')
}
t.settings.scale=scale
},
shrink:function(){
var t = this;
var scale = t.settings.scale;
scale-=t.settings.changeScale;
var reImg = t.$box.find('.rebox-content');
if(reImg&&scale>=0.2){
reImg.css('transform','scale('+scale+','+scale+')');
t.settings.scale=scale
}
},
leftRote:function(){
var t = this;
var deg = t.settings.deg;
deg-=t.settings.changeDeg;
var reImg = t.$box.find('.rebox-content');
if(reImg){
reImg.css('transform','rotate('+deg+'deg)')
}
t.settings.deg=deg;
},
rightRote:function(){
var t = this;
var deg = t.settings.deg;
deg+=t.settings.changeDeg;
var reImg = t.$box.find('.rebox-content');
if(reImg){
reImg.css('transform','rotate('+deg+'deg)')
}
t.settings.deg=deg;
},
goto: function(i){
var t = this,
$item = $(t.$items[i]),
captionVal = $item.attr(t.settings.captionAttr),
$cap = t.$box.children('.'+ t.settings.theme +'-caption')[captionVal?'show':'hide']().children('p').text(captionVal),
$bi = t.$box.children('.'+ t.settings.theme +'-contents'),
$img = null;
if($item.length){
t.idx = i;
$bi.html('<div class="'+ t.settings.theme +'-loading '+ t.settings.theme +'-button">'+ t.settings.loading +'</div>');
$img = t.settings.templates[$item.data('rebox-template') || t.settings.template]($item, t.settings, function(content){
$bi.empty().append($(this));
$(this).mousedown(function(event){
t.settings.param.flags=true;
t.settings.param.x=event.clientX;
t.settings.param.y=event.clientY;
});
t.move()
});
if(t.$items.length == 1 || !t.settings.cycle){
t.$box.children('.'+ t.settings.theme +'-prev')[i<=0 ? 'hide' : 'show']();
t.$box.children('.'+ t.settings.theme +'-next')[i>=t.$items.length-1 ? 'hide' : 'show']();
}
t.$el.trigger('rebox:goto',[t, i, $item, $img]);
}
return t.$el;
},
prev: function(){
var t = this;
t.setDeful();
return t.goto(t.idx===0? t.$items.length-1 : t.idx-1);
},
next: function(){
var t = this;
t.setDeful();
return t.goto(t.idx===t.$items.length-1? 0 : t.idx+1);
},
setDeful:function(){
var t = this;
t.settings.deg=0;
t.settings.scale=1;
},
disable: function(){
var t = this;
return t.close().off('.rebox').trigger('rebox:disable',[t]);
},
destroy: function(){
var t = this;
return t.disable().removeData('rebox').trigger('rebox:destroy');
},
option: function(key, val){
var t = this;
if(val !== undefined){
t.settings[key] = val;
return t.disable().enable();
}
return t.settings[key];
}
});
$.fn.rebox = function(o) {
o = o || {};
var tmp_args = Array.prototype.slice.call(arguments);
if (typeof(o) == 'string'){
if(o == 'option' && typeof(tmp_args[1]) == 'string' && tmp_args.length === 2){
var inst = $.rebox.lookup[$(this).data('rebox')];
return inst[o].apply(inst, tmp_args.slice(1));
}
else return this.each(function() {
var inst = $.rebox.lookup[$(this).data('rebox')];
inst[o].apply(inst, tmp_args.slice(1));
});
} else return this.each(function() {
var $t = $(this);
$.rebox.lookup[++$.rebox.lookup.i] = new $.rebox($t, o);
$t.data('rebox', $.rebox.lookup.i);
});
};
})(window.jQuery || window.Zepto || window.$);
<file_sep>/README.md
# reboxUpdate
Modifies rebox to add zoom and rotation support<br/>
1. Use the changeDeg parameter to set how many degrees each rotation step turns the image<br/>
2. Use the changeScale parameter to set how much each zoom step changes the scale<br/>
3. Adds image dragging<br/>
|
ae21d2d32af1a7615c3397e8136e502dfedbf98d
|
[
"JavaScript",
"Markdown"
] | 2
|
JavaScript
|
tyn520215/reboxUpdate
|
239759e42b0bbc2c4cc60e826254471d307a6ac6
|
60173737d82faa7fb8633eeeca19aeeefb4f1ffe
|
refs/heads/master
|
<file_sep>package main
import (
"flag"
"fmt"
"strings"
"strconv"
"os"
"bufio"
"github.com/gologme/log"
"github.com/cerisara/activityserve"
)
/*
Notes:
- messages received when following someone are not stored locally: they are sent instantly to the followers, who process them but do not save them
*/
var actor activityserve.Actor
// const actype = "Service"
const actype = "Person"
func myMsg() {
fmt.Printf("mymsg %v\n",actor)
}
func cli() {
reader := bufio.NewReader(os.Stdin)
for ;; {
fmt.Print("-> ")
text, _ := reader.ReadString('\n')
text = strings.Replace(text, "\n", "", -1)
if strings.HasPrefix(text,"post ") {
post(text[5:])
} else if strings.Compare(text,"l") == 0 { myMsg()
} else if strings.Compare(text,"w") == 0 {
fmt.Printf("whoami %v\n",actor.WhoAmI())
} else if strings.Compare(text,"following") == 0 {
fl := actor.Following()
fmt.Printf("following: %v\n",fl)
} else if strings.Compare(text,"followers") == 0 {
fl := actor.Followers()
fmt.Printf("followers: %v\n",fl)
} else if strings.HasPrefix(text,"follow ") {
follow(text[7:])
} else if strings.Compare("quit", text) == 0 {
break
}
}
}
func post(s string) {
fmt.Println("posting "+s)
actor.CreateNote(s, "")
}
func follow(u string) {
fmt.Println("following "+u)
actor.Follow(u)
}
func gotmsg(o map[string]interface{}) {
fmt.Printf("INCOMING MSG: FROM %v\n",o["attributedTo"])
fmt.Printf("%v\n",o["content"])
/*
gokey attributedTo http://actpub.duckdns.org/detson
gokey cc http://actpub.duckdns.org/detson/followers
gokey content toto est beau
gokey id http://actpub.duckdns.org/detson/item/h2V5X80ZLmy7rUYZ
gokey published 2020-02-23T11:44:20+01:00
gokey to https://www.w3.org/ns/activitystreams#Public
gokey type Note
gokey url http://actpub.duckdns.org/detson/item/h2V5X80ZLmy7rUYZ
*/
}
func main() {
debugFlag := flag.Bool("debug", false, "set to true to get debugging information in the console")
flag.Parse()
if *debugFlag == true {
log.EnableLevel("error")
} else {
log.DisableLevel("info")
}
activityserve.Setup("config.ini", *debugFlag)
// get the port and actor name also here
file, err := os.Open("config.ini")
if err != nil { log.Fatal(err) }
defer file.Close()
scanner := bufio.NewScanner(file)
var userag string = "newact"
var userdesc string = "I'm a bot"
var port int = 8081
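// Minimal hand-rolled parse of config.ini: pull userAgent and userDesc out of
// lines like `userAgent = "polson"` (split on the quotes) and port out of
// `port = 7938` (split on spaces), instead of using a full INI parser.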
for scanner.Scan() {
s := scanner.Text()
if strings.HasPrefix(s,"userAgent") {
ss := strings.Split(s,"\"")
userag = ss[len(ss)-2]
} else if strings.HasPrefix(s,"userDesc") {
ss := strings.Split(s,"\"")
userdesc = ss[len(ss)-2]
} else if strings.HasPrefix(s,"port") {
ss := strings.Split(s," ")
port,_ = strconv.Atoi(ss[len(ss)-1])
}
}
fmt.Println("loaded userag "+userag+ " desc "+userdesc + " port "+strconv.Itoa(port))
// This creates the actor if it doesn't exist.
actor, _ = activityserve.GetActor(userag, userdesc, actype)
// actor.Follow("https://pleroma.site/users/qwazix")
// actor.CreateNote("Hello World!", "")
// let's boost @tzo's fox
// actor.Announce("https://cybre.space/@tzo/102564367759300737")
// this can be run any subsequent time
// actor, _ := activityserve.LoadActor("activityserve_test_actor_2")
// available actor events at this point are .OnReceiveContent and .OnFollow
actor.OnReceiveContent = func(activity map[string]interface{}) {
object := activity["object"].(map[string]interface{})
gotmsg(object)
}
go func() {
activityserve.ServeSingleActor(actor,port)
}()
fmt.Println("starting cli")
cli()
}
<file_sep>[general]
baseURL = https://bctpub.duckdns.org
port = 7938
storage = storage ; can be relative or absolute path
userAgent = "polson"
userDesc = "I'm the second bot in this simple instance"
<file_sep># fedigocli
This is just a small test to make use of writeas/activityserve
This code creates an ActivityPub agent and serves it on a given port
It supports follow and post for federation
TODO:
- after following, need to quit and relaunch so that following becomes visible (refresh issue ?)
When I look up this account from Mastodon, it first makes a webfinger request to find the account URL,
which works; at the same time it gets the outbox URL, so it queries the outbox and gets the number
of posts (which it displays correctly) plus another URL pointing to the first post ("first").
At that point it fails: Mastodon should fetch the first post through that "first" URL, but
apparently it does not issue the request. Why?
|
7decf8bd64ce17267d58a93cae943d32324253a6
|
[
"Markdown",
"Go",
"INI"
] | 3
|
Go
|
cerisara/fedigocli
|
09d2891f09f0dc5ca86bb5b2775e158df94a1c39
|
ea014b8ed6a194ce267c327147c3dd264b028cf9
|
refs/heads/master
|
<file_sep>import requests
from bs4 import BeautifulSoup
import time
from random import random
from tqdm import tqdm
import pdb
from makedir import make_dir
def all_crowling(category: str=""):
"""
カテゴリ内のすべての
検索ページのHTMLを引き抜いてくる
category: URLで使われているカテゴリ名
"""
with open("prefectures.txt", "r") as f:
prefectures = f.read().split("\n")[:-1]
# 都道府県ごとにスクレイピング
for pref in tqdm(prefectures):
print(pref)
url = "https://itp.ne.jp/" + pref + "/genre_dir/" + category + "/pg/"
pref_dir = dir_name + pref + "/"
make_dir(pref_dir)
crowling_to_prefecture(url, pref_dir)
def crowling_to_prefecture(baseurl: str="", pref_dir: str=""):
"""
都道府県ごとにカテゴリ内の検索ページの
HTMLを取得し,テキストファイルに保存
baseurl : TownPageのURL
pref_dir : テキストファイルを保存するディレクトリ
"""
# 都道府県のindexは0~9 * 10 + 1~5 で決まっている
for i in range(0, 10):
for j in range(1, 6):
k = 1
idx = i * 10 + j
while k < 101:
nexttime = time.time() + 10 + random()
# htmlをクローリング
url = baseurl + str(k) + "/?nad=1&sr=1&st=4&evdc=1&idx=" + str(idx)
req = requests.get(url, headers={"User-Agent": agent})
content = req.content
soup = BeautifulSoup(content, "html.parser")
# 10s+a だけ待つ
time.sleep(nexttime - time.time())
# 何も情報がなければwhileを抜ける
if not soup.find(class_="noResult") is None:
break
# テキストに保持
with open(pref_dir + str(idx) + "_" + str(k) + ".html", "w") as f:
f.write(str(soup))
k += 1
def main():
global dir_name
global agent
# カテゴリを指定
category = "sweets"
dir_name = category + "_search_pages/"
# ユーザーエージェントを指定
agent = "Mozilla/5.0 (Linux; Android 4.0.3; SC-02C Build/IML74K) AppleWebKit/537.31 (KHTML, like Gecko) Chrome/26.0.1410.58 Mobile Safari/537.31"
# 保持用のディレクトリを作成
make_dir(dir_name)
# クローリング開始
all_crowling(category)
if __name__ == "__main__":
main()
<file_sep>beautifulsoup4==4.6.0
certifi==2018.1.18
chardet==3.0.4
idna==2.6
mojimoji==0.0.7
numpy==1.14.0
pandas==0.22.0
python-dateutil==2.6.1
pytz==2018.3
requests==2.18.4
six==1.11.0
tqdm==4.19.5
urllib3==1.22
<file_sep>import pandas as pd
import glob
import re
import mojimoji as moji
"""
店名のリストを作成する
"""
def makeshoplist(directory: str=""):
"""
directory : shoplistを作りたいファイルが格納されているディレクトリ
directory内のファイルをすべて参照し,ショップの名前リストを作る
"""
# 都道府県の店名リストをすべて格納
shoplist = [name for filename in glob.glob(directory + "/*") for name in get_shoplist_pref(filename)]
# 単一出現かつソートを行う
shoplist = sorted(list(set(shoplist)))
# 〇〇店のような重複を削除する
distance = -1
for i in range(1, len(shoplist)):
if shoplist[distance] in shoplist[i] and\
((len(shoplist[i]) - len(shoplist[distance])) > 2 or
shoplist[i][-1] == "店"):
shoplist[i] = " "
elif shoplist[i - 1] in shoplist[i] and\
((len(shoplist[i]) - len(shoplist[i - 1])) > 2 or
shoplist[i][-1] == "店"):
shoplist[i] = " "
distance = i - 1
else:
distance = -1
# 保存
savename = directory[:directory.find("_")] + "_shops.txt"
with open(savename, "w") as f:
f.write("\n".join([shop for shop in shoplist if shop != " "]))
def get_shoplist_pref(filedir: str=""):
"""
filedir : 都道府県ごとの店情報が格納されたテキストファイルの場所
1つの都道府県に関して,店の名前のリストを作成し,返す
"""
shoplist = []
contents = pd.read_csv(filedir)
ltd = re.compile(r"([(株式)(有限)(合資)]+会社){1}")
bracket = re.compile(r"\(.+\)")
for shopname in contents["name"]:
# カタカナ以外の文字を半角へ
shopname = moji.zen_to_han(shopname, kana=False)
# 括弧に囲まれた文字列を削除
shopname = bracket.sub("", shopname)
# 〇〇会社という文字列は除く
shopname = ltd.sub("", shopname)
# /で区切られていたら区切られる前の文字列と
# 区切り文字を消した文字列を格納する
if shopname.find("/") > -1:
shoplist.append(shopname[:shopname.find("/")])
shopname = shopname.replace("/", "")
shoplist.append(shopname)
return shoplist
def main():
makeshoplist("sweets_shop_info")
if __name__ == "__main__":
main()
<file_sep># TownPageCrawling
## What it does
Extract listings information from TownPage (the Japanese yellow pages).
## Usage
1. Change the category specified on line 75 of crawling.py
```
# the default is the "sweets" category
category = "sweets"
```
2. Run the following
```
python crawling.py # this takes a very long time
python saveshopinfo.py
python makeshoplist.py
```
## Notes
* Read this before using -> [https://itp.ne.jp/guide/web/notice/index.html](https://itp.ne.jp/guide/web/notice/index.html)
* The UserAgent in crawling.py can be changed (a minimal sketch of overriding it follows below)
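A minimal sketch of such an override, assuming the module-level globals (`agent`, `dir_name`) and functions (`make_dir`, `all_crowling`) defined in crawling.py above; the `bakery` category slug, the directory name and the User-Agent string are purely illustrative:

```python
# Hypothetical override: crawl a different category with a custom User-Agent
# by setting crawling.py's module-level globals instead of editing main().
import crawling

crawling.agent = "Mozilla/5.0 (compatible; ExampleCrawler/1.0)"  # illustrative UA
crawling.dir_name = "bakery_search_pages/"                       # illustrative output dir
crawling.make_dir(crawling.dir_name)
crawling.all_crowling("bakery")                                  # illustrative category slug
```

Running `python crawling.py` directly still uses the defaults hard-coded in `main()`.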
<file_sep>import os
def make_dir(path: str=""):
"""
ディレクトリを作成
例外処理によって既にディレクトリがあっても処理が終わらない
path : 作成したいディレクトリのパス
"""
try:
os.mkdir(path)
except:
print(path + " already exists")
<file_sep># -*- coding: utf-8 -*-
import re
import glob
"""
glob.globの結果をソートして返すためのモジュール
数字は数字の大きさ順にソートされる
"""
def sort_glob(path):
"""
path : フォルダ名
"""
return sorted(glob.glob(path), key=__numerical_sort)
def __numerical_sort(value):
"""
数字の順番を考慮したソートでファイル名を取得
"""
numbers = re.compile(r"(\d+)")
parts = numbers.split(value)
parts[1::2] = map(int, parts[1::2])
return parts
<file_sep>from bs4 import BeautifulSoup
from improveglob import sort_glob
from makedir import make_dir
import pandas as pd
def extract_infomation(text: str=""):
"""
HTMLから書かれている情報を抽出する
店名,郵便番号,住所,電話番号, URL, メールアドレス
text : HTMLの内容
"""
soup = BeautifulSoup(text, "html.parser")
shops = ([s.text.replace("詳細", "").replace("\n", "") for s in soup.find_all(class_="row titleRow")])
urls = []
emails = []
postalcodes = []
addresses = []
telnumbers = []
idx = 0
# 郵便番号,住所,電話番号, URL, e-mailを保存
for s in soup.find_all("dl")[10:]:
details = s.find_all("dt")
info = s.find_all("dd")
urls.append("")
emails.append("")
postalcodes.append("")
addresses.append("")
telnumbers.append("")
for i in range(len(details)):
# 郵便番号と住所を保存
if str(details[i]) == "<dt>【住所】</dt>":
# 郵便番号と住所を分割し保存
codes = info[i].text.split(" ")
postalcodes[idx] = codes[0]
addresses[idx] = codes[1]
# 電話番号を保存
elif str(details[i]) == "<dt>【電話番号】</dt>":
telnumbers[idx] = info[i].text
# お店のURLを保存
elif str(details[i]) == "<dt>【URL】</dt>":
urls[idx] = info[i].text
# メールアドレスを保存
elif str(details[i]) == "<dt>【e-mail】</dt>":
emails[idx] = info[i].text
idx += 1
return shops, postalcodes, addresses, telnumbers, urls, emails
def pref_save_shopinfo(path: str):
"""
指定した都道府県のお店・企業の情報を保存する
path : 都道府県のディレクトリ
"""
data = []
shops = []
urls = []
emails = []
postalcodes = []
addresses = []
telnumbers = []
# HTMLに書かれているお店・企業の情報を抽出する
for filedir in sort_glob(path + "/*"):
# print(filedir)
with open(filedir, "r") as f:
text = f.read()
info = extract_infomation(text)
shops.extend(info[0])
postalcodes.extend(info[1])
addresses.extend(info[2])
telnumbers.extend(info[3])
urls.extend(info[4])
emails.extend(info[5])
# pandasの形式に変換してcsvに保存
for (s, p, a, t, u, e) in zip(shops, postalcodes, addresses, telnumbers, urls, emails):
data.append([s, p, a, t, u, e])
df = pd.DataFrame(data,
columns=["name", "postalcode", "address", "telnumber", "url", "email"])
df.to_csv(directory + "/" + path[path.index("/"):] + ".csv", index=False)
def all_save_shopinfo(paths: str):
"""
カテゴリ内の情報を保存する
paths : カテゴリのディレクトリ
"""
# 情報を保持するディレクトリを作成
make_dir(directory)
# 都道府県ごとに情報を保存
for prefdir in sort_glob(paths + "/*"):
pref_save_shopinfo(prefdir)
def main():
global directory
category = "sweets"
paths = category + "_search_pages"
directory = category + "_shop_info"
all_save_shopinfo(paths)
if __name__ == "__main__":
main()
|
f02c5ab7b868bc60378edacddfe44bc27d8731b8
|
[
"Markdown",
"Python",
"Text"
] | 7
|
Python
|
s14t284/TownPageCrawling
|
f922de0d4fb180ac445d9e79d3877ae39a4cd352
|
a02ebdf6a055b523e10b75b7586eecc0eb1fa542
|
refs/heads/master
|
<repo_name>jae-jae/QueryList-Rule-Baidu<file_sep>/Baidu.php
<?php
/**
* Created by PhpStorm.
* User: Jaeger <<EMAIL>>
* Date: 2017/10/1
* Baidu searcher
*/
namespace QL\Ext;
use QL\Contracts\PluginContract;
use QL\QueryList;
class Baidu implements PluginContract
{
protected $ql;
protected $keyword;
protected $pageNumber = 10;
protected $httpOpt = [
'headers' => [
'User-Agent' => 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36',
'Accept-Encoding' => 'gzip, deflate, br',
]
];
const API = 'https://www.baidu.com/s';
const RULES = [
'title' => ['h3','text'],
'link' => ['h3>a','href']
];
const RANGE = '.result';
public function __construct(QueryList $ql, $pageNumber)
{
$this->ql = $ql->rules(self::RULES)->range(self::RANGE);
$this->pageNumber = $pageNumber;
}
public static function install(QueryList $queryList, ...$opt)
{
$name = $opt[0] ?? 'baidu';
$queryList->bind($name,function ($pageNumber = 10){
return new Baidu($this,$pageNumber);
});
}
public function setHttpOpt(array $httpOpt = [])
{
$this->httpOpt = $httpOpt;
return $this;
}
public function search($keyword)
{
$this->keyword = $keyword;
return $this;
}
public function page($page = 1,$realURL = false)
{
return $this->query($page)->query()->getData(function ($item) use($realURL){
$realURL && $item['link'] = $this->getRealURL($item['link']);
return $item;
});
}
public function getCount()
{
$count = 0;
$text = $this->query(1)->find('.nums')->text();
if(preg_match('/[\d,]+/',$text,$arr))
{
$count = str_replace(',','',$arr[0]);
}
return (int)$count;
}
public function getCountPage()
{
$count = $this->getCount();
$countPage = ceil($count / $this->pageNumber);
return $countPage;
}
protected function query($page = 1)
{
$this->ql->get(self::API,[
'wd' => $this->keyword,
'rn' => $this->pageNumber,
'pn' => $this->pageNumber * ($page-1)
],$this->httpOpt);
return $this->ql;
}
/**
* Resolve the real URL behind Baidu's redirect link
* @param $url
* @return mixed
*/
protected function getRealURL($url)
{
if(empty($url)) return $url;
$header = get_headers($url,1);
if (strpos($header[0],'301') || strpos($header[0],'302'))
{
if(is_array($header['Location']))
{
//return $header['Location'][count($header['Location'])-1];
return $header['Location'][0];
}
else
{
return $header['Location'];
}
}
else
{
return $url;
}
}
}<file_sep>/README.md
# QueryList-Rule-Baidu
QueryList Plugin: Baidu searcher.
QueryList插件:百度搜索引擎
> QueryList:[https://github.com/jae-jae/QueryList](https://github.com/jae-jae/QueryList)
## Installation for QueryList4
```
composer require jaeger/querylist-rule-baidu
```
## API
- Baidu **baidu($pageNumber = 10)**:get Baidu Searcher.
class **Baidu**:
- Baidu **search($keyword)**:set search keyword.
- Baidu **setHttpOpt(array $httpOpt = [])**:Set the http option,see: [GuzzleHttp options](http://docs.guzzlephp.org/en/stable/request-options.html)
- int **getCount()**:Get the total number of search results.
- int **getCountPage()**:Get the total number of pages.
- Collection **page($page = 1,$realURL = false)**:Get search results
## Usage
- Installation Plugin
```php
use QL\QueryList;
use QL\Ext\Baidu;
$ql = QueryList::getInstance();
$ql->use(Baidu::class);
//or Custom function name
$ql->use(Baidu::class,'baidu');
```
- Example-1
```php
$baidu = $ql->baidu(10)
$searcher = $baidu->search('QueryList');
$count = $searcher->getCount();
$data = $searcher->page(1);
$data = $searcher->page(2);
$searcher = $baidu->search('php');
$countPage = $searcher->getCountPage();
for ($page = 1; $page <= $countPage; $page++)
{
$data = $searcher->page($page);
}
```
- Example-2
```php
$searcher = $ql->baidu()->search('QueryList');
$data = $searcher->setHttpOpt([
// Set the http proxy
'proxy' => 'http://192.168.3.11:8118',
// Set the timeout time in seconds
'timeout' => 30,
])->page(1);
```
- Example-3
```php
$baidu = $ql->baidu(3)
$searcher = $baidu->search('QueryList');
$data = $searcher->page(1);
print_r($data->all());
// Get real url
$data = $searcher->page(1,true);
print_r($data->all());
```
Out:
```
Array
(
[0] => Array
(
[title] => QueryList|基于phpQuery的无比强大的PHP采集工具
[link] => http://www.baidu.com/link?url=qRAXrUIcrxuLQ4Pn_rL25HvpDwugxgLkmwB74wTBuLflWaDTNY1d27gdxMwddbfn
)
[1] => Array
(
[title] => 介绍- QueryList指导文档
[link] => http://www.baidu.com/link?url=NgoB517LCcb7tt37_x74uF0N-8pfhSemhA5qoB0SHf8HY9P_MwKbN80nf9zvd3V5
)
[2] => Array
(
[title] => PHP 用QueryList抓取网页内容 - wb145230 - 博客园
[link] => http://www.baidu.com/link?url=kDkpY9eZ6CsiT1SWomRWEYPauHseHn2FseSdPnsOoulWCkD3DK6QMT75urFGHLyeG_M9yTD0BCm-s5jGQRi_S_
)
)
Array
(
[0] => Array
(
[title] => QueryList|基于phpQuery的无比强大的PHP采集工具
[link] => http://www.querylist.cc/
)
[1] => Array
(
[title] => 介绍- QueryList指导文档
[link] => http://doc.querylist.cc/
)
[2] => Array
(
[title] => PHP 用QueryList抓取网页内容 - wb145230 - 博客园
[link] => http://www.cnblogs.com/wb145230/p/4716403.html
)
)
```
|
9e629cd93040579e143059c8d326a5796f9ea10a
|
[
"Markdown",
"PHP"
] | 2
|
PHP
|
jae-jae/QueryList-Rule-Baidu
|
ab0af74f1289caa9dac09e9340023ae9d51a3a0c
|
e50ea5b382c82e835edd9c396be656686d18bb6a
|
refs/heads/main
|
<repo_name>abhi-gm/Parallel-Processing-with--OMP-and-MPI<file_sep>/part3.c
#include <mpi.h>
#include <stdio.h>
#include <stdlib.h>
/* Define length of dot product vectors */
#define VECLEN 100
int main (int argc, char* argv[])
{
int i,myid, numprocs, len=VECLEN;
double *a, *b;
double mysum, allsum;
/* MPI Initialization */
MPI_Init (&argc, &argv);
MPI_Comm_size (MPI_COMM_WORLD, &numprocs);
MPI_Comm_rank (MPI_COMM_WORLD, &myid);
/*
Each MPI task performs the dot product, obtains its partial sum, and then calls
MPI_Reduce to obtain the global sum.
*/
if (myid == 0)
printf("Starting MPI for dot-product of a and b on %d processors\n",numprocs);
/* Assign storage for dot product vectors */
a = (double*) malloc (len*sizeof(double));
b = (double*) malloc (len*sizeof(double));
/* Initialize dot product vectors */
for (i=0; i<len; i++) {
a[i]=1.0;
b[i]=a[i];
}
/* Perform the dot product */
mysum = 0.0;
for (i=0; i<len; i++)
{
mysum += a[i] * b[i];
}
printf("Task %d partial sum = %f\n",myid, mysum);
/* After the dot product, perform a summation of results on each node */
MPI_Reduce (&mysum, &allsum, 1, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD);
if (myid == 0)
printf ("Done. MPI version: global sum = %f \n", allsum);
free (a);
free (b);
MPI_Finalize();
} <file_sep>/part2.c
// HW1 - Part 3
// Please implement OpenMP directives for this code.
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>
#define CHUNKSIZE 10
#define N 100
int main (int argc, char *argv[])
{
int nthreads, tid, i, chunk, omp_id;
float tmp;
float a[N], b[N], c[N];
/* Some initializations */
for (i=0; i < N; i++)
a[i] = b[i] = i * 1.0;
chunk = CHUNKSIZE;
// Parallelize the following whole part with OpenMP directives,
// and specify shared variables and private variable
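// Note: tmp holds a floating-point intermediate and is written by every thread
// inside the loop below, so it is declared float and listed as private to avoid
// a data race.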
#pragma omp parallel shared(a,b,c,nthreads,chunk) private(omp_id,i,tmp)
{
omp_id = omp_get_thread_num();
if (omp_id == 0)
{
nthreads = omp_get_num_threads();
}
// parallelize the for loop using dynamic schedule
#pragma omp for schedule(dynamic,chunk)
for (i=0; i<N; i++)
{
tmp = 2.0* a[i];
a[i] = tmp;
c[i] = a[i] + b[i];
printf("Thread %d: c[%d]= %f\n",omp_id,i,c[i]);
}
}
}
<file_sep>/README.md
# **Parallel-Processing-with--OMP-and-MPI**
# **Several reasons to use parallel computing**
1. Save time/money
2. Solve Larger/More Complex problems like Web search engines/databases processing millions of transactions every second
3. Provide Concurrency, for example Collaborative Networks provide a global venue where people from around the world can meet and conduct work virtually
4. Take Advantage of Non-local Resources, that is, use compute resources on a wide area network, or even the internet, when local compute resources are scarce or insufficient
5. Make better use of Underlying Parallel Hardware
# **Parallel Programming Models**
There are several parallel programming models in common use:
1. Shared Memory (without threads)
2. Threads
3. Distributed Memory / Message Passing
4. Data Parallel
5. Hybrid
6. SPMD and MPMD
# **Amdahl's Law**
It states that potential program speedup is defined by the fraction of code (P) that can be parallelized:
# Speedup = **<img src="https://render.githubusercontent.com/render/math?math=\frac{1}{(1-p)}">**
Introducing the number of processors (N) performing the parallel fraction of work, the relationship can be modeled by:
Speedup= **<img src="https://render.githubusercontent.com/render/math?math=\frac{1}{((P/N)+S)}">**
### where
P = parallel fraction,
N = number of processors and
S = serial fraction
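As a quick worked example (the numbers are illustrative, not taken from the runs below): with 90% of the work parallelizable and 8 processors, the second formula caps the speedup well below 8. A small Python check:

```python
# Illustrative Amdahl's law calculation (not tied to part1-3 above)
P = 0.9                      # parallel fraction
S = 1 - P                    # serial fraction
N = 8                        # number of processors
speedup = 1 / (P / N + S)
print(f"speedup on {N} processors: {speedup:.2f}")  # ~4.71, far short of 8
```

Even with N approaching infinity, the speedup is bounded by 1/S = 10 for this fraction.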
This code was tested on
Discovery - https://rc.northeastern.edu/
The outputs are shown below.
## **part1.c** - using OpenMP routines

## **part2.c** - using OpenMP routines

## **part3.c** - MPI version of the serial dot-product program for two vectors
1. Each MPI task performs the dot product of a and b based on the serial code to obtain
its partial sum on that processor.
2. After the dot product on each processor, the partial results are summed across
processors with MPI_Reduce to obtain the global sum.

References
1. https://computing.llnl.gov/tutorials/parallel_comp/
2. https://computing.llnl.gov/tutorials/openMP/
|
4d33a7d0ea09f4cc7d59fe6f2871b51ae6ecaacb
|
[
"Markdown",
"C"
] | 3
|
C
|
abhi-gm/Parallel-Processing-with--OMP-and-MPI
|
afc7eca3ba905056db9c889e69040ef867e9f1df
|
9129137ecd473c0a61347a7232249ca7e9675dda
|
refs/heads/master
|
<repo_name>LeeSpin/Test<file_sep>/rasp_serial/src/rasp_serial.c
#include <stdio.h>
#include <unistd.h>
int main(void)
{
int i;
printf("fuck!!!!\n");
return 0;
}
|
d7e4c4d5f363057fd9a4d66ebaf2cc6f12d06932
|
[
"C"
] | 1
|
C
|
LeeSpin/Test
|
3ac3c39a9f8c0ec772a8f1c120bd760c08d235c6
|
ea90f74bc3d17fcefd005e67f8bf5fa23e02f800
|
refs/heads/master
|
<repo_name>overflowsith/dotfiles<file_sep>/.bsh/oldbashrc.bash
# make less more friendly for non-text input files, see lesspipe(1)
[ -x /usr/bin/lesspipe ] && eval "$(SHELL=/bin/sh lesspipe)"
# set a fancy prompt (non-color, unless we know we "want" color)
case "$TERM" in
xterm-color|*-256color) color_prompt=yes;;
esac
# enable color support of ls and also add handy aliases
if [ -x /usr/bin/dircolors ]; then
test -r ~/.dircolors && eval "$(dircolors -b ~/.dircolors)" || eval "$(dircolors -b)"
alias ls='ls --color=auto'
fi
if [ -x /usr/bin/cowsay -a -x /usr/bin/fortune ]; then
fortune -a | cowsay
fi
<file_sep>/.bsh/aliases.bash
alias vi='vim'
alias vim='vim'
# config
alias i3config='vim ~/.config/i3/config'
alias vimrc='vim ~/.vimrc'
alias bashrc='vim ~/.bashrc'
alias reload='source ~/.bashrc'
alias dev='cd ~/dev'
# filesystem stuff
alias ll='ls -lh --full-time --time-style=long-iso'
alias la='ls -la'
alias ..='cd ..'
alias ...='cd ../..'
alias ....='cd ../../..'
alias cs='du -hsx * | sort -rh | head -n 10'
alias mkdir='mkdir -pv'
# git stuff
alias g='git'
alias ga='git add'
alias gb='git branch'
alias gs='git status'
alias gd='git diff HEAD '
alias gc='git checkout '
alias gr='git checkout -- '
alias gp='git pull --rebase'
alias gob='git checkout -b '
# utility stuff
alias k='kanban'
alias fm='thunar .'
alias sfm='sudo thunar .'
#alias i='python -m SimpleHTTPServer'
alias pi='php -S localhost:8765'
alias py3='python3'
alias sudo='sudo '
alias hosts='sudo vim /etc/hosts'
alias fuck='sudo $(history -p \!\!)'
alias a='atom .'
alias s='subl .'
alias ports='netstat -tulanp'
alias wget='wget -c'
alias cal='cal -m'
# alias cat='bat'
# PHP stuff
alias artisan='php artisan'
alias cons='bin/console'
alias cda='composer dump-autoload'
alias pu='./vendor/bin/phpunit'
# grep
alias grep='grep --color=auto'
alias fgrep='fgrep --color=auto'
alias egrep='egrep --color=auto'
# docker
alias d-up='docker-compose up -d'
alias d-stop='docker-compose stop'
alias d-down='docker-compose down'
alias d-bash='docker-compose exec web bash'
alias docker-stop-all='docker stop $(docker ps -a -q)'
alias docker-rm-all='docker rm $(docker ps -a -q)'
alias dockdel='docker rm $(docker ps -a -q)'
alias dockrem='docker rmi $(docker images -q)'
# heroku
alias deployheroku='git push heroku master'
<file_sep>/.config/termite/config
[options]
font = Fira Mono 10
mouse_autohide = true
clickable_url = true
allow_bold = true
cursor_blink = off
cursor_shape = block
scrollback_lines = 10000
[colors]
# special
foreground = #f8f8f2
foreground_bold = #f8f8f2
cursor = #f8f8f2
background = #282a36
highlight = #44475a
# black
color0 = #44475a
color8 = #44475a
# red
color1 = #ff5555
color9 = #ff5555
# green
color2 = #50fa7b
color10 = #50fa7b
# yellow
color3 = #f1fa8c
color11 = #f1fa8c
# blue
color4 = #6272a4
color12 = #6272a4
# magenta
color5 = #ff79c6
color13 = #bd93f9
# cyan
color6 = #8be9fd
color14 = #8be9fd
# white
color7 = #f8f8f2
color15 = #f8f8f2
# vim: ft=dosini cms=#%s
<file_sep>/.local/bin/i3exit
#!/bin/sh
case "$1" in
lock)
i3lock -c 282C34 -f -n
;;
logout)
i3-msg exit
;;
suspend)
i3lock -c 282C34 -n && systemctl suspend
;;
hibernate)
i3lock -c 282C34 -n && systemctl hibernate
;;
reboot)
systemctl reboot
;;
shutdown)
systemctl poweroff
;;
*)
echo "Usage $0 {lock|logout|suspend|hibernate|reboot|shutdown}"
exit 2
esac
exit 0
# i3lock-color -c 282c34 --insidevercolor=0000a0bf --insidewrongcolor=ff8000bf --insidecolor=282c34ff --ringvercolor=0020ffff --ringwrongcolor=4040ffff --ringcolor=abb2bfff --textcolor=abb2bfff --separatorcolor=aaaaaaff --keyhlcolor=30ccccff --bshlcolor=ff8000ff -r
<file_sep>/.config/firefox/readme.md
copy these files into `~/.mozilla/firefox/${profile}/chrome`
<file_sep>/.config/i3/blocks/datetime
#!/bin/sh
case $BLOCK_BUTTON in
1) gsimplecal ;; # left click, open config
esac
print_date() {
echo "`date +'%a %d %b'`"
}
print_time() {
echo "`date +'%H:%M'`"
}
whatprint="${1:-date}"
if [ "${whatprint}" = "time" ]; then
print_time
else
print_date
fi
<file_sep>/.bsh/default.bash
function set_prompt()
{
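# Build a prompt that starts with a blank line, then shows host, working
# directory and the current git branch (via __git_ps1), then a fresh line for input.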
local COLOR_DEFAULT='\[\e[0m\]'
local COLOR_BLACK='\[\e[0;30m\]'
local COLOR_BLUE='\[\e[0;34m\]'
local COLOR_GREEN='\[\e[0;32m\]'
local COLOR_CYAN='\[\e[0;36m\]'
local COLOR_RED='\[\e[0;31m\]'
local COLOR_PURPLE='\[\e[0;35m\]'
local COLOR_BROWN='\[\e[0;33m\]'
local COLOR_GRAY='\[\e[0;37m\]'
local COLOR_DARK_GRAY='\[\e[1;30m\]'
local COLOR_L_BLUE='\[\e[1;34m\]'
local COLOR_L_GREEN='\[\e[1;32m\]'
local COLOR_L_CYAN='\[\e[1;36m\]'
local COLOR_L_RED='\[\e[1;31m\]'
local COLOR_L_PURPLE='\[\e[1;35m\]'
local COLOR_YELLOW='\[\e[1;33m\]'
local COLOR_WHITE='\[\e[1;37m\]'
local PS1_SET_TITLE='\[\e]0;\w\a\]'
local PS1_SET_TIME="${COLOR_DEFAULT}\d \t"
local PS1_SET_RET_CODE="${COLOR_L_RED}(\$?)${COLOR_DEFAULT}"
local PS1_SET_USER="${COLOR_L_GREEN}\u${COLOR_DEFAULT}"
local PS1_SET_HOST="${COLOR_DARK_GRAY}\h${COLOR_DEFAULT}"
local PS1_SET_PWD="${COLOR_L_BLUE}\w${COLOR_DEFAULT}"
local PS1_SET_SYMBOL="${COLOR_L_GREEN}\$${COLOR_DEFAULT}"
local PS1_LN_1=""
local PS1_LN_2="${PS1_SET_HOST} ${PS1_SET_PWD} "
local PS1_GIT="${COLOR_YELLOW}\$(__git_ps1 '%s')${COLOR_DEFAULT}"
echo "${PS1_LN_1}\n${PS1_LN_2}${PS1_GIT}\n "
}
PS1=$(set_prompt)
<file_sep>/.bsh/paths.bash
# set PATH so it includes user's private bin if it exists
if [ -d ~/.local/bin ] ; then
PATH=~/.local/bin:$PATH
fi
if [ -d ~/.composer/vendor/bin ] ; then
PATH=~/.composer/vendor/bin:$PATH
fi
if [ -d ~/.local/bin/heroku/bin ] ; then
PATH=$PATH:~/.local/bin/heroku/bin
fi
if [ -d ~/.gem/ruby ] ; then
PATH=$PATH:~/.gem/ruby
fi
if [ -d ~/.gem/ruby/2.4.0/bin ] ; then
PATH=$PATH:~/.gem/ruby/2.4.0/bin
fi
if [ -d ~/.gem/ruby/2.5.0/bin ] ; then
PATH=$PATH:~/.gem/ruby/2.5.0/bin
fi
export GOPATH=~/.local/go
if [ -d $GOPATH/bin ] ; then
PATH=$PATH:$GOPATH/bin
fi
if [ -d ~/Android/Sdk/tools ] ; then
PATH=$PATH:~/Android/Sdk/tools
fi
if [ -d ~/Android/Sdk/platform-tools ] ; then
PATH=$PATH:~/Android/Sdk/platform-tools
fi
NODE_PATH=~/.local/lib/node_modules:$NODE_PATH
<file_sep>/.bashrc
# if not interactively, don't do anything
[[ $- != *i* ]] && return
if [[ -d ~/.bsh ]]; then
for rc in ~/.bsh/*.bash; do
source "$rc"
done
unset rc;
fi
<file_sep>/.bsh/options.bash
option=(
histappend
cmdhist
extglob
dotglob
globstar
checkwinsize
autocd
cdspell
)
shopt -s ${option[@]} 2> /dev/null
# for setting history length see HISTSIZE and HISTFILESIZE in bash(1)
HISTCONTROL=ignoreboth:erasedups
HISTIGNORE='rm *:git*:ls*:ll*:cd*:reboot:poweroff:*screenlayout*:'
HISTSIZE=10000
HISTFILESIZE=$HISTSIZE
HISTTIMEFORMAT="%s "
PROMPT_COMMAND='history -a'
export EDITOR=/usr/bin/vim
export LESSOPEN="| /usr/bin/src-hilite-lesspipe.sh %s"
export LESS=' -R '
export TERMINAL=urxvtc
export RANGER_LOAD_DEFAULT_RC=false
export _JAVA_OPTIONS='-Dawt.useSystemAAFontSettings=on -Dswing.aatext=true'
export JAVA_FONTS=/usr/share/fonts/TTF
<file_sep>/.config/termite/config.bak
[options]
font = Fira Mono 10
mouse_autohide = true
clickable_url = true
allow_bold = true
cursor_blink = off
cursor_shape = block
scrollback_lines = 10000
[colors]
# special
foreground = #c5c8c6
foreground_bold = #c5c8c6
cursor = #c5c8c6
background = #1d1f21
# black
color0 = #1d1f21
color8 = #969896
# red
color1 = #cc6666
color9 = #cc6666
# green
color2 = #b5bd68
color10 = #b5bd68
# yellow
color3 = #f0c674
color11 = #f0c674
# blue
color4 = #81a2be
color12 = #81a2be
# magenta
color5 = #b294bb
color13 = #b294bb
# cyan
color6 = #8abeb7
color14 = #8abeb7
# white
color7 = #c5c8c6
color15 = #ffffff
# vim: ft=dosini cms=#%s
<file_sep>/.bsh/functions.bash
function google()
{
w3m "https://www.google.com/search?q=$1";
}
function wttr()
{
curl -H "Accept-Language: ${LANG%_*}" wttr.in/"${1:-Milan}"?0m
}
<file_sep>/.bsh/completions.bash
if [ -f /usr/share/git/completion/git-completion.bash ]; then
. /usr/share/git/completion/git-completion.bash
fi
if [ -f /usr/share/git/completion/git-prompt.sh ]; then
. /usr/share/git/completion/git-prompt.sh
fi
if [ -f /usr/share/bash-completion/bash_completion ]; then
. /usr/share/bash-completion/bash_completion
fi
if [ -f ~/utilities/composerAutocomplete/composer_completion ] ; then
. ~/utilities/composerAutocomplete/composer_completion
fi
|
8472d00cf442ab3590b869ee83bc84ea5fda92ba
|
[
"Markdown",
"INI",
"Shell"
] | 13
|
Shell
|
overflowsith/dotfiles
|
e595e1837cf804377bab714c980b423e1b0c4839
|
074bd48c6bdb2a22c67bc9672bd65e4c190b4a94
|
refs/heads/master
|
<file_sep>// recursive.cpp : 定义控制台应用程序的入口点。
//
#include "stdafx.h"
#include<iostream>
using namespace std;
char a[20];
int k =0;
void E();
void E1();
void T();
void T1();
void F();
void E(){
cout<<"E->TE'"<<endl;
T();
E1();
}
void E1(){
if(a[k]=='+'){
cout<<"E'->+TE'"<<endl;
k++;
T();
E1();
}
else{
cout<<"E'->ε"<<endl;
}
}
void T(){
cout<<"T->FT'"<<endl;
F();
T1();
}
void T1(){
if(a[k]=='*'){
cout<<"T'->*FT'"<<endl;
k++;
F();
T1();
}
else{
cout<<"T'->ε"<<endl;
}
}
void F(){
if(a[k]=='i'){
cout<<"F->i"<<endl;
k++;
}
else if(a[k]== '('){
k++;
cout<<"F->(E)"<<endl;
E();
if(a[k]==')'){
k++;
}
else{
cout<<"输入的表达式不合法,括号不匹配"<<endl;
exit(0);
}
}
else{
cout<<"输入的表达式不合法,请输入正确的终结符号"<<endl;
exit(0);
}
}
int main(){
cout<<"给定表达式文法为:"<<endl;
cout<<"E->E+T|T"<<endl;
cout<<"T->T*F|F"<<endl;
cout<<"F->(E)|i"<<endl;
cout<<endl;
cout<<"消除左递归之后的文法为:"<<endl;
cout<<"E->TE'"<<endl;
cout<<"E'->+TE'|ε"<<endl;
cout<<"T->FT'"<<endl;
cout<<"T'->*FT'|ε"<<endl;
cout<<"F->(E)"<<endl;
cout<<"F->i"<<endl;
cout<<endl;
cout<<"请输入要分析的句子(以$作为结束符):"<<endl;
gets_s(a);
E();
if((a[k]=='$'))
cout<<"输入的表达式合法"<<endl;
else
cout<<"输入的表达式不合法,没有结束标识符"<<endl;
}
<file_sep>// morphology.cpp : 定义控制台应用程序的入口点。
//
#include "stdafx.h"
#include<iostream>
#include<string>
using namespace std;
/*判断读入的字符是否为字母*/
bool isLetter(char c){
if((c>='a'&&c<='z')||(c>='A'&&c<='Z'))
return true;
else
return false;
}
/*判断读入的字符是否为数字*/
bool isDigit(char c){
if(c>='0'&&c<='9')
return true;
else
return false;
}
/*判断是否为关键字*/
bool isKey(const char *string){
if(!strcmp(string ,"int")||!strcmp(string,"char")||!strcmp(string,"void")
||!strcmp(string ,"if")||!strcmp(string ,"else")||!strcmp(string ,"switch")
||!strcmp(string ,"case")||!strcmp(string ,"default")||!strcmp(string ,"while")
||!strcmp(string ,"while")||!strcmp(string ,"do")||!strcmp(string ,"for")
||!strcmp(string ,"break")||!strcmp(string ,"continue")||!strcmp(string ,"return"))
return true;
else
return false;
}
/*判断是否为单目运算符*/
bool isOperator(char c){
if(c=='+'||c=='-'||c=='*'||c=='/'||c=='%'||c=='!'||c=='&'||c=='|'||c=='='||c=='>'||c=='<')
return true;
else
return false;
}
/*判断是否为分隔符*/
bool isSeparator(char c){
if(c ==',' || c==':' || c==';' || c=='(' || c==')' || c=='{' || c=='}')
return true;
else
return false;
}
void main(){
char a[500],ch;
string str;
int i,j,k;
cout<<"请输入源程序:"<<endl;
gets_s(a);
j=strlen(a);
for(i =0;i<j;i++){
ch =a[i];
if(isLetter(ch)==true || isDigit(ch) ==true){
str.append(1,ch);
}
else{
//当遇到非字母和数字时先输出
int flag=0;
if(isDigit(str[0])==true){
//八进制
if(str[0] =='0' && str.length()>1 && str[1]!='x'){
for(k =1;k<str.length();k++){ //判断是否输入0~7以外的字符
if(str[k]<'0' || str[k]>'7'){
cout<<"error "<<str<<endl;
flag =1;
break;
}
}
if(flag ==0)
cout<<"<2,"<<str<<"> 八进制数字常量"<<endl;
}
//十六进制
else if(str[0]=='0' && str[1]== 'x'){
for(k =2;k<str.length();k++){
if((str[k]>='0' && str[k]<='9') ||(str[k]>='A' && str[k]<='F') || (str[k]>='a' && str[k]<='f')){}
else{
cout<<"error "<<str<<endl;
flag =1;
break;
}
}
if(flag ==0)
cout<<"<2,"<<str<<"> 十六进制数字常量"<<endl;
}
//十进制
else{
for(k =1;k<str.length();k++){
if(str[k]<'0' || str[k]>'9'){
cout<<"error "<<str<<endl;
flag =1;
break;
}
}
if(flag ==0)
cout<<"<2,"<<str<<"> 十进制数字常量"<<endl;
}
}
else if(isLetter(str[0])==true){
if(isKey(str.c_str())==true)
cout<<"<3,"<<str<<"> 关键字"<<endl;
else
cout<<"<1,"<<str<<"> 变量"<<endl;
}
str.clear();
//输出运算符
if(isOperator(ch)){
if((ch=='&' && a[i+1]=='&')||(ch=='+' && a[i+1]=='+')
||(ch=='-' && a[i+1]=='-')||(ch=='|' && a[i+1]=='|')
||(ch=='>' && a[i+1]=='=')||(ch=='<' && a[i+1]=='=')
||(ch=='=' && a[i+1]=='=')||(ch=='!' && a[i+1]=='=')){
cout<<"<4,"<<ch<<a[i+1]<<"> 运算符"<<endl;
i++;
}
else
cout<<"<4,"<<ch<<"> 运算符"<<endl;
}
//输出分隔符
if(isSeparator(ch)){
cout<<"<5,"<<ch<<"> 分隔符"<<endl;
}
//输出字符串常量
if(ch =='"'){
str.append(1,ch);
for(k =i+1;k<j;k++){
if(a[k] !='"'){
str.append(1,a[k]);
}
else if(a[k] =='"'){
str.append(1,a[k]);
cout<<"<2,"<<str<<"> 字符串常量"<<endl;
str.clear();
break;
}
}
i =k;
}
//输出字符常量
if(ch ==39){
str.append(1,ch);
for(k =i+1;k<j;k++){
if(a[k] !=39){
str.append(1,a[k]);
}
else if(a[k] ==39){
str.append(1,a[k]);
cout<<"<2,"<<str<<"> 字符串常量"<<endl;
str.clear();
break;
}
}
i =k;
}
}
}
if(!str.empty()){
int flag=0;
if(isDigit(str[0])==true){
//八进制
if(str[0] =='0' && str.length()>1 && str[1]!='x'){
for(k =1;k<str.length();k++){ //判断是否输入0~7以外的字符
if(str[k]<'0' || str[k]>'7'){
cout<<"error "<<str<<endl;
flag =1;
break;
}
}
if(flag ==0)
cout<<"<2,"<<str<<"> 八进制数字常量"<<endl;
}
//十六进制
else if(str[0]=='0' && str[1]== 'x'){
for(k =2;k<str.length();k++){
if((str[k]>='0' && str[k]<='9') ||(str[k]>='A' && str[k]<='F') || (str[k]>='a' && str[k]<='f')){}
else{
cout<<"error "<<str<<endl;
flag =1;
break;
}
}
if(flag ==0)
cout<<"<2,"<<str<<"> 十六进制数字常量"<<endl;
}
//十进制
else{
for(k =1;k<str.length();k++){
if(str[k]<'0' || str[k]>'9'){
cout<<"error "<<str<<endl;
flag =1;
break;
}
}
if(flag ==0)
cout<<"<2,"<<str<<"> 十进制数字常量"<<endl;
}
}
else if(isLetter(str[0])==true){
if(isKey(str.c_str())==true)
cout<<"<3,"<<str<<"> 关键字"<<endl;
else
cout<<"<1,"<<str<<"> 变量"<<endl;
}
str.clear();
}
}
<file_sep>#include "stdafx.h"
#include<iostream>
#include<string>
#define MAXS 100
using namespace std;
string NODE; //结点集合
string CHANGE; //终结符集合
int N; //NFA边数
struct edge {
string first;
string change;
string last;
};
struct chan {
string ltab;
string jihe[MAXS];
};
void kong(int a) {
int i;
for(i=0; i<a; i++)
cout<<' ';
}
//排序
void paixu(string &a) {
int i,j;
char b;
for(j=0; j<a.length(); j++)
for(i=0; i<a.length(); i++)
if(NODE.find(a[i])>NODE.find(a[i+1])) {
b=a[i];
a[i]=a[i+1];
a[i+1]=b;
}
}
void eclouse(char c,string &he,edge b[]) {
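// ε-closure: follow every '*' (epsilon) edge reachable from state c and append
// the newly reached states to the set `he`, recursing on each newly added state.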
int k;
for(k=0; k<N; k++) {
if(c==b[k].first[0])
if(b[k].change=="*") {
if(he.find(b[k].last)==string::npos)
he.append(b[k].last);
eclouse(b[k].last[0],he,b);
}
}
}
void move(chan &he,int m,edge b[]) {
int i,j,k,l;
k=he.ltab.length();
l=he.jihe[m].length();
for(i=0; i<k; i++)
for(j=0; j<N; j++)
if((CHANGE[m]==b[j].change[0])&&(he.ltab[i]==b[j].first[0]))
if(he.jihe[m].find(b[j].last[0])==string::npos) //把通过输入量可以达到的状态节点归入jihe【】中
he.jihe[m].append(b[j].last);
}
//输出
void show(int len,int h,chan *t) {
int i,j,m;
cout<<" "<<"\t";
for(i=0; i<len; i++)
cout<<CHANGE[i]<<"\t";
cout<<endl<<"-------------------------"<<endl;
for(i=0; i<h; i++) {
cout<<' '<<t[i].ltab;
m=t[i].ltab.length();
for(j=0; j<len; j++) {
kong(8-m); //起到制表功能的函数
m=t[i].jihe[j].length();
cout<<t[i].jihe[j];
}
cout<<endl;
}
}
int main() {
edge *b=new edge[MAXS];
int i,j,k,m,n,h,x,y,len;
bool flag;
string jh[MAXS],endnode,ednode,sta;
cout<<"请输入NFA各边信息(起点 条件[空为*] 终点),以#结束:"<<endl;
b[0].first="0";b[0].change="*";b[0].last="1";
b[1].first="1";b[1].change="*";b[1].last="2";
b[2].first="1";b[2].change="*";b[2].last="4";
b[3].first="2";b[3].change="a";b[3].last="3";
b[4].first="3";b[4].change="*";b[4].last="7";
b[5].first="4";b[5].change="b";b[5].last="5";
b[6].first="5";b[6].change="a";b[6].last="6";
b[7].first="6";b[7].change="*";b[7].last="7";
b[8].first="7";b[8].change="*";b[8].last="1";
b[9].first="7";b[9].change="*";b[9].last="8";
b[10].first="0";b[10].change="*";b[10].last="8";
/*for(i=0; i<MAXS; i++) { //b数组为边
cin>>b[i].first;
if(b[i].first=="#") break;
cin>>b[i].change>>b[i].last;
}*/
N=11; //记录边数
for(i=0; i<N; i++) {
if(NODE.find(b[i].first)==string::npos)
NODE.append(b[i].first);
if(NODE.find(b[i].last)==string::npos)
NODE.append(b[i].last);
if((CHANGE.find(b[i].change)==string::npos)&&(b[i].change!="*"))
CHANGE.append(b[i].change);
}
len=CHANGE.length();
cout<<"结点中属于终态的是:"<<endl;
cin>>endnode;
for(i=0; i<endnode.length(); i++)
if(NODE.find(endnode[i])==string::npos) {
cout<<"所输终态不在集合中,错误!"<<endl;
return 0;
}
//以下用于判断是否为NFA
int flag1;
for(i=0; i<N; i++) {
if(b[i].change=="*"){
flag1 =1;
break;
}
for(j=i+1; j<N; j++) {
if((b[i].first==b[j].first&&b[i].change==b[j].change)||b[i].change=="*") {
flag1=1;
break;
}
flag1=0;
}
if (flag1==1) break;
}
if(flag1==1) cout<<"这是一个NFA!"<<endl;
else cout<<"这是一个DFA!"<<endl;
//以上用于判断是否为NFA
chan *t=new chan[MAXS];
t[0].ltab=b[0].first;
h=1;
eclouse(b[0].first[0],t[0].ltab,b); //求e-clouse
for(i=0; i<h; i++) {
for(k=0; k<len; k++) {
move(t[i],k,b); //求move(I,a),转到下一个字母
for(j=0; j<t[i].jihe[k].length(); j++)
eclouse(t[i].jihe[k][j],t[i].jihe[k],b);
}
for(j=0; j<len; j++) {
paixu(t[i].jihe[j]);
for(k=0; k<h; k++) {
flag=operator==(t[k].ltab,t[i].jihe[j]);
if(flag)
break;
}
if(!flag&&t[i].jihe[j].length())
t[h++].ltab=t[i].jihe[j];
}
}
cout<<endl<<"状态转换矩阵如下:"<<endl;
show(len,h,t); //输出状态转换矩阵
//状态重新命名
string *d=new string[h];
NODE.erase();
cout<<endl<<"重命名:"<<endl;
for(i=0; i<h; i++) {
sta=t[i].ltab;
t[i].ltab.erase();
t[i].ltab='A'+i;
NODE+=t[i].ltab;
cout<<'{'<<sta<<"}="<<t[i].ltab<<endl;
for(j=0; j<endnode.length(); j++) //此循环用于找出终结节点
if(sta.find(endnode[j])!=string::npos)
d[1]=ednode+=t[i].ltab; // 把终结节点的ltab存入ednode中
for(k=0; k<h; k++)
for(m=0; m<len; m++)
if(sta==t[k].jihe[m]) //把jihe【】替换为新名字
t[k].jihe[m]=t[i].ltab;
}
for(i=0; i<NODE.length(); i++)
if(ednode.find(NODE[i])==string::npos)
d[0]+=NODE[i]; //d[0]中存的是非终结节点,[1]中存的是终结节点
endnode=ednode; //重新写endcode,把ednode的信息存入很endnode中
cout<<endl<<"DFA如下:"<<endl;
show(len,h,t); //输出DFA
cout<<"其中终态为:"<<endnode<<endl;
//DFA最小化
m=2;
sta.erase();
flag=0;
for(i=0; i<m; i++) {
for(k=0; k<len; k++) {
y=m;
for(j=0; j<d[i].length(); j++) { //遍历同一个d【】中的所有元素 ,遍历完成后会使sta清空
for(n=0; n<y; n++) {
if(d[n].find(t[NODE.find(d[i][j])].jihe[k])<d[n].length()||t[NODE.find(d[i][j])].jihe[k].length()==0) {
if(t[NODE.find(d[i][j])].jihe[k].length()==0)
x=m;
else
x=n; //这个机制是用于判断属于同一个d【】的各符号,通过相同的输入量是否到达同一个d【】
if(!sta.length()) {
sta+=x+48; //ASCII码表48是0
} else if(sta[0]!=x+48) {
d[m]+=d[i][j];
flag=1;
d[i].erase(j,1);
j--; //由于擦除了被排除出去的元素,所以循环量j要减减
}
break; //跳出n
}//if
}//n
}//j
if(flag) {
m++;
flag=0;
}
sta.erase();
}//k
}//i
cout<<endl<<"集合划分:";
for(i=0; i<m; i++)
cout<<"{"<<d[i]<<"} ";
cout<<endl;
//状态重新命名
chan *md=new chan[m];
NODE.erase();
cout<<endl<<"重命名:"<<endl;
for(i=0; i<m; i++) {
md[i].ltab='A'+i;
NODE+=md[i].ltab;
cout<<"{"<<d[i]<<"}="<<md[i].ltab<<endl;
}
for(i=0; i<m; i++)
for(k=0; k<len; k++)
for(j=0; j<h; j++) {
if(d[i][0]==t[j].ltab[0]) {
for(n=0; n<m; n++) {
if(!t[j].jihe[k].length())
break;
else if(d[n].find(t[j].jihe[k])<d[n].length()) {
md[i].jihe[k]=md[n].ltab;
break;
}
}
break;
}
}
ednode.erase();
cout<<"endnode:"<<endnode<<endl;
for(i=0; i<m; i++)
for(j=0; j<endnode.length(); j++)
if(d[i].find(endnode[j])<d[i].length()&&ednode.find(md[i].ltab))
if(ednode.find(md[i].ltab)>ednode.length())
ednode+=md[i].ltab;
endnode=ednode;
cout<<endl<<"最小化DFA如下:"<<endl;
show(len,m,md);
cout<<"其中终态为:"<<endnode<<endl;
return 0;
}
<file_sep>// LL.cpp : 定义控制台应用程序的入口点。
#include "stdafx.h"
#include<iostream>
#include<string>
#include<stack>
using namespace std;
char showStack(stack<char> stackChars){
while(!stackChars.empty()){
cout<<stackChars.top();
stackChars.pop();
}
return '\0';
}
char showString(char a[20],char *ip){
char *t =ip;
while(*t!='\0'){
cout<<*t;
t++;
}
return '\0';
}
int main(){
int k =0;
int i,j,r,n;
char G[10][20];//原文法
char P[10][20];
char GG[10][20];//消除左递归后的文法
char PP[10][20];
string U; //非终结符号
string UU; //消除左递归之后的非终结符号
string u; //终结符号
string uu; //消除左递归之后的终结符号
memset(G,0,sizeof(G));
memset(P,0,sizeof(P));
memset(GG,0,sizeof(GG));
memset(PP,0,sizeof(PP));
//Init_grammer
cout<<"请输入文法产生式的条数:"<<endl;
cin>>n;
for(i=0;i<n;i++){
cout<<"第"<<i+1<<"条文法为:"<<endl;
cin>>G[i];
}
for(i =0;G[i][0]!='\0';i++){
P[k][0] =G[i][0];
P[k][1] ='-';
P[k][2] ='>';
r =3;
for(j =3;G[i][j]!='\0';j++){
if(G[i][j]=='|'){
k++;j++;
P[k][0] =G[i][0];
P[k][1] ='-';
P[k][2] ='>';
r =3;
P[k][r] =G[i][j];
r++;
}
else{
P[k][r] =G[i][j];
r++;
}
}
k++;
}
for(i =0;P[i][0]!='\0';i++){
if(U.find(P[i][0])==string::npos)
U.append(1,P[i][0]);
}
for(i =0;P[i][0]!='\0';i++){
for(j =3;P[i][j]!='\0';j++){
if(u.find(P[i][j])==string::npos && U.find(P[i][j])==string::npos && P[i][j]!='#')
u.append(1,P[i][j]);
}
}
//判断是否存在左递归,若存在左递归即消除
int flag =0;
for(i =0;P[i][0]!='\0';i++){
if(P[i][0] ==P[i][3]){ //存在左递归情况
flag =1;
break;
}
}
if(flag){
cout<<"该文法存在左递归情况!"<<endl;
string ar,br; //存放左递归相关式子
char C ='A';
int temp,m=0;
int flagg; //flagg =1表示某条规则有左递归情况
for(i =0;U[i]!='\0';i++){
flagg =0;
for(j =0;P[j][0]!='\0';j++){
if(P[j][0] ==U[i]){
if(P[j][3]==U[i]){
flagg =1;
ar =br ='\0';
for(temp =4;P[j][temp]!='\0';temp++)
ar.append(1,P[j][temp]);
/*if(P[j+1][3]==U[i])
ar.append(1,'|');*/
}
else{
for(temp =3;P[j][temp]!='\0';temp++)
br.append(1,P[j][temp]);
if(P[j+1][0]==U[i] && P[j+1][3] ==U[i])
br.append(1,'|');
}
}
}
if(flagg ==0){
for(r =0;G[i][r]!='\0';r++)
GG[m][r] =G[i][r];
m++;
}
else{
GG[m][0]=U[i];GG[m][1]='-';GG[m][2]='>';
for(j=1;br[j]!='\0';j++){
r= 3;
GG[m][r] =br[j];
r++;
}
while(U.find(C)!=string::npos)
C++;
GG[m][r]=C;m++;
GG[m][0]=C;GG[m][1]='-';GG[m][2]='>';
r= 3;
for(j=1;ar[j]!='\0';j++){
GG[m][r] =ar[j];
r++;
}
GG[m][r]=C;GG[m][r+1] ='|';GG[m][r+2] ='#';
m++;C++;
}
}
cout<<"消除左递归之后的文法:"<<endl;
for(i=0;i<m;i++){
for(j=0;GG[i][j]!='\0';j++)
cout<<GG[i][j];
cout<<endl;
}
cout<<endl;
}
else{
cout<<"该文法不存在左递归情况"<<endl;
for(i=0;i<n;i++){
for(j=0;G[i][j]!='\0';j++)
GG[i][j] =G[i][j];
}
}
//整理消除左递归之后的文法
k =0;
for(i =0;GG[i][0]!='\0';i++){
PP[k][0] =GG[i][0];
PP[k][1] ='-';
PP[k][2] ='>';
r =3;
for(j =3;GG[i][j]!='\0';j++){
if(GG[i][j]=='|'){
k++;j++;
PP[k][0] =GG[i][0];
PP[k][1] ='-';
PP[k][2] ='>';
r =3;
PP[k][r] =GG[i][j];
r++;
}
else{
PP[k][r] =GG[i][j];
r++;
}
}
k++;
}
for(i =0;PP[i][0]!='\0';i++){ //记录非终结符号
if(UU.find(PP[i][0])==string::npos)
UU.append(1,PP[i][0]);
}
for(i =0;PP[i][0]!='\0';i++){ //记录非终结符号
for(j =3;PP[i][j]!='\0';j++){
if(uu.find(PP[i][j])==string::npos && UU.find(PP[i][j])==string::npos && PP[i][j]!='#')
uu.append(1,PP[i][j]);
}
}
//求解FIRST集
string *FIRST =new string[10];
int step1 =10,s,count;
char ch,a;
while(step1>0){
step1--;
for(i =k-1;i>=0;i--){
r =UU.find(PP[i][0]);
if(PP[i][3]=='#' && FIRST[r].find('#') ==string::npos) //右部首符号为空
FIRST[r].append(1,'#');
else{
for(j =3;PP[i][j]!='\0';j++){
ch =PP[i][j];
if(uu.find(ch)!=string::npos){ //右部首符号为终结符号
if(FIRST[r].find(ch)==string::npos){
FIRST[r].append(1,ch);
break;
}
else
break;
}
if(UU.find(ch)!=string::npos){ //右部首符号为非终结符号
s =UU.find(PP[i][j]);
for(count =0;FIRST[s][count]!='\0';count++){
a =FIRST[s][count];
if(a !='#' && FIRST[r].find(a) ==string ::npos)
FIRST[r].append(1,a);
}
if(FIRST[s].find('#') !=string::npos && FIRST[r].find('#')==string::npos)
FIRST[r].append(1,'#');
if(FIRST[s].find('#')==string::npos)
break;
}
}
}
}
}
cout<<"FIRST集:"<<endl;
for(i=0;UU[i]!='\0';i++)
cout<<"FIRST("<<UU[i]<<")="<<FIRST[i]<<endl;
cout<<endl;
//求解FOLLOW集
string *FOLLOW =new string[10];
int step2 =10,t;
FOLLOW[0].append(1,'$');
while(step2>0){
step2--;
for(i =0;i<k;i++){
for(j =3;PP[i][j]!='\0';j++){
a =PP[i][j];
if(UU.find(a)!=string::npos){
if(PP[i][j+1] =='\0'){
for(count =0;FOLLOW[UU.find(PP[i][0])][count]!='\0';count++){
ch =FOLLOW[UU.find(PP[i][0])][count];
if(FOLLOW[UU.find(PP[i][j])].find(ch)==string::npos)
FOLLOW[UU.find(PP[i][j])].append(1,ch);
}
}
if(PP[i][j+1]!='\0'){
for(t =j+1;PP[i][t]!='\0';t++){
ch =PP[i][t];
//后跟终结符号
if(uu.find(ch)!=string::npos && FOLLOW[UU.find(PP[i][j])].find(ch)==string::npos){
FOLLOW[UU.find(PP[i][j])].append(1,ch);
break;
}
//后跟非终结符号
if(UU.find(ch)!=string::npos){
for(r =0;FIRST[UU.find(ch)][r]!='\0';r++){
a =FIRST[UU.find(ch)][r]; //将first[j+1]加入follow[j]
if(a !='#' && FOLLOW[UU.find(PP[i][j])].find(a) ==string::npos)
FOLLOW[UU.find(PP[i][j])].append(1,a);
}
//如果#属于first[j+1],将follow[x]加入follow[j]
if(FIRST[UU.find(ch)].find('#') !=string::npos){
for(int c =0;FOLLOW[UU.find(PP[i][0])][c]!='\0';c++){
a =FOLLOW[UU.find(PP[i][0])][c];
if(FOLLOW[UU.find(PP[i][j])].find(a)==string::npos)
FOLLOW[UU.find(PP[i][j])].append(1,a);
}
}
}
}
}
}
}
}
}
cout<<"FOLLOW集:"<<endl;
for(i=0;UU[i]!='\0';i++)
cout<<"FOLLOW("<<UU[i]<<")="<<FOLLOW[i]<<endl;
cout<<endl;
//求解SELECT集
string *SELECT =new string[10];
for(i =0;i<k;i++){
for(j =3;PP[i][j]!='\0';j++){
//右部首为终结符号
if(uu.find(PP[i][j])!=string::npos && SELECT[i].find(PP[i][j])==string::npos){
SELECT[i].append(1,PP[i][j]);
break;
}
//右部首为非终结符号且其FIRST集不包括‘#’
if(UU.find(PP[i][j])!=string::npos && FIRST[UU.find(PP[i][j])].find('#')==string::npos){
for(count =0;FIRST[UU.find(PP[i][j])][count]!='\0';count++){
a =FIRST[UU.find(PP[i][j])][count];
if(SELECT[i].find(a)==string::npos)
SELECT[i].append(1,a);
}
break;
}
//右部首为非终结符号且其FIRST集包括‘#’
if(UU.find(PP[i][j])!=string::npos && FIRST[UU.find(PP[i][j])].find('#')!=string::npos){
for(count =0;FIRST[UU.find(PP[i][j])][count]!='\0';count++){
a =FIRST[UU.find(PP[i][j])][count];
if(SELECT[i].find(a)==string::npos && a!='#')
SELECT[i].append(1,a);
}
}
if(PP[i][j]=='#'||PP[i][j]=='\0'){
for(count =0;FOLLOW[UU.find(PP[i][0])][count]!='\0';count++){
a= FOLLOW[UU.find(PP[i][0])][count];
if(SELECT[i].find(a)==string::npos){
SELECT[i].append(1,a);
}
}
break;
}
}
}
cout<<"SELECT集:"<<endl;
for(i =0;i<k;i++){
cout<<"SELECT("<<PP[i]<<")="<<SELECT[i]<<endl;
}
cout<<endl;
//构造预测分析表
string forecastTable[10][10];
cout<<"预测分析表:"<<endl;
for(j =1;j<(uu.length()+1);j++)
forecastTable[0][j] =uu[j-1];
forecastTable[0][j]='$';
for(i =1;i<(UU.length()+1);i++)
forecastTable[i][0] =UU[i-1];
for(i =0;i<k;i++){
for(n =0;SELECT[i][n]!='\0';n++){
if(SELECT[i][n]=='$')
forecastTable[UU.find(PP[i][0])+1][uu.length()+1]=PP[i];
else
forecastTable[UU.find(PP[i][0])+1][uu.find(SELECT[i][n])+1]=PP[i];
}
}
for(i=0;i<(UU.length()+1);i++){
cout<<" "<<endl;
for(j=0;j<(uu.length()+2);j++)
cout<<forecastTable[i][j]<<" ";
cout<<endl;
}
//根据预测分析表判断表达式是否合法
uu.append(1,'$');
cout<<"请输入需要判断的表达式(以$作为结束符):";
char exp_statement[20];
cin>>exp_statement;
cout<<"分析栈 余留字符串 所用产生式 "<<endl;
char *ip =exp_statement;
stack<char> stackChars;
stackChars.push('$');
stackChars.push(UU[0]);
char X =stackChars.top();
while(X !='$'){
if(X ==*ip){
cout<<showStack(stackChars)<<" ";
cout<<showString(exp_statement,ip)<<" 匹配";
cout<<X<<"="<<*ip<<endl;
stackChars.pop();
ip++;
}
else if(X == '#'){
stackChars.pop();
}
else if(UU.find(*ip)==string::npos && uu.find(*ip)==string::npos){
cout<<"error!"<<endl;
break;
}
else if(uu.find(X)!=string::npos && X!='#'){
cout<<"error!"<<endl;
break;
}
else if(forecastTable[UU.find(X)+1][uu.find(*ip)+1].empty()){
cout<<"error!"<<endl;
break;
}
else if(!forecastTable[UU.find(X)+1][uu.find(*ip)+1].empty()){
cout<<showStack(stackChars)<<" ";
cout<<showString(exp_statement,ip)<<" 应用";
cout<<forecastTable[UU.find(X)+1][uu.find(*ip)+1]<<endl;
for(i =0;i<k;i++){
if(PP[i][0]==X && SELECT[i].find(*ip)!=string::npos){
stackChars.pop();
for(j =0;PP[i][j]!='\0';j++){}
for(r =j;r>3;r--){
stackChars.push(PP[i][r-1]);
}
break;
}
}
}
X =stackChars.top();
}
if(X =='$'){
cout<<showStack(stackChars)<<" ";
cout<<showString(exp_statement,ip)<<" ";
cout<<"成功"<<endl;
cout<<"该表达式符合文法"<<endl;
}
}<file_sep>#include<stdio.h>
#include<string.h>
int zhan[50];
char input[200];
char str[50];
int ip, top, p=0; //ip->input top->stack p->str
int t; //newtable
int fstart, fend;
char tblptr[20][20];
int offset[10];
char stype[10];
int swidth=0;
int gotofind(int num, char s);
struct table1
{
int state;
char str[10];
char output[4];
}; //分析表
struct table1 table[50] = {
{0,"id","r2"},
{0,"proc","r2"},
{0,"M","1"},
{0,"P","2"},
{1,"id","s4"},
{1,"proc","s5"},
{1,"D","3"},
{2,"$","acc"},
{3,";","s6"},
{3,"$","r1"},
{4,":","s7"},
{5,"id","s8"},
{6,"id","s4"},
{6,"proc","s5"},
{6,"D","9"},
{7,"integer","s11"},
{7,"real","s12"},
{7,"|","s13" },
{7,"T","10"},
{8,";","s14"},
{9,";","r3"},
{9,"$","r3"},
{10,";","r5"},
{10,"$","r5"},
{11,";","r7"},
{11,"$","r7"},
{12,";","r8"},
{12,"$","r8"},
{13,"integer","s11"},
{13,"real","s12"},
{13,"|","s13"},
{13,"T","15"},
{14,"id","r6"},
{14,"proc","r6"},
{14,"N","16"},
{15,";","r9"},
{15,"$","r9"},
{16,"id","s4"},
{16,"proc","s5"},
{16,"D","17"},
{17,";","s18"},
{18,"id","s4"},
{18,"proc","s5"},
{18,"s","s19"},
{18,"D","9"},
{19,";","r4"},
{19,"$","r4"}
};
struct regular1
{
char start;
char production[15];
int len;
}; //文法产生式
struct regular1 regular[10] = {
{'S',"P",1},
{'P',"MD",2},
{'M',"e",0},
{'D',"D:D",3},
{'D',"procid;ND;s",7},
{'D',"id:T",3},
{'N',"e",0},
{'T',"integer",1},
{'T',"real",1},
{'T',"|T",2}
};
struct newtable
{
char name[10];
char type[10];
int width;
}; //翻译出来的表项
struct newtable newtb[100];
void Init_stack(); //初始化栈
void Init_queue(); //初始化队列
void compare();
int finda(int num, int sstart,int send);
void choose();
void translate(int num);
int main()
{
printf("input string : ");
gets(input);
Init_queue();
Init_stack();
compare();
for (int i = 0; i < t; i++)
{
printf("name : %s\n", newtb[i].name);
printf("type : %s\n", newtb[i].type);
printf("width : %d\n", newtb[i].width);
}
}
void Init_queue()
{
int len = strlen(input);
input[len] = '$';
input[len + 1] = '\0';
ip = 0;
}
void Init_stack()
{
top = 0;
zhan[top] = 0;
}
void compare()
{
int i,j;
while (1)
{
choose(); //分词
i = finda(zhan[top], fstart,fend);
if(i==50)
{
printf("error!\n");
ip=fend;
}
else if (table[i].output[0] == 's')
{
if (table[i].output[2] == '\0')
{
top++;
zhan[top] = table[i].output[1] - '0';
}
else
{
top++;
zhan[top] = (table[i].output[1] - '0') * 10 + (table[i].output[2] - '0');
}
for(int i = fstart;i<fend;i++)
str[p++] = input[i];
if (input[fend] >= '0' &&input[fend] <= '9')
{
for(j=fend;(input[j] >= '0')&&(input[j] <= '9');j++);
ip = j;
}
else
ip = fend;
str[p] = '\0';
printf("移入:");
puts(str);
}
else if (table[i].output[0] == 'r')
{
int num = 0;
if (table[i].output[2] == '\0')
{
num += table[i].output[1] - '0';
}
else
{
num += (table[i].output[2] - '0') * 10 + table[i].output[1] - '0';
}
translate(num);
printf("根据%c->%s归约 ", regular[num].start, regular[num].production);
if (strcmp(regular[num].production, "e") == 0)
p -= 0;
else
p -= strlen(regular[num].production);
str[p++] = regular[num].start;
str[p] = '\0';
puts(str);
top -= regular[num].len;
int temp = gotofind(zhan[top], str[p - 1]);
if (table[temp].output[1] == '\0')
zhan[top + 1] = table[temp].output[0] - '0';
else
zhan[top + 1] = (table[temp].output[0] - '0') * 10 + (table[temp].output[1] - '0');
top++;
}
else if(strcmp(table[i].output,"acc") == 0)
{
printf("接受\n");
break;
}
}
}
int finda(int num, int sstart,int send)
{
char s[100];
int i, j, k;
for (i = sstart,j = 0; i < send; i++,j++)
{
s[j] = input[i];
}
s[j] = '\0';
for (i = 0; i < 48; i++)
{
if (table[i].state == num&&strcmp(table[i].str,s) == 0)
{
k=i;
break;
}
else
k=50;
}
return k;
}
int gotofind(int num, char s)
{
int i;
for (i = 0; i < 48; i++)
{
if (table[i].state == num&&table[i].str[0] == s)
break;
}
return i;
}
void choose()
{
if(input[ip]==' ')
ip++;
if(input[ip] == 'i'&&input[ip + 1] == 'd')
{
int i, j, k;
for (i = ip + 2;; i++)
{
if (input[i] < '0'||input[i] > '9')
{
for (j = 0, k = ip; k < i; j++, k++)
{
tblptr[t][j] = input[k];
}
tblptr[t][j] = '\0';
break;
}
}
}
if (input[ip] >= 'a'&&input[ip] <= 'z')
{
for (int i = ip; i < strlen(input); i++)
{
if (input[i]>='a'&&input[i]<='z')
{
continue;
}
else
{
fstart = ip;
fend = i;
break;
}
}
}
else
{
fstart = ip;
fend = ip + 1;
}
}
void translate(int num)
{
switch (num)
{
case 1:
{
break;
}
case 2:
{
offset[t] = 0;
break;
}
case 4:
{
break;
}
case 5:
{
newtb[t].width = offset[t];
strcpy(newtb[t].name, tblptr[t]);
strcpy(newtb[t++].type, stype);
offset[t] = offset[t - 1] + swidth;
break;
}
case 6:
{
break;
}
case 7:
{
strcpy(stype, "integer");
swidth =4;
break;
}
case 8:
{
strcpy(stype, "real");
swidth =8;
break;
}
case 9:
{
swidth =4;
break;
}
}
}
|
32e09ff5f9b50a08f7e5bba7667fc786c96ea8b3
|
[
"C++"
] | 5
|
C++
|
YangWenxu/compilers
|
cf20ecfc494834c0869d84b12204b0e41748a5ce
|
7369a595bd08898775a7f8c37d7819aa388b3cbb
|
refs/heads/master
|
<file_sep><?php
namespace App\Security;
class LoginFormAuthenticator
{
}
|
fc1d4f5de35e198231e4ac00ecdbd807f17b6346
|
[
"PHP"
] | 1
|
PHP
|
FakherChihawi/kms_project
|
2b70218aeea0f1962bab619fd61e6ef06013371e
|
a92281cf67800c4e81ede582c188692ee187613d
|
refs/heads/master
|
<repo_name>Tikam02/TwitterAdvSearch<file_sep>/scraper.py
#!/usr/bin/env python3
from bs4 import BeautifulSoup
import time
from csv import DictWriter
import pprint
import datetime
from datetime import date, timedelta
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.common.exceptions import TimeoutException
def init_driver(driver_type):
if driver_type == 1:
driver = webdriver.Firefox()
elif driver_type == 2:
driver = webdriver.Chrome()
elif driver_type == 3:
driver = webdriver.Ie()
elif driver_type == 4:
driver = webdriver.Opera()
elif driver_type == 5:
driver = webdriver.PhantomJS()
driver.wait = WebDriverWait(driver, 5)
return driver
def scroll(driver, start_date, end_date, words, lang, max_time=180):
languages = { 1: 'en', 2: 'it', 3: 'es', 4: 'fr', 5: 'de', 6: 'ru', 7: 'zh'}
url = "https://twitter.com/search?q="
for w in words[:-1]:
url += "{}%20OR".format(w)
url += "{}%20".format(words[-1])
url += "since%3A{}%20until%3A{}&".format(start_date, end_date)
if lang != 0:
url += "l={}&".format(languages[lang])
url += "src=typd"
print(url)
driver.get(url)
start_time = time.time() # remember when we started
while (time.time() - start_time) < max_time:
driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")
def scrape_tweets(driver):
try:
tweet_divs = driver.page_source
obj = BeautifulSoup(tweet_divs, "html.parser")
content = obj.find_all("div", class_="content")
dates = []
names = []
tweet_texts = []
for i in content:
date = (i.find_all("span", class_="_timestamp")[0].string).strip()
try:
name = (i.find_all("strong", class_="fullname")[0].string).strip()
except AttributeError:
name = "Anonymous"
tweets = i.find("p", class_="tweet-text").strings
tweet_text = "".join(tweets)
# hashtags = i.find_all("a", class_="twitter-hashtag")[0].string
dates.append(date)
names.append(name)
tweet_texts.append(tweet_text)
data = {
"date": dates,
"name": names,
"tweet": tweet_texts,
}
make_csv(data)
except Exception:
print("Whoops! Something went wrong!")
driver.quit()
def make_csv(data):
l = len(data['date'])
print("count: %d" % l)
with open("twitterData.csv", "a+") as file:
fieldnames = ['Date', 'Name', 'Tweets']
writer = DictWriter(file, fieldnames=fieldnames)
writer.writeheader()
for i in range(l):
writer.writerow({'Date': data['date'][i],
'Name': data['name'][i],
'Tweets': data['tweet'][i],
})
def get_all_dates(start_date, end_date):
dates = []
start_date = datetime.datetime.strptime(start_date, "%Y-%m-%d")
end_date = datetime.datetime.strptime(end_date, "%Y-%m-%d")
step = timedelta(days=1)
while start_date <= end_date:
dates.append(str(start_date.date()))
start_date += step
return dates
def main():
driver_type = int(input("1) Firefox | 2) Chrome | 3) IE | 4) Opera | 5) PhantomJS\nEnter the driver you want to use: "))
wordsToSearch = input("Enter the words: ").split(',')
wordsToSearch = [w.strip() for w in wordsToSearch]  # strip() returns a new string, so assign it back
start_date = input("Enter the start date in (Y-M-D): ")
end_date = input("Enter the end date in (Y-M-D): ")
lang = int(input("0) All Languages 1) English | 2) Italian | 3) Spanish | 4) French | 5) German | 6) Russian | 7) Chinese\nEnter the language you want to use: "))
all_dates = get_all_dates(start_date, end_date)
print(all_dates)
for i in range(len(all_dates) - 1):
driver = init_driver(driver_type)
scroll(driver, str(all_dates[i]), str(all_dates[i + 1]), wordsToSearch, lang)
scrape_tweets(driver)
time.sleep(5)
print("The tweets for {} are ready!".format(all_dates[i]))
driver.quit()
if __name__ == "__main__":
main()
<file_sep>/README.md
# TwitterAdvSearch
A scraping tool that collects tweets matching user-provided keywords and hashtags within a given date range.
## Usage
Type `python --version` in the terminal to check that python3 is installed.
Type `pip install -r requirements.txt` in the terminal to install all the external dependencies.
Type `python scraper.py` in the terminal from the directory that you cloned to run the program.
Follow the instruction from the script to do advanced searches on Twitter
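For reference, here is a minimal sketch of the search URL that `scroll()` in `scraper.py` assembles before Selenium opens it. The keywords, dates, and language code below are made-up example values, and the numeric language menu from the script is simplified to a plain language code:

```python
# Minimal sketch of the URL construction performed by scroll() in scraper.py.
# The keywords, dates, and language code are hypothetical example values.
words = ["python", "selenium"]
start_date, end_date = "2019-01-01", "2019-01-02"
lang = "en"  # the real script maps a numeric menu choice to this code

url = "https://twitter.com/search?q="
for w in words[:-1]:
    url += "{}%20OR%20".format(w)
url += "{}%20".format(words[-1])
url += "since%3A{}%20until%3A{}&".format(start_date, end_date)
url += "l={}&".format(lang)
url += "src=typd"

print(url)
# https://twitter.com/search?q=python%20OR%20selenium%20since%3A2019-01-01%20until%3A2019-01-02&l=en&src=typd
```

The script builds one such URL for each day in the requested range and scrolls each results page for up to three minutes before parsing the tweets.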
|
1bff1fe8545d72c6707719d0786a438ab8fee265
|
[
"Markdown",
"Python"
] | 2
|
Python
|
Tikam02/TwitterAdvSearch
|
efc3d7fa67f82bd4a8bd9469c6a7e24ff25d5606
|
fb7799a97896e18f023b6344f291d055f36cd82d
|
refs/heads/main
|
<file_sep>count = 0;
document.getElementById('count').innerHTML = count;
var modal = document.getElementById("myModal");
// Get the button that opens the modal
var btn = document.getElementById("myBtn");
// Get the <span> element that closes the modal
var span = document.getElementsByClassName("close")[0];
// When the user clicks the button, open the modal
btn.onclick = function() {
modal.style.display = "block";
}
// When the user clicks on <span> (x), close the modal
span.onclick = function() {
modal.style.display = "none";
}
// When the user clicks anywhere outside of the modal, close it
window.onclick = function(event) {
if (event.target == modal) {
modal.style.display = "none";
}
}
var score;
score = count;
function paper(){
document.getElementById('you-pick').style.border = '20px solid hsl(230, 89%, 62%)';
document.getElementById('you-pick').src = 'images/icon-paper.svg';
document.getElementById('bot-pick').style.display = 'none';
document.getElementById('menu').style.display = 'none';
document.getElementById('pick').style.display = 'block';
game(0);
}
function rock(){
document.getElementById('you-pick').style.border = '20px solid hsl(349, 71%, 52%)';
document.getElementById('you-pick').src = 'images/icon-rock.svg';
document.getElementById('bot-pick').style.display = 'none';
document.getElementById('menu').style.display = 'none';
document.getElementById('pick').style.display = 'block';
game(1);
}
function scissors(){
document.getElementById('you-pick').style.border = '20px solid hsl(40, 84%, 53%)';
document.getElementById('you-pick').src = 'images/icon-scissors.svg';
document.getElementById('bot-pick').style.display = 'none';
document.getElementById('menu').style.display = 'none';
document.getElementById('pick').style.display = 'block';
game(2);
}
function bot(rand){
if (rand==0){
document.getElementById('bot-pick').style.display = document.getElementById('you-pick').style.display;
document.getElementById('bot-pick').src = 'images/icon-paper.svg';
document.getElementById('bot-pick').style.border = '20px solid hsl(230, 89%, 62%)';
document.getElementById('bg').style.display = 'none';
}
else if (rand==1){
document.getElementById('bot-pick').style.display = document.getElementById('you-pick').style.display;
document.getElementById('bot-pick').src = 'images/icon-rock.svg';
document.getElementById('bot-pick').style.border = '20px solid hsl(349, 71%, 52%)';
document.getElementById('bg').style.display = 'none';
}
else if(rand==2){
document.getElementById('bg').style.display = 'none';
document.getElementById('bot-pick').style.display = document.getElementById('you-pick').style.display;
document.getElementById('bot-pick').src = 'images/icon-scissors.svg';
document.getElementById('bot-pick').style.border = '20px solid hsl(40, 84%, 53%)';
}
}
var card = ['paper','rock', 'scissors'];
function game(num){
var rand = Math.floor(Math.random() * 3);
// console.log(rand);
if(num==0){
if(rand==2){
document.getElementById('demo').innerHTML = 'YOU LOSE';
}
else if (rand==1){
document.getElementById('demo').innerHTML = 'YOU WIN';
score = score + 1;
document.getElementById('count').innerHTML = score;
}
else{
document.getElementById('demo').innerHTML = 'DRAW';
}
}
else if (num==1){
if (rand==0){
document.getElementById('demo').innerHTML = 'YOU LOSE';
}
else if (rand==1){
document.getElementById('demo').innerHTML = 'DRAW';
}
else{
document.getElementById('demo').innerHTML = 'YOU WIN';
score = score + 1;
document.getElementById('count').innerHTML = score;
}
}
else{
if(rand==0){
document.getElementById('demo').innerHTML = 'YOU WIN';
score = score + 1;
document.getElementById('count').innerHTML = score;
}
else if(rand==1){
document.getElementById('demo').innerHTML = 'YOU LOSE';
}
else{
document.getElementById('demo').innerHTML = 'DRAW';
}
}
bot(rand);
}
function playAgain(){
document.getElementById('pick').style.display = 'none';
document.getElementById('menu').style.display = 'block';
}
|
fae5c97fb9595ce20c6df2560cfa1b7c0a5b1c7a
|
[
"JavaScript"
] | 1
|
JavaScript
|
rahulkundena/rock-paper-scissors
|
4f6cccccdf1d89fbb8edb4c69fa993ee93ef5568
|
99140e26649e0a4d423250052af5d935637dedde
|
refs/heads/master
|
<file_sep>var namespace = "urn:x-cast:com.google.ads.imasdk.cast";
window.splashImg = document.getElementById('splash');
window.mediaElement = document.getElementById('media');
window.mediaManager = new cast.receiver.MediaManager(window.mediaElement);
window.castReceiverManager = cast.receiver.CastReceiverManager.getInstance();
window.customMessageBus = window.castReceiverManager.getCastMessageBus(namespace);
window.castReceiverManager.start();
window.castReceiverManager.onSenderDisconnected = function() {
broadcast("seek," + currentContentTime);
window.close();
}
window.customMessageBus.onMessage = function(event) {
var message = event.data.split(',');
var senderId = event.senderId;
console.log("Message from: " + senderId + " Message: " + message);
switch (message[0]) {
case "requestAd":
requestAd(message[1]);
return;
case "seek":
seek(parseFloat(message[1]));
return;
}
}
function broadcast(message) {
window.customMessageBus.broadcast(message);
}
var origOnLoad = window.mediaManager.onLoad.bind(window.mediaManager);
var origOnLoadEvent;
window.mediaManager.onLoad = function(event) {
console.log('onLoad');
broadcast('onLoad');
origOnLoadEvent = event;
window.splashImg.style.display = 'none';
window.mediaElement.style.display = 'block';
initIMA();
origOnLoad(origOnLoadEvent);
}
var origOnEnded, origOnSeek;
var adDisplayContainer, adsLoader, adsManager;
var currentContentTime = 0;
var discardAdBreak = -1;
function initIMA() {
console.log('initIma');
adDisplayContainer = new google.ima.AdDisplayContainer(document.getElementById('adContainer'), window.mediaElement);
adDisplayContainer.initialize();
adsLoader = new google.ima.AdsLoader(adDisplayContainer);
adsLoader.addEventListener(google.ima.AdsManagerLoadedEvent.Type.ADS_MANAGER_LOADED, onAdsManagerLoaded, false);
adsLoader.addEventListener(google.ima.AdErrorEvent.Type.AD_ERROR, onAdError, false);
adsLoader.addEventListener(google.ima.AdEvent.Type.ALL_ADS_COMPLETED, onAllAdsCompleted, false);
}
function onAdsManagerLoaded(adsManagerLoadedEvent) {
console.log('onAdsManagerLoaded');
broadcast('onAdsManagerLoaded');
// Get the ads manager.
adsManager = adsManagerLoadedEvent.getAdsManager(
window.mediaElement); // should be set to the content video element
// Add listeners to the required events.
adsManager.addEventListener(
google.ima.AdErrorEvent.Type.AD_ERROR,
onAdError);
adsManager.addEventListener(
google.ima.AdEvent.Type.CONTENT_PAUSE_REQUESTED,
onContentPauseRequested);
adsManager.addEventListener(
google.ima.AdEvent.Type.CONTENT_RESUME_REQUESTED,
onContentResumeRequested);
try {
// Initialize the ads manager. Ad rules playlist will start at this time.
adsManager.init(640, 360, google.ima.ViewMode.NORMAL);
// Call play to start showing the ad. Single video and overlay ads will
// start at this time; the call will be ignored for ad rules.
adsManager.start();
origOnEnded = window.mediaManager.onEnded.bind(window.mediaManager);
origOnSeek = window.mediaManager.onSeek.bind(window.mediaManager);
if (discardAdBreak != -1) {
adsManager.discardAdBreak();
currentContentTime = discardAdBreak;
discardAdBreak = -1;
}
} catch (adError) {
// An error may be thrown if there was a problem with the VAST response.
broadcast("Ads Manager Error: " + adError);
}
}
function requestAd(adTag) {
console.log('requestAd');
var adsRequest = new google.ima.AdsRequest();
adsRequest.adTagUrl = adTag;
adsRequest.linearAdSlotWidth = window.mediaElement.width;
adsRequest.linearAdSlotHeight = window.mediaElement.height;
adsRequest.nonLinearAdSlotWidth = window.mediaElement.width;
adsRequest.nonLinearAdSlotHeight = window.mediaElement.height / 3;
adsLoader.requestAds(adsRequest);
}
function seek(time) {
currentContentTime = time;
window.mediaElement.currentTime = time;
window.mediaElement.play();
}
function onAdError(adErrorEvent) {
broadcast("Ad Error: " + adErrorEvent.getError().toString());
// Handle the error logging.
if (adsManager) {
adsManager.destroy();
}
window.mediaElement.play();
}
function onContentPauseRequested() {
currentContentTime = window.mediaElement.currentTime;
broadcast("contentPauseRequested: " + currentContentTime);
window.mediaManager.onEnded = function(event) {};
window.mediaManager.onSeek = function(event) {
var requestId = event.data.requestId;
window.mediaManager.broadcastStatus(true, requestId);
}
}
function onContentResumeRequested() {
window.mediaManager.onEnded = origOnEnded;
window.mediaElement.addEventListener('playing', function() {
var mediaInfo = window.mediaManager.getMediaInformation();
mediaInfo.duration = window.mediaElement.duration;
window.mediaManager.setMediaInformation(mediaInfo);
});
window.mediaManager.onSeek = origOnSeek;
window.onEnded = origOnEnded;
broadcast("contentResumeRequested: " + currentContentTime);
origOnLoad(origOnLoadEvent);
seek(currentContentTime);
window.mediaElement.play();
}
function onAllAdsCompleted() {
if (adsManager) {
adsManager.destroy();
}
}
|
16461bd1c860c87db48063173db0d3277ba67e1a
|
[
"JavaScript"
] | 1
|
JavaScript
|
yingxuel/hosting
|
09a1126eab6fa0f0f3683a8f1b879f0512fe4778
|
9104007efafa1288991dce4a09bc61729bf7f6eb
|
refs/heads/master
|
<repo_name>team-dev-java/thichlamua<file_sep>/thichlamua/src/main/java/vn/thichlamua/fashion/controller/HomeController.java
package vn.thichlamua.fashion.controller;
import org.springframework.stereotype.Controller;
import org.springframework.web.bind.annotation.PathVariable;
import org.springframework.web.bind.annotation.RequestMapping;
@Controller
public class HomeController {
@RequestMapping("/{shopName}")
public String getViewByCustomer(@PathVariable("shopName") String shopName){
return "user/fashion/kuteshop/kuteshop";
}
}
|
94686af6a94e425abadc470459a32afe20ac8976
|
[
"Java"
] | 1
|
Java
|
team-dev-java/thichlamua
|
0155f356a37b8c0569cce7de9f584f9342cf16cb
|
73e847699ae23f2594cc1c12d279b492ec944882
|
refs/heads/master
|
<file_sep>using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
namespace Overloading04
{
public class Numb
{
public int FindMinimum(int number1, int number2)
{
int min;
if (number1 < number2)
min = number1;
else
min = number2;
return min;
}
public int FindMinimum(int number1, int number2, float number3)
{
int intnumber3 = (int)Math.Ceiling(number3);
int min;
// Use <= so that a tie between number1 and number2 does not fall through to number3
if (number1 <= number2 && number1 <= number3)
min = number1;
else if (number2 <= number1 && number2 <= number3)
min = number2;
else
min = intnumber3;
return min;
}
public int FindMaximum(int number1, int number2)
{
int max;
if (number1 > number2)
max = number1;
else
max = number2;
return max;
}
public int FindMaximum(int number1, int number2, float number3)
{
int intnumber3 = (int)Math.Ceiling(number3);
int max;
// Use >= so that a tie between number1 and number2 does not fall through to number3
if (number1 >= number2 && number1 >= number3)
max = number1;
else if (number2 >= number1 && number2 >= number3)
max = number2;
else
max = intnumber3;
return max;
}
}
}
|
1d06a04d96c3eb438119cf74e27baf35c737092b
|
[
"C#"
] | 1
|
C#
|
ngarvan/Overloading04
|
82a6e99851bc22bee673a31e5160250cd19e6852
|
78feab259795a5aae569cb852a98500bed8271c3
|
refs/heads/master
|
<file_sep>from django.http import HttpResponse
from django.shortcuts import render, redirect
from django.utils.datastructures import MultiValueDictKeyError
from polls.models import Question, Choice
def index(request):
questions = Question.objects.all()
context = {
'questions': questions,
}
return render(request, 'polls/index.html', context)
def question_detail(request, pk):
context = {
'question': Question.objects.get(pk=pk)
}
return render(request, 'polls/question.html', context)
def vote(request, pk):
if request.method == 'POST':
# Can be raised if someone tampers with the question_pk value in the form's action URL
try:
question = Question.objects.get(pk=pk)
except Question.DoesNotExist:
return redirect('index')
try:
# Choice.DoesNotExist can be raised if someone tampers with
# the choice_pk value sent in the input
choice_pk = request.POST['choice_pk']
choice = Choice.objects.get(pk=choice_pk)
choice.votes += 1
choice.save()
except MultiValueDictKeyError:
pass
except Choice.DoesNotExist:
pass
finally:
return redirect('question_detail', pk=question.pk)
return HttpResponse('Permission denied', status=403)
|
009226a51fe66b5cdfc88bf4163b4137ca7dfce6
|
[
"Python"
] | 1
|
Python
|
klee2017/django-tutorial-project
|
6e5482ee99acae9ceb6d182385faecee4ec5c1a8
|
d3ddc650caf7d11bd8393fdbe1478d7a1879149d
|
refs/heads/master
|
<repo_name>QFann/NettyTest<file_sep>/src/main/java/com/qf/netty/MessageRequestPacket.java
package com.qf.netty;
import lombok.Data;
import static com.qf.netty.Command.MESSAGE_REQUEST;
/**
* Message request packet
* Created by qifan on 2018/9/28.
*/
@Data
public class MessageRequestPacket extends Packet {
private String message;
public MessageRequestPacket(String message) {
this.message = message;
}
@Override
public Byte getCommand() {
return MESSAGE_REQUEST;
}
}
<file_sep>/src/main/java/com/qf/chain/ChainHandler.java
package com.qf.chain;
/**
* Created by qifan on 2018/8/15.
*/
public abstract class ChainHandler {
public void execute(Chain chain){
handleProcess();
chain.proceed();
}
protected abstract void handleProcess();
}
<file_sep>/src/main/java/com/qf/ImageTest.java
package com.qf;
import javax.swing.*;
import java.awt.*;
import java.awt.event.ActionEvent;
import java.awt.event.ActionListener;
/**
* Created by qifan on 2018/8/20.
*/
public class ImageTest {
JFrame frame;
public static void main(String[] args){
ImageTest test = new ImageTest();
test.go();
}
public void go(){
frame = new JFrame();
frame.setVisible(true);
JButton button = new JButton("should I do it?");
button.addActionListener(new AngelListener());
button.addActionListener(new DeviListener());
frame.getContentPane().add(BorderLayout.CENTER,button);
}
class AngelListener implements ActionListener {
@Override
public void actionPerformed(ActionEvent e) {
System.out.println("Don't do it,you might regret id!");
}
}
class DeviListener implements ActionListener{
@Override
public void actionPerformed(ActionEvent e) {
System.out.println("Come on,do it!");
}
}
}
<file_sep>/src/main/java/com/qf/dynamic/Client.java
package com.qf.dynamic;
import com.qf.pattern.RealSubject;
import com.qf.pattern.Subject;
import java.lang.reflect.Proxy;
/**
* Created by qifan on 2018/8/14.
*/
public class Client {
public static void main (String[] args){
System.getProperties().put("sun.misc.ProxyGenerator.saveGeneratedFiles","true");
Subject subject =(Subject) Proxy.newProxyInstance(Client.class.getClassLoader(),new Class[]{Subject.class},new JdkProxySubject(new RealSubject()));
subject.request();
}
}
<file_sep>/README.md
# NettyTest
Simple client-server communication.
I have been studying Netty recently.
This project implements a simple client-server communication flow with encoding and decoding.
Almost every block of code is commented, so it should be fairly easy to follow.
It also contains some test cases from my earlier study of AOP,
as well as a calculator implementation taken from the web; it uses a stack-based approach that I found quite clever and worth a look.
<file_sep>/src/main/java/com/qf/chain/Client.java
package com.qf.chain;
/**
* Created by qifan on 2018/8/15.
*/
public class Client {
static class HandlerA extends Handker{
@Override
protected void handleProcess() {
System.out.println("handler by a");
}
}
static class HandlerB extends Handker{
@Override
protected void handleProcess() {
System.out.println("handler by b");
}
}
static class HandlerC extends Handker{
@Override
protected void handleProcess() {
System.out.println("handler by c");
}
}
public static void main(String[] args){
Handker handkerA = new HandlerA();
Handker handkerB = new HandlerB();
Handker handkerC = new HandlerC();
handkerA.setSucessor(handkerB);
handkerB.setSucessor(handkerC);
handkerA.execute();
}
}
<file_sep>/src/main/java/com/qf/netty/NettyServer.java
package com.qf.netty;
import io.netty.bootstrap.ServerBootstrap;
import io.netty.channel.ChannelInitializer;
import io.netty.channel.nio.NioEventLoopGroup;
import io.netty.channel.socket.nio.NioServerSocketChannel;
import io.netty.channel.socket.nio.NioSocketChannel;
import io.netty.util.concurrent.Future;
import io.netty.util.concurrent.GenericFutureListener;
/**
* Created by qifan on 2018/9/26.
*/
public class NettyServer {
public static void main(String[] args){
// Create the server bootstrap
ServerBootstrap serverBootstrap = new ServerBootstrap();
// Create the two event loop groups: boss accepts connections on the port, worker handles read/write for each accepted connection
NioEventLoopGroup boss = new NioEventLoopGroup();
NioEventLoopGroup worker = new NioEventLoopGroup();
serverBootstrap
// Specify the threading model
.group(boss,worker)
// Specify the IO type
.channel(NioServerSocketChannel.class)
// Configure read/write handling for each connection
.childHandler(new ChannelInitializer<NioSocketChannel>() {
@Override
protected void initChannel(NioSocketChannel ch) throws Exception {
// addLast(handler class)
// ch.pipeline().addLast(new ServerHandler());
ch.pipeline().addLast(new PacketDecoder());
ch.pipeline().addLast(new LoginRequestHandler());
ch.pipeline().addLast(new MessageRequestHandler());
ch.pipeline().addLast(new PacketEncoder());
}
});
// Bind the listening port
bind(serverBootstrap,8000);
}
/**
* Bind the listening port
* @param serverBootstrap the server bootstrap
* @param port the port to bind
*/
public static void bind(final ServerBootstrap serverBootstrap,final int port){
serverBootstrap.bind(port).addListener(new GenericFutureListener<Future<? super Void>>() {
@Override
public void operationComplete(Future<? super Void> future) throws Exception {
if(future.isSuccess()){
System.out.println("端口["+port+"]绑定成功!");
}else {
// Binding failed; retry with port + 1
System.out.println("Port [" + port + "] binding failed!");
bind(serverBootstrap,port+1);
}
}
});
}
}
<file_sep>/src/main/java/com/qf/netty/Attributes.java
package com.qf.netty;
import io.netty.util.AttributeKey;
/**
* Created by qifan on 2018/9/28.
*/
public interface Attributes {
// Create a constant AttributeKey named "login".
AttributeKey<Boolean> LOGIN = AttributeKey.newInstance("login");
}
|
40c1dcf8f3686ea5bd07042f55a7e4ab2314fd5e
|
[
"Markdown",
"Java"
] | 8
|
Java
|
QFann/NettyTest
|
54b06a547ca1ce766552a7c3c652ce8e3257d573
|
059b4b943eb5fbe6389841158e354cf0dde3c2c9
|
refs/heads/master
|
<repo_name>athorsen/wiimote-button-presser<file_sep>/README.md
# wiimote-button-presser
This is a project I did to allow my son to keep playing <NAME> (his favorite game) when he broke his arm.

If you want to learn more about the project, you can get the full details, parts list, and build instructions [on my website](https://www.devmashup.com/wiimote-button-presser/).
<file_sep>/wiimote_button_presser/wiimote_button_presser.ino
#include <Servo.h>
const int _buttonPin = 11;
const int _servoPin = 10;
Servo _wiimoteServo;
void setup() {
pinMode(_buttonPin, INPUT);
_wiimoteServo.attach(_servoPin);
}
void loop() {
int buttonState = digitalRead(_buttonPin);
if (buttonState == HIGH) {
// button is pressed - move servo to press Wii button
_wiimoteServo.write(20);
} else {
// button is released - move servo to not press Wii button
_wiimoteServo.write(30);
}
delay(50);
}
|
a7ba1c901c240cd08d60adb5cd4bc1762fad10e1
|
[
"Markdown",
"C++"
] | 2
|
Markdown
|
athorsen/wiimote-button-presser
|
7987db771adf58d2f6009af03773012ad84e09d7
|
674e8906fecb14ac1c3dc057848f69ade5d30f49
|
refs/heads/master
|
<file_sep>My implementation of https://adventofcode.com/
<file_sep>// https://adventofcode.com/2019/day/1
/*
--- Day 1: The Tyranny of the Rocket Equation ---
Santa has become stranded at the edge of the Solar System while delivering presents to other planets! To accurately calculate his position in space, safely align his warp drive, and return to Earth in time to save Christmas, he needs you to bring him measurements from fifty stars.
Collect stars by solving puzzles. Two puzzles will be made available on each day in the Advent calendar; the second puzzle is unlocked when you complete the first. Each puzzle grants one star. Good luck!
The Elves quickly load you into a spacecraft and prepare to launch.
At the first Go / No Go poll, every Elf is Go until the Fuel Counter-Upper. They haven't determined the amount of fuel required yet.
Fuel required to launch a given module is based on its mass. Specifically, to find the fuel required for a module, take its mass, divide by three, round down, and subtract 2.
For example:
For a mass of 12, divide by 3 and round down to get 4, then subtract 2 to get 2.
For a mass of 14, dividing by 3 and rounding down still yields 4, so the fuel required is also 2.
For a mass of 1969, the fuel required is 654.
For a mass of 100756, the fuel required is 33583.
The Fuel Counter-Upper needs to know the total fuel requirement. To find it, individually calculate the fuel needed for the mass of each module (your puzzle input), then add together all the fuel values.
What is the sum of the fuel requirements for all of the modules on your spacecraft?
*/
var assert = require("assert");
const input2 = [12, 14];
const input = [
93912,
138996,
112824,
110011,
139024,
132292,
74029,
81664,
138077,
109614,
121056,
136338,
132771,
86611,
131526,
123101,
61315,
93900,
62070,
97957,
67168,
119464,
119066,
111076,
56856,
144203,
109400,
120187,
57915,
143353,
71308,
67695,
141275,
106552,
136209,
86990,
98969,
57207,
99103,
71940,
63145,
91765,
121095,
139700,
128851,
77138,
66712,
91318,
96924,
132235,
99897,
67479,
87996,
121100,
55411,
61715,
130658,
121030,
141445,
83939,
90402,
121107,
59618,
120112,
58140,
103514,
90538,
55552,
142739,
61770,
147374,
80038,
128830,
93328,
52369,
71801,
144536,
147140,
118213,
128056,
92155,
114384,
89234,
124451,
94214,
79174,
108427,
111041,
96715,
128414,
62521,
93897,
107428,
90637,
126176,
78676,
69504,
93663,
80869,
124230
];
// input.forEach();
function getFuel(mass) {
return Math.floor(mass / 3) - 2;
}
assert(getFuel(12) === 2);
assert(getFuel(14) === 2);
assert(getFuel(1969) === 654);
assert(getFuel(100756) === 33583);
const output = input.reduce(
(previousValue, currentValue) => previousValue + getFuel(currentValue),
0
);
console.log("Output Part 1: ", output);
/*
--- Part Two ---
During the second Go / No Go poll, the Elf in charge of the Rocket Equation Double-Checker stops the launch sequence. Apparently, you forgot to include additional fuel for the fuel you just added.
Fuel itself requires fuel just like a module - take its mass, divide by three, round down, and subtract 2. However, that fuel also requires fuel, and that fuel requires fuel, and so on. Any mass that would require negative fuel should instead be treated as if it requires zero fuel; the remaining mass, if any, is instead handled by wishing really hard, which has no mass and is outside the scope of this calculation.
So, for each module mass, calculate its fuel and add it to the total. Then, treat the fuel amount you just calculated as the input mass and repeat the process, continuing until a fuel requirement is zero or negative. For example:
A module of mass 14 requires 2 fuel. This fuel requires no further fuel (2 divided by 3 and rounded down is 0, which would call for a negative fuel), so the total fuel required is still just 2.
At first, a module of mass 1969 requires 654 fuel. Then, this fuel requires 216 more fuel (654 / 3 - 2). 216 then requires 70 more fuel, which requires 21 fuel, which requires 5 fuel, which requires no further fuel. So, the total fuel required for a module of mass 1969 is 654 + 216 + 70 + 21 + 5 = 966.
The fuel required by a module of mass 100756 and its fuel is: 33583 + 11192 + 3728 + 1240 + 411 + 135 + 43 + 12 + 2 = 50346.
What is the sum of the fuel requirements for all of the modules on your spacecraft when also taking into account the mass of the added fuel? (Calculate the fuel requirements for each module separately, then add them all up at the end.)
*/
function getFuelRecursive(mass) {
let fuel = getFuel(mass);
if (fuel > 0) {
return fuel + getFuelRecursive(fuel);
} else {
return 0;
}
}
assert(getFuelRecursive(14) === 2);
assert(getFuelRecursive(1969) === 966);
assert(getFuelRecursive(100756) === 50346);
const output2 = input.reduce(
(previousValue, currentValue) =>
previousValue + getFuelRecursive(currentValue),
0
);
console.log("Output Part 2: ", output2);
|
226825f21706b4dc358a46bd7aa0da788656c184
|
[
"Markdown",
"JavaScript"
] | 2
|
Markdown
|
sdg9/adventofcode-old
|
605c0be8e81ae67f6693e4fb44484567890cb5bf
|
6a73ce934e7fab8e41e1bbd141f81bceaa590786
|
refs/heads/master
|
<file_sep>package com.jntele.troy.jntelelte;
import android.app.AlertDialog;
import android.app.Dialog;
import android.app.DialogFragment;
import android.app.FragmentManager;
import android.content.DialogInterface;
import android.os.Bundle;
/**
* Created by lenovo on 2018/5/27.
*/
public class InfoDialogFragment extends DialogFragment {
private DialogInterface.OnClickListener infoCallback;
private String title;
private String message;
private String hint;
public void show(String title, String message, String hint, DialogInterface.OnClickListener infoCallback,
FragmentManager fragmentManager) {
this.title = title;
this.message = message;
this.hint = hint;
this.infoCallback = infoCallback;
show(fragmentManager, "InfoDialogFragment");
}
@Override
public Dialog onCreateDialog(Bundle savedInstanceState) {
AlertDialog.Builder builder = new AlertDialog.Builder(getActivity());
builder.setTitle(title);
builder.setMessage(message);
builder.setNegativeButton(hint, infoCallback);
return builder.create();
}
}
<file_sep>package com.jntele.troy.jntelelte;
/**
* LTE cell information bean
*/
public class CellData {
private String cell_id="";
private String cell_name="";
private String bbu_name="";
private String producer="";
private String rru_type ="";
private String system_type="";
private String station_name="";
private String county="";
private String source="";
public String getCellId(){return cell_id;}
public String getCellName(){return cell_name;}
public String getBBUName(){return bbu_name;}
public String getProducer(){return producer;}
public String getRRUType (){return rru_type ;}
public String getSystemType(){return system_type;}
public String getStationName(){return station_name;}
public String getCounty(){return county;}
public String getSource(){return source;}
public void setCellId(String info){cell_id=info;}
public void setCellId(int info){cell_id=""+info;}
public void setCellName(String info){cell_name=info;}
public void setBBUName(String info){bbu_name=info;}
public void setProducer(String info){producer=info;}
public void setRRUType (String info){rru_type =info;}
public void setSystemType(String info){system_type=info;}
public void setStationName(String info){station_name=info;}
public void setCounty(String info){county=info;}
public void setSource(String info){source=info;}
}
<file_sep>package com.jntele.troy.jntelelte;
import android.content.Context;
import android.graphics.Color;
import android.location.Location;
import android.os.Build;
import android.support.annotation.Nullable;
import android.telephony.CellIdentityCdma;
import android.telephony.CellIdentityLte;
import android.telephony.SignalStrength;
import android.text.Spannable;
import android.text.SpannableStringBuilder;
import android.text.style.ForegroundColorSpan;
import android.util.AttributeSet;
import android.view.LayoutInflater;
import android.view.View;
import android.widget.LinearLayout;
import android.widget.TextView;
import com.jntele.troy.jntelelte.CellData;
import java.lang.reflect.InvocationTargetException;
import java.util.Locale;
import java.util.Objects;
import static java.lang.Math.abs;
/**
* Created by lenovo on 2018/5/26.
*/
public class InfoView extends LinearLayout {
// UI elements for the basic phone info section
private TextView changjiaView;
private TextView xinghaoView;
private TextView systemView;
public TextView networkView;
public TextView locationtypeView;
public TextView locationView;
public TextView imei1View;
public TextView imei2View;
public TextView iesi1View;
public TextView iesi2View;
// UI elements for the LTE network info section
private TextView enbView;
private TextView cellIdView;
private TextView pciView;
private TextView tacView;
private TextView ciView;
private TextView rsrpView;
private TextView rsrqView;
private TextView sinrView;
private TextView freqView;
// UI elements for the LTE base-station info section
private TextView bbuNameView;
private TextView rruNameView;
private TextView stationNameView;
private TextView xitongView;
private TextView producerView;
private TextView rruTypeView;
// UI elements for the CDMA network info section
private TextView nidView;
private TextView cidView;
private TextView sidView;
private TextView cdmaEcioView;
private TextView cdmaDbmView;
private TextView evdoEcioView;
private TextView evdoDbmView;
private TextView evdoSnrView;
private TextView bidView;
//
private LinearLayout ltenetInfo;
private LinearLayout ltestationInfo;
private LinearLayout cdmanetInfo;
public InfoView(Context context, @Nullable AttributeSet attrs) {
super(context, attrs);
LayoutInflater.from(context).inflate(R.layout.info_layout, this);
// Initialize the basic phone info views
changjiaView = (TextView) findViewById(R.id.changjia);
xinghaoView = (TextView) findViewById(R.id.xinghao);
systemView = (TextView) findViewById(R.id.system);
networkView = (TextView) findViewById(R.id.network);
locationtypeView = (TextView) findViewById(R.id.locationtype);
locationView = (TextView) findViewById(R.id.location);
imei1View = (TextView) findViewById(R.id.imei1);
imei2View = (TextView)findViewById(R.id.imei2);
iesi1View = (TextView) findViewById(R.id.iesi1);
iesi2View = (TextView)findViewById(R.id.iesi2);
// Initialize the LTE network info views
enbView = (TextView) findViewById(R.id.enodeb);
cellIdView = (TextView) findViewById(R.id.cellid);
ciView = (TextView) findViewById(R.id.ci);
tacView = (TextView) findViewById(R.id.tac);
pciView = (TextView) findViewById(R.id.pci);
rsrpView = (TextView) findViewById(R.id.rsrp);
rsrqView = (TextView) findViewById(R.id.rsrq);
sinrView = (TextView) findViewById(R.id.sinr);
freqView = (TextView) findViewById(R.id.freq);
// Initialize the base-station info views
bbuNameView = (TextView) findViewById(R.id.bbuname);
rruNameView = (TextView) findViewById(R.id.cellname);
stationNameView = (TextView) findViewById(R.id.stationname);
xitongView = (TextView) findViewById(R.id.xitong);
producerView = (TextView) findViewById(R.id.producer);
rruTypeView = (TextView) findViewById(R.id.rrutype);
// Initialize the CDMA network info views
nidView = (TextView) findViewById(R.id.nid);
sidView = (TextView) findViewById(R.id.sid);
cidView = (TextView) findViewById(R.id.cid);
bidView = (TextView) findViewById(R.id.bid);
cdmaDbmView = (TextView) findViewById(R.id.cdmadbm);
cdmaEcioView = (TextView) findViewById(R.id.cdmaecio);
evdoDbmView = (TextView) findViewById(R.id.evdodbm);
evdoEcioView = (TextView) findViewById(R.id.evdoecio);
evdoSnrView = (TextView) findViewById(R.id.evdosnr);
//
ltenetInfo = (LinearLayout) findViewById(R.id.ltenetinfo);
ltestationInfo = (LinearLayout) findViewById(R.id.ltestationinfo);
cdmanetInfo = (LinearLayout) findViewById(R.id.cdmanetinfo);
setInfo(changjiaView, "厂家:", android.os.Build.BRAND);
setInfo(xinghaoView, "型号:", android.os.Build.MODEL);
setInfo(systemView, "系统:", String.format("Android %s", android.os.Build.VERSION.RELEASE));
}
public void setNetwork(String network,String operator){
setInfo(networkView, "数据:", String.format("%s(%s)", network, operator));
}
public void setPhoneID(String imei,String imsi,int num){
if(num==0) {
setInfo(imei1View, "IMEI:", imei);
setInfo(iesi1View, "IMSI:", imsi);
imei2View.setVisibility(View.GONE);
iesi2View.setVisibility(View.GONE);
}else if(num==1){
setInfo(imei1View, "IMEI1:", imei);
setInfo(iesi1View, "IMSI1:", imsi);
imei2View.setVisibility(View.VISIBLE);
iesi2View.setVisibility(View.VISIBLE);
}else{
setInfo(imei2View, "IMEI2:", imei);
setInfo(iesi2View, "IMSI2:", imsi);
}
}
public void setLocationType(String locationType){
setInfo(locationtypeView,"定位:",locationType);
}
public void setLocation(Location location){
setInfo(locationView,"",String.format(Locale.getDefault(),"(%.5f,%.5f)",location.getLongitude(),location.getLatitude()));
}
public void setLteNetInfo(CellIdentityLte cellIdentity){
int ci = cellIdentity.getCi();
int enb = ci / 256;
setInfo(enbView,"eNB ","" + enb);
setInfo(ciView, "CI ", "" + ci);
setInfo(tacView, "TAC ", "" + cellIdentity.getTac());
setInfo(pciView, "PCI ", "" + cellIdentity.getPci());
setInfo(cellIdView, "CellID ", "" + (ci - enb * 256));
if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.N) {
setInfo(freqView,"频段 ","" + cellIdentity.getEarfcn());
}
}
public void setLteSignalInfo(SignalStrength signalStrength) throws NoSuchMethodException, InvocationTargetException, IllegalAccessException {
String tmpInfo;
int tmp = (int) signalStrength.getClass().getMethod("getLteRsrp").invoke(signalStrength);
if ((tmp <= -120) || (tmp >= -1))
tmpInfo = "";
else
tmpInfo = "" + tmp;
setInfo(rsrpView, "RSRP ", tmpInfo);
tmp = (int) signalStrength.getClass().getMethod("getLteRsrq").invoke(signalStrength);
if ((tmp <= -120) || (tmp >= -1))
tmpInfo = "";
else
tmpInfo = "" + tmp;
setInfo(rsrqView, "RSRQ ", tmpInfo);
tmp = (int) signalStrength.getClass().getMethod("getLteRssnr").invoke(signalStrength);
if(abs(tmp)>300)
tmpInfo = "";
else
tmpInfo = String.format(Locale.getDefault(),"%.1f",0.1*tmp);
setInfo(sinrView, "RSSNR ",tmpInfo);
}
public void setLteStationInfo(CellData cd){
if(Objects.equals(cd.getBBUName(), ""))
unshowLteStationView();
else {
showLteStationView();
setInfo(bbuNameView, "BBU:", cd.getBBUName());
setInfo(rruNameView, "RRU:", cd.getCellName());
setInfo(stationNameView, "站点名:", cd.getStationName());
setInfo(xitongView, "系统:", cd.getSystemType());
setInfo(producerView, "厂家:", cd.getProducer());
setInfo(rruTypeView, "RRU型号:", cd.getRRUType());
}
}
public void setCdmaNetInfo(CellIdentityCdma cellIdentity){
setInfo(nidView, "NID ", "" + cellIdentity.getNetworkId());
setInfo(sidView, "SID ", "" + cellIdentity.getSystemId());
int cid = cellIdentity.getBasestationId();
setInfo(cidView, "CID ", "" + cid);
int x = cid / (16 * 16);
int y = x / 16;
int z = cid - x * 16 * 16 + y * 16 * 16;
setInfo(bidView, "BID ", "" + z);
}
public void setCdmaSignalInfo(SignalStrength signalStrength) throws NoSuchMethodException, InvocationTargetException, IllegalAccessException{
String tmpInfo;
int tmp = (int) signalStrength.getClass().getMethod("getCdmaDbm").invoke(signalStrength);
if ((tmp <= -120) || (tmp >= -1))
tmpInfo = "";
else
tmpInfo = "" + tmp;
setInfo(cdmaDbmView, "1XRx", tmpInfo);
tmp = (int) signalStrength.getClass().getMethod("getCdmaEcio").invoke(signalStrength);
if ((tmp <= -120) || (tmp >= -1))
tmpInfo = "";
else
tmpInfo = "" + 0.1 * tmp;
setInfo(cdmaEcioView, "1XEcio", tmpInfo);
tmp = (int) signalStrength.getClass().getMethod("getEvdoDbm").invoke(signalStrength);
if ((tmp <= -120) || (tmp >= -1))
tmpInfo = "";
else
tmpInfo = "" + tmp;
setInfo(evdoDbmView, "DoRx", tmpInfo);
tmp = (int) signalStrength.getClass().getMethod("getEvdoEcio").invoke(signalStrength);
if ((tmp <= -120) || (tmp >= -1))
tmpInfo = "";
else
tmpInfo = "" + 0.1 * tmp;
setInfo(evdoEcioView, "DoEcio", tmpInfo);
tmp = (int) signalStrength.getClass().getMethod("getEvdoSnr").invoke(signalStrength);
if ((tmp == -1) || (tmp == 255))
tmpInfo = "";
else
tmpInfo = "" + tmp;
setInfo(evdoSnrView, "SNR ", tmpInfo);
}
protected void setInfo(TextView view, String name, String info){
if((name==null)||(name==""))
view.setText(info);
else {
if ((info == null) || (info == ""))
info = " ";
SpannableStringBuilder infos = new SpannableStringBuilder(String.format("%s%s", name, info));
infos.setSpan(new ForegroundColorSpan(Color.parseColor("#F8DC10")), 0, name.length(), Spannable.SPAN_EXCLUSIVE_EXCLUSIVE);
view.setText(infos);
}
}
public void showLteNetView(){
ltenetInfo.setVisibility(View.VISIBLE);
}
public void unshowLteNetView(){
ltenetInfo.setVisibility(View.GONE);
}
public void showLteStationView(){
ltestationInfo.setVisibility(View.VISIBLE);
}
public void unshowLteStationView(){
ltestationInfo.setVisibility(View.GONE);
}
public void showCdmaNetView(){
cdmanetInfo.setVisibility(View.VISIBLE);
}
public void unshowCdmaNetView(){
cdmanetInfo.setVisibility(View.GONE);
}
}
<file_sep>package com.jntele.troy.jntelelte;
import android.content.ContentValues;
import android.content.Context;
import android.database.Cursor;
import android.database.sqlite.SQLiteDatabase;
import android.database.sqlite.SQLiteException;
import android.os.Build;
import android.util.Log;
//import android.util.Log;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;
/**
* Database helper functions
* Created by lenovo on 2018/5/19.
*/
class DataBaseUtil {
private Context context;
private String DB_NAME = "jntele.db";// 数据库的名字
private String DATABASE_PATH;// path of the database on the device
private SQLiteDatabase db;
public DataBaseUtil(Context context) {
this.context = context;
if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.N) {
Log.d("TroyInfoDB", context.getDataDir().getPath());
DATABASE_PATH = context.getDataDir().getPath();
}else{
DATABASE_PATH = "/data/data/" + context.getPackageName() + "/databases/";
}
}
/**
* Check whether the database already exists
*
* @return false or true
*/
public boolean checkDataBase() {
SQLiteDatabase db;
try {
String databaseFilename = DATABASE_PATH + DB_NAME;
db = SQLiteDatabase.openDatabase(databaseFilename, null, SQLiteDatabase.OPEN_READONLY);
} catch (SQLiteException e) {
return false;
}
if (db != null) {
db.close();
return true;
}else{
return false;
}
}
/**
* Copy the bundled database into the app's database folder on the device
*
* @throws IOException
*/
public void copyDataBase() throws IOException {
String databaseFilenames = DATABASE_PATH + DB_NAME;
File dir = new File(DATABASE_PATH);
if (!dir.exists())// create the folder if it does not exist
dir.mkdir();
FileOutputStream os = new FileOutputStream(databaseFilenames);// output stream for the database file
InputStream is = context.getResources().openRawResource(R.raw.jntele);
byte[] buffer = new byte[8192];
int count;
while ((count = is.read(buffer)) > 0) {
os.write(buffer, 0, count);
os.flush();
}
is.close();
os.close();
}
public CellData getCellInfo(String ci)
{
String TB_NAME = "jntele";//表格的名字
ContentValues value = new ContentValues();
CellData cd = new CellData();
openDatabase();
Cursor cursor = db.query(TB_NAME, null, "cell_id=?", new String[] { ci }, null, null, null);
while (cursor.moveToNext()) {
// cd.setCellId(cursor.getString(cursor.getColumnIndex("cell_id")));
cd.setCellName(cursor.getString(cursor.getColumnIndex("cell_name")));
cd.setBBUName(cursor.getString(cursor.getColumnIndex("bbu_name")));
switch (cursor.getString(cursor.getColumnIndex("producer")))
{
case "N":
cd.setProducer("诺基亚");
break;
case "H":
cd.setProducer("华为");
break;
default:
cd.setProducer("未知");
}
;
cd.setRRUType (cursor.getString(cursor.getColumnIndex("rru_type")));
cd.setSystemType((cursor.getString(cursor.getColumnIndex("system_type"))).indexOf('I')!=-1?"室分":"室外");
cd.setStationName(cursor.getString(cursor.getColumnIndex("station_name")));
// cd.setCounty(cursor.getString(cursor.getColumnIndex("county")));
// cd.setSource(cursor.getString(cursor.getColumnIndex("source")));
}
cursor.close();
// closeDatabase();
return cd;
}
private void openDatabase() {
if (db == null) {
db = SQLiteDatabase.openOrCreateDatabase(DATABASE_PATH + "/" + DB_NAME, null);
}
}
private void closeDatabase() {
if (db != null) {
db.close();
}
}
}
|
151d5e2d6952fddeda68611c95b42dbcf6dc924a
|
[
"Java"
] | 4
|
Java
|
troyishere/jntelelte
|
8be61b74e91830386365a4e1e955643da912a226
|
ac075d21b7fd18e29bf00a8a306d6b0ca4dbfcfb
|
refs/heads/main
|
<file_sep># sample_site_vite
Sample Site with Vite
<file_sep>using System;
using System.IO;
using System.Threading.Tasks;
using Microsoft.AspNetCore.Mvc;
using Microsoft.Azure.WebJobs;
using Microsoft.Azure.WebJobs.Extensions.Http;
using Microsoft.AspNetCore.Http;
using Microsoft.Extensions.Logging;
using Newtonsoft.Json;
namespace Griffin
{
public static class GetCurrentDateTime
{
[FunctionName("GetCurrentDateTime")]
public static async Task<IActionResult> Run(
[HttpTrigger(AuthorizationLevel.Anonymous, "get", "post", Route = null)] HttpRequest req,
ILogger log)
{
string currentTime = DateTime.Now.ToString();
return new OkObjectResult(currentTime);
}
}
}
|
c397cd48e8ea246187e1e98a7367026c7c3d66bb
|
[
"Markdown",
"C#"
] | 2
|
Markdown
|
1kevgriff/sample_site_vite
|
80d43dcd637472c5fbee5c6f20bbf8e8623cc882
|
510e2e95390f247564bc72b8dc2d03207c9d4d7e
|
refs/heads/master
|
<file_sep>from django.urls import path
from . import views
urlpatterns = [
path('logout/', views.try_logout, name='logout'),
path('login/', views.try_login, name='login'),
path('check/', views.try_check, name='check'),
]
<file_sep>from django.urls import path
from . import views
urlpatterns = [
path('__register/', views.try_register, name='register'),
path('__upload/', views.try_upload, name='upload'),
path('__save/', views.try_save, name='save'),
]
<file_sep>from packs.hashing import GenerateDataHash
from datetime import date
from django.conf import settings
from django.utils.text import slugify
from django.db.utils import IntegrityError
from django.db import models
from django.contrib.auth import get_user_model
UserModel = get_user_model()
class Article(models.Model):
title = models.CharField(max_length=150)
slug = models.SlugField(unique=True, db_index=True, blank=True)
author = models.CharField(max_length=64, null=True, blank=True)
owner = models.ForeignKey(UserModel, null=True, blank=True, on_delete=models.CASCADE)
owner_hash = models.CharField(max_length=32, null=True, blank=True)
text = models.TextField()
date = models.DateField(default=date.today)
def __str__(self) -> str:
return self.slug
def save(self, *args, **kwargs):
if not self.slug:
self.slug = slugify(''.join(eval(settings.ALPHABET).get(w, w) for w in self.title.lower())) + \
self.date.strftime('-%m-%d')
try:
super(type(self), self).save(*args, **kwargs)
except IntegrityError:
exists_slug = []
articles = type(self).objects.all()
[exists_slug.append(article.slug) if self.slug in article.slug else None for article in articles]
if len(exists_slug) != 1:
number = [int(exist_slug.split('-')[-1]) for exist_slug in exists_slug][-1] + 1
self.slug += f'-{number}'
else:
self.slug += '-2'
super(type(self), self).save(*args, **kwargs)
class Storage(models.Model):
hash = models.CharField(max_length=255, unique=True, db_index=True, null=True, blank=True)
use_hash = models.BooleanField(default=True)
file = models.FileField(unique=True, db_index=True)
date = models.DateField(default=date.today)
def __str__(self) -> str: return str(self.file)
def save(self, *args, **kwargs):
if self.use_hash:
self.hash = GenerateDataHash(kwargs.get('bytes'), type(self))
if type(self.hash) is bytes:
return self.hash.decode()
self.file.name = f"{self.hash[:16]}.{kwargs.get('type')}"
super(type(self), self).save()
class Meta:
verbose_name = "Storage object"
verbose_name_plural = "Storage"
<file_sep>from django.conf import settings
from django.contrib import admin
from django.urls import include, path
urlpatterns = [
path('auth/', include('ext_auth.urls')),
path('admin/', admin.site.urls),
path('api/v1/', include('api.urls')),
path('', include('core.urls')),
]
admin.site.site_header = "Flatype Admin Panel"
admin.site.site_title = "Flatype Admin"
admin.site.index_title = "Welcome to Flatype Admin Panel"
handler400 = "core.exceptions.bad_request"
handler403 = "core.exceptions.permission_denied"
handler404 = "core.exceptions.page_not_found"
handler500 = "core.exceptions.server_error"
if settings.DEBUG:
from django.conf.urls.static import static
urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
<file_sep># Generated by Django 3.2.6 on 2021-08-23 22:07
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0021_storage_use_hash'),
]
operations = [
migrations.AlterField(
model_name='storage',
name='hash',
field=models.CharField(blank=True, db_index=True, max_length=255, null=True, unique=True),
),
]
<file_sep>from django.db import models
from django.contrib.auth import get_user_model
UserModel = get_user_model()
class ExternalHashId(models.Model):
user = models.ForeignKey(UserModel, on_delete=models.CASCADE)
session = models.CharField(max_length=32, unique=True, blank=True, null=True, db_index=True)
class Meta:
verbose_name = "External hash id"
verbose_name_plural = "External hash id's"
<file_sep># Generated by Django 3.2.6 on 2021-08-23 21:59
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0020_alter_article_slug'),
]
operations = [
migrations.AddField(
model_name='storage',
name='use_hash',
field=models.BooleanField(default=True),
),
]
<file_sep># Generated by Django 3.2.4 on 2021-08-15 14:25
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0016_storage_date'),
]
operations = [
migrations.AlterField(
model_name='storage',
name='file',
field=models.FileField(db_index=True, unique=True, upload_to=''),
),
]
<file_sep>from .models import ExternalHashId
from django.contrib import admin
from django.contrib.sessions.models import Session
from django.contrib.auth import get_user_model, admin as auth_admin
UserModel = get_user_model()
@admin.register(Session)
class SessionAdmin(admin.ModelAdmin):
def get_session_data(self, obj): return obj.get_decoded()
get_session_data.short_description = 'session data'
list_display = ('session_key', 'get_session_data', 'expire_date',)
class ExternalHashIdInline(admin.TabularInline):
model = ExternalHashId
extra = 0
class UserAdmin(auth_admin.UserAdmin):
inlines = [
ExternalHashIdInline,
]
admin.site.unregister(UserModel)
admin.site.register(UserModel, UserAdmin)
admin.site.register(ExternalHashId)
<file_sep># Generated by Django 3.2.2 on 2021-05-09 17:51
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('core', '0002_remove_article_slug'),
]
operations = [
migrations.RenameField(
model_name='article',
old_name='data',
new_name='date',
),
]
<file_sep>from .models import Article
from django.shortcuts import get_object_or_404
from django.utils.html import format_html
from django.views.generic import TemplateView
from django.shortcuts import render
class Create(TemplateView):
template_name = 'create.html'
class View(TemplateView):
def get(self, request, *args, **kwargs):
article = get_object_or_404(Article, **kwargs)
return render(
request,
'view.html',
{
'title': article.title,
'author': article.author if article.author is not None else '',
'date': article.date.strftime('%B %d, %Y'),
'content': format_html(article.text),
}
)
<file_sep>from django.contrib import admin
from .models import Article, Storage
@admin.register(Article)
class ArticleAdmin(admin.ModelAdmin):
fields = ('title', 'slug', 'author', 'owner', 'owner_hash', 'text', 'date',)
list_display = ('title', 'slug', 'author', 'date',)
search_fields = ('title', 'text',)
date_hierarchy = 'date'
def get_readonly_fields(self, request, obj=None) -> tuple:
if obj:
return self.readonly_fields + ('slug', 'owner', 'date',)
return self.readonly_fields
@admin.register(Storage)
class StorageAdmin(admin.ModelAdmin):
fields = ('hash', 'file', 'date', 'use_hash',)
list_display = ('file', 'hash', 'date',)
list_display_links = ('date',)
search_fields = ('file', 'hash', 'date',)
date_hierarchy = 'date'
def get_readonly_fields(self, request, obj=None) -> tuple:
if obj:
return self.readonly_fields + ('hash', 'file', 'date', 'use_hash',)
return self.readonly_fields
<file_sep>!(function (t, e, r) {
var n = function (t) {
var n = { text: "", start: 0, end: 0 };
if (!t.value) return n;
try {
if (e.getSelection)
(n.start = t.selectionStart),
(n.end = t.selectionEnd),
(n.text = t.value.slice(n.start, n.end));
else if (r.selection) {
t.focus();
var s = r.selection.createRange(),
a = r.body.createTextRange();
n.text = s.text;
try {
a.moveToElementText(t), a.setEndPoint("StartToStart", s);
} catch (c) {
(a = t.createTextRange()), a.setEndPoint("StartToStart", s);
}
(n.start = t.value.length - a.text.length),
(n.end = n.start + s.text.length);
}
} catch (c) {}
return n;
},
s = {
getPos: function (t) {
var e = n(t);
return { start: e.start, end: e.end };
},
setPos: function (t, r, n) {
(n = this._caretMode(n)),
"start" === n ? (r.end = r.start) : "end" === n && (r.start = r.end),
t.focus();
try {
if (t.createTextRange) {
var s = t.createTextRange();
e.navigator.userAgent.toLowerCase().indexOf("msie") >= 0 &&
((r.start = t.value.substr(0, r.start).replace(/\r/g, "").length),
(r.end = t.value.substr(0, r.end).replace(/\r/g, "").length)),
s.collapse(!0),
s.moveStart("character", r.start),
s.moveEnd("character", r.end - r.start),
s.select();
} else t.setSelectionRange && t.setSelectionRange(r.start, r.end);
} catch (a) {}
},
getText: function (t) {
return n(t).text;
},
_caretMode: function (t) {
switch (((t = t || "keep"), t === !1 && (t = "end"), t)) {
case "keep":
case "start":
case "end":
break;
default:
t = "keep";
}
return t;
},
replace: function (e, r, s) {
var a = n(e),
c = e.value,
o = t(e).scrollTop(),
i = { start: a.start, end: a.start + r.length };
(e.value = c.substr(0, a.start) + r + c.substr(a.end)),
t(e).scrollTop(o),
this.setPos(e, i, s);
},
insertBefore: function (e, r, s) {
var a = n(e),
c = e.value,
o = t(e).scrollTop(),
i = { start: a.start + r.length, end: a.end + r.length };
(e.value = c.substr(0, a.start) + r + c.substr(a.start)),
t(e).scrollTop(o),
this.setPos(e, i, s);
},
insertAfter: function (e, r, s) {
var a = n(e),
c = e.value,
o = t(e).scrollTop(),
i = { start: a.start, end: a.end };
(e.value = c.substr(0, a.end) + r + c.substr(a.end)),
t(e).scrollTop(o),
this.setPos(e, i, s);
},
};
t.extend({
selection: function (n) {
var s = "text" === (n || "text").toLowerCase();
try {
if (e.getSelection) {
if (s) return e.getSelection().toString();
var a,
c = e.getSelection();
return (
c.getRangeAt
? (a = c.getRangeAt(0))
: ((a = r.createRange()),
a.setStart(c.anchorNode, c.anchorOffset),
a.setEnd(c.focusNode, c.focusOffset)),
t("<div></div>").append(a.cloneContents()).html()
);
}
if (r.selection)
return s
? r.selection.createRange().text
: r.selection.createRange().htmlText;
} catch (o) {}
return "";
},
}),
t.fn.extend({
selection: function (t, e) {
switch (((e = e || {}), t)) {
case "getPos":
return s.getPos(this[0]);
case "setPos":
return this.each(function () {
s.setPos(this, e);
});
case "replace":
return this.each(function () {
s.replace(this, e.text, e.caret);
});
case "insert":
return this.each(function () {
"before" === e.mode
? s.insertBefore(this, e.text, e.caret)
: s.insertAfter(this, e.text, e.caret);
});
case "get":
default:
return s.getText(this[0]);
}
return this;
},
});
})(jQuery, window, window.document);
<file_sep>[Flatype](https://fla.codes/)
=============================
### A blog platform; the idea is taken from:
<a href="https://telegra.ph/">
<img src="https://telegra.ph/images/logo.png" width="165">
</a>
<a href="https://teletype.in/">
<img src="https://teletype.in/static/images/apple-touch-icon.13a32bb54bdfb592c7f574ad53849ba2.png" width="165">
</a>
<file_sep># Generated by Django 3.2.4 on 2021-08-05 19:54
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0007_alter_article_slug'),
]
operations = [
migrations.AlterField(
model_name='article',
name='author',
field=models.CharField(blank=True, max_length=64, null=True, verbose_name='Your name'),
),
]
<file_sep>BOOLEAN_TRUE_STRINGS = ('true', 'on', 'ok', 'y', 'yes', '1')
def bool(string: str) -> bool: return string.lower() in BOOLEAN_TRUE_STRINGS
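# Usage sketch (hedged): intended for parsing boolean-ish environment variables,
# e.g. bool('Yes') -> True, bool('0') -> False. Note that within this module the
# name deliberately shadows the builtin bool().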
<file_sep>import os
import hashlib
def GenerateRandomHash(model=None) -> str:
    """Return a random MD5 hex digest; when a model is given, regenerate until
    the digest is not already in use as an owner_hash."""
    hash = hashlib.md5(os.urandom(64)).hexdigest()
    if model is not None:
        while model.objects.filter(owner_hash=hash).exists():
            hash = hashlib.md5(os.urandom(64)).hexdigest()
    return hash
def GenerateDataHash(data=None, model=None) -> str:
    """Return the SHA-256 hex digest of data; when a model is given and a record
    with that hash already exists, return that record's value instead."""
    hash = hashlib.sha256(data).hexdigest()
    if model is not None:
        hash_exist = model.objects.filter(hash=hash)
        hash = str(hash_exist.first()).encode() if hash_exist else hash
    return hash
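# Usage sketch (hedged): these helpers back anonymous ownership and upload
# de-duplication elsewhere in the project, roughly along these lines:
#
#   from core.models import Article, Storage
#   owner_hash = GenerateRandomHash(Article)                          # per-browser owner token
#   data_hash = GenerateDataHash(data=uploaded_bytes, model=Storage)  # content hash of an upload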
<file_sep>asgiref
DateTime
Django
gunicorn
python-dotenv
django-jazzmin
pytz
sqlparse
zope.interface
requests
psycopg2-binary<file_sep>from django.contrib.auth.forms import UserCreationForm
from core.models import Article, Storage
from core.forms import ArticleForm, StorageForm
from ext_auth.models import ExternalHashId
from packs.hashing import GenerateRandomHash
from django.views.decorators.http import require_http_methods
from django.contrib.auth import login
from django.views.decorators.csrf import csrf_exempt
from django.http import JsonResponse
@csrf_exempt
@require_http_methods(["POST"])
def try_register(request) -> JsonResponse:
    """Create a user account and attach any anonymous articles saved under this browser session."""
form = UserCreationForm(request.POST)
if not form.is_valid():
return JsonResponse(
{
'error': True,
'data': 'Data is not valid'
},
)
user = form.save(commit=False)
user.first_name = form.data.get('first_name', '')
user.last_name = form.data.get('last_name', '')
user.email = form.data.get('email', '')
user.save()
if owner_hash := request.session.get('externalid',):
ExternalHashId.objects.create(user=user, session=owner_hash)
articles = Article.objects.filter(owner_hash=owner_hash)
for article in articles:
article.owner = user
article.save()
login(request, user)
return JsonResponse({
'data': 'ok'
})
@require_http_methods(["POST"])
def try_save(request):
    """Create a new article or update an existing one; edit rights are checked against
    the logged-in user or the anonymous session hash."""
form = ArticleForm(request.POST)
if not form.is_valid():
return JsonResponse(
{
'error': True,
'data': 'Data is not valid'
},
)
slug = form.data.get('save_hash',)
if slug != '':
owner_hash = request.session.get('externalid',)
try:
article = Article.objects.get(slug=slug)
except Article.DoesNotExist:
return JsonResponse(
{
'error': True,
'data': 'Article not found'
},
)
if not (request.user == article.owner or owner_hash == article.owner_hash):
return JsonResponse(
{
'error': True,
'data': 'Forbidden'
},
)
article.title = form.cleaned_data.get('title',)
article.author = form.cleaned_data.get('author',)
article.text = form.cleaned_data.get('text',)
if request.user.is_authenticated and owner_hash and article.owner is None:
article.owner = request.user
article.save()
else:
article = form.save(commit=False)
if request.user.is_authenticated:
article.owner = request.user
else:
if owner_hash := request.session.get('externalid',):
article.owner_hash = owner_hash
else:
article.owner_hash = GenerateRandomHash(Article)
request.session['externalid'] = article.owner_hash
article.save()
return JsonResponse({
'path': article.slug
})
@require_http_methods(["POST"])
def try_upload(request) -> JsonResponse:
    """Store an uploaded media file and return its /media/ URL; an already-stored
    object is reused when Storage.save() returns one."""
form = StorageForm(request.POST, request.FILES)
if not form.is_valid():
return JsonResponse(
{
'error': True,
'data': 'Data is not valid'
},
)
file = request.FILES.get('file',)
instance = Storage(file=file)
object = instance.save(type=file.content_type.split('/')[-1], bytes=file.read())
return JsonResponse(
[
{
'src': f'/media/{instance if object is None else object}'
}
],
safe=False
)
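# Routing sketch (assumption -- the project's actual API urls module is not shown
# here): the editor front-end posts to "api/v1/__save/" and "api/v1/__upload/",
# and a "__register/" path is assumed for the signup form, so the wiring would
# look roughly like:
#
#   from django.urls import path
#   from . import views
#
#   urlpatterns = [
#       path('__register/', views.try_register),
#       path('__save/', views.try_save),
#       path('__upload/', views.try_upload),
#   ]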
<file_sep>import os
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': os.environ.get('POSTGRES_DB'),
'USER': os.environ.get('POSTGRES_USER'),
'PASSWORD': os.environ.get('POSTGRES_PASSWORD'),
'HOST': os.environ.get('HOST'),
'PORT': 5432,
}
}
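# Example environment (hypothetical values; the real ones are provided by the
# deployment's .env file loaded with python-dotenv):
#
#   POSTGRES_DB=flatype
#   POSTGRES_USER=flatype
#   POSTGRES_PASSWORD=change-me
#   HOST=db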
<file_sep># Generated by Django 3.2.2 on 2021-05-10 14:44
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0004_article_slug'),
]
operations = [
migrations.AlterField(
model_name='article',
name='date',
field=models.DateField(default=datetime.date.today),
),
migrations.AlterField(
model_name='article',
name='slug',
field=models.SlugField(blank=True, null=True),
),
]
<file_sep>from django.urls import path
from . import views
urlpatterns = [
path('', views.Create.as_view(), name='create'),
path('<slug:slug>', views.View.as_view(), name='view'),
]
<file_sep># Generated by Django 3.2.6 on 2021-08-15 21:30
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0018_storage_hash'),
]
operations = [
migrations.AlterField(
model_name='storage',
name='hash',
field=models.CharField(blank=True, db_index=True, max_length=255, unique=True),
),
]
<file_sep>from django.views.decorators.csrf import requires_csrf_token
from django.shortcuts import render
@requires_csrf_token
def bad_request(request, exception):
return render(request, 'exceptions/400.html', status=400)
@requires_csrf_token
def permission_denied(request, exception):
return render(request, 'exceptions/403.html', status=403)
@requires_csrf_token
def page_not_found(request, exception):
return render(request, 'exceptions/404.html', status=404)
@requires_csrf_token
def server_error(request):
return render(request, 'exceptions/500.html', status=500)
<file_sep>{% load i18n static %}<!DOCTYPE html>
<html lang="en">
<head>
<title>{% block title %}{% endblock %}Flatype</title>
<meta name="viewport" content="width=device-width">
<link rel="shortcut icon" href="{% static '/img/logo.png' %}" type="image/png" />
<link rel="stylesheet" href="{% static 'css/core.min.css' %}" />
</head>
<body>
{% block content %}{% endblock %}
</body>
</html>
<file_sep>function _classCallCheck(t, e) {
if (!(t instanceof e))
throw new TypeError("Cannot call a class as a function");
}
function _possibleConstructorReturn(t, e) {
if (!t)
throw new ReferenceError(
"this hasn't been initialised - super() hasn't been called"
);
return !e || ("object" != typeof e && "function" != typeof e) ? t : e;
}
function _inherits(t, e) {
if ("function" != typeof e && null !== e)
throw new TypeError(
"Super expression must either be null or a function, not " + typeof e
);
(t.prototype = Object.create(e && e.prototype, {
constructor: { value: t, enumerable: !1, writable: !0, configurable: !0 },
})),
e &&
(Object.setPrototypeOf ? Object.setPrototypeOf(t, e) : (t.__proto__ = e));
}
function _sanitize(t, e) {
var o = document.createElement("a");
o.href = t;
var l = o.href.slice(0, o.href.indexOf(":"));
return e.indexOf(l) > -1;
}
function relativeUrl(t) {
var e = location,
o = document.createElement("a");
return (
(o.href = t),
e.origin != o.origin
? o.href
: e.pathname != o.pathname || e.search != o.search
? o.pathname + o.search + o.hash
: e.href == o.href
? o.hash || o.pathname + o.search + o.hash
: o.hash
);
}
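// Maps a pasted URL or data: URI to a figure value: {embed} for YouTube/Vimeo/
// Twitter/Telegram links (via the telegra.ph embed proxy), {image} or {video}
// for direct media, or false when nothing matches.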
function getFigureValueByUrl(t) {
var e = void 0;
if (
(e = t.match(
/^(https?):\/\/(www\.)?youtube\.com\/watch.*v=([a-zA-Z0-9_-]+)/i
)) ||
(e = t.match(/^(https?):\/\/(www\.)?youtu\.be\/([a-zA-Z0-9_-]+)/i))
)
return { embed: "https://telegra.ph/embed/youtube?url=" + encodeURIComponent(t) };
if ((e = t.match(/^(https?):\/\/(www\.)?vimeo\.com\/(\d+)/i)))
return { embed: "https://telegra.ph/embed/vimeo?url=" + encodeURIComponent(t) };
if (
(e = t.match(
/^(https?):\/\/(www\.|mobile\.)?twitter\.com\/(.+)\/status\/(\d+)/i
))
)
return { embed: "https://telegra.ph/embed/twitter?url=" + encodeURIComponent(t) };
if (
(e = t.match(
/^(https?):\/\/(t\.me|telegram\.me|telegram\.dog)\/([a-zA-Z0-9_]+)\/(\d+)/i
))
)
return { embed: "https://telegra.ph/embed/telegram?url=" + encodeURIComponent(t) };
if (
(e = t.match(
/^data:(image\/gif|image\/jpe?g|image\/png|video\/mp4);base64,(.*)$/
))
)
return "video/" == e[1].substr(0, 6) ? { video: t } : { image: t };
  if ((e = t.match(/^(https?):\/\/\S+/i))) {
    var o = document.createElement("a");
    o.href = t;
    // Decide image vs. video by the file extension in the pathname; e[1] only holds the protocol.
    var l = o.pathname.match(/\.(jpe?g|png|gif|mp4)$/i);
    if (l) return "mp4" == l[1].toLowerCase() ? { video: t } : { image: t };
  }
return !1;
}
function _resizeIframe(t, e, o) {
$("iframe").map(function () {
var l = null;
try {
l = this.contentWindow;
} catch (i) {}
if (l && l == t) {
var r = o / e;
this.setAttribute("width", "640"),
this.setAttribute("height", Math.round(640 * r) + ""),
this.parentNode &&
this.parentNode.classList.contains("iframe_helper") &&
(this.parentNode.style.paddingTop = 100 * r + "%"),
window.quill && quill.updateSelection(Quill.sources.USER);
}
});
}
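// Builds the Quill editor instance: restores a saved draft, registers custom keyboard
// bindings (line breaks, embeds, lists, code blocks) and wires selection/change handlers.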
function initQuill() {
function t(t, e) {
return [
t,
function (t, o) {
return o.compose(new Delta().retain(o.length(), e));
},
];
}
function e(t) {
var e = a.scroll.line(t),
o = _slicedToArray(e, 2),
l = o[0],
i = o[1];
return a.getText(t, l.length() - i);
}
function o(t) {
var o = e(t);
return !o || "\n" == o;
}
function l(t, e, l) {
var i = void 0,
r = e.index;
e.length > 0 && a.scroll.deleteAt(r, e.length);
var n = o(r),
s = !1,
u = a.scroll.descendant(BreakBlot, r),
c = _slicedToArray(u, 1);
if ((i = c[0]))
(!i.prev || i.prev instanceof BreakBlot) &&
(a.scroll.deleteAt(--r, 1), (s = !0));
else {
var d = a.scroll.descendant(BreakBlot, r - 1),
p = _slicedToArray(d, 1);
(i = p[0]), i && (a.scroll.deleteAt(--r, 1), (s = !0));
}
var h = a.scroll.descendant(SingleLineBlot, r),
f = _slicedToArray(h, 1);
if (((i = f[0]), i || s || !t))
a.insertText(r, "\n", Quill.sources.USER),
a.setSelection(++r, Quill.sources.USER),
(l.format.blockHeader ||
l.format.blockSubheader ||
l.format.blockBlockquote ||
l.format.blockPullquote) &&
n &&
a.formatLine(
r,
1,
{
blockHeader: !1,
blockSubheader: !1,
blockBlockquote: !1,
blockPullquote: !1,
},
Quill.sources.USER
);
else {
a.insertEmbed(r, "textBreak", !0, Quill.sources.USER);
var m = a.scroll.descendant(BreakBlot, r),
b = _slicedToArray(m, 1);
(i = b[0]),
!i ||
i.next ||
(i.prev && i.prev instanceof BreakBlot) ||
(a.insertEmbed(++r, "textBreak", !0, Quill.sources.SILENT),
a.setSelection(r, 0, Quill.sources.SILENT));
}
return a.selection.scrollIntoView(), !1;
}
function i(t) {
var e = a.scroll.line(t.index),
o = _slicedToArray(e, 2),
l = o[0],
i = o[1];
if (l) {
var r = l.domNode.innerText,
n = r.substr(0, i),
s = void 0;
if ((s = n.match(/(^|\s)((?:https?|tg):\/\/\S+|www\.\S+)$/))) {
var u = s[2],
c = u.length;
"www." == u.substr(0, 4) && (u = "http://" + u);
var d = a.scroll.descendants(LinkBlot, t.index - c, c);
d.length || a.formatText(t.index - c, c, "link", u, Quill.sources.USER);
}
}
return !0;
}
var r = draftGet();
r && $("#_tl_editor").html(r);
var a = new MyQuill("#_tl_editor", {
readOnly: !0,
fileSizeLimit: 5242880,
fileSizeLimitCallback: function () {
showError("File too big (up to 5 MB allowed)");
},
updatePhoto: updatePhoto,
formats: [
"bold",
"italic",
"underline",
"strike",
"code",
"link",
"textBreak",
"blockTitle",
"blockAuthor",
"blockHeader",
"blockSubheader",
"blockBlockquote",
"blockPullquote",
"blockDivider",
"blockFigure",
"code-block",
"list",
],
modules: {
clipboard: {
matchers: [
t("h2", { blockHeader: !0 }),
t("h5", { blockSubheader: !0 }),
t("h6", { blockSubheader: !0 }),
[
"img",
function (t, e) {
return t.src && _sanitize(t.src, ["http", "https", "data"])
? new Delta().insert({
blockFigure: { image: t.src, caption: t.alt || "" },
})
: new Delta();
},
],
[
"video",
function (t, e) {
return t.src && _sanitize(t.src, ["http", "https", "data"])
? new Delta().insert({ blockFigure: { video: t.src } })
: new Delta();
},
],
[
"br",
function (t, e) {
return t.classList.contains("inline")
? new Delta().insert({ textBreak: !0 })
: e;
},
],
],
},
keyboard: {
bindings: {
indent: {
handler: function () {
return !0;
},
},
outdent: {
handler: function () {
return !0;
},
},
tab: {
key: Keyboard.keys.TAB,
handler: function () {
return !0;
},
},
"required enter": {
key: Keyboard.keys.ENTER,
collapsed: !0,
shiftKey: null,
format: ["blockTitle", "blockAuthor"],
suffix: /^$/,
handler: function (t, e) {
var o = this.quill.scroll.descendant(FieldBlot, t.index),
l = _slicedToArray(o, 1),
i = l[0];
return i && i.next && !$(i.next.domNode).text()
? (this.quill.setSelection(
i.next.offset(this.quill.scroll),
0,
Quill.sources.USER
),
!1)
: (this.quill.insertText(t.index, "\n", Quill.sources.USER),
!1);
},
},
"required tab prev": {
key: Keyboard.keys.TAB,
shiftKey: !0,
handler: function (t, e) {
var o = null;
if (t.length > 0) {
var l = a.scroll.descendants(Block, t.index, t.length);
if (1 != l.length) return !0;
o = l[0];
} else {
var i = a.scroll.descendant(Block, t.index),
r = _slicedToArray(i, 1);
o = r[0];
}
if (null != o && null != o.prev && o.prev instanceof FieldBlot) {
var n = o.prev.offset(a.scroll),
s = o.prev.length();
return a.setSelection(n, s > 1 ? s : 0, Quill.sources.USER), !1;
}
return !0;
},
},
"required tab next": {
key: Keyboard.keys.TAB,
shiftKey: !1,
handler: function (t, e) {
var o = null;
if (t.length > 0) {
var l = a.scroll.descendants(Block, t.index, t.length);
if (1 != l.length) return !0;
o = l[0];
} else {
var i = a.scroll.descendant(Block, t.index),
r = _slicedToArray(i, 1);
o = r[0];
}
if (null != o && o instanceof FieldBlot && null != o.next) {
var n = o.next.offset(a.scroll);
if (o.next instanceof FieldBlot) {
var s = o.next.length();
a.setSelection(n, s > 1 ? s : 0, Quill.sources.USER);
} else a.setSelection(n, 0, Quill.sources.USER);
return !1;
}
return !0;
},
},
"no tab": {
key: Keyboard.keys.TAB,
shiftKey: null,
handler: function (t, e) {
return !1;
},
},
"detect embed": {
key: Keyboard.keys.ENTER,
collapsed: !0,
handler: function (t, e) {
var o = a.scroll.line(t.index),
l = _slicedToArray(o, 2),
i = l[0],
r = l[1];
if (i) {
var n = i.domNode.innerText,
s = n.substr(0, r),
u = void 0;
if ((u = s.match(/(^|\s)(https?:\/\/\S+)$/))) {
var c = u[2],
d = a.scroll.descendants(
LinkBlot,
t.index - c.length,
c.length
);
if (
(d.length ||
a.formatText(
t.index - c.length,
c.length,
"link",
c,
Quill.sources.USER
),
!s.substr(0, r - c.length).trim().length &&
"P" == i.domNode.tagName)
) {
var p = getFigureValueByUrl(c);
if (p) {
var h = i.offset(a.scroll);
return (
a.updateContents(
new Delta()
.retain(h)
["delete"](s.length)
.insert({ blockFigure: p }),
Quill.sources.USER
),
hideBlocksTooltip(),
!1
);
}
}
}
}
return !0;
},
},
"divider autofill": {
key: Keyboard.keys.ENTER,
collapsed: !0,
prefix: /^([-*])\1{2,}$/,
handler: function (t, e) {
var o = a.scroll.line(t.index),
l = _slicedToArray(o, 2),
i = l[0];
l[1];
if (i && "P" == i.domNode.tagName) {
var r = i.offset(a.scroll),
n = new Delta()
.retain(r)
["delete"](i.length())
.insert({ blockDivider: !0 });
return (
i.next || n.insert("\n"),
a.updateContents(n, Quill.sources.USER),
!1
);
}
return !0;
},
},
break: {
key: Keyboard.keys.ENTER,
shiftKey: !0,
handler: l.bind(null, !0),
},
enter: { key: Keyboard.keys.ENTER, handler: l.bind(null, !1) },
"detect link": { key: " ", collapsed: !0, handler: i },
"cancel placeholder": {
key: Keyboard.keys.ESCAPE,
handler: function (t, e) {
return (
checkOncePlaceholder(),
this.quill.updateSelection(Quill.sources.USER),
!0
);
},
},
"list autofill": {
key: " ",
collapsed: !0,
format: { list: !1 },
prefix: /^(1\.|-|\*)$/,
handler: function (t, e) {
var o = e.prefix.length;
this.quill.scroll.deleteAt(t.index - o, o),
this.quill.formatLine(
t.index - o,
1,
"list",
1 === o ? "bullet" : "ordered",
Quill.sources.USER
),
this.quill.setSelection(t.index - o, Quill.sources.SILENT);
},
},
"pre wrap": {
key: 192,
collapsed: !0,
format: { "code-block": !1 },
prefix: /^``$/,
offset: 2,
handler: function (t, e) {
var o = e.prefix.length,
l = t.index - o;
this.quill.scroll.deleteAt(l, o),
this.quill.formatLine(
l,
1,
"code-block",
!0,
Quill.sources.USER
),
this.quill.setSelection(l, Quill.sources.SILENT);
},
},
code: {
key: 192,
handler: function (t, e) {
if (!e.collapsed) {
var o = a.scroll.descendants(Block, t.index, t.length);
if (
o.length > 1 ||
(1 == o.length && o[0] instanceof CodeBlock)
)
return (
this.quill.format(
"code-block",
!e.format["code-block"],
Quill.sources.USER
),
!1
);
var l = a.scroll.descendants(BreakBlot, t.index, t.length);
if (l.length)
return (
this.quill.format(
"code-block",
!e.format["code-block"],
Quill.sources.USER
),
!1
);
}
return (
!(!e.collapsed || e.format.code || /\s$/.test(e.prefix)) ||
void this.quill.format(
"code",
!e.format.code,
Quill.sources.USER
)
);
},
},
"figure delete": {
key: Keyboard.keys.BACKSPACE,
collapsed: !0,
offset: 0,
handler: function (t, e) {
var o = a.scroll.line(t.index),
l = _slicedToArray(o, 2),
i = l[0];
l[1];
return (
!(i && i.prev && i.prev instanceof FigureBlot) ||
(e.empty && a.deleteText(t.index, 1, Quill.sources.USER),
a.setSelection(i.prev.offset(a.scroll)),
!1)
);
},
},
"field backspace": {
key: Keyboard.keys.BACKSPACE,
collapsed: !0,
offset: 0,
handler: function (t, e) {
var o = a.scroll.line(t.index),
l = _slicedToArray(o, 2),
i = l[0];
l[1];
return !(
i &&
i.prev &&
i.prev instanceof FieldBlot &&
$(i.domNode).text().length > 0
);
},
},
},
},
},
});
return (
a.addContainer($tl_link_tooltip.get(0)),
a.addContainer($tl_tooltip.get(0)),
a.addContainer($tl_blocks.get(0)),
a.on(Quill.events.EDITOR_CHANGE, function (t, e) {
if (t === Quill.events.SELECTION_CHANGE && a.isEnabled() && null != e) {
checkFigureBlots(e);
var o = a.scroll.descendant(Block, e.index),
l = _slicedToArray(o, 2),
i = l[0];
l[1];
0 === e.length
? (hideFormatTooltip(),
null == i ||
i instanceof FieldBlot ||
i instanceof BlockquoteBlot ||
i instanceof PullquoteBlot ||
i instanceof CodeBlock ||
i instanceof ListItem ||
$(i.domNode).text().length
? hideBlocksTooltip()
: showBlocksTooltip(e))
: (null == i || i instanceof TitleBlot
? hideFormatTooltip()
: (showFormatTooltip(e), toolbarUpdate(e)),
hideBlocksTooltip());
var r = a.getFormat(e);
$tl_article.toggleClass(
"title_focused",
!(!r.blockTitle && !r.blockAuthor)
),
checkOncePlaceholder();
}
}),
a.on(Quill.events.TEXT_CHANGE, function () {
a.getSelection();
checkRequiredBlots(a),
checkBlotPlaceholder(a),
checkOncePlaceholder(),
draftSave();
}),
a.on(Quill.events.TEXT_PASTE, function () {
var t = a.getSelection();
t && i(t);
}),
a.on(Quill.events.SCROLL_OPTIMIZE, function (t) {
t.forEach(function (t) {
if (
"childList" == t.type &&
!t.addedNodes.length &&
t.removedNodes.length
) {
var e = t.previousSibling,
o = t.nextSibling;
if (!o && e && "BR" == e.tagName && "inline" == e.className) {
var l = document.createElement("br");
(l.className = "inline"), t.target.appendChild(l);
} else
!o ||
!e ||
("BR" == e.tagName && "inline" == e.className) ||
"BR" != o.tagName ||
"inline" != o.className ||
o.nextSibling ||
(o.parentNode && o.parentNode.removeChild(o));
}
});
}),
a.scroll.domNode.setAttribute("dir", "auto"),
$(document).on("click touchstart", function (t) {
for (var e = t.target; e; ) {
if (e === a.container) return;
e = e.parentNode;
}
hideFormatTooltip(), hideBlocksTooltip();
}),
checkRequiredBlots(a),
checkBlotPlaceholder(a),
a
);
}
function checkOncePlaceholder() {
$(".placeholder_once")
.removeAttr("data-placeholder")
.removeClass("placeholder_once empty");
}
function checkBlotPlaceholder(t) {
var e = t.scroll.descendants(Block, 0, t.scroll.length());
e.forEach(function (t) {
if (t.domNode.hasAttribute("data-placeholder")) {
var e = $(t.domNode).text();
$(t.domNode).toggleClass("empty", !e);
}
});
}
function checkRequiredBlots(t) {
var e = t.scroll.lines(),
o = _slicedToArray(e, 2),
l = o[0],
i = o[1];
if (l instanceof BlockEmbed)
t.updateContents(
new Delta()
.insert("\n", { blockTitle: !0 })
.insert("\n", { blockAuthor: !0 }),
Quill.sources.SILENT
);
else if (
(l instanceof TitleBlot ||
t.formatLine(0, 1, { blockTitle: !0 }, Quill.sources.SILENT),
i)
) {
if (i instanceof BlockEmbed) {
var r = i.offset(t.scroll);
t.updateContents(
new Delta().retain(r).insert("\n", { blockAuthor: !0 }),
Quill.sources.SILENT
);
} else if (!(i instanceof AuthorBlot)) {
var a = i.offset(t.scroll);
t.formatLine(a, 1, { blockAuthor: !0 }, Quill.sources.SILENT);
}
} else {
var n = t.scroll.length();
t.updateContents(
new Delta().retain(n).insert("\n", { blockAuthor: !0 }),
Quill.sources.SILENT
);
}
var s = t.scroll.lines(),
u = _slicedToArray(s, 3),
c = u[2];
if (c) {
var d = c.offset(t.scroll),
p = t.scroll.length() - d,
h = t.scroll.descendants(FieldBlot, d, p);
h.forEach(function (e) {
var o = e.offset(t.scroll),
l = e.length(),
i = e.constructor.blotName;
t.formatText(o, l, i, !1, Quill.sources.SILENT);
});
} else {
var f = t.scroll.length();
t.insertText(f, "\n", Quill.sources.SILENT);
}
var m = t.scroll.lines();
m.forEach(function (t, e) {
"P" == t.domNode.tagName &&
(3 == m.length && 2 == e
? t.domNode.setAttribute("data-placeholder", "Your story...")
: t.domNode.removeAttribute("data-placeholder"));
});
}
function checkFigureBlots(t) {
var e = quill.scroll.descendant(FigureBlot, t.index),
o = _slicedToArray(e, 1),
l = o[0],
i = quill.scroll.descendants(FigureBlot, 0, quill.scroll.length());
i.forEach(function (t) {
l !== t && t.blur();
}),
l && (l.focus(), hideFormatTooltip(), hideBlocksTooltip());
}
function updatePhoto(t, e) {
return "image/jpg" == t.type || "image/jpeg" == t.type
? loadImage(
t,
function (o) {
if ("error" === o.type) e(t);
else if (o.toBlob)
o.toBlob(function (t) {
e(t);
}, t.type);
else {
var l = o.toDataURL(t.type),
i = { type: t.type, base64_data: l.split(",")[1] };
e(uploadDataToBlob(i));
}
},
{ canvas: !0, orientation: !0 }
)
: void e(t);
}
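// Decodes a {type, base64_data} payload into a Blob suitable for upload.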
function uploadDataToBlob(t) {
for (var e = atob(t.base64_data), o = [], l = 0; l < e.length; l++)
o.push(e.charCodeAt(l));
return new Blob([new Uint8Array(o)], { type: t.type });
}
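// POSTs a file to the upload endpoint ("api/v1/__upload/") including the CSRF token;
// e receives upload progress, o each returned media object ({src: ...}), l any error message.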
function _uploadFile(t, e, o, l) {
var i = new FormData();
i.append("csrfmiddlewaretoken", $('[name=csrfmiddlewaretoken]').val()),
i.append("file", uploadDataToBlob(t)),
$.ajax({
url: "api/v1/__upload/",
type: "POST",
data: i,
cache: !1,
dataType: "json",
processData: !1,
contentType: !1,
xhr: function r() {
var r = new XMLHttpRequest();
return (
r.upload.addEventListener("progress", function (t) {
t.lengthComputable && e && e(t.loaded, t.total);
}),
r
);
},
beforeSend: function (t) {
e && e(0, 1);
},
success: function (t) {
return t.error
? l && l(t.error)
: void $.each(t, function (t, e) {
o && o(e);
});
},
error: function (t) {
return l && l("Network error");
},
});
}
function wrapDomElement(t) {
if (!t.tagName) return t.data;
var e = { tag: t.tagName.toLowerCase() };
if (t.attributes.length) {
e.attrs = {};
for (var o = 0; o < t.attributes.length; o++) {
var l = t.attributes[o];
e.attrs[l.name] = l.value;
}
}
if (t.childNodes.length) {
e.children = [];
for (var o = 0; o < t.childNodes.length; o++)
e.children.push(wrapDomElement(t.childNodes[o]));
}
return e;
}
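// Serializes the editor DOM: returns cleaned HTML when t is truthy, otherwise a JSON
// tree (via wrapDomElement) plus its length, with editable widgets and helpers stripped.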
function getPageContent(t) {
var e = $(quill.scroll.domNode);
$("textarea,input", e).map(function () {
this.setAttribute("data-value", this.value);
});
var o = e.clone();
return (
$("textarea,input", e).map(function () {
this.removeAttribute("data-value");
}),
$("textarea,input", o).map(function () {
(this.value = this.getAttribute("data-value")),
this.removeAttribute("data-value");
}),
updateEditableText(o, !1),
$("[contenteditable]", o).removeAttr("contenteditable"),
$("[data-placeholder]", o).removeAttr("data-placeholder"),
$("[data-label]", o).removeAttr("data-label"),
$("[data-title]", o).removeAttr("data-title"),
$(".editable_text", o).removeClass("editable_text"),
$(".focus", o).removeClass("focus"),
$(".empty", o).removeClass("empty"),
$('[class=""]', o).removeAttr("class"),
$(".file_progress", o).remove(),
$(".cursor_wrapper", o).remove(),
t
? ($("h1:not(:has(br)),address:not(:has(br))", o).append("<br>"),
o.html())
: ($("h1,address", o).remove(),
$("br.inline", o).replaceWith("\n"),
{
data: JSON.stringify(wrapDomElement(o.get(0)).children),
length: o.html().length,
})
);
}
function showError(t) {
$error_msg.text(t),
clearTimeout($error_msg.to),
$error_msg.addClass("shown"),
($error_msg.to = setTimeout(function () {
$error_msg.removeClass("shown");
}, 3e3));
}
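// Validates the title, builds a multipart body by hand and POSTs it to "api/v1/__save/";
// on success for a new page, redirects to the returned path.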
function savePage() {
if ($tl_article.hasClass("tl_article_saving")) return !1;
var t = $("h1", $tl_content).text(),
e = $("address", $tl_content).text(),
o = $("address a", $tl_content).attr("href") || "";
if (t.length < 2) {
clearTimeout($tl_article.to),
$tl_article.addClass("title_required"),
($tl_article.to = setTimeout(function () {
$tl_article.removeClass("title_required");
}, 3e3)),
quill.focus();
var l = quill.scroll.descendants(TitleBlot, 0, quill.scroll.length()),
i = _slicedToArray(l, 1),
r = i[0];
return (
quill.setSelection(r.offset(), r.length() - 1),
quill.selection.scrollIntoView(),
showError("Title is too small")
);
}
var a = $('img[src^="data:"],video[src^="data:"]');
if (a.length) return showError("Upload in progress.\nPlease wait...");
var n = getPageContent(!0);
if (n.length > 65536) return showError("Content is too big");
$tl_article.addClass("tl_article_saving"), updateEditable(!1);
var csrf = $('[name=csrfmiddlewaretoken]').val();
var s = "---------------------------TelegraPhBoundary21",
u =
"--" +
s +
'\r\nContent-Disposition: form-data; name="csrfmiddlewaretoken"\r\n\r\n' +
csrf +
"\r\n--" +
s +
'\r\nContent-Disposition: form-data; name="title"\r\n\r\n' +
t +
"\r\n--" +
s +
'\r\nContent-Disposition: form-data; name="author"\r\n\r\n' +
e +
"\r\n--" +
s +
'\r\nContent-Disposition: form-data; name="text"\r\n\r\n' +
n +
"\r\n--" +
s +
'\r\nContent-Disposition: form-data; name="author_url"\r\n\r\n' +
o +
"\r\n--" +
s +
'\r\nContent-Disposition: form-data; name="save_hash"\r\n\r\n' +
(T.saveHash || "") +
"\r\n--" +
s +
'\r\nContent-Disposition: form-data; name="page_id"\r\n\r\n' +
T.pageId +
"\r\n--" +
s +
"--";
$.ajax("api/v1/__save/", {
contentType: "multipart/form-data; boundary=" + s,
data: u,
type: "POST",
dataType: "json",
xhrFields: { withCredentials: !0 },
success: function (t) {
return (
$tl_article.removeClass("tl_article_saving"),
t.error
? (updateEditable(!0), showError(t.data))
: (draftClear(),
void (!T.pageId && t.path && (location.href = "/" + t.path)))
);
},
error: function (t) {
return (
$tl_article.removeClass("tl_article_saving"),
updateEditable(!0),
showError("Network error")
);
},
});
}
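// Asks the backend ("auth/check/") whether the current session may edit this page,
// toggles the editable UI, optionally prefills the author field and offers page migration.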
function checkAuth() {
$.ajax("auth/check/", {
data: {
csrfmiddlewaretoken: $('[name=csrfmiddlewaretoken]').val(),
page_id: T.pageId,
},
type: "POST",
dataType: "json",
xhrFields: { withCredentials: !0 },
success: function (t) {
if (
(t.save_hash && (T.saveHash = t.save_hash),
((t.can_edit && T.saveHash) || !T.pageId) &&
(t.short_name && $account.text(t.short_name),
$tl_article.addClass("tl_article_editable")),
!T.pageId &&
($tl_article.addClass("tl_article_edit"),
!draftGet() && t.author_name))
) {
if (t.author_url) var e = { link: t.author_url };
else var e = {};
var o = quill.scroll.descendants(AuthorBlot),
l = _slicedToArray(o, 1),
i = l[0];
i &&
quill.updateContents(
new Delta()
.retain(i.offset())
["delete"](i.length())
.insert(t.author_name, e),
Quill.sources.USER
);
}
if (t.auth_alert && t.short_name) {
var r =
"Success! You are now logged in as <b>" +
htsc(t.short_name) +
"</b> in this browser.";
t.migrate_count > 0 && t.migrate_hash
? ((r += "<br/><br/>"),
(r +=
"We can also add " +
t.migrate_count +
" Telegraph page" +
(t.migrate_count > 1 ? "s" : "") +
" from this browser to your account."),
showAlert(r, {
close_btn: "Skip",
submit_btn: "Add",
submit: function () {
migratePages(t.migrate_hash);
},
}))
: showAlert(r);
}
(pageContent = getPageContent(!0)), updateEditable(isEdit());
},
});
}
function migratePages(t) {
$.ajax("https://edit.telegra.ph/migrate", {
data: { migrate_hash: t },
type: "POST",
dataType: "json",
xhrFields: { withCredentials: !0 },
success: function (t) {
t.migrated_count > 0
? showAlert(
"Added <b>" +
t.migrated_count +
"</b> Telegraph page" +
(t.migrated_count > 1 ? "s" : "") +
' to your account.<br><br>To see a list of your pages, talk to the <a href="https://t.me/telegraph" target="_blank">@Telegraph</a> bot on Telegram.'
)
: hideAlert();
},
});
}
function toolbarUpdate(t) {
var e = null == t ? {} : quill.getFormat(t),
o = !!e.blockAuthor,
l = !(!e.blockHeader && !e.blockSubheader),
i = !!e["code-block"];
if (
($bold_button.toggleClass("active", !!e.bold),
$bold_button.toggleClass("disabled", o || l || i),
$italic_button.toggleClass("active", !!e.italic),
$italic_button.toggleClass("disabled", o || l || i),
$header_button.toggleClass("active", !!e.blockHeader),
$header_button.toggleClass("disabled", o),
$subheader_button.toggleClass("active", !!e.blockSubheader),
$subheader_button.toggleClass("disabled", o),
$quote_button.toggleClass(
"active",
!(!e.blockBlockquote && !e.blockPullquote)
),
$quote_button.toggleClass("pullquote", !!e.blockPullquote),
$quote_button.toggleClass("disabled", o),
null != t)
) {
var r = quill.scroll.descendants(LinkBlot, t.index, t.length);
$link_button.toggleClass("active", !!r.length);
} else $link_button.toggleClass("active", !1);
$link_button.toggleClass("disabled", i);
}
function storageSet(t, e) {
try {
return localStorage.setItem(t, e), !!localStorage.getItem(t);
} catch (o) {
return !1;
}
}
function storageGet(t) {
try {
return localStorage.getItem(t);
} catch (e) {
return !1;
}
}
function storageDelete(t) {
try {
return localStorage.removeItem(t), !0;
} catch (e) {
return !1;
}
}
function draftClear() {
storageDelete("draft");
}
function draftSave() {
if (!pageContent) return !1;
if (!T.pageId) {
var t = getPageContent(!0);
if (pageContent != t) return (pageContent = t), storageSet("draft", t);
}
return !1;
}
function draftGet() {
return !T.pageId && storageGet("draft");
}
function isEdit() {
return $tl_article.hasClass("tl_article_edit");
}
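// Swaps .editable_text nodes (figure captions) between plain text and <textarea>
// inputs depending on whether the article is in edit mode.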
function updateEditableText(t, e) {
"undefined" == typeof e && (e = isEdit()),
e
? $(".editable_text:not(:has(.editable_input))", t).map(function () {
var t = this.innerText,
e = document.createElement("textarea");
return (
e.classList.add("editable_input"),
e.setAttribute("tabindex", "-1"),
e.setAttribute("rows", "1"),
(e.value = t),
t || this.classList.add("empty"),
$(this).empty().append(e),
autosize(e),
e
);
})
: $(".editable_text > .editable_input", t).map(function () {
var t = this.value,
e = this.parentNode;
return $(e).empty().text(t), e;
});
}
function updateEditable(t) {
if (
($tl_article.toggleClass("tl_article_edit", t),
updateEditableText(),
window.quill && (quill.enable(t), t && quill.focus()),
!t)
) {
var e = $("h1", $tl_content).text(),
o = $("address", $tl_content).text(),
l = $("address a", $tl_content).attr("href");
$("h1", $tl_header).text(e),
$("address a", $tl_header).text(o),
l
? $("address a", $tl_header).attr("href", l)
: $("address a", $tl_header).removeAttr("href"),
hideLinkTooltip(),
hideFormatTooltip(),
hideBlocksTooltip();
}
}
function showLinkTooltip(t, e) {
if (isEdit()) {
var o = { index: t.offset(quill.scroll), length: t.length() };
$tl_link_tooltip.text(decodeURI(e)),
tooltipUpdatePosition($tl_link_tooltip, o, linkTTOptions),
$tl_link_tooltip.hasClass("move_anim") ||
setTimeout(function () {
$tl_link_tooltip.addClass("move_anim");
}, 1),
$tl_link_tooltip.hasClass("shown") ||
setTimeout(function () {
$tl_link_tooltip.addClass("shown");
}, 10);
}
}
function hideLinkTooltip() {
$tl_link_tooltip.removeClass("move_anim shown");
}
function showFormatTooltip(t) {
isEdit() &&
($tl_tooltip.removeClass("tooltip_prompt"),
tooltipUpdatePosition($tl_tooltip, t, formatTTOptions),
$tl_tooltip.hasClass("move_anim") ||
setTimeout(function () {
$tl_tooltip.addClass("move_anim");
}, 10),
$tl_tooltip.hasClass("shown")
? tooltipUpdatePosition($tl_link_tooltip, null, linkTTOptions)
: setTimeout(function () {
$tl_tooltip.addClass("shown"),
tooltipUpdatePosition($tl_link_tooltip, null, linkTTOptions);
}, 10));
}
function hideFormatTooltip() {
$tl_tooltip.removeClass("move_anim shown"),
tooltipUpdatePosition($tl_link_tooltip, null, linkTTOptions);
}
function showBlocksTooltip(t) {
isEdit() && ($tl_blocks.addClass("shown"), blocksUpdatePosition(t));
}
function hideBlocksTooltip() {
$tl_blocks.removeClass("shown");
}
function hideAlert() {
$(".tl_alert").remove();
}
function showAlert(t, e) {
(e = e || {}),
(e.close_btn = e.close_btn || "OK"),
(e.submit_btn = e.submit_btn || !1),
(e.close = e.close || hideAlert),
(e.submit = e.submit || e.close),
hideAlert();
var o = $(
'<div class="tl_alert"><main class="tl_alert_message"><section></section><aside class="tl_message_buttons"></aside></main></div>'
);
$("section", o).html(t);
var l = $("aside", o);
if (e.close_btn) {
var i = $('<button class="button"></button>');
i.html(e.close_btn).click(e.close).appendTo(l);
}
if (e.submit_btn) {
var r = $('<button class="button"></button>');
r.html(e.submit_btn)
.click(function () {
o.addClass("tl_alert_loading"), e.submit();
})
.appendTo(l);
}
o.appendTo("body");
}
function isOverElement(t, e, o) {
if (!e || !e.hasClass("shown")) return !1;
(t.bottom = t.top + t.height), (t.right = t.left + t.width);
var l = e,
i = {
top: l._top,
bottom: l._top + e.outerHeight(),
left: l._left,
right: l._left + e.outerWidth(),
};
return (
!(
t.left - i.right >= o ||
i.left - t.right >= o ||
t.top - i.bottom >= o ||
i.top - t.bottom >= o
) && i
);
}
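// Positions a tooltip relative to the current selection bounds, clamping it to the
// viewport and flipping it above/below the selection as needed.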
function tooltipUpdatePosition(t, e, o) {
if (
((o = o || { padding: 10, position: "top" }),
(e = e || t._range || null),
null != e)
) {
var l = quill.getBounds(e),
i = $(quill.container).offset(),
r = { width: t.outerWidth(), height: t.outerHeight() },
a = {
width: $(window).outerWidth(),
height: $(window).outerHeight(),
scrolltop: document.body.scrollTop,
},
n = { left: 9, top: a.scrolltop + 9 },
s = {
left: a.width - r.width - 9,
top: a.scrolltop + a.height - r.height - 9,
};
r.left = l.left + l.width / 2 - r.width / 2;
var u = i.left + r.left;
u < n.left
? (r.left = n.left - i.left)
: u > s.left && (r.left = s.left - i.left);
var c = void 0;
if ("top" == o.position) {
r.top = l.top - r.height - o.padding;
var d = i.top + r.top;
(c = !1), d < n.top && ((r.top = l.bottom + o.padding), (c = !0));
} else if ("bottom" == o.position) {
var p = !1;
(r.top = l.bottom + o.padding),
(p = isOverElement(r, o.depend, o.dependPadding)) &&
(r.top = p.bottom + o.dependPadding);
var h = i.top + r.top;
(c = !0),
h > s.top &&
((r.top = l.top - r.height - o.padding),
(p = isOverElement(r, o.depend, o.dependPadding)) &&
(r.top = p.top - r.height - o.dependPadding),
(c = !1));
}
(r.left = Math.round(r.left)),
(r.top = Math.round(r.top)),
(t._range = e),
(o.minDelta &&
Math.abs(r.left - t._left) < o.minDelta &&
Math.abs(r.top - t._top) < o.minDelta) ||
((t._left = r.left),
(t._top = r.top),
t.css({ left: r.left, top: r.top }).toggleClass("bottom", c));
}
}
function blocksUpdatePosition(t) {
if (
("undefined" == typeof t && (t = quill.getSelection()),
null != t && window.quill)
) {
var e = quill.getBounds(t);
$tl_blocks.css({ top: e.top + e.height / 2 });
}
}
// Escapes a string for safe insertion into HTML markup.
function htsc(t) {
  return t
    .replace(/&/g, "&amp;")
    .replace(/</g, "&lt;")
    .replace(/>/g, "&gt;")
    .replace(/"/g, "&quot;")
    .replace(/\'/g, "&#39;")
    .replace(/%/g, "&#37;");
}
function toolbarPrompt(t, e, o) {
var l = $(".prompt_input", t),
i = $(".close", t);
l.val("").attr("placeholder", e),
l.on("keydown", function (e) {
var i = e.which || e.keyCode;
if (27 == i) toolbarPromptHide(t);
else if (13 == i) {
var r = l.val();
r && (o && o(r), e.preventDefault()), toolbarPromptHide(t);
}
}),
l.on("blur", function () {
toolbarPromptHide(t);
}),
i.on("click", function () {
toolbarPromptHide(t);
}),
t.show().addClass("tooltip_prompt"),
l.focus();
}
function toolbarPromptHide(t) {
var e = $(".prompt_input", t),
o = $(".close", t);
e.off("keydown"),
e.off("blur"),
o.off("click"),
t.show().removeClass("tooltip_prompt"),
quill.focus();
}
var _slicedToArray = (function () {
function t(t, e) {
var o = [],
l = !0,
i = !1,
r = void 0;
try {
for (
var a, n = t[Symbol.iterator]();
!(l = (a = n.next()).done) && (o.push(a.value), !e || o.length !== e);
l = !0
);
} catch (s) {
(i = !0), (r = s);
} finally {
try {
!l && n["return"] && n["return"]();
} finally {
if (i) throw r;
}
}
return o;
}
return function (e, o) {
if (Array.isArray(e)) return e;
if (Symbol.iterator in Object(e)) return t(e, o);
throw new TypeError(
"Invalid attempt to destructure non-iterable instance"
);
};
})(),
_createClass = (function () {
function t(t, e) {
for (var o = 0; o < e.length; o++) {
var l = e[o];
(l.enumerable = l.enumerable || !1),
(l.configurable = !0),
"value" in l && (l.writable = !0),
Object.defineProperty(t, l.key, l);
}
}
return function (e, o, l) {
return o && t(e.prototype, o), l && t(e, l), e;
};
})(),
_get = function t(e, o, l) {
null === e && (e = Function.prototype);
var i = Object.getOwnPropertyDescriptor(e, o);
if (void 0 === i) {
var r = Object.getPrototypeOf(e);
return null === r ? void 0 : t(r, o, l);
}
if ("value" in i) return i.value;
var a = i.get;
if (void 0 !== a) return a.call(l);
},
ua = navigator.userAgent.toLowerCase(),
browser = {
opera: /opera/i.test(ua) || /opr/i.test(ua),
msie:
(/msie/i.test(ua) && !/opera/i.test(ua)) ||
/trident\//i.test(ua) ||
/edge/i.test(ua),
msie_edge: /edge/i.test(ua) && !/opera/i.test(ua),
mozilla: /firefox/i.test(ua),
chrome: /chrome/i.test(ua) && !/edge/i.test(ua),
safari: !/chrome/i.test(ua) && /webkit|safari|khtml/i.test(ua),
iphone: /iphone/i.test(ua),
ipod: /ipod/i.test(ua),
ipad: /ipad/i.test(ua),
android: /android/i.test(ua),
mobile: /iphone|ipod|ipad|opera mini|opera mobi|iemobile|android/i.test(ua),
safari_mobile: /iphone|ipod|ipad/i.test(ua),
opera_mobile: /opera mini|opera mobi/i.test(ua),
opera_mini: /opera mini/i.test(ua),
mac: /mac/i.test(ua),
},
Inline = Quill["import"]("blots/inline"),
Block = Quill["import"]("blots/block"),
BlockEmbed = Quill["import"]("blots/block/embed"),
Embed = Quill["import"]("blots/embed"),
TextBlot = Quill["import"]("blots/text"),
CodeBlock = Quill["import"]("formats/code-block"),
ListItem = Quill["import"]("formats/list/item"),
Parchment = Quill["import"]("parchment"),
Delta = Quill["import"]("delta"),
Keyboard = Quill["import"]("modules/keyboard"),
LinkBlot = (function (t) {
function e(t, o) {
_classCallCheck(this, e);
var l = _possibleConstructorReturn(
this,
(e.__proto__ || Object.getPrototypeOf(e)).call(this, t)
);
return (
$(t).on("mouseover", function () {
showLinkTooltip(l, o);
}),
$(t).on("mouseout", function () {
hideLinkTooltip();
}),
l
);
}
return (
_inherits(e, t),
_createClass(e, null, [
{
key: "create",
value: function (t) {
var o = _get(
e.__proto__ || Object.getPrototypeOf(e),
"create",
this
).call(this, t);
(t = this.sanitize(t)), o.setAttribute("href", t);
var l = t.substr(0, 1);
return (
"/" != l &&
"#" != l &&
"tg://" != t.substr(0, 5) &&
"mailto:" != t.substr(0, 7) &&
o.setAttribute("target", "_blank"),
o
);
},
},
{
key: "formats",
value: function (t) {
return t.getAttribute("href");
},
},
{
key: "sanitize",
value: function (t) {
return _sanitize(t, ["http", "https", "tg", "mailto"])
? relativeUrl(t)
: "about:blank";
},
},
]),
_createClass(e, [
{
key: "detach",
value: function () {
$(this.domNode).off("mouseover mouseout"),
_get(
e.prototype.__proto__ || Object.getPrototypeOf(e.prototype),
"detach",
this
).call(this),
hideLinkTooltip();
},
},
{
key: "format",
value: function (t, o) {
return t === this.statics.blotName && o
? ((o = this.constructor.sanitize(o)),
this.domNode.setAttribute("href", o),
void this.domNode.setAttribute("data-title", o))
: _get(
e.prototype.__proto__ || Object.getPrototypeOf(e.prototype),
"format",
this
).call(this, t, o);
},
},
]),
e
);
})(Inline);
(LinkBlot.blotName = "link"),
(LinkBlot.tagName = "a"),
Quill.register(LinkBlot);
var BreakBlot = (function (t) {
function e() {
return (
_classCallCheck(this, e),
_possibleConstructorReturn(
this,
(e.__proto__ || Object.getPrototypeOf(e)).apply(this, arguments)
)
);
}
return _inherits(e, t), e;
})(Embed);
(BreakBlot.blotName = "textBreak"),
(BreakBlot.tagName = "br"),
(BreakBlot.className = "inline"),
Quill.register(BreakBlot);
var SingleLineBlot = (function (t) {
function e() {
return (
_classCallCheck(this, e),
_possibleConstructorReturn(
this,
(e.__proto__ || Object.getPrototypeOf(e)).apply(this, arguments)
)
);
}
return (
_inherits(e, t),
_createClass(e, [
{
key: "replace",
value: function (t) {
t.children.forEach(function (t) {
t instanceof BreakBlot &&
t.replaceWith(Parchment.create("text", " "));
}),
_get(
e.prototype.__proto__ || Object.getPrototypeOf(e.prototype),
"replace",
this
).call(this, t);
},
},
{
key: "insertAt",
value: function (t, o, l) {
"undefined" != typeof l && "textBreak" == o
? _get(
e.prototype.__proto__ || Object.getPrototypeOf(e.prototype),
"insertAt",
this
).call(this, t, "\n")
: _get(
e.prototype.__proto__ || Object.getPrototypeOf(e.prototype),
"insertAt",
this
).call(this, t, o, l);
},
},
]),
e
);
})(Block),
FieldBlot = (function (t) {
function e() {
return (
_classCallCheck(this, e),
_possibleConstructorReturn(
this,
(e.__proto__ || Object.getPrototypeOf(e)).apply(this, arguments)
)
);
}
return _inherits(e, t), e;
})(SingleLineBlot),
TitleBlot = (function (t) {
function e() {
return (
_classCallCheck(this, e),
_possibleConstructorReturn(
this,
(e.__proto__ || Object.getPrototypeOf(e)).apply(this, arguments)
)
);
}
return (
_inherits(e, t),
_createClass(
e,
[
{
key: "formatAt",
value: function (t, o, l, i) {
l === this.constructor.blotName &&
_get(
e.prototype.__proto__ || Object.getPrototypeOf(e.prototype),
"formatAt",
this
).call(this, t, o, l, i);
},
},
],
[
{
key: "create",
value: function (t) {
var o = _get(
e.__proto__ || Object.getPrototypeOf(e),
"create",
this
).call(this, t);
return (
o.setAttribute("data-placeholder", "Title"),
o.setAttribute("data-label", "Title"),
o
);
},
},
]
),
e
);
})(FieldBlot);
(TitleBlot.blotName = "blockTitle"),
(TitleBlot.tagName = "h1"),
Quill.register(TitleBlot);
var AuthorBlot = (function (t) {
function e() {
return (
_classCallCheck(this, e),
_possibleConstructorReturn(
this,
(e.__proto__ || Object.getPrototypeOf(e)).apply(this, arguments)
)
);
}
return (
_inherits(e, t),
_createClass(
e,
[
{
key: "formatAt",
value: function (t, o, l, i) {
l === this.constructor.blotName
? _get(
e.prototype.__proto__ || Object.getPrototypeOf(e.prototype),
"formatAt",
this
).call(this, t, o, l, i)
: "link" === l &&
_get(
e.prototype.__proto__ || Object.getPrototypeOf(e.prototype),
"formatAt",
this
).call(this, 0, this.length(), l, i);
},
},
],
[
{
key: "create",
value: function (t) {
var o = _get(
e.__proto__ || Object.getPrototypeOf(e),
"create",
this
).call(this, t);
return (
o.setAttribute("data-placeholder", "Your name"),
o.setAttribute("data-label", "Author"),
o
);
},
},
]
),
e
);
})(FieldBlot);
(AuthorBlot.blotName = "blockAuthor"),
(AuthorBlot.tagName = "address"),
Quill.register(AuthorBlot);
var HeaderBlot = (function (t) {
function e() {
return (
_classCallCheck(this, e),
_possibleConstructorReturn(
this,
(e.__proto__ || Object.getPrototypeOf(e)).apply(this, arguments)
)
);
}
return (
_inherits(e, t),
_createClass(e, [
{
key: "optimize",
value: function () {
_get(
e.prototype.__proto__ || Object.getPrototypeOf(e.prototype),
"optimize",
this
).call(this);
var t = $(this.domNode).text();
(t = t.replace(/[\s_]+/g, "-")),
(t = t.replace(/(^-+|-+$)/g, "")),
this.domNode.setAttribute("id", t);
},
},
{
key: "formatAt",
value: function (t, o, l, i) {
(("bold" !== l && "italic" !== l && "code" !== l) || !i) &&
_get(
e.prototype.__proto__ || Object.getPrototypeOf(e.prototype),
"formatAt",
this
).call(this, t, o, l, i);
},
},
]),
e
);
})(SingleLineBlot);
(HeaderBlot.blotName = "blockHeader"),
(HeaderBlot.tagName = "h3"),
Quill.register(HeaderBlot);
var SubheaderBlot = (function (t) {
function e() {
return (
_classCallCheck(this, e),
_possibleConstructorReturn(
this,
(e.__proto__ || Object.getPrototypeOf(e)).apply(this, arguments)
)
);
}
return _inherits(e, t), e;
})(HeaderBlot);
(SubheaderBlot.blotName = "blockSubheader"),
(SubheaderBlot.tagName = "h4"),
Quill.register(SubheaderBlot);
var BlockquoteBlot = (function (t) {
function e() {
return (
_classCallCheck(this, e),
_possibleConstructorReturn(
this,
(e.__proto__ || Object.getPrototypeOf(e)).apply(this, arguments)
)
);
}
return _inherits(e, t), e;
})(Block);
(BlockquoteBlot.blotName = "blockBlockquote"),
(BlockquoteBlot.tagName = "blockquote"),
Quill.register(BlockquoteBlot);
var PullquoteBlot = (function (t) {
function e() {
return (
_classCallCheck(this, e),
_possibleConstructorReturn(
this,
(e.__proto__ || Object.getPrototypeOf(e)).apply(this, arguments)
)
);
}
return _inherits(e, t), e;
})(Block);
(PullquoteBlot.blotName = "blockPullquote"),
(PullquoteBlot.tagName = "aside"),
Quill.register(PullquoteBlot);
var CodeBlot = (function (t) {
function e() {
return (
_classCallCheck(this, e),
_possibleConstructorReturn(
this,
(e.__proto__ || Object.getPrototypeOf(e)).apply(this, arguments)
)
);
}
return (
_inherits(e, t),
_createClass(e, [
{
key: "replace",
value: function (t) {
t.children.forEach(function (t) {
t instanceof BreakBlot &&
t.replaceWith(Parchment.create("text", "\n"));
}),
_get(
e.prototype.__proto__ || Object.getPrototypeOf(e.prototype),
"replace",
this
).call(this, t);
},
},
]),
e
);
})(CodeBlock);
(CodeBlot.blotName = "code-block"), Quill.register(CodeBlot);
var DividerBlot = (function (t) {
function e() {
return (
_classCallCheck(this, e),
_possibleConstructorReturn(
this,
(e.__proto__ || Object.getPrototypeOf(e)).apply(this, arguments)
)
);
}
return _inherits(e, t), e;
})(BlockEmbed);
(DividerBlot.blotName = "blockDivider"),
(DividerBlot.tagName = "hr"),
Quill.register(DividerBlot);
var FigureBlot = (function (t) {
function e(t, o) {
_classCallCheck(this, e);
var l = _possibleConstructorReturn(
this,
(e.__proto__ || Object.getPrototypeOf(e)).call(this, t)
);
(l.domWrapper = document.createElement("div")),
(l.domCursor = document.createElement("span")),
(l.domCaption = document.createElement("figcaption")),
l.domWrapper.classList.add("figure_wrapper"),
l.domCursor.classList.add("cursor_wrapper"),
l.domCursor.setAttribute("contenteditable", "true"),
l.domCaption.classList.add("editable_text"),
l.domCaption.setAttribute("data-placeholder", "Caption (optional)"),
o.caption && (l.domCaption.innerText = o.caption),
l.domNode.appendChild(l.domWrapper),
l.domNode.appendChild(l.domCursor),
l.domNode.appendChild(l.domCaption),
setTimeout(function () {
updateEditableText(l.domNode);
}, 1);
var i = !1;
return (
o.image
? (l.appendImgNode(o.image), (i = l.uploadData(o.image)))
: o.video
? (l.appendVideoNode(o.video), (i = l.uploadData(o.video)))
: o.embed && l.appendIframeNode(o.embed),
i &&
((l.domProgress = document.createElement("div")),
(l.domProgressBar = document.createElement("div")),
l.domProgress.classList.add("file_progress"),
l.domProgressBar.classList.add("file_progress_bar"),
l.domWrapper.classList.add("loading"),
l.domProgress.appendChild(l.domProgressBar),
l.domWrapper.appendChild(l.domProgress),
l.uploadFile(i)),
$(l.domWrapper).click(function () {
if (!l.domNode.classList.contains("focus")) {
var t = l.offset(quill.scroll);
quill.focus(), quill.setSelection(t, 0, Quill.sources.USER);
}
}),
$(l.domCursor).keydown(function (t) {
var e = t.which || t.keyCode;
if (e == Keyboard.keys.BACKSPACE) {
var o = l.offset(quill.scroll);
quill.deleteText(o, l.length(), Quill.sources.USER),
quill.setSelection(o - 1, 0, Quill.sources.USER),
t.preventDefault();
} else if (e == Keyboard.keys.ENTER) {
var i = l.offset(quill.scroll) + l.length();
quill.focus(),
quill.insertText(i, "\n", Quill.sources.USER),
quill.setSelection(i, 0, Quill.sources.USER),
t.preventDefault();
}
}),
$(l.domCursor).on("paste", function (t) {
t.stopPropagation(), t.preventDefault();
}),
$(l.domCaption).keydown(function (t) {
var e = t.which || t.keyCode,
o = $(t.target);
if (e == Keyboard.keys.ENTER) {
if (t.shiftKey) return;
var i = o.selection("getPos"),
r = o.val();
if (i.start != i.end)
(r = r.substr(0, i.start) + r.substr(i.end)),
o.val(r).selection("setPos", { start: r.length, end: r.length });
else if (i.end == r.length) {
var a = l.offset(quill.scroll) + l.length();
quill.focus(),
quill.insertText(a, "\n", Quill.sources.USER),
quill.setSelection(a, 0, Quill.sources.USER);
}
t.preventDefault();
} else if (
e == Keyboard.keys.DOWN ||
e == Keyboard.keys.TAB ||
e == Keyboard.keys.RIGHT
) {
var n = o.selection("getPos"),
s = o.val();
if (n.start == n.end && n.end == s.length) {
var u = l.offset(quill.scroll) + l.length();
quill.focus(),
quill.setSelection(u, 0, Quill.sources.USER),
t.preventDefault();
}
} else if (e == Keyboard.keys.LEFT || e == Keyboard.keys.UP) {
var c = o.selection("getPos");
if (c.start == c.end && 0 === c.start) {
var d = l.offset(quill.scroll) - 1;
quill.focus(),
quill.setSelection(d, 0, Quill.sources.USER),
t.preventDefault();
}
}
}),
$(l.domCaption).on("paste", function (t) {
t.stopPropagation();
}),
$(l.domCaption).on(
"keyup drop change input textInput paste cut",
function (t) {
$(l.domCaption).toggleClass("empty", !t.target.value),
autosize.update(t.target),
draftSave();
}
),
$(l.domCaption).on("mousedown touchstart", function (t) {
l.focusCaptionInput(t);
}),
$(document).on("selectionchange", function (t) {
var e = window.getSelection();
e &&
e.focusNode === l.domCaption &&
e.isCollapsed &&
l.focusCaptionInput(t);
}),
l
);
}
return (
_inherits(e, t),
_createClass(e, null, [
{
key: "create",
value: function (t) {
var o = _get(
e.__proto__ || Object.getPrototypeOf(e),
"create",
this
).call(this, t);
return o.setAttribute("contenteditable", "false"), o;
},
},
]),
_createClass(
e,
[
{
key: "focusCaptionInput",
value: function (t) {
if (this.domCaption.classList.contains("empty")) {
var e = this.domCaption.querySelector(".editable_input");
e && (t.preventDefault(), e.focus());
}
},
},
{
key: "appendImgNode",
value: function (t) {
var e = document.createElement("img");
return (
e.setAttribute("src", this.sanitize(t)),
this.domWrapper.appendChild(e),
e
);
},
},
{
key: "appendVideoNode",
value: function (t) {
var e = document.createElement("video");
return (
e.setAttribute("src", this.sanitize(t)),
e.setAttribute("preload", "auto"),
e.setAttribute("controls", "controls"),
e.addEventListener("loadeddata", function () {
this.mozHasAudio ||
this.webkitAudioDecodedByteCount ||
(this.audioTracks && this.audioTracks.length) ||
(this.setAttribute("autoplay", "autoplay"),
this.setAttribute("loop", "loop"),
this.setAttribute("muted", "muted"),
this.removeAttribute("controls"),
this.play());
}),
this.domWrapper.appendChild(e),
e
);
},
},
{
key: "appendIframeNode",
value: function (t) {
var e = document.createElement("div"),
o = document.createElement("div"),
l = document.createElement("iframe");
return (
e.classList.add("iframe_wrap"),
e.appendChild(o),
o.classList.add("iframe_helper"),
(o.style.paddingTop = "56.25%"),
o.appendChild(l),
l.setAttribute("src", this.sanitize(t)),
l.setAttribute("width", "640"),
l.setAttribute("height", "360"),
l.setAttribute("frameborder", "0"),
l.setAttribute("allowtransparency", "true"),
l.setAttribute("allowfullscreen", "true"),
l.setAttribute("scrolling", "no"),
this.domWrapper.appendChild(e),
e
);
},
},
{
key: "uploadFile",
value: function (t) {
var e = this;
_uploadFile(
t,
function (t, o) {
var l = 0;
o && t && ((l = (100 * t) / o), (l = Math.min(100, l))),
(e.domProgressBar.style.width = l + "%");
},
function (o) {
if (o) {
var l = e.sanitize(o.src);
if ("video/" == t.type.substr(0, 6)) {
var i = e.domWrapper.querySelector("video");
i.setAttribute("src", l);
} else {
var r = e.domWrapper.querySelector("img");
r.setAttribute("src", l);
}
e.domWrapper.classList.remove("loading"), draftSave();
}
},
function (t) {
return (
quill.deleteText(
e.offset(quill.scroll),
e.length(),
Quill.sources.SILENT
),
draftSave(),
showError(t)
);
}
);
},
},
{
key: "uploadData",
value: function (t) {
var e = null;
return (
!!(e = t.match(
/^data:(image\/gif|image\/jpe?g|image\/png|video\/mp4);base64,(.*)$/
)) && { type: e[1], base64_data: e[2] }
);
},
},
{
key: "sanitize",
value: function (t) {
return _sanitize(t, ["http", "https", "data"]) ? t : "//:0";
},
},
{
key: "focus",
value: function () {
this.domNode.classList.add("focus");
},
},
{
key: "blur",
value: function () {
this.domNode.classList.remove("focus");
},
},
{
key: "_index",
value: function (t, e) {
if (t === this.domCaption) return 0;
var o = 0;
return (
t.nodeType == t.TEXT_NODE && (o += e >= 0 ? e : t.data.length),
t.previousSibling
? o + this._index(t.previousSibling, -1)
: t.parentNode
? o + this._index(t.parentNode, -1)
: 0
);
},
},
{
key: "_position",
value: function (t, e) {
if (t.nodeType == t.TEXT_NODE)
return e <= t.data.length
? [t, e]
: ((e -= t.data.length), [null, e]);
for (var o = t.firstChild; o; ) {
var l = null,
i = this._position(o, e),
r = _slicedToArray(i, 2);
if (((l = r[0]), (e = r[1]), l)) return [l, e];
o = o.nextSibling;
}
return [t, e];
},
},
{
key: "update",
value: function (t) {
this.domCursor.innerHTML = "";
},
},
{
key: "index",
value: function (t, e) {
return 0;
},
},
{
key: "position",
value: function (t, e) {
return [this.domCursor, 0];
},
},
],
[
{
key: "value",
value: function o(t) {
var o = { caption: "" },
e = t.querySelector("img");
e && (o.image = e.src);
var l = t.querySelector("video");
l && (o.video = l.src);
var i = t.querySelector("iframe");
i && (o.embed = i.src);
var r = t.querySelector("figcaption");
if (r) {
var a = r.querySelector(".editable_input");
a ? (o.caption = a.value) : (o.caption = r.innerText);
}
return o;
},
},
]
),
e
);
})(BlockEmbed);
(FigureBlot.blotName = "blockFigure"),
(FigureBlot.tagName = "figure"),
Quill.register(FigureBlot);
var MyQuill = (function (t) {
function e() {
return (
_classCallCheck(this, e),
_possibleConstructorReturn(
this,
(e.__proto__ || Object.getPrototypeOf(e)).apply(this, arguments)
)
);
}
return (
_inherits(e, t),
_createClass(e, [
{
key: "formatLine",
value: function () {
for (var t, o = arguments.length, l = Array(o), i = 0; i < o; i++)
l[i] = arguments[i];
(t = _get(
e.prototype.__proto__ || Object.getPrototypeOf(e.prototype),
"formatLine",
this
)).call.apply(t, [this].concat(l)),
this.updateSelection();
},
},
{
key: "formatText",
value: function () {
for (var t, o = arguments.length, l = Array(o), i = 0; i < o; i++)
l[i] = arguments[i];
(t = _get(
e.prototype.__proto__ || Object.getPrototypeOf(e.prototype),
"formatText",
this
)).call.apply(t, [this].concat(l)),
this.updateSelection();
},
},
{
key: "updateSelection",
value: function (t) {
if (this.hasFocus()) {
t = t || this.constructor.sources.SILENT;
var e = this.getSelection(!0);
this.setSelection(++e.index, e.length, t),
this.setSelection(--e.index, e.length, t);
}
},
},
]),
e
);
})(Quill),
$tl_page = $(".tl_page"),
$tl_article = $(".tl_article"),
$tl_header = $(".tl_article_header"),
$tl_content = $(".tl_article_content"),
$tl_tooltip = $("#_tl_tooltip"),
$tl_blocks = $("#_tl_blocks"),
$tl_link_tooltip = $("#_tl_link_tooltip"),
$bold_button = $("#_bold_button"),
$italic_button = $("#_italic_button"),
$link_button = $("#_link_button"),
$header_button = $("#_header_button"),
$subheader_button = $("#_subheader_button"),
$quote_button = $("#_quote_button"),
$image_button = $("#_image_button"),
$embed_button = $("#_embed_button"),
$edit_button = $("#_edit_button"),
$publish_button = $("#_publish_button"),
$account = $(".account"),
$error_msg = $("#_error_msg"),
formatTTOptions = {
padding: 10,
position: browser.mobile ? "bottom" : "top",
minDelta: 5,
},
linkTTOptions = {
padding: 7,
position: "bottom",
depend: $tl_tooltip,
dependPadding: 10,
};
$tl_tooltip.mouseover(function (t) {
var e = t.target;
"BUTTON" != t.target.tagName ||
t.target.classList.contains("disabled") ||
($tl_tooltip.attr("data-hover", e.id).addClass("hover"),
setTimeout(function () {
$tl_tooltip.addClass("hover_anim");
}, 1),
clearTimeout($tl_tooltip.to));
}),
$tl_tooltip.mouseout(function (t) {
var e = t.target;
"BUTTON" == e.tagName &&
($tl_tooltip.removeClass("hover"),
($tl_tooltip.to = setTimeout(function () {
$tl_tooltip.removeClass("hover_anim");
}, 70)));
}),
$bold_button.click(function (t) {
var e = t.target,
o = e.classList.contains("active");
t.preventDefault();
quill.getSelection(!0);
quill.format("bold", !o), quill.updateSelection(Quill.sources.API);
}),
$italic_button.click(function (t) {
var e = t.target,
o = e.classList.contains("active");
t.preventDefault();
quill.getSelection(!0);
quill.format("italic", !o), quill.updateSelection(Quill.sources.API);
}),
$link_button.click(function (t) {
var e = t.target,
o = e.classList.contains("active");
t.preventDefault();
var l = quill.getSelection(!0);
if (o) {
var i = quill.scroll.descendants(LinkBlot, l.index, l.length);
i.forEach(function (t) {
var e = t.offset(quill.scroll),
o = t.length();
quill.formatText(e, o, "link", !1);
}),
toolbarUpdate(l);
} else
toolbarPrompt($tl_tooltip, "Paste or type a link...", function (t) {
(t = t.trim()),
"#" != t.substr(0, 1) &&
"/" != t.substr(0, 1) &&
"http://" != t.substr(0, 7) &&
"https://" != t.substr(0, 8) &&
"tg://" != t.substr(0, 5) &&
"mailto:" != t.substr(0, 7) &&
(t = t.indexOf("@") > 0 ? "mailto:" + t : "http://" + t),
quill.focus(),
quill.format("link", t),
toolbarUpdate(l);
});
}),
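  // Link input is normalised above: values without a recognised scheme
  // (#, /, http://, https://, tg://, mailto:) become "mailto:<value>" when they
  // contain "@", otherwise "http://" is prepended before the link format is applied.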
$header_button.click(function (t) {
var e = t.target,
o = e.classList.contains("active");
t.preventDefault();
var l = quill.getSelection(!0);
quill.format("blockHeader", !o);
var i = quill.scroll.descendants(HeaderBlot, l.index, l.length);
i.forEach(function (t) {
var e = t.offset(quill.scroll),
o = t.length();
quill.formatText(
e,
o,
{ bold: !1, italic: !1, code: !1 },
Quill.sources.SILENT
);
}),
quill.updateSelection(Quill.sources.API);
}),
$subheader_button.click(function (t) {
var e = t.target,
o = e.classList.contains("active");
t.preventDefault();
var l = quill.getSelection(!0);
quill.format("blockSubheader", !o);
var i = quill.scroll.descendants(SubheaderBlot, l.index, l.length);
i.forEach(function (t) {
var e = t.offset(quill.scroll),
o = t.length();
quill.formatText(
e,
o,
{ bold: !1, italic: !1, code: !1 },
Quill.sources.SILENT
);
}),
quill.updateSelection(Quill.sources.API);
}),
$quote_button.click(function (t) {
var e = t.target,
o = e.classList.contains("active"),
l = e.classList.contains("pullquote");
t.preventDefault();
quill.getSelection(!0);
o
? quill.format("blockPullquote", !l)
: quill.format("blockBlockquote", !0),
quill.updateSelection(Quill.sources.API);
}),
$image_button.click(function () {
var t = quill.container.querySelector(
"input.ql-image[type=file][data-status=ready]"
);
null == t &&
((t = document.createElement("input")),
t.setAttribute("type", "file"),
t.setAttribute(
"accept",
browser.safari_mobile
? "image/gif, image/jpeg, image/jpg, image/png"
: "image/gif, image/jpeg, image/jpg, image/png, video/mp4"
),
t.classList.add("ql-image"),
t.addEventListener("change", function () {
if (null != t.files && null != t.files[0]) {
var e = t.files[0];
updatePhoto(e, function (e) {
if (quill.fileSizeLimit && e.size > quill.fileSizeLimit)
return (
quill.fileSizeLimitCallback && quill.fileSizeLimitCallback()
);
var o = new FileReader();
(o.onload = function (e) {
var o = getFigureValueByUrl(e.target.result);
if (o) {
var l = quill.getSelection(!0);
quill.updateContents(
new Delta()
.retain(l.index)
["delete"](l.length)
.insert({ blockFigure: o }),
Quill.sources.USER
);
} else showError("Invalid file format");
(t.value = ""), t.setAttribute("data-status", "ready");
}),
o.readAsDataURL(e);
});
}
}),
quill.container.appendChild(t)),
t.setAttribute("data-status", "busy"),
t.click();
}),
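  // The hidden file input above is created once, reused while data-status=ready,
  // and marked busy for the duration of each pick/insert cycle.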
$embed_button.click(function (t) {
var e = quill.getSelection(!0),
o = quill.scroll.line(e.index),
l = _slicedToArray(o, 1),
i = l[0];
if (i) {
var r = $(i.domNode).text();
r ||
(i.domNode.setAttribute(
"data-placeholder",
"Paste a YouTube, Vimeo or Twitter link, and press Enter"
),
$(i.domNode).addClass("placeholder_once empty"),
hideBlocksTooltip());
}
}),
$publish_button.click(function () {
savePage();
}),
$edit_button.click(function () {
updateEditable(!0);
}),
$(window).on("scroll resize", function () {
tooltipUpdatePosition($tl_tooltip, null, formatTTOptions),
tooltipUpdatePosition($tl_link_tooltip, null, linkTTOptions);
}),
(new Image().src =
window.devicePixelRatio >= 2
? "static/img/icons_2x.png"
: "static/img/icons.png");
var quill = initQuill(),
pageContent = !1;
browser.mobile && $(document.body).addClass("mobile"), checkAuth();
<file_sep>from .base import *
if os.environ.get("LEVEL") == "PRODUCTION":
print('RUN PRODUCTION MODE')
from .production import *
else:
print('RUN LOCAL MODE')
from .local import *
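# Example (hypothetical): running with the production settings would look like
#   LEVEL=PRODUCTION python manage.py runserver
# Any other value, or no LEVEL at all, keeps the local settings imported above.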
<file_sep># Generated by Django 3.2.5 on 2021-07-19 15:01
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0005_auto_20210510_1744'),
]
operations = [
migrations.AlterField(
model_name='article',
name='text',
field=models.TextField(verbose_name='Your story'),
),
migrations.AlterField(
model_name='article',
name='title',
            field=models.CharField(max_length=150, verbose_name='Title'),
),
]
<file_sep>FROM python:3.9
ENV PYTHONUNBUFFERED=1
WORKDIR /usr/projects/flatype
COPY . /usr/projects/flatype/
RUN pip install -U pip && pip install -r requirements.txt
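# No CMD/ENTRYPOINT is defined here, so the run command is presumably supplied
# elsewhere (e.g. docker-compose). A hypothetical manual invocation:
#   docker build -t flatype .
#   docker run -p 8000:8000 flatype python manage.py runserver 0.0.0.0:8000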
<file_sep># Generated by Django 3.2.5 on 2021-07-22 17:50
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0006_auto_20210719_1801'),
]
operations = [
migrations.AlterField(
model_name='article',
name='slug',
field=models.SlugField(blank=True, null=True, unique=True),
),
]
<file_sep>from core.models import Article
from django.contrib.auth.forms import AuthenticationForm
from django.views.decorators.http import require_http_methods
from django.contrib.auth import authenticate, login, logout
from django.views.decorators.csrf import csrf_exempt
from django.http import JsonResponse
def try_check(request) -> JsonResponse:
slug = request.POST.get('page_id',)
if slug == '0':
return JsonResponse({
'short_name': f'👤 {request.user}',
'author_name': str(request.user),
'author_url': '#' if request.user.is_authenticated else '',
'save_hash': '',
'can_edit': False,
})
try:
article = Article.objects.get(slug=slug)
except Article.DoesNotExist:
return JsonResponse(
{
'error': True,
'data': 'Article not found'
},
)
owner_hash = request.session.get('externalid',)
return JsonResponse({
'short_name': f'👤 {request.user}',
'author_name': str(request.user),
'author_url': '#' if request.user.is_authenticated else '',
'save_hash': slug,
        'can_edit': request.user == article.owner or owner_hash == article.owner_hash,
})
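# For reference, a successful try_check response has the shape:
#   {"short_name": "...", "author_name": "...", "author_url": "...",
#    "save_hash": "<slug>", "can_edit": true|false}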
@csrf_exempt
@require_http_methods(["POST"])
def try_login(request) -> JsonResponse:
form = AuthenticationForm(data=request.POST)
if not form.is_valid():
return JsonResponse(
{
'error': True,
'data': 'Data is not valid'
},
)
if request.user.is_authenticated:
return JsonResponse(
{
'error': True,
'data': 'User already authenticated'
},
)
username = form.cleaned_data.get('username',)
password = form.cleaned_data.get('password',)
user = authenticate(request, username=username, password=password)
if user is None:
return JsonResponse(
{
'error': True,
'data': 'User not found'
},
)
if not user.is_active:
return JsonResponse(
{
'error': True,
'data': 'User is locked'
},
)
login(request, user)
return JsonResponse({
'data': 'ok'
})
def try_logout(request) -> JsonResponse:
if not request.user.is_authenticated:
return JsonResponse(
{
'error': True,
'data': 'User is not authenticated'
},
)
logout(request)
return JsonResponse({
'data': 'ok'
})
|
61a4ab177bc6ce66c0dedc1beda552bd655b4ce2
|
[
"HTML",
"JavaScript",
"Markdown",
"Python",
"Text",
"Dockerfile"
] | 31
|
Python
|
Butonix/flatype
|
d55e1b52014df570b809283726220358a0abc4fc
|
331f6c32c6df727fd2a9670e1af72754e7575e8e
|
refs/heads/master
|
<repo_name>jboegeholz/rust_katas<file_sep>/christmas_tree/src/main.rs
fn draw_christmas_tree(height: i32){
    let mut val: String = String::new();
    for i in 0..height {
        // leading spaces, then an odd number of X's per row
        for _ in 0..height - i {
            val.push_str(" ");
        }
        for _ in 0..(2 * i + 1) {
            val.push_str("X");
        }
        val.push_str("\n");
    }
    // trunk below the tree
    for _ in 0..height {
        val.push_str(" ");
    }
    val.push_str("|");
    println!("{}", val);
}
#[test]
fn it_works() {
draw_christmas_tree(1);
}
fn main() {
draw_christmas_tree(27);
}
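// For example, draw_christmas_tree(3) prints:
//    X
//   XXX
//  XXXXX
//    |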
<file_sep>/karate_chop/src/main.rs
fn bin_chop(the_number: i32, the_array: &[i32]) -> i32{
    // classic binary search: index of the_number, or -1 if it is not present
    let mut l = 0;
    // cast the length before subtracting so an empty slice gives -1 instead of
    // underflowing usize
    let mut r = the_array.len() as i32 - 1;
    while r >= l {
        // integer midpoint; no need to round-trip through f32
        let m = l + (r - l) / 2;
        if the_array[m as usize] < the_number {
            l = m + 1
        }
        else if the_array[m as usize] > the_number {
            r = m - 1
        }
        else {
            return m;
        }
    }
    -1
}
fn main(){
assert!(4 == bin_chop(5, &[1, 2, 3, 4, 5]));
assert!(0 == bin_chop(1, &[1, 3, 4, 6, 7, 8, 10, 13, 14, 20]));
assert!(1 == bin_chop(3, &[1, 3, 4, 6, 7, 8, 10, 13, 14]));
assert!(2 == bin_chop(4, &[1, 3, 4, 6, 7, 8, 10, 13, 14, 20]));
assert!(3 == bin_chop(6, &[1, 3, 4, 6, 7, 8, 10, 13, 14]));
assert!(4 == bin_chop(7, &[1, 3, 4, 6, 7, 8, 10, 13, 14]));
assert!(8 == bin_chop(14, &[1, 3, 4, 6, 7, 8, 10, 13, 14]));
assert!(-1 == bin_chop(15, &[1, 3, 4, 6, 7, 8, 10, 13, 14]));
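    // with the len() underflow handled in bin_chop above, the empty-slice case
    // from the Python version of this kata also holds:
    assert!(-1 == bin_chop(3, &[]));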
}<file_sep>/bubble_sort/src/main.rs
fn main(){
println!("Hello Bubble Sort");
assert!([1, 2, 3, 4, 5] == bubble_sort(&mut[5, 4, 3, 2, 1]));
//let mut ys: [i32; 500000] = [1; 500000];
//bubble_sort(&mut ys);
}
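// Note: a common refinement (not implemented here) is to stop early once a full
// pass makes no swap, e.g. set `swapped = true` inside the `if x > y` branch and
// break out of the outer loop when it stays false.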
fn bubble_sort(the_array: &mut[i32]) -> &[i32]{
println!("########### start ###########");
let array_length = (the_array.len()) as i32;
let mut j = 1;
while j < array_length {
//println!("j: {}", j);
let mut i = 0;
while i < array_length - j {
//println!("i: {}", i);
let x = the_array[i as usize];
let y = the_array[(i + 1) as usize];
if x > y {
the_array[i as usize ] = y;
the_array[(i + 1) as usize] = x;
}
i += 1;
print!(".");
}
println!("{}", j);
//println!("After {}. pass: {:?}", j, &the_array);
j += 1;
}
println!("########### finished ###########");
return the_array
}<file_sep>/roman_numerals/src/main.rs
fn convert_number_simple(number: i32) -> String{
let mut roman_literal: String = String::new();
let thousands = number / 1000;
let five_hundreds = (number % 1000) / 500;
let hundreds = (number % 1000) % 500 / 100;
let fifties = (number % 1000) % 500 % 100 / 50;
let tens = (number % 1000) % 500 % 100 % 50 / 10;
let fives = (number % 1000) % 500 % 100 % 50 % 10 / 5;
let ones = (number % 1000) % 500 % 100 % 50 % 10 % 5;
add_literal("M", thousands, &mut roman_literal);
add_literal("D", five_hundreds, &mut roman_literal);
add_literal("C", hundreds, &mut roman_literal);
add_literal("L", fifties, &mut roman_literal);
add_literal("X", tens, &mut roman_literal);
add_literal("V", fives, &mut roman_literal);
add_literal("I", ones, &mut roman_literal);
roman_literal
}
fn add_literal( character: &'static str, times: i32, roman_literal: &mut String){
let mut n = 0;
while n < times{
roman_literal.push_str(character);
n += 1;
}
}
//fn convert_number(number: i32) -> String{
// easy = convert_number_simple(number);
// easy = easy.replace("DCCCC", "CM");
// easy = easy.replace("VIIII", "IX");
// easy = easy.replace("IIII", "IV");
//
// easy
//}
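// A compiling sketch of the subtractive-notation pass hinted at above; left
// unused (like the commented-out asserts in main) and therefore marked dead_code.
#[allow(dead_code)]
fn convert_number(number: i32) -> String {
    convert_number_simple(number)
        .replace("DCCCC", "CM")
        .replace("VIIII", "IX")
        .replace("IIII", "IV")
}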
fn main(){
assert!("M" == convert_number_simple(1000));
assert!("MM" == convert_number_simple(2000));
assert!("D" == convert_number_simple(500));
assert!("MD" == convert_number_simple(1500));
assert!("C" == convert_number_simple(100));
assert!("L" == convert_number_simple(50));
assert!("X" == convert_number_simple(10));
assert!("V" == convert_number_simple(5));
assert!("I" == convert_number_simple(1));
assert!("MDCCCCLXXXIIII" == convert_number_simple(1984));
// assert!("MCMLXXXIV" == convert_number(1984));
// assert!("IV" == convert_number(4));
// assert!("IX" == convert_number(9));
}<file_sep>/fizzbuzz/src/main.rs
fn fizz_buzz(x: i32) -> String {
println!("Value is {}", x);
let mut val: String = String::new();
let fizz: &str = "Fizz";
let buzz: &str = "Buzz";
if x % 3 == 0{
val.push_str(fizz)
}
if x % 5 == 0{
val.push_str(buzz)
}
if val.is_empty(){
val = format!("{}", x);
}
println!("{}", val);
val
}
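// A minimal entry point so the binary target also builds with `cargo run`;
// the #[test] below still runs under `cargo test`:
fn main() {
    for x in 1..16 {
        fizz_buzz(x);
    }
}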
#[test]
fn it_works() {
assert!(fizz_buzz(1) == "1");
assert!(fizz_buzz(3) == "Fizz");
assert!(fizz_buzz(5) == "Buzz");
assert!(fizz_buzz(15) == "FizzBuzz");
}<file_sep>/roman_numerals/src/roman_numerals.py
def convert_number_simple(number):
roman_literal = ""
thousands = number / 1000
roman_literal += "M" * thousands
five_hundreds = (number % 1000) / 500
roman_literal += "D" * five_hundreds
hundreds = (number % 1000) % 500 / 100
roman_literal += "C" * hundreds
fifties = (number % 1000) % 500 % 100 / 50
roman_literal += "L" * fifties
tens = (number % 1000) % 500 % 100 % 50 / 10
roman_literal += "X" * tens
fives = (number % 1000) % 500 % 100 % 50 % 10 / 5
roman_literal += "V" * fives
ones = (number % 1000) % 500 % 100 % 50 % 10 % 5
roman_literal += "I" * ones
print roman_literal
return roman_literal
def convert_number(number):
easy = convert_number_simple(number)
easy = easy.replace("DCCCC", "CM")
easy = easy.replace("VIIII", "IX")
easy = easy.replace("IIII", "IV")
return easy
def main():
assert("M" == convert_number_simple(1000))
assert("MM" == convert_number_simple(2000))
assert("D" == convert_number_simple(500))
assert("MD" == convert_number_simple(1500))
assert("C" == convert_number_simple(100))
assert("L" == convert_number_simple(50))
assert("X" == convert_number_simple(10))
assert("V" == convert_number_simple(5))
assert("I" == convert_number_simple(5))
assert("MDCCCCLXXXIIII" == convert_number_simple(1984))
assert("MCMLXXXIV" == convert_number(1984))
assert("IV" == convert_number(4))
assert("IX" == convert_number(9))
if __name__ == '__main__':
main()
<file_sep>/karate_chop/src/karate_chop.py
import math
def bin_chop(the_number, the_array):
l = 0
r = len(the_array) - 1
while r >= l:
m = int(math.floor((l+r)/2))
if the_array[m] < the_number:
l = m + 1
elif the_array[m] > the_number:
r = m - 1
else:
return m
return -1
def main():
assert(0 == bin_chop(1, [1, 3, 4, 6, 7, 8, 10, 13, 14, 20]))
assert(1 == bin_chop(3, [1, 3, 4, 6, 7, 8, 10, 13, 14]))
assert(2 == bin_chop(4, [1, 3, 4, 6, 7, 8, 10, 13, 14, 20]))
assert(3 == bin_chop(6, [1, 3, 4, 6, 7, 8, 10, 13, 14]))
assert(4 == bin_chop(7, [1, 3, 4, 6, 7, 8, 10, 13, 14]))
assert(8 == bin_chop(14, [1, 3, 4, 6, 7, 8, 10, 13, 14]))
assert(-1 == bin_chop(15, [1, 3, 4, 6, 7, 8, 10, 13, 14]))
assert(-1 == bin_chop(3, []))
assert(-1 == bin_chop(3, [1]))
assert(0 == bin_chop(1, [1]))
assert(0 == bin_chop(1, [1, 3, 5]))
assert(1 == bin_chop(3, [1, 3, 5]))
assert(2 == bin_chop(5, [1, 3, 5]))
assert(-1 == bin_chop(0, [1, 3, 5]))
assert(-1 == bin_chop(2, [1, 3, 5]))
assert(-1 == bin_chop(4, [1, 3, 5]))
assert(-1 == bin_chop(6, [1, 3, 5]))
#
assert(0 == bin_chop(1, [1, 3, 5, 7]))
assert(1 == bin_chop(3, [1, 3, 5, 7]))
assert(2 == bin_chop(5, [1, 3, 5, 7]))
assert(3 == bin_chop(7, [1, 3, 5, 7]))
assert(-1 == bin_chop(0, [1, 3, 5, 7]))
assert(-1 == bin_chop(2, [1, 3, 5, 7]))
assert(-1 == bin_chop(4, [1, 3, 5, 7]))
assert(-1 == bin_chop(6, [1, 3, 5, 7]))
assert(-1 == bin_chop(8, [1, 3, 5, 7]))
if __name__ == '__main__':
main()<file_sep>/bubble_sort/src/bubble_sort.py
def bubble_sort(the_array):
print "########### start ###########"
for j in range(1, len(the_array)):
for i in range(0, len(the_array)-j):
x = the_array[i]
y = the_array[i+1]
if x > y:
the_array[i] = y
the_array[i+1] = x
print "After", str(j) + ". pass:", the_array
print "########### finished ###########"
return the_array
def main():
assert([1, 2, 3] == bubble_sort([3, 2, 1]))
assert([1, 2, 3, 4, 5] == bubble_sort([5, 4, 3, 2, 1]))
assert([1, 2, 3, 4, 5] == bubble_sort([3, 2, 1, 5, 4]))
#a = [x for x in range(500000)]
#bubble_sort(a)
if __name__ == '__main__':
main()
<file_sep>/rust_by_example/Cargo.toml
[package]
name = "rust_by_example"
version = "0.1.0"
authors = ["jboegeholz <<EMAIL>>"]
# mod 2_2_tuples;
<file_sep>/christmas_tree/Cargo.toml
[package]
name = "christmas_tree"
version = "0.1.0"
authors = ["jboegeholz <<EMAIL>>"]
<file_sep>/christmas_tree/src/christmas_tree.py
height = 27
for i in range(height):
print (" " * (height-i)) + "X" * (2*i+1)
print (" " * height) + "|"
|
91dff9bd131e1791944152ca95afa5a791e11f7b
|
[
"TOML",
"Rust",
"Python"
] | 11
|
Rust
|
jboegeholz/rust_katas
|
fd39c63d1cde26769b007436b2d7f773db52b481
|
3273e7cfbcf75623e77a8030de50c87c0642a70d
|
refs/heads/main
|
<repo_name>cafedomancer/petstore<file_sep>/spec/support/committee.rb
RSpec.configure do |config|
config.add_setting :committee_options
config.committee_options = {
schema_path: Rails.root.join('schema/openapi.yaml').to_s,
query_hash_key: 'rack.request.query_hash',
parse_response_by_content_type: false
}
end
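# These options are consumed by Committee::Rails::Test::Methods in the request
# specs, which validate requests and responses against schema/openapi.yaml.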
<file_sep>/app/serializers/user_serializer.rb
class UserSerializer < ApplicationSerializer
def to_json(*)
@object.to_json(only: [:id, :username, :first_name, :last_name, :email, :phone])
end
end
<file_sep>/app/controllers/schema_controller.rb
class SchemaController < ActionController::Base
def openapi
render file: Rails.root.join('schema/openapi.yaml')
end
def rapidoc
render file: Rails.root.join('schema/rapidoc.html')
end
end
<file_sep>/spec/requests/schema_spec.rb
require 'rails_helper'
RSpec.describe "Schema", type: :request do
describe "GET /openapi.yaml" do
it "returns http success" do
get "/openapi.yaml"
expect(response).to have_http_status(:success)
end
end
describe "GET /rapidoc.html" do
it "returns http success" do
get "/rapidoc.html"
expect(response).to have_http_status(:success)
end
end
end
<file_sep>/app/serializers/error_serializer.rb
class ErrorSerializer < ApplicationSerializer
def to_json(*)
@object.errors.to_json
end
end
<file_sep>/spec/requests/users_spec.rb
require 'rails_helper'
RSpec.describe '/users', type: :request do
include Committee::Rails::Test::Methods
describe 'POST /users' do
context 'when valid' do
it 'creates a user' do
post users_url, params: {
username: 'johndoe',
password: '<PASSWORD>',
first_name: 'John',
last_name: 'Doe',
email: '<EMAIL>',
phone: '03-1234-5678',
}, as: :json
assert_request_schema_confirm
assert_response_schema_confirm(201)
end
end
context 'when invalid' do
before do
create(:user, {
username: 'johndoe',
password: '<PASSWORD>',
first_name: 'John',
last_name: 'Doe',
email: '<EMAIL>',
phone: '03-1234-5678',
})
end
it 'does not create a user' do
post users_url, params: {
username: 'johndoe',
password: '',
first_name: '',
last_name: '',
email: '<EMAIL>',
phone: '',
}, as: :json
assert_request_schema_confirm
assert_response_schema_confirm(422)
end
end
end
end
<file_sep>/app/serializers/application_serializer.rb
class ApplicationSerializer
def initialize(object)
@object = object
end
end
<file_sep>/spec/models/user_spec.rb
require 'rails_helper'
RSpec.describe User, type: :model do
it 'is invalid without a username' do
user = build(:user, username: nil)
user.valid?
expect(user.errors[:username]).to include("can't be blank")
end
it 'is invalid with a taken username' do
user = build(:user, username: create(:user).username)
user.valid?
expect(user.errors[:username]).to include('has already been taken')
end
it 'is invalid without a password' do
user = build(:user, password: nil)
user.valid?
expect(user.errors[:password]).to include("can't be blank")
end
it 'is invalid with a too short password' do
user = build(:user, password: '<PASSWORD>')
user.valid?
expect(user.errors[:password]).to include('is too short (minimum is 6 characters)')
end
it 'is invalid without a first name' do
user = build(:user, first_name: nil)
user.valid?
expect(user.errors[:first_name]).to include("can't be blank")
end
it 'is invalid without a last name' do
user = build(:user, last_name: nil)
user.valid?
expect(user.errors[:last_name]).to include("can't be blank")
end
it 'is invalid without an email' do
user = build(:user, email: nil)
user.valid?
expect(user.errors[:email]).to include("can't be blank")
end
it 'is invalid with a taken email' do
user = build(:user, email: create(:user).email)
user.valid?
expect(user.errors[:email]).to include('has already been taken')
end
it 'is invalid without a phone' do
user = build(:user, phone: nil)
user.valid?
expect(user.errors[:phone]).to include("can't be blank")
end
it 'is invalid with a invalid phone' do
user = build(:user, phone: 'lorem ipsum')
user.valid?
expect(user.errors[:phone]).to include('is invalid')
end
end
<file_sep>/config/routes.rb
Rails.application.routes.draw do
resources :users, only: :create
get 'openapi.yaml', to: 'schema#openapi'
get 'rapidoc.html', to: 'schema#rapidoc'
end
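# Routes defined above: POST /users, GET /openapi.yaml and GET /rapidoc.html.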
|
43197b40ed3eab716a55c9de9f7ac1aac2a09c55
|
[
"Ruby"
] | 9
|
Ruby
|
cafedomancer/petstore
|
ce7d7d15f2252efe29e9b35c0f848fc8bce68f72
|
5e984a8ba980acca27b18b86fb92a2273dd2390a
|
refs/heads/master
|
<repo_name>moisesolimpio/SistemaAtendimentoV2<file_sep>/DAO/estadoDAO.class.php
<?php
class estadoDAO {
public function getAll(){
$objVO = new estadoVO();
$retorno = array();
$sql = "SELECT * FROM estado";
$rs = mysql_query($sql);
while($conteudo = mysql_fetch_array($rs)){
$objVO->setIdEstado($conteudo["idEstado"]);
$objVO->setEstado($conteudo["estado"]);
$retorno[] = clone $objVO;
}
return $retorno;
}
}
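// Usage sketch (assumes the mysql connection from Conexao/conexao.php is open):
//   $estadoDAO = new estadoDAO();
//   $estados = $estadoDAO->getAll(); // array of estadoVO objects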
<file_sep>/DAO/devolucaoDocDAO.class.php
<?php
class devolucaoDocDAO {
public function getAll($id) {
$objVO = new devolucaoDocVO();
$retorno = array();
$sql = sprintf('SELECT * FROM devolucao_doc WHERE idDocumento = "%s"', $id);
$rs = mysql_query($sql);
while($conteudo = mysql_fetch_array($rs)){
$objVO->setIdDocumento($conteudo["idDocumento"]);
$objVO->setObservacoes($conteudo["observacoes"]);
$objVO->setData($conteudo["data"]);
$retorno[] = clone $objVO;
}
return $retorno;
}
public function insert(devolucaoDocVO $objVO) {
$sql = sprintf("INSERT INTO devolucao_doc (idDocumento, observacoes, data) VALUES ('%s','%s','%s')",
$objVO->getIdDocumento(),
$objVO->getObservacoes(),
$objVO->getData()
);
$rs = mysql_query($sql);
$objVO->setIdDevolucaoDoc(mysql_insert_id());
return $objVO;
}
}
<file_sep>/DAO/funcCompDAO.class.php
<?php
class funcCompDAO {
public function insert(funcCompVO $objVO) {
$sql = sprintf("INSERT INTO func_comp (idFuncionario, idComputador, data) VALUES ('%s', '%s', '%s')",
$objVO->getIdFuncionario(),
$objVO->getIdComputador(),
$objVO->getData()
);
mysql_query($sql);
$objVO->setIdFuncComp(mysql_insert_id());
return $objVO;
}
public function update(funcCompVO $objVO) {
$sql = sprintf("UPDATE func_comp SET idFuncionario = '%s', idComputador = '%s', data = '%s' WHERE idFuncComp = '%s'",
$objVO->getIdFuncionario(),
$objVO->getIdComputador(),
$objVO->getData(),
$objVO->getIdFuncComp()
);
mysql_query($sql);
}
public function getById($idFuncComp) {
$objVO = new funcCompVO();
$sql = sprintf('SELECT * FROM func_comp WHERE idFuncComp = "%s"', $idFuncComp);
$rs = mysql_query($sql);
while ($conteudo = mysql_fetch_array($rs)) {
$objVO->setIdFuncComp($conteudo["idFuncComp"]);
$objVO->setIdFuncionario($conteudo["idFuncionario"]);
$objVO->setIdComputador($conteudo["idComputador"]);
$objVO->setData($conteudo["data"]);
$retorno = clone $objVO;
}
return $retorno;
}
public function getAll() {
$objVO = new funcCompVO();
$retorno = array();
$sql = "SELECT * FROM func_comp";
$rs = mysql_query($sql);
while ($conteudo = mysql_fetch_array($rs)) {
$objVO->setIdFuncComp($conteudo["idFuncComp"]);
$objVO->setIdFuncionario($conteudo["idFuncionario"]);
$objVO->setIdComputador($conteudo["idComputador"]);
$objVO->setData($conteudo["data"]);
$retorno[] = clone $objVO;
}
return $retorno;
}
}
//end of class
<file_sep>/func_comp.php
<?php
include_once './sessao.php';
//conexao
include_once './Conexao/conexao.php';
//vo
include_once './VO/funcCompVO.class.php';
include_once './VO/funcionarioVO.class.php';
include_once './VO/computadorVO.class.php';
//dao
include_once './DAO/funcCompDAO.class.php';
include_once './DAO/funcionarioDAO.class.php';
include_once './DAO/computadorDAO.class.php';
//objVO
$funcCompVO = new funcCompVO();
$funcionarioVO = new funcionarioVO();
$data = date("d/m/Y"); //current date
$dataBd = implode("-", array_reverse(explode("/", $data))); //convert the date into the format stored in the database
$idFuncComp = $_GET["par"]; //get the id parameter from the URL
if ($idFuncComp == 0) {
//novo cadastro
} else {
//buscar o para atualizar
$funcCompDAO = new funcCompDAO();
$funcCompVO = $funcCompDAO->getById($idFuncComp);
}
echo "<script type=\"text/javascript\" src=\"javascript/funcoes.js\"></script>";
if (isset($_REQUEST["submit"]) && $_REQUEST["submit"] == "Salvar") {
//seta os dados
$funcCompVO->setIdFuncionario($_REQUEST["idFuncionario"]);
$funcCompVO->setIdComputador($_REQUEST["idComputador"]);
$funcCompVO->setData($dataBd);//formata para salvar
$funcCompDAO = new funcCompDAO();
//verifica o codigo
if (isset($_REQUEST["idFuncComp"]) && $_REQUEST["idFuncComp"] == "") {
//novo cadastro
$funcCompDAO->insert($funcCompVO);
echo "<script>msg(1)</script>";
$funcCompVO = new funcCompVO();
//header("Location: site.php");
//exit;
} else {
//atualizacao
$funcCompVO->setIdFuncComp($_REQUEST["idFuncComp"]);
$funcCompDAO->update($funcCompVO);
echo "<script>msg(2)</script>";
$funcCompVO = new funcCompVO();
//header("Location: site.php");
//exit;
}
}//fim do salvar
if (isset($_REQUEST["submit"]) && $_REQUEST["submit"] == "Cancelar") {
header("Location: site.php");
exit;
}
?>
<html>
<head>
<meta charset="UTF-8">
<link rel="stylesheet" type="text/css" href="css/fontes.css">
<title></title>
</head><br><br><br><br>
<body>
<form action="" method="post" id="funcComp">
<table border="0" align="center" cellpadding="15" cellspacing="15">
<!--Titulo-->
<tr>
<td colspan="4" align="center" class="titulo">Cadastro de Computador - Funcionario</td>
</tr>
<!--Codigo-->
<tr>
<td width="150" align="right">Código:</td>
<td>
<input type="text" name="idFuncComp" id="idFuncComp" size="5" maxlength="5" value="<?php echo $funcCompVO->getIdFuncComp(); ?>" class="readonly" readonly>
Data:
<input type="text" name="data" id="data" size="15" maxlength="10" value="<?php echo $data; ?>" class="readonly" readonly>
</td>
</tr>
<!--idFuncionario -->
<tr>
<td width="150" align="right">Funcionário:</td>
<td>
<select name="idFuncionario">
<?php
$sql = "SELECT * FROM funcionario WHERE ativo = 1 ORDER BY nomeFuncionario ASC";
$rs = mysql_query($sql);
while ($conteudo = mysql_fetch_array($rs)) {
$idFuncionario = $funcCompVO->getIdFuncionario();
if($idFuncionario == $conteudo["idFuncionario"]){
echo "<option value=\"$conteudo[idFuncionario]\" selected>$conteudo[nomeFuncionario]</option>";
}else{
echo "<option value=\"$conteudo[idFuncionario]\">$conteudo[nomeFuncionario]</option>";
}
}
?>
</select>
</td>
</tr>
<!--idComputador -->
<tr>
<td width="150" align="right">Computador:</td>
<td>
<select name="idComputador">
<?php
$sql = "SELECT * FROM computador";
$rs = mysql_query($sql);
while ($conteudo = mysql_fetch_array($rs)) {
$idComputador = $funcCompVO->getIdComputador();
if($idComputador == $conteudo["idComputador"]){
echo "<option value=\"$conteudo[idComputador]\" selected>$conteudo[nomeComputador]</option>";
}else{
echo "<option value=\"$conteudo[idComputador]\">$conteudo[nomeComputador]</option>";
}
}
?>
</select>
</td>
</tr>
<!-- botoes -->
<tr>
<td colspan="4" align="center">
<input type="submit" name="submit" value="Salvar">
</td>
</tr>
</table>
</form>
<!--Formatação com dataTable-->
<script src="media/js/jquery.js"></script>
<script src="media/js/jquery.dataTables.min.js"></script>
<link rel="stylesheet" href="media/css/jquery.dataTables.css">
<script type="text/javascript">
$(document).ready(function () {
$('#tabela1').dataTable({
"scrollY": "300px",
"scrollCollapse": true,
"paging": false,
"oLanguage": {
"sUrl": "media/pt-br.txt"
}
});
});
</script>
<div style="width: 850px; margin: auto;">
<div id="demo">
<table cellpadding="0" cellspacing="0" border="0" class="display" id="tabela1" align="center">
<thead>
<tr>
<th>Código</th>
<th>Funcionário</th>
<th>Computador</th>
<th>Data</th>
<th>Editar</th>
</tr>
</thead>
<?php
$funcCompDAO = new funcCompDAO();
$listaFuncComp = $funcCompDAO->getAll();
$funcionarioDAO = new funcionarioDAO();
$listaFuncionarios = $funcionarioDAO->getAll();
$computadorDAO = new computadorDAO();
$listaComputadores = $computadorDAO->getAll();
//for para func_comp
for ($i = 0; $i < sizeof($listaFuncComp); $i++) {
$dadosFuncComp = $listaFuncComp[$i];
//for para funcionario
for ($z = 0; $z < sizeof($listaFuncionarios); $z++) {
$dadosFuncionarios = $listaFuncionarios[$z];
if($dadosFuncionarios->getIdFuncionario() == $dadosFuncComp->getIdFuncionario()){
$nomeFuncionario = $dadosFuncionarios->getNomeFuncionario();
}
}
//for para computador
for ($w = 0; $w < sizeof($listaComputadores); $w++) {
$dadosComputadores = $listaComputadores[$w];
if($dadosComputadores->getIdComputador() == $dadosFuncComp->getIdComputador()){
$nomeComputador = $dadosComputadores->getNomeComputador();
}
}
//formata a data para exibir
$dataExibe = implode("/", array_reverse(explode("-", $dadosFuncComp->getData()))); //converte a data p/ salvar no bd
echo "
<tr>
<td align=\"center\" class=\"textoDataTable\">" . $dadosFuncComp->getIdFuncComp() . "</td>
<td align=\"center\" class=\"textoDataTable\">" . $nomeFuncionario . "</td>
<td align=\"center\" class=\"textoDataTable\">" . $nomeComputador . "</td>
<td align=\"center\" class=\"textoDataTable\">" . $dataExibe . "</td>
<td align=\"center\"><a href=\"?red=funcComp&par=" . $dadosFuncComp->getIdFuncComp() . "\"><img src=\"img/lapis.png\"></a></td>
</tr>
";
}
?>
</table>
</div>
</div>
</body>
</html>
<file_sep>/DAO/documentoDAO.class.php
<?php
class documentoDAO {
//insert
public function insert(documentoVO $objVO) {
$sql = sprintf("INSERT INTO documento (data, idFuncionario, tipoDocumento, idCategoriaDoc, departamento, ac, descricaoDoc, caminhoArquivoAnexo) VALUES ('%s','%s','%s','%s','%s','%s','%s','%s')",
$objVO->getData(),
$objVO->getIdFuncionario(),
$objVO->getTipoDocumento(),
$objVO->getIdCategoriaDoc(),
$objVO->getDepartamento(),
$objVO->getAc(),
$objVO->getDescricaoDoc(),
$objVO->getCaminhoAnexoArquivo()
);
mysql_query($sql);
$objVO->setIdDocumento(mysql_insert_id());
return $objVO;
}
    //update
public function update(documentoVO $objVO) {
$sql = sprintf("UPDATE documento SET data ='%s', idFuncionario='%s', tipoDocumento ='%s', idCategoriaDoc ='%s', departamento ='%s',
ac ='%s', descricaoDoc ='%s', caminhoArquivoAnexo ='%s' WHERE idDocumento ='%s'",
$objVO->getData(),
$objVO->getIdFuncionario(),
$objVO->getTipoDocumento(),
$objVO->getIdCategoriaDoc(),
$objVO->getDepartamento(),
$objVO->getAc(),
$objVO->getDescricaoDoc(),
$objVO->getCaminhoAnexoArquivo(),
$objVO->getIdDocumento()
);
mysql_query($sql);
}
    //the if lets this method also serve the devolutions dataTable by fetching a single document by id
public function getAll($id) {
$objVO = new documentoVO();
$retorno = array();
if ($id == 0) {
$sql = "SELECT * FROM documento";
} elseif ($id != 0) {
$sql = sprintf('SELECT * FROM documento WHERE idDocumento = "%s"',$id);
}
$rs = mysql_query($sql);
while ($conteudo = mysql_fetch_array($rs)) {
$objVO->setIdDocumento($conteudo["idDocumento"]);
$objVO->setData($conteudo["data"]);
$objVO->setIdFuncionario($conteudo["idFuncionario"]);
$objVO->setTipoDocumento($conteudo["tipoDocumento"]);
$objVO->setIdCategoriaDoc($conteudo["idCategoriaDoc"]);
$objVO->setDepartamento($conteudo["departamento"]);
$objVO->setAc($conteudo["ac"]);
$objVO->setDescricaoDoc($conteudo["descricaoDoc"]);
$objVO->setCaminhoAnexoArquivo($conteudo["caminhoArquivoAnexo"]);
$retorno[] = clone $objVO;
}
return $retorno;
}
//getById
public function getById($id) {
$objVO = new documentoVO();
$sql = sprintf('SELECT * FROM documento WHERE idDocumento = "%s"', $id);
$rs = mysql_query($sql);
while ($conteudo = mysql_fetch_array($rs)) {
$objVO->setIdDocumento($conteudo["idDocumento"]);
$objVO->setData($conteudo["data"]);
$objVO->setIdFuncionario($conteudo["idFuncionario"]);
$objVO->setTipoDocumento($conteudo["tipoDocumento"]);
$objVO->setIdCategoriaDoc($conteudo["idCategoriaDoc"]);
$objVO->setDepartamento($conteudo["departamento"]);
$objVO->setAc($conteudo["ac"]);
$objVO->setDescricaoDoc($conteudo["descricaoDoc"]);
$objVO->setCaminhoAnexoArquivo($conteudo["caminhoArquivoAnexo"]);
$retorno = clone $objVO;
}
return $retorno;
}
}
<file_sep>/VO/subCategoriaVO.class.php
<?php
class subCategoriaVO{
//atributos
private $idSubCategoria = null;
private $idCategoria = null;
private $subCategoria = null;
//construtor
public function subCategoriaVO(){
}
//get set
//idSubCategoria
public function getIdSubCategoria(){
return $this->idSubCategoria;
}
public function setIdSubCategoria($idSubCategoria){
$this->idSubCategoria = $idSubCategoria;
}
//idCategoria
public function getIdCategoria(){
return $this->idCategoria;
}
public function setIdCategoria($idCategoria){
$this->idCategoria = $idCategoria;
}
//subCategoria
public function getSubCategoria(){
return $this->subCategoria;
}
public function setSubCategoria($subCategoria){
$this->subCategoria = $subCategoria;
}
}
?>
<file_sep>/DAO/unidadeDAO.class.php
<?php
class unidadeDAO {
    //insert
public function insert(unidadeVO $objVO) {
$sql = sprintf("INSERT INTO unidade (nomeUnidade, idEstado, ativo) VALUES ('%s','%s','%s')",
$objVO->getNomeUnidade(),
$objVO->getIdEstado(),
$objVO->getAtivo()
);
        mysql_query($sql);
$objVO->setIdUnidade(mysql_insert_id());
return $objVO;
}
public function update(unidadeVO $objVO) {
$sql = sprintf("UPDATE unidade SET nomeUnidade = '%s', idEstado = '%s', ativo = '%s' WHERE idUnidade = '%s'",
$objVO->getNomeUnidade(),
$objVO->getIdEstado(),
$objVO->getAtivo(),
$objVO->getIdUnidade()
);
mysql_query($sql);
}
public function getAll() {
$objVO = new unidadeVO();
$retorno = array();
$sql = "SELECT * FROM unidade";
$rs = mysql_query($sql);
while ($conteudo = mysql_fetch_array($rs)) {
$objVO->setIdUnidade($conteudo["idUnidade"]);
$objVO->setNomeUnidade($conteudo["nomeUnidade"]);
$objVO->setIdEstado($conteudo["idEstado"]);
$objVO->setAtivo($conteudo["ativo"]);
$retorno[] = clone $objVO;
}
return $retorno;
}
public function getById($id) {
$objVO = new unidadeVO();
$sql = sprintf('SELECT * FROM unidade WHERE idUnidade = "%s"',$id);
$rs = mysql_query($sql);
while ($conteudo = mysql_fetch_array($rs)) {
$objVO->setIdUnidade($conteudo["idUnidade"]);
$objVO->setNomeUnidade($conteudo["nomeUnidade"]);
$objVO->setIdEstado($conteudo["idEstado"]);
$objVO->setAtivo($conteudo["ativo"]);
$retorno = clone $objVO;
}
return $retorno;
}
}
//end of class
?>
<file_sep>/javascript/funcoes.js
function msg(parametro) {
if (parametro == 1) {
alert("Dados Cadastrados com Sucesso!!!");
} else if (parametro == 2) {
alert("Dados Atualizados com Sucesso!!!");
}
}
<file_sep>/pesquisaAtendimento.php
<?php
include_once './sessao.php';
//Conexao
include_once './Conexao/conexao.php';
//VO
include_once './VO/atendimentoCompletoVO.class.php';
include_once './VO/atendimentoVO.class.php';
include_once './VO/usuarioVO.class.php';
include_once './VO/funcionarioVO.class.php';
include_once './VO/tipoAtendimentoVO.class.php';
include_once './VO/statusVO.class.php';
include_once './VO/usuarioVO.class.php';
include_once './VO/clienteVO.class.php';
//DAO
include_once './DAO/atendimentoCompletoDAO.class.php';
include_once './DAO/atendimentoDAO.class.php';
include_once './DAO/usuarioDAO.class.php';
include_once './DAO/funcionarioDAO.class.php';
include_once './DAO/tipoAtendimentoDAO.class.php';
include_once './DAO/statusDAO.class.php';
include_once './DAO/usuarioDAO.class.php';
include_once './DAO/clienteDAO.class.php';
if (isset($_REQUEST["idAtendimentoCompleto"]) && $_REQUEST["idAtendimentoCompleto"] != "") {
header("Location: atendimentoCompleto.php");
}
?>
<html>
<head>
<meta charset="UTF-8">
<title></title>
<link rel="stylesheet" type="text/css" href="css/fontes.css">
<script language="javascript" type="text/javascript" src="media/js/jquery.js"></script>
<script type="text/javascript" language="javascript" src="media/js/jquery.dataTables.js"></script>
<script language="javascript" type="text/javascript" src="media/js/jquery.validate.js"></script>
<!--Formatação com dataTable-->
<script src="media/js/jquery.js"></script>
<script src="media/js/jquery.dataTables.min.js"></script>
<link rel="stylesheet" href="media/css/jquery.dataTables.css">
<script type="text/javascript">
$(document).ready(function () {
$('#tabela1').dataTable({
"oLanguage": {
"sUrl": "media/pt-br.txt"
}
});
});
</script>
</head><br><br><br><br>
<body>
<form action="#" method="get">
<br><br>
<div style="width: 1050px; margin: auto;">
<div id="demo">
<table cellpadding="0" cellspacing="0" border="0" class="display" id="tabela1" align="center">
<thead>
<tr>
<th>Código</th>
<th>Data</th>
<th>Atendimento</th>
<th>Cliente</th>
<th>Descrição</th>
<th>Status</th>
<th>Editar</th>
</tr>
</thead>
<?php
$atendimentoCompletoDAO = new atendimentoCompletoDAO();
$listaAtendimentosCompl = $atendimentoCompletoDAO->getAll();
$atendimentoDAO = new atendimentoDAO();
$listaAtendimentos = $atendimentoDAO->getAll();
$usuarioDAO = new usuarioDAO();
$listaUsuarios = $usuarioDAO->getAll();
$tipoAtendimentoDAO = new tipoAtendimentoDAO();
$listaTiposAtend = $tipoAtendimentoDAO->getAll();
$clienteDAO = new clienteDAO();
$listaClientes = $clienteDAO->getAll(1);
$statusDAO = new statusDAO();
$listaStatus = $statusDAO->getAll();
for ($i = 0; $i < sizeof($listaAtendimentos); $i++) {
$dadosAtenCompl = $listaAtendimentosCompl[$i];
$dadosAtendimentos = $listaAtendimentos[$i];
$dataExibe = implode("/", array_reverse(explode("-", $dadosAtendimentos->getData())));
for ($q = 0; $q < sizeof($listaTiposAtend); $q++) {
$dadosTipoAtend = $listaTiposAtend[$q];
if ($dadosTipoAtend->getIdTipoAtendimento() == $dadosAtendimentos->getIdTipoAtendimento()) {
$nomeTipoAtend = $dadosTipoAtend->getTipo();
}
}
for ($z = 0; $z < sizeof($listaClientes); $z++) {
$dadosCliente = $listaClientes[$z];
if ($dadosCliente->getIdCliente() == $dadosAtenCompl->getIdCliente()) {
$nomeCliente = $dadosCliente->getNomeCliente();
}
}
for ($x = 0; $x < sizeof($listaStatus); $x++) {
$dadosStatus = $listaStatus[$x];
if ($dadosStatus->getIdStatus() == $dadosAtenCompl->getIdStatus()) {
$nomeStatus = $dadosStatus->getStatus();
}
}
echo "
<tr>
<td align=\"center\" class=\"textoDataTable\">" . $dadosAtenCompl->getIdAtendimentoCompleto() . "</td>
<td align=\"center\" class=\"textoDataTable\">" . $dataExibe . "</td>
<td align=\"center\" class=\"textoDataTable\">" . $nomeTipoAtend . "</td>
<td align=\"center\" class=\"textoDataTable\">" . $nomeCliente . "</td>
<td align=\"center\" class=\"textoDataTable\">" . $dadosAtenCompl->getDescricao() . "</td>
<td align=\"center\" class=\"textoDataTable\">" . $nomeStatus . "</td>
<td align=\"center\"><a href=\"?red=iniciar&par=" . $dadosAtenCompl->getIdAtendimentoCompleto() . "\"><img src=\"img/lapis.png\"></a></td>
</tr>
";
}//FECHA FOR ATENDIMENTO
?>
</table></div></div>
</body>
</form>
</html>
<file_sep>/funcionarios.php
<?php
include_once './sessao.php';
include_once './Conexao/conexao.php';
include_once './VO/funcionarioVO.class.php';
include_once './VO/unidadeVO.class.php';
include_once './VO/computadorVO.class.php';
include_once './VO/departamentoFuncVO.class.php';
include_once './DAO/funcionarioDAO.class.php';
include_once './DAO/unidadeDAO.class.php';
include_once './DAO/computadorDAO.class.php';
include_once './DAO/departamentoFuncDAO.class.php';
$funcionarioVO = new funcionarioVO();
$unidadeVO = new unidadeVO();
$computadorVO = new computadorVO();
//recupera o parametro do id
$idFun = $_GET["par"];
if ($idFun == 0) {
//novo cadastro de funcionario
} else {
//buscar o funcionario para atualizar
$funcionarioDAO = new funcionarioDAO();
$funcionarioVO = $funcionarioDAO->getById($idFun);
}
echo "<script type=\"text/javascript\" src=\"javascript/funcoes.js\"></script>";
if (isset($_REQUEST["submit"]) && $_REQUEST["submit"] == "Salvar") {
//seta os campos
$funcionarioVO->setNomeFuncionario($_REQUEST["nomeFuncionario"]);
$funcionarioVO->setEmail($_REQUEST["email"]);
$funcionarioVO->setIdUnidade($_REQUEST["idUnidade"]);
$funcionarioVO->setTelefone1($_REQUEST["telefone1"]);
$funcionarioVO->setTelefone2($_REQUEST["telefone2"]);
$funcionarioVO->setSkype($_REQUEST["skype"]);
$funcionarioVO->setAtivo($_REQUEST["ativo"]);
$funcionarioVO->setIdDepartamento($_REQUEST["idDepartamento"]);
//verifica o id
if (isset($_REQUEST["idFuncionario"]) && $_REQUEST["idFuncionario"] == "") {
//novo cadastro
$funcionarioDAO = new funcionarioDAO();
$funcionarioDAO->insert($funcionarioVO);
echo "<script>msg(1)</script>";
$funcionarioVO = new funcionarioVO();
} else {
//altera cadastro
$funcionarioDAO = new funcionarioDAO();
$funcionarioDAO->update($funcionarioVO);
echo "<script>msg(2)</script>";
$funcionarioVO = new funcionarioVO();
}
}//fim do salvar
if (isset($_REQUEST["submit"]) && $_REQUEST["submit"] == "Cancelar") {
header("Location: site.php");
exit;
}
?>
<html>
<head><br><br><br><br>
<meta charset="UTF-8">
<link rel="stylesheet" type="text/css" href="css/fontes.css">
<script type="text/javascript" language="javascript" src="media/js/jquery.min.js"></script>
<script type="text/javascript" src="media/js/jquery.meio.mask.js" charset="utf-8"></script>
<script type="text/javascript">
jQuery(function ($) {
$('input[type="text"]').setMask();
});
</script>
<title></title>
</head>
<form action="" method="post" id="funcionarios">
<table border="0" align="center" cellpadding="15" cellspacing="15">
<!--Titulo-->
<tr>
<td colspan="4" align="center" class="titulo">Cadastro dos Funcionários</td>
</tr>
<!--Codigo-->
<tr>
<td width="150" align="right">Código:</td>
<td><input type="text" name="idFuncionario" id="idFuncionario" value="<?php echo $funcionarioVO->getIdFuncionario(); ?>" size="11" maxlength="11" class="readonly" readonly></td>
<!--Ativo -->
<td width="50" align="right">Ativo:</td>
<td>
<select name="ativo">
<?php
if ($funcionarioVO->getAtivo() == 1) {
echo "<option value=\"1\" selected>Sim</option>";
echo "<option value=\"2\">Não</option>";
} else if ($funcionarioVO->getAtivo() == 2) {
echo "<option value=\"1\">Sim</option>";
echo "<option value=\"2\" selected>Não</option>";
} else {
echo "<option value=\"1\" selected>Sim</option>";
echo "<option value=\"2\">Não</option>";
}
?>
</select>
</td>
</tr>
<!--Nome -->
<tr>
<td width="150" align="right">Nome:</td>
<td colspan="4"><input type="text" required name="nomeFuncionario" id="nomeFucionario" value="<?php echo $funcionarioVO->getNomeFuncionario(); ?>" size="50" maxlength="50"></td>
</tr>
<!--email -->
<tr>
<td width="150" align="right">Email:</td>
<td colspan="4"><input type="text" required name="email" id="email" value="<?php echo $funcionarioVO->getEmail(); ?>" size="50" maxlength="50"></td>
</tr>
<!--Unidade -->
<tr>
<td width="150" align="right">Unidade:</td>
<td colspan="4">
<select name="idUnidade">
<?php
if ($funcionarioVO->getIdUnidade() != "") {
$idUnidade = $funcionarioVO->getIdUnidade();
} else {
$idUnidade = $unidadeVO->getIdUnidade();
}
$sql = "SELECT * FROM unidade";
$rs = mysql_query($sql);
while ($conteudo = mysql_fetch_array($rs)) {
if ($idUnidade == $conteudo["idUnidade"]) {
echo "<option value=\"$conteudo[idUnidade]\" selected>$conteudo[nomeUnidade]</option>";
} else {
echo "<option value=\"$conteudo[idUnidade]\">$conteudo[nomeUnidade]</option>";
}
}
?>
</select>
</td>
</tr>
<!-- telefones -->
<tr>
<td width="150" align="right">Telefones:</td>
<td colspan="4">
<input type="text" required alt="phone" name="telefone1" id="telefone1" placeholder="(__) ____-____" value="<?php echo $funcionarioVO->getTelefone1(); ?>" size="15" maxlength="11">
<input type="text" alt="phone" name="telefone2" id="telefone2" placeholder="(__) ____-____" value="<?php echo $funcionarioVO->getTelefone2(); ?>" size="15" maxlength="11">
</td>
</tr>
<!-- skype-->
<tr>
<td width="150" align="right">Skype:</td>
<td colspan="4">
<input type="text" name="skype" id="skype" value="<?php echo $funcionarioVO->getSkype(); ?>" size="50" maxlength="50">
</td>
</tr>
<!--idDepartamento -->
<tr>
<td width="150" align="right">Departamento:</td>
<td colspan="2">
<select name="idDepartamento">
<?php
$sql = "SELECT * FROM departamento_func";
$rs = mysql_query($sql);
while ($conteudo = mysql_fetch_array($rs)) {
$idDepartamento = $funcionarioVO->getIdDepartamento();
if ($idDepartamento == $conteudo["idDepartamento"]) {
echo "<option value=\"$conteudo[idDepartamento]\" selected>$conteudo[nomeDepartamento]</option>";
} else {
echo "<option value=\"$conteudo[idDepartamento]\">$conteudo[nomeDepartamento]</option>";
}
}
?>
</select>
</td>
</tr>
<!--Botao salvar e cancelar-->
<tr>
<td colspan="4" align="center">
<input type="submit" name="submit" value="Salvar">
</td>
</tr>
</table>
</form>
<!--Formatação com dataTable-->
<script src="media/js/jquery.js"></script>
<script src="media/js/jquery.dataTables.min.js"></script>
<link rel="stylesheet" href="media/css/jquery.dataTables.css">
<script type="text/javascript">
$(document).ready(function () {
$('#tabela1').dataTable({
"scrollY": "300px",
"scrollCollapse": true,
"paging": false,
"oLanguage": {
"sUrl": "media/pt-br.txt"
}
});
});
</script>
<div style="width: 1150px; margin: auto;">
<div id="demo">
<table cellpadding="0" cellspacing="0" border="0" class="display" id="tabela1" align="center">
<thead>
<tr>
<th>Código</th>
<th>Nome</th>
<th>Email</th>
<th>Unidade</th>
<th>Telefone</th>
<th>Depart.</th>
<th>Skype</th>
<th>Ativo</th>
<th>Editar</th>
</tr>
</thead>
<?php
$funcionarioDAO = new funcionarioDAO();
$listaFuncionarios = $funcionarioDAO->getAll();
$unidadeDAO = new unidadeDAO();
$listaUnidades = $unidadeDAO->getAll();
$departamentoFuncDAO = new departamentoFuncDAO();
$listaDepFunc = $departamentoFuncDAO->getAll();
for ($i = 0; $i < sizeof($listaFuncionarios); $i++) {
$dadosFun = $listaFuncionarios[$i];
//for para unidade
for ($z = 0; $z < sizeof($listaUnidades); $z++) {
$dadosUnid = $listaUnidades[$z];
if ($dadosUnid->getIdUnidade() == $dadosFun->getIdUnidade()) {
$nomeUnid = $dadosUnid->getNomeUnidade();
}
}
//for para departamento
for ($w = 0; $w < sizeof($listaDepFunc); $w++) {
$dadosDepFunc = $listaDepFunc[$w];
if ($dadosDepFunc->getIdDepartamento() == $dadosFun->getIdDepartamento()) {
$nomeDep = $dadosDepFunc->getNomeDepartamento();
}
}
$ativo = $dadosFun->getAtivo();
if ($ativo == 1) {
$nomeAtivo = "Sim";
} elseif ($ativo == 2) {
$nomeAtivo = "Não";
}
echo "
<tr>
<td align=\"center\" class=\"textoDataTable\">" . $dadosFun->getIdFuncionario() . "</td>
<td align=\"center\" class=\"textoDataTable\">" . $dadosFun->getNomeFuncionario() . "</td>
<td align=\"center\" class=\"textoDataTable\">" . $dadosFun->getEmail() . "</td>
<td align=\"center\" class=\"textoDataTable\">" . $nomeUnid . "</td>
<td align=\"center\" class=\"textoDataTable\">" . $dadosFun->getTelefone1() . "</td>
<td align=\"center\" class=\"textoDataTable\">" . $nomeDep . "</td>
<td align=\"center\" class=\"textoDataTable\">" . $dadosFun->getSkype() . "</td>
<td align=\"center\" class=\"textoDataTable\">" . $nomeAtivo . "</td>
<td align=\"center\"><a href=\"?red=cadastroFuncionarios&par=" . $dadosFun->getIdFuncionario() . "\"><img src=\"img/lapis.png\"></a></td>
</tr>
";
}
?>
</table></div></div>
</body>
</html>
<file_sep>/DAO/clienteDAO.class.php
<?php
class clienteDAO {
public function getAll($par) {
$objVO = new clienteVO();
$retorno = array();
        if($par == 1){//show every client
$sql = "SELECT * FROM cliente";
        }elseif ($par == 2) {//2 = only active clients (apagri = 2), used for the download list
$sql = "SELECT * FROM cliente WHERE apagri = 2 AND ativo = 1";
}
$rs = mysql_query($sql);
while ($conteudo = mysql_fetch_array($rs)) {
$objVO->setIdCliente($conteudo["idCliente"]);
$objVO->setNomeCliente($conteudo["nomeCliente"]);
$objVO->setEmail($conteudo["email"]);
$objVO->setContato($conteudo["contato"]);
$objVO->setTelefone($conteudo["telefone"]);
$objVO->setCelular($conteudo["celular"]);
$objVO->setEndereco($conteudo["endereco"]);
$objVO->setCidade($conteudo["cidade"]);
$objVO->setIdEstado($conteudo["idEstado"]);
$objVO->setCep($conteudo["cep"]);
$objVO->setIdTipoCliente($conteudo["idTipoCliente"]);
$objVO->setAtivo($conteudo["ativo"]);
$objVO->setData($conteudo["data"]);
$retorno[] = clone $objVO;
}
return $retorno;
}
public function getById($idCli) {
$objVO = new clienteVO();
$sql = sprintf('SELECT * FROM cliente WHERE idCliente = "%s"', $idCli);
$rs = mysql_query($sql);
while ($conteudo = mysql_fetch_array($rs)) {
$objVO->setIdCliente($conteudo["idCliente"]);
$objVO->setNomeCliente($conteudo["nomeCliente"]);
$objVO->setEmail($conteudo["email"]);
$objVO->setContato($conteudo["contato"]);
$objVO->setTelefone($conteudo["telefone"]);
$objVO->setCelular($conteudo["celular"]);
$objVO->setEndereco($conteudo["endereco"]);
$objVO->setCidade($conteudo["cidade"]);
$objVO->setIdEstado($conteudo["idEstado"]);
$objVO->setCep($conteudo["cep"]);
$objVO->setIdTipoCliente($conteudo["idTipoCliente"]);
$objVO->setAtivo($conteudo["ativo"]);
$objVO->setData($conteudo["data"]);
$objVO->setApagri($conteudo["apagri"]);
$retorno = clone $objVO;
}
return $retorno;
}
public function insert(clienteVO $objVO) {
$sql = sprintf("INSERT INTO cliente (nomeCliente, email, contato, telefone, celular, endereco, cidade, idEstado, cep, idTipoCliente, ativo, data, apagri)
VALUES ('%s', '%s', '%s', '%s', '%s', '%s', '%s', '%s', '%s', '%s', '%s', '%s', '%s')",
$objVO->getNomeCliente(),
$objVO->getEmail(),
$objVO->getContato(),
$objVO->getTelefone(),
$objVO->getCelular(),
$objVO->getEndereco(),
$objVO->getCidade(),
$objVO->getIdEstado(),
$objVO->getCep(),
$objVO->getIdTipoCliente(),
$objVO->getAtivo(),
$objVO->getData(),
$objVO->getApagri()
);
mysql_query($sql);
$objVO->setIdCliente(mysql_insert_id());
return $objVO;
}
public function update(clienteVO $objVO) {
$sql = sprintf("UPDATE cliente SET nomeCliente = '%s', email = '%s', contato = '%s', telefone = '%s', celular = '%s', endereco= '%s', cidade= '%s',
idEstado= '%s', cep= '%s', idTipoCliente = '%s', ativo = '%s', data = '%s', apagri = '%s' WHERE idCliente = '%s'",
$objVO->getNomeCliente(),
$objVO->getEmail(),
$objVO->getContato(),
$objVO->getTelefone(),
$objVO->getCelular(),
$objVO->getEndereco(),
$objVO->getCidade(),
$objVO->getIdEstado(),
$objVO->getCep(),
$objVO->getIdTipoCliente(),
$objVO->getAtivo(),
$objVO->getData(),
$objVO->getApagri(),
$objVO->getIdCliente()
);
mysql_query($sql);
}
}//end of class
<file_sep>/pdfFunc.php
<?php
include_once './sessao.php';
ini_set('default_charset', 'UTF-8');
include_once './Conexao/conexao.php';
$sql = "SELECT f.nomeFuncionario, f.email, u.nomeUnidade, f.telefone1, f.telefone2, f.skype FROM funcionario AS f
INNER JOIN unidade AS u ON u.idUnidade = f.idUnidade
WHERE f.ativo = 1 ORDER BY f.nomeFuncionario ASC";
$rs = mysql_query($sql);
//pdf
include_once("fpdf13/fpdf.php");
define('FPDF_FONTPATH', './fpdf13/font/');
//Novo documento PDF com orientação P - Retrato (Picture) que pode ser também L - Paisagem (Landscape)
$pdf = new FPDF('L', 'cm', 'A4');
$pdf->Open();
$pdf->AddPage();
$pdf->Image('img/apagri.jpg', 1.5, 1, 2.9, 1.2);
$pdf->SetFont('arial', 'B', 18);
$tituloRel = utf8_decode("Lista de Funcionários");
$pdf->Cell(19, 1.5, $tituloRel, "B", 1, 'C');
$pdf->SetFont('arial', '', 10); // Definindo Fonte
$pdf->SetMargins(0, 0);
$pdf->SetXY(1.5, 3);
//Cabeçalho
$tituloCod = utf8_decode("Código");
$pdf->Cell(6, 1, "Nome", 1, 0, 'C'); //largura, altura, titulo, borda 1-sim 0-não, 0, alinhamento
$pdf->Cell(6, 1, "Email", 1, 0, 'C');
$pdf->Cell(4, 1, "Unidade", 1, 0, 'C');
$pdf->Cell(3.1, 1, "Telefone 1", 1, 0, 'C');
$pdf->Cell(3.1, 1, "Telefone 2", 1, 0, 'C');
$pdf->Cell(4.4, 1, "Skype", 1, 0, 'C');
$controle = 4;//Y position (row) where the next data line is plotted
$linha = 0;//number of lines already plotted on the current page
$qtdeLinhaPag = 15;//lines per page
while ($conteudo = mysql_fetch_array($rs)) {
$nome = utf8_decode($conteudo["nomeFuncionario"]);
$email = utf8_decode($conteudo["email"]);
$unidade = utf8_decode($conteudo["nomeUnidade"]);
$telefone1 = utf8_decode($conteudo["telefone1"]);
$telefone2 = utf8_decode($conteudo["telefone2"]);
$skype = utf8_decode($conteudo["skype"]);
if ($linha < $qtdeLinhaPag) {
$pdf->SetXY(1.5, $controle); //seta o eixo x e y, variavel controle vai incrementando
$pdf->Cell(6, 1, $nome, 1, 0, 'C');
$pdf->Cell(6, 1, $email, 1, 0, 'C');
$pdf->Cell(4, 1, $unidade, 1, 0, 'C');
$pdf->Cell(3.1, 1, $telefone1, 1, 0, 'C');
$pdf->Cell(3.1, 1, $telefone2, 1, 0, 'C');
$pdf->Cell(4.4, 1, $skype, 1, 0, 'C');
$controle++;
$linha++;
} else {
$controle = 4;
$linha = 1;
$pdf->AddPage();
$pdf->Image('img/apagri.jpg', 1, 1, 2.9, 1.2);
$pdf->SetFont('arial', 'B', 18);
$pdf->Cell(21, 3.5, $tituloRel, "B", 1, 'C');
$pdf->SetFont('arial', '', 10); // Definindo Fonte
$pdf->SetMargins(0, 0);
$pdf->SetXY(1.5, 3);
//Cabeçalho
$tituloCod = utf8_decode("Código");
$pdf->Cell(6, 1, "Nome", 1, 0, 'C'); //largura, altura, titulo, borda 1-sim 0-não, 0, alinhamento
$pdf->Cell(6, 1, "Email", 1, 0, 'C');
$pdf->Cell(4, 1, "Unidade", 1, 0, 'C');
$pdf->Cell(3.1, 1, "Telefone 1", 1, 0, 'C');
$pdf->Cell(3.1, 1, "Telefone 2", 1, 0, 'C');
$pdf->Cell(4.4, 1, "Skype", 1, 0, 'C');
$pdf->SetXY(1.5, $controle); //seta o eixo x e y, variavel controle vai incrementando
$pdf->Cell(6, 1, $nome, 1, 0, 'C');
$pdf->Cell(6, 1, $email, 1, 0, 'C');
$pdf->Cell(4, 1, $unidade, 1, 0, 'C');
$pdf->Cell(3.1, 1, $telefone1, 1, 0, 'C');
$pdf->Cell(3.1, 1, $telefone2, 1, 0, 'C');
$pdf->Cell(4.4, 1, $skype, 1, 0, 'C');
$controle++;
}
}
$pdf->Output("contatos_funcionarios.pdf", "D");
?>
<file_sep>/DAO/tipoClienteDAO.class.php
<?php
class tipoClienteDAO{
public function getAll(){
$objVO = new tipoClienteVO();
$retorno = array();
$sql = "SELECT * FROM tipo_cliente";
$rs = mysql_query($sql);
while ($conteudo = mysql_fetch_array($rs)) {
$objVO->setIdTipoCliente($conteudo["idTipoCliente"]);
$objVO->setNomeTipoCliente($conteudo["nomeTipoCliente"]);
$retorno[] = clone $objVO;
}
return $retorno;
}
}
?>
<file_sep>/downloadListaDadosAcesso.php
<?php
include_once './Conexao/conexao.php';
include_once './VO/loginClienteVO.class.php';
include_once './VO/clienteVO.class.php';
include_once './DAO/loginClienteDAO.class.php';
include_once './DAO/clienteDAO.class.php';
//cria o arquivo
$arquivo = "./img/relatorios/listaAcesso.txt";
//verifica se existe e sobrescreve
if (file_exists($arquivo)) {
unlink($arquivo);
}
//abre o arquivo
$abreArquivo = fopen($arquivo, "w");
if ($abreArquivo == FALSE) {
echo "Erro ao abrir o arquivo!!!";
exit;
}
$loginClienteVO = new loginClienteVO();
$loginClienteDAO = new loginClienteDAO();
$listaDados = $loginClienteDAO->getAll();
//para colocar o nome do cliente ao inves do codigo
$clienteVO = new clienteVO();
$clienteDAO = new clienteDAO();
$listaClientes = $clienteDAO->getAll(2);
//for para popular
for ($i = 0; $i < sizeof($listaDados); $i++) {
$listaDadosAcesso = $listaDados[$i];
for ($z = 0; $z < sizeof($listaClientes); $z++) {
$dadosClientes = $listaClientes[$z];
if ($dadosClientes->getIdCliente() == $listaDadosAcesso->getIdCliente()) {
$nomeCliente = $dadosClientes->getNomeCliente();
}
}
$usuario = $listaDadosAcesso->getLogin();
$senha = $listaDadosAcesso->getSenha();
$conteudo = "CLIENTE: $nomeCliente; USUARIO: $usuario; SENHA: $senha;\n";
$grava = fwrite($abreArquivo, $conteudo);
}
fclose($abreArquivo);
$aquivoNome = 'lista_dados_acesso.txt'; // name of the file sent to the browser for download
$arquivoLocal = './img/relatorios/' . $aquivoNome; // local path of the file
//send the file for download
if (file_exists($arquivo)) {
header('Content-Description: File Transfer');
header('Content-Type: application/octet-stream');
header('Content-Disposition: attachment; filename=' . basename($arquivo));
header('Expires: 0');
header('Cache-Control: must-revalidate');
header('Pragma: public');
header('Content-Length: ' . filesize($arquivo));
readfile($arquivo);
exit;
}
<file_sep>/DAO/atendimentoCompletoDAO.class.php
<?php
class atendimentoCompletoDAO{
    //inserts the data into the atendimento_completo table
public function insert (atendimentoCompletoVO $objVO){
$sql = sprintf("INSERT INTO atendimento_completo (idAtendimentoTempo, idCliente, nomeFuncCliente, idCategoria, idSubCategoria, descricao, solucao, observacao, idStatus) VALUES ('%s','%s','%s','%s','%s','%s','%s','%s','%s')",
$objVO->getIdAtendimentoTempo(),
$objVO->getIdCliente(),
$objVO->getNomeFuncCliente(),
$objVO->getIdCategoria(),
$objVO->getIdSubCategoria(),
$objVO->getDescricao(),
$objVO->getSolucao(),
$objVO->getObservacao(),
$objVO->getIdStatus()
);
mysql_query($sql);//echo $sql."<br>";
$objVO->setIdAtendimentoCompleto(mysql_insert_id());
return $objVO;
}
//-----------------------------------------------------------------
    //returns the data used to populate the dataTable
public function getAll(){
$objVO = new atendimentoCompletoVO();
$retorno = array();
$sql = "SELECT * FROM atendimento_completo";
$rs = mysql_query($sql);
while($conteudo = mysql_fetch_array($rs)){
$objVO->setIdAtendimentoCompleto($conteudo["idAtendimentoCompleto"]);
$objVO->setIdAtendimentoTempo($conteudo["idAtendimentoTempo"]);
$objVO->setIdCliente($conteudo["idCliente"]);
$objVO->setNomeFuncCliente($conteudo["nomeFuncCliente"]);
$objVO->setIdCategoria($conteudo["idCategoria"]);
$objVO->setIdSubCategoria($conteudo["idSubCategoria"]);
$objVO->setDescricao($conteudo["descricao"]);
$objVO->setSolucao($conteudo["solucao"]);
$objVO->setObservacao($conteudo["observacao"]);
$objVO->setIdStatus($conteudo["idStatus"]);
$retorno[] = clone $objVO;
}
return $retorno;
}
//-------------------------------------------------------------------------
    //loads the record matching the given id, used when editing
public function getById( $id ){
$objVO = new atendimentoCompletoVO();
$sql = sprintf( 'SELECT * FROM atendimento_completo WHERE idAtendimentoCompleto = "%s"',
$id
);
$rs = mysql_query( $sql );
while ( $conteudo = mysql_fetch_array( $rs ) ) {
$objVO->setIdAtendimentoCompleto($conteudo["idAtendimentoCompleto"]);
$objVO->setIdAtendimentoTempo($conteudo["idAtendimentoTempo"]);
$objVO->setIdCliente($conteudo["idCliente"]);
$objVO->setNomeFuncCliente($conteudo["nomeFuncCliente"]);
$objVO->setIdCategoria($conteudo["idCategoria"]);
$objVO->setIdSubCategoria($conteudo["idSubCategoria"]);
$objVO->setDescricao($conteudo["descricao"]);
$objVO->setSolucao($conteudo["solucao"]);
$objVO->setObservacao($conteudo["observacao"]);
$objVO->setIdStatus($conteudo["idStatus"]);
$return = clone $objVO;
}
return $return;
}
//------------------------------------------------------------------
//atualiza os dados
public function update(atendimentoCompletoVO $objVO){
$sql = sprintf("UPDATE atendimento_completo SET idAtendimentoTempo = '%s', idCliente = '%s', nomeFuncCliente = '%s', idCategoria = '%s', idSubCategoria = '%s', descricao = '%s', solucao = '%s', observacao = '%s', idStatus = '%s' WHERE idAtendimentoCompleto = '%s'",
$objVO->getIdAtendimentoTempo(),
$objVO->getIdCliente(),
$objVO->getNomeFuncCliente(),
$objVO->getIdCategoria(),
$objVO->getIdSubCategoria(),
$objVO->getDescricao(),
$objVO->getSolucao(),
$objVO->getObservacao(),
$objVO->getIdStatus(),
$objVO->getIdAtendimentoCompleto()
);
mysql_query($sql);
}
//------------------------------------------------------------------
}//fim da classe
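// Usage sketch (hedged, documentation only): mirrors the VO + DAO pattern used by the other
// cadastro pages in this repo; the values passed to the setters below are made-up placeholders.
//
//   $atendimentoCompletoVO = new atendimentoCompletoVO();
//   $atendimentoCompletoVO->setIdCliente(1);          // hypothetical client id
//   $atendimentoCompletoVO->setDescricao('Exemplo');  // hypothetical description
//   $atendimentoCompletoDAO = new atendimentoCompletoDAO();
//   $atendimentoCompletoDAO->insert($atendimentoCompletoVO); // returns the VO with the new id set
//   $registro = $atendimentoCompletoDAO->getById($atendimentoCompletoVO->getIdAtendimentoCompleto());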
?>
<file_sep>/VO/tipoClienteVO.class.php
<?php
class tipoClienteVO{
private $idTipoCliente = null;
private $nomeTipoCliente = null;
//idTipoCliente
public function getIdTipoCliente(){
return $this->idTipoCliente;
}
public function setIdTipoCliente($idTipoCliente){
$this->idTipoCliente = $idTipoCliente;
}
//nomeTipoCliente
public function getNomeTipoCliente(){
return $this->nomeTipoCliente;
}
public function setNomeTipoCliente($nomeTipoCliente){
$this->nomeTipoCliente = $nomeTipoCliente;
}
}
?>
<file_sep>/verifica.php
<?php
session_start();
include_once './Conexao/conexao.php';
$login = $_POST["usuario"];
$pass = $_POST["senha"];
// Verifica se usuario e senha estão vazio
if ($login == "" || $pass == "") {
//header("Location: index.php");
echo "<a href=\"logout.php\"><h2>usuario ou senha em branco</h2></a>";
} else {
$sql = "SELECT * FROM usuario WHERE usuario = '$login' AND senha = '$pass'";
$rs = mysql_query($sql);
while($dados = mysql_fetch_array($rs)){
$usuario = $dados["usuario"];
$senha = $dados["senha"];
}
$totalLinhas = mysql_num_rows($rs);
    if ($totalLinhas == 1) {
        $_SESSION["usuario"] = $usuario;
        header("Location: site.php");
} else {
echo "<a href=\"logout.php\"><h2>Usuario ou senha invalidos</h2></a>";
//header("Location: index.php");
}
}
?><file_sep>/funcoes.php
<?php
function calcularMinutos($horaInicial, $horaFinal) {
$separarHoraInicial = explode(":", $horaInicial);
$horaI = $separarHoraInicial[0];
$minutoI = $separarHoraInicial[1];
$separarHoraFinal = explode(":", $horaFinal);
$horaF = $separarHoraFinal[0];
$minutoF = $separarHoraFinal[1];
$totalMinutos = (($horaF * 60) + $minutoF) - (($horaI * 60) + $minutoI);
return $totalMinutos;
}
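// Worked example (sketch, not executed here): calcularMinutos converts both times to minutes
// and subtracts them, e.g. calcularMinutos("08:30", "10:15") = (10*60 + 15) - (8*60 + 30) = 105.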
function formataNumero($numero) {
$numeroFormat = number_format($numero, 2);
return $numeroFormat;
}
function retira_acentos($texto) {
$carac = array(
'á' => 'a',
'à' => 'a',
'ã' => 'a',
'â' => 'a',
'é' => 'e',
'ê' => 'e',
'í' => 'i',
'ó' => 'o',
'ô' => 'o',
'õ' => 'o',
'ú' => 'u',
'ü' => 'u',
'ç' => 'c',
'Á' => 'A',
'À' => 'A',
'Ã' => 'A',
'Â' => 'A',
'É' => 'E',
'Ê' => 'E',
'Í' => 'I',
'Ó' => 'O',
'Ô' => 'O',
'Õ' => 'O',
'Ú' => 'U',
'Ü' => 'U',
'Ç' => 'C'
);
return strtr($texto, $carac);
}
<file_sep>/VO/logAcessoVO.class.php
<?php
class logAcessoVO{
//atributos
private $idLogAcesso = null;
private $idUsuario = null;
private $horarioLogin = null;
private $data = null;
//construtor
public function logAcessoVO(){
}
//get e set
//idLogAcesso
public function getIdLogAcesso(){
return $this->idLogAcesso;
}
public function setIdLogAcesso($idLogAcesso){
$this->idLogAcesso = $idLogAcesso;
}
//idUsuario
public function getIdUsuario(){
return $this->idUsuario;
}
public function setIdUsuario($idUsuario){
$this->idUsuario = $idUsuario;
}
//horarioLogin
public function getHorarioLogin(){
return $this->horarioLogin;
}
public function setHorarioLogin($horarioLogin){
$this->horarioLogin = $horarioLogin;
}
//data
public function getData(){
        return $this->data;
}
public function setData($data){
$this->data = $data;
}
}
?>
<file_sep>/VO/devolucaoDocVO.class.php
<?php
class devolucaoDocVO {
private $idDevolucaoDoc = null;
private $idDocumento = null;
private $observacoes = null;
private $data = null;
//idDevolucaoDoc
public function getIdDevolucaoDoc() {
return $this->idDevolucaoDoc;
}
public function setIdDevolucaoDoc($idDevolucaoDoc) {
$this->idDevolucaoDoc = $idDevolucaoDoc;
}
//idDocumento
public function getIdDocumento() {
return $this->idDocumento;
}
public function setIdDocumento($idDocumento) {
$this->idDocumento = $idDocumento;
}
//observacoes
public function getObservacoes() {
return $this->observacoes;
}
public function setObservacoes($observacoes) {
$this->observacoes = $observacoes;
}
//data
public function getData() {
return $this->data;
}
public function setData($data){
$this->data = $data;
}
}
<file_sep>/relatorioDetalhadoAtendimento.php
<?php
include_once './Conexao/conexao.php';
include_once './VO/tipoAtendimentoVO.class.php';
include_once './funcoes.php';
?>
<html>
<head>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8">
<title></title>
<link rel="stylesheet" type="text/css" href="css/fontes.css">
</head>
<body><br><br><br><br>
<form action="pdfDetalhadoAtendimento.php" method="post">
<table border="0" align="center" cellpadding="15" cellspacing="15">
<tr>
<td align="center" class="titulo" colspan="2">Relatório Detalhado Atendimentos</td>
</tr>
<tr>
<td align="right">Data Inicial:</td>
<td><input type="date" required name="dataInicial" id="dataInicial"></td>
</tr>
<tr>
<td align="right">Data Final:</td>
<td><input type="date" required name="dataFinal" id="dataFinal"></td>
</tr>
<tr>
<td align="right">Atendimento:</td>
<td>
<select name="idAtendimento">
<?php
$tipoAtendimentoVO = new tipoAtendimentoVO();
$sql = "SELECT * FROM tipo_atendimento";
$rs = mysql_query($sql);
while ($conteudo = mysql_fetch_array($rs)) {
$idTipoAtendimento = $tipoAtendimentoVO->getIdTipoAtendimento();
if ($idTipoAtendimento == $conteudo["idTipoAtendimento"]) {
echo "<option value=\"$conteudo[idTipoAtendimento]\" selected>$conteudo[tipo]</option>";
} else {
echo "<option value=\"$conteudo[idTipoAtendimento]\">$conteudo[tipo]</option>";
}
}
?>
</select>
</td>
</tr>
<tr>
<td align="center" colspan="2">
<input type="submit" name="submit" value="Gerar Relatório">
</td>
</tr>
</table>
</form>
</body>
</html>
<file_sep>/unidades.php
<?php
include_once './sessao.php';
//Conexao
include_once './Conexao/conexao.php';
//VO
include_once './VO/unidadeVO.class.php';
include_once './VO/estadoVO.class.php';
include_once './VO/funcionarioVO.class.php';
//DAO
include_once './DAO/unidadeDAO.class.php';
include_once './DAO/estadoDAO.class.php';
include_once './DAO/funcionarioDAO.class.php';
$unidadeVO = new unidadeVO();
$estadoVO = new estadoVO();
$funcionarioVO = new funcionarioVO();
//buscando os dados quando for editar
if (isset($_REQUEST["par"]) && $_REQUEST["par"] != "") {
$unidadeDAO = new unidadeDAO();
$unidadeVO = $unidadeDAO->getById($_REQUEST["par"]);
}
echo "<script type=\"text/javascript\" src=\"javascript/funcoes.js\"></script>";
//tratando o botao Salvar
if (isset($_REQUEST["submit"]) && $_REQUEST["submit"] == "Salvar") {
//setando os campos
$unidadeVO->setNomeUnidade($_REQUEST["nomeUnidade"]);
$unidadeVO->setIdEstado($_REQUEST["idEstado"]);
$unidadeVO->setAtivo($_REQUEST["ativo"]);
//verificar se existe id
if (isset($_REQUEST["idUnidade"]) && $_REQUEST["idUnidade"] == "") {
//cadastrar nova unidade
$unidadeDAO = new unidadeDAO();
$unidadeDAO->insert($unidadeVO);
echo "<script>msg(1)</script>";
$unidadeVO = new unidadeVO();
    } else {
        //atualizar
        $unidadeVO->setIdUnidade($_REQUEST["idUnidade"]);
        $unidadeDAO = new unidadeDAO(); // instantiate the DAO in the update path as well (above it is only created when "par" is present)
        $unidadeDAO->update($unidadeVO);
echo "<script>msg(2)</script>";
$unidadeVO = new unidadeVO();
}
}//fim do salvar
if (isset($_REQUEST["submit"]) && $_REQUEST["submit"] == "Cancelar") {
header("Location: site.php");
exit;
}
?>
<html>
<head>
<meta charset="UTF-8">
<link rel="stylesheet" type="text/css" href="css/fontes.css">
<script language="javascript" type="text/javascript" src="media/js/jquery.js"></script>
<script language="javascript" type="text/javascript" src="media/js/jquery.validate.js"></script>
<title></title>
</head><br><br><br><br>
<form action="" method="post" id="unidades">
<table border="0" align="center" cellpadding="15" cellspacing="15">
<!--Titulo-->
<tr>
<td colspan="4" align="center" class="titulo">Cadastro das Unidades APagri</td>
</tr>
<!--Codigo-->
<tr>
<td width="150" align="right">Código:</td>
<td><input type="text" name="idUnidade" id="idUnidade" size="11" maxlength="11" value="<?php echo $unidadeVO->getIdUnidade(); ?>" class="readonly" readonly></td>
<!--Ativo -->
<td width="150" align="right">Ativo:</td>
<td>
<select name="ativo">
<?php
                        if ($unidadeVO->getAtivo() == 1) {
                            echo "<option value=\"1\" selected>Sim</option>";
                            echo "<option value=\"2\">Não</option>";
                        } elseif ($unidadeVO->getAtivo() == 2) {
                            echo "<option value=\"1\">Sim</option>";
                            echo "<option value=\"2\" selected>Não</option>";
                        } else {
                            echo "<option value=\"1\" selected>Sim</option>";
                            echo "<option value=\"2\">Não</option>";
                        }
?>
</select>
</td>
</tr>
<!--Nome -->
<tr>
<td width="150" align="right">Nome:</td>
<td colspan="4"><input type="text" required name="nomeUnidade" id="nomeUnidade" size="50" maxlength="50" value="<?php echo $unidadeVO->getNomeUnidade(); ?>"></td>
</tr>
<!--estado -->
<tr>
<td width="150" align="right">Estado:</td>
<td colspan="4">
<select name ="idEstado">
<?php
$sql = "SELECT * FROM estado";
$rs = mysql_query($sql);
while ($conteudo = mysql_fetch_array($rs)) {
//recupera o id
$idEstado = $unidadeVO->getIdEstado();
if ($idEstado == $conteudo["idEstado"]) {
echo "<option value=\"$conteudo[idEstado]\" selected>$conteudo[estado]</option>";
} else {
echo "<option value=\"$conteudo[idEstado]\">$conteudo[estado]</option>";
}
}
?>
</select>
</td>
</tr>
<!--Botao salvar e cancelar-->
<tr>
<td align="center" colspan="4">
<input type="submit" name="submit" value="Salvar">
</td>
</tr>
</table>
</form>
<!--Formatação com dataTable-->
<script src="media/js/jquery.js"></script>
<script src="media/js/jquery.dataTables.min.js"></script>
<link rel="stylesheet" href="media/css/jquery.dataTables.css">
<script type="text/javascript">
$(document).ready(function () {
$('#tabela1').dataTable({
"scrollY": "300px",
"scrollCollapse": true,
"paging": false,
"oLanguage": {
"sUrl": "media/pt-br.txt"
}
});
});
</script>
<div style="width: 900px; margin: auto;">
<div id="demo">
<table cellpadding="0" cellspacing="0" border="0" class="display" id="tabela1" align="center">
<thead>
<tr>
<th>Código</th>
<th>Nome</th>
<th>Estado</th>
<th>Ativo</th>
<th>Editar</th>
</tr>
</thead>
<?php
$unidadeDAO = new unidadeDAO();
$listaDeUnidade = $unidadeDAO->getAll();
$estadoDAO = new estadoDAO();
$listaDeEstado = $estadoDAO->getAll();
$funcionarioDAO = new funcionarioDAO();
$listaDeFunc = $funcionarioDAO->getAll();
for ($i = 0; $i < sizeof($listaDeUnidade); $i++) {
$dadosUnidades = $listaDeUnidade[$i];
//for para estado
for ($z = 0; $z < sizeof($listaDeEstado); $z++) {
$dadosEstado = $listaDeEstado[$z];
if ($dadosEstado->getIdEstado() == $dadosUnidades->getIdEstado()) {
$nomeEstado = $dadosEstado->getEstado();
}
}
$ativo = $dadosUnidades->getAtivo();
if ($ativo == 1) {
$nomeAtivo = "Sim";
} elseif ($ativo == 2) {
$nomeAtivo = "Não";
}
echo "
<tr>
<td align=\"center\" class=\"textoDataTable\">" . $dadosUnidades->getIdUnidade() . "</td>
<td align=\"center\" class=\"textoDataTable\">" . $dadosUnidades->getNomeUnidade() . "</td>
<td align=\"center\" class=\"textoDataTable\">" . $nomeEstado . "</td>
<td align=\"center\" class=\"textoDataTable\">" . $nomeAtivo . "</td>
<td align=\"center\"><a href=\"?red=cadastroUnidades&par=" . $dadosUnidades->getIdUnidade() . "\"><img src=\"img/lapis.png\"></a></td>
</tr>
";
}
?>
</table></div></div>
</body>
</html>
<file_sep>/DAO/atendimentoTempoDAO.class.php
<?php
class atendimentoTempoDAO{
//inserir
public function insert(atendimentoTempoVO $objVO){
$sql = sprintf("INSERT INTO atendimento_tempo (idAtendimento, idTempo) VALUES ('%s','%s')",
$objVO->getIdAtendimento(),
$objVO->getIdTempo()
);
mysql_query($sql);//echo $sql."<br>";
$objVO->setIdAtendimentoTempo(mysql_insert_id());
return $objVO;
}
}
?>
<file_sep>/DAO/funcionarioDAO.class.php
<?php
class funcionarioDAO{
public function insert(funcionarioVO $objVO){
$sql = sprintf("INSERT INTO funcionario (nomeFuncionario, email, idUnidade, telefone1, telefone2, skype, ativo, idDepartamento)
VALUES ('%s', '%s', '%s', '%s', '%s', '%s', '%s', '%s')",
$objVO->getNomeFuncionario(),
$objVO->getEmail(),
$objVO->getIdUnidade(),
$objVO->getTelefone1(),
$objVO->getTelefone2(),
$objVO->getSkype(),
$objVO->getAtivo(),
$objVO->getIdDepartamento()
);
mysql_query($sql);
$objVO->setIdFuncionario(mysql_insert_id());
return $objVO;
}
public function update(funcionarioVO $objVO){
$sql = sprintf("UPDATE funcionario SET nomeFuncionario = '%s', email = '%s', idUnidade = '%s', telefone1 = '%s', telefone2 = '%s',
skype = '%s', ativo = '%s', idDepartamento = '%s' WHERE idFuncionario = '%s'",
$objVO->getNomeFuncionario(),
$objVO->getEmail(),
$objVO->getIdUnidade(),
$objVO->getTelefone1(),
$objVO->getTelefone2(),
$objVO->getSkype(),
$objVO->getAtivo(),
$objVO->getIdDepartamento(),
$objVO->getIdFuncionario()
);
mysql_query($sql);
}
public function getAll(){
$objVO = new funcionarioVO();
$retorno = array();
$sql = "SELECT * FROM funcionario";
$rs = mysql_query($sql);
while($conteudo = mysql_fetch_array($rs)){
$objVO->setIdFuncionario($conteudo["idFuncionario"]);
$objVO->setNomeFuncionario($conteudo["nomeFuncionario"]);
$objVO->setEmail($conteudo["email"]);
$objVO->setIdUnidade($conteudo["idUnidade"]);
$objVO->setTelefone1($conteudo["telefone1"]);
$objVO->setTelefone2($conteudo["telefone2"]);
$objVO->setSkype($conteudo["skype"]);
$objVO->setAtivo($conteudo["ativo"]);
$objVO->setIdDepartamento($conteudo["idDepartamento"]);
$retorno[] = clone $objVO;
}
return $retorno;
}
public function getById($idFun){
        $objVO = new funcionarioVO();
        $retorno = null; // avoids an undefined variable when no row matches the id
$sql = sprintf('SELECT * FROM funcionario WHERE idFuncionario = "%s"', $idFun);
$rs = mysql_query($sql);
while($conteudo = mysql_fetch_array($rs)){
$objVO->setIdFuncionario($conteudo["idFuncionario"]);
$objVO->setNomeFuncionario($conteudo["nomeFuncionario"]);
$objVO->setEmail($conteudo["email"]);
$objVO->setIdUnidade($conteudo["idUnidade"]);
$objVO->setTelefone1($conteudo["telefone1"]);
$objVO->setTelefone2($conteudo["telefone2"]);
$objVO->setSkype($conteudo["skype"]);
$objVO->setAtivo($conteudo["ativo"]);
$objVO->setIdDepartamento($conteudo["idDepartamento"]);
$retorno = clone $objVO;
}
return $retorno;
}
}
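// Usage sketch (hedged, documentation only): this mirrors how computadores.php and unidades.php
// consume the DAOs elsewhere in this repo; the id below is an illustrative example value.
//
//   $funcionarioDAO = new funcionarioDAO();
//   $listaDeFunc = $funcionarioDAO->getAll();      // array of funcionarioVO
//   $funcionarioVO = $funcionarioDAO->getById(3);  // single funcionarioVO (id 3 is hypothetical)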
?>
<file_sep>/pdfGeralAtendimento.php
<?php
header('Content-Type: text/html; charset=UTF-8');
//ini_set('default_charset', 'UTF-8');
//jpGraph
include_once './jpgraph/src/jpgraph.php';
include_once './jpgraph/src/jpgraph_bar.php';
//fpdf
include_once './fpdf13/fpdf.php';
define('FPDF_FONTPATH', './fpdf13/font/');
//conexao
include_once './Conexao/conexao.php';
//funcoes
include_once './funcoes.php';
//variáveis
$dataInicial = $_REQUEST['dataInicial'];
$dataFinal = $_REQUEST['dataFinal'];
$dataI = implode("/", array_reverse(explode("-", $dataInicial)));
$dataF = implode("/", array_reverse(explode("-", $dataFinal)));
$sql = "
SELECT a.data, a.idTipoAtendimento, ta.tipo, SUM(t.tempoGastoMin) AS somaMinutos FROM atendimento AS a
INNER JOIN tipo_atendimento AS ta ON ta.idTipoAtendimento = a.idTipoAtendimento
INNER JOIN atendimento_tempo AS at ON at.idAtendimento = a.idAtendimento
INNER JOIN tempo AS t ON t.idTempo = at.idTempo
WHERE a.data >= '$dataInicial' AND a.data <= '$dataFinal' GROUP BY ta.idTipoAtendimento
";
$rs = mysql_query($sql);
$datax = array(); //dados do eixo x do grafico
$datay = array(); //dados do eixo y do grafico
$i = 0;
while ($conteudo = mysql_fetch_array($rs)) {
//retorna os dados armazenado no arrays
$datax[$i] = $conteudo['tipo']; //dados do eixo x
$datay[$i] = $conteudo['somaMinutos']; //dados do eixo y
$i++; //incremento
}
//conf. o utf8 nos nomes dos titulos das barras
$dados_utf8 = array();
$dados_utf8[0] = utf8_decode($datax[0]);
$dados_utf8[1] = utf8_decode($datax[1]);
$dados_utf8[2] = utf8_decode("Prestacao de Servico");
//calculo das horas
$qtdetotalMinutos = $datay[0] + $datay[1] + $datay[2];
//calculo da porcentagem
$suportePerc = ($datay[0] * 100) / $qtdetotalMinutos;
$desenvolvimentoPerc = ($datay[1] * 100) / $qtdetotalMinutos;
$prestServ = ($datay[2] * 100) / $qtdetotalMinutos;
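// Numeric example (sketch with made-up values): if $datay = [300, 150, 50] then
// $qtdetotalMinutos = 500 and $suportePerc = 300*100/500 = 60%. The "dias" column printed
// further below divides minutes by an 8-hour day, e.g. 300 / (8*60) = 0.625 dias.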
//excluir grafico da pasta se existir
$caminhoImgExc = 'img/relatorios/geralAtend.png';
if (file_exists($caminhoImgExc)) {
unlink($caminhoImgExc);
}
//*******************************************GERANDO GRAFICO
//conf tamanho do grafico
$graph = new Graph(600, 500); //tamanho
$graph->img->SetMargin(150, 50, 40, 200); //margem esq/dir, sup/inf
$graph->SetScale("textlin");
$graph->SetMarginColor("Lightblue");
$graph->SetShadow();
//conf. eixo y
$graph->yaxis->title->Set("Minutos"); //titulo
$graph->yaxis->SetTitleMargin(60); //ajusta a margin do eixo y
$graph->yaxis->title->SetFont(FF_ARIAL, FS_NORMAL, 16); //tipo da fonte e tamanho
$graph->xaxis->SetFont(FF_ARIAL, FS_NORMAL, 10);
$graph->yaxis->SetFont(FF_ARIAL, FS_NORMAL, 10); //tipo da fonte e tamanho
//dados eixo x
$graph->xaxis->SetTickLabels($dados_utf8); //dados do eixo x
$graph->xaxis->SetLabelAngle(30); //angulo
//dados eixo y
$bplot = new BarPlot($datay); //dados do eixo y
$bplot->SetWidth(0.5); //espessura das barras do gráfico
$bplot->SetColor("white"); //cor
$graph->Add($bplot); //adiciona
$graph->Stroke('./img/relatorios/geralAtend.png'); //cria o grafico
//***********************************************GERANDO PDF
//Novo documento PDF com orientação P - Retrato (Picture) que pode ser também L - Paisagem (Landscape)
$pdf = new FPDF('P', 'cm', 'A4');
$pdf->Open();
$pdf->AddPage();
$pdf->Image('img/apagri.jpg', 2.5, 1, 3.3, 1.6);
$pdf->Image('img/inceres.jpg', 16, 1, 3.3, 1.7);
$pdf->SetFont('arial', 'B', 18);
$tituloRel = utf8_decode("Relatório dos Atendimentos");
$pdf->Cell(20, 1.5, $tituloRel, "B", 1, 'C');
//Periodo
$pdf->SetFont('arial', '', 14); // Definindo Fonte
$pdf->SetMargins(0, 0);
$pdf->SetXY(5.5, 3.5);
$stringPeriodo = utf8_decode("Período: " . $dataI . " - " . $dataF);
$pdf->Cell(10, 1, $stringPeriodo, 1, 0, 'C');
//Tabela de dados
$pdf->SetFont('arial', '', 12); // Definindo Fonte
$pdf->SetMargins(0, 0);
$pdf->SetXY(4, 5.5); //seta o eixo x e y, variavel controle vai incrementando
$prestServicos = utf8_decode("Prestação de Serviço");
$pdf->Cell(5, 1, "Suporte", 1, 0, 'C'); //largura, altura, titulo, borda 1-sim 0-não, 0, alinhamento
$pdf->Cell(3, 1, $datay[0] . " min", 1, 0, 'C');
$pdf->Cell(3, 1, formataNumero($suportePerc) . " %", 1, 0, 'C');
$pdf->Cell(3, 1, formataNumero($datay[0] / (8 * 60)) . " dias", 1, 0, 'C');
$pdf->SetXY(4, 6.5); //seta o eixo x e y, variavel controle vai incrementando
$pdf->Cell(5, 1, "Desenvolvimento", 1, 0, 'C');
$pdf->Cell(3, 1, $datay[1] . " min", 1, 0, 'C');
$pdf->Cell(3, 1, formataNumero($desenvolvimentoPerc) . " %", 1, 0, 'C');
$pdf->Cell(3, 1, formataNumero($datay[1] / (8 * 60)) . " dias", 1, 0, 'C');
$pdf->SetXY(4, 7.5); //seta o eixo x e y, variavel controle vai incrementando
$pdf->Cell(5, 1, $prestServicos, 1, 0, 'C');
$pdf->Cell(3, 1, $datay[2] . " min", 1, 0, 'C');
$pdf->Cell(3, 1, formataNumero($prestServ) . " %", 1, 0, 'C');
$pdf->Cell(3, 1, formataNumero($datay[2] / (8 * 60)) . " dias", 1, 0, 'C');
$pdf->Image('img/relatorios/geralAtend.png', 2.5, 10, 15); //margem, tamanho
$pdf->Output("relatorio_atendimentos_" . $dataI . " - " . $dataF . ".pdf", "D");
$pdf->Close();
<file_sep>/computadores.php
<?php
include_once './sessao.php';
include_once './Conexao/conexao.php';
include_once './VO/computadorVO.class.php';
include_once './VO/marcaVO.class.php';
include_once './VO/tipoComputadorVO.class.php';
include_once './VO/sistemaOperacionalVO.class.php';
include_once './VO/processadorVO.class.php';
include_once './VO/memoriaVO.class.php';
include_once './DAO/computadorDAO.class.php';
include_once './DAO/marcaDAO.class.php';
include_once './DAO/tipoComputadorDAO.class.php';
include_once './DAO/sistemaOperacionalDAO.class.php';
include_once './DAO/processadorDAO.class.php';
include_once './DAO/memoriaDAO.class.php';
include_once './DAO/funcionarioDAO.class.php';
$computadorVO = new computadorVO();
$memoriaVO = new memoriaVO();
$marcaVO = new marcaVO();
$tipoComputadorVO = new tipoComputadorVO();
$sistemaOperacionalVO = new sistemaOperacionalVO();
$processadorVO = new processadorVO();
//recupera o parametro do id
$idComp = $_GET["par"];
//data e hora atual
$dataS = date("d/m/Y");
if ($idComp == 0) {
//novo cadastro de computador
} else {
//buscar o computador para atualizar
$computadorDAO = new computadorDAO();
$computadorVO = $computadorDAO->getById($idComp);
//CONVERTE A DATA P/ EXIBIR
$dataUser = implode("/", array_reverse(explode("-", $computadorVO->getData())));
}
echo "<script type=\"text/javascript\" src=\"javascript/funcoes.js\"></script>";
if (isset($_REQUEST["submit"]) && $_REQUEST["submit"] == "Salvar") {
//converte a data p/ salvar no bd
$data = $_REQUEST["data"];
$dataBd = implode("-", array_reverse(explode("/", $data)));
//seta os campos
$computadorVO->setNomeComputador($_REQUEST["nomeComputador"]);
$computadorVO->setNumeroSerie($_REQUEST["numeroSerie"]);
$computadorVO->setIdMarca($_REQUEST["idMarca"]);
$computadorVO->setIdTipoComputador($_REQUEST["idTipoComputador"]);
$computadorVO->setIdSistemaOperacional($_REQUEST["idSistemaOperacional"]);
$computadorVO->setIdProcessador($_REQUEST["idProcessador"]);
$computadorVO->setIdMemoria($_REQUEST["idMemoria"]);
$computadorVO->setAtivo($_REQUEST["ativo"]);
$computadorVO->setData($dataBd);
$computadorVO->setObservacao($_REQUEST["observacao"]);
//verifica se existe o id
if (isset($_REQUEST["idComputador"]) && $_REQUEST["idComputador"] == "") {
//novo
$computadorDAO = new computadorDAO();
$computadorDAO->insert($computadorVO);
echo "<script>msg(1)</script>";
$computadorVO = new computadorVO();
} else {
//atualiza
$computadorVO->setIdComputador($_REQUEST["idComputador"]);
$computadorDAO = new computadorDAO();
$computadorDAO->update($computadorVO);
echo "<script>msg(2)</script>";
$computadorVO = new computadorVO();
}
}//fim do salvar
if (isset($_REQUEST["submit"]) && $_REQUEST["submit"] == "Cancelar") {
header("Location: site.php");
exit;
}
?>
<html>
<head>
<meta charset="UTF-8">
<link rel="stylesheet" type="text/css" href="css/fontes.css">
<script language="javascript" type="text/javascript" src="media/js/jquery.js"></script>
<script language="javascript" type="text/javascript" src="media/js/jquery.validate.js"></script>
<title></title>
</head><br><br><br><br>
<form action="" method="post" id="computadores">
<table border="0" align="center" cellpadding="15" cellspacing="15">
<!--Titulo-->
<tr>
<td colspan="4" align="center" class="titulo">Cadastro dos Computadores</td>
</tr>
<!--codigo computador-->
<tr>
<td width="250" align="right">Código:</td>
<td colspan="2">
<input type="text" name="idComputador" value="<?php echo $computadorVO->getIdComputador(); ?>" size="5" maxlength="5" class="readonly" readonly>
<!-- Data -->
Data:
<?php
if ($computadorVO->getData() == "") {
$exibeData = $dataS;
} else {
$exibeData = $dataUser;
}
?>
<input type="text" name="data" id="data" value="<?php echo $exibeData; ?>" alt="date" size="9" maxlength="12" class="readonly" readonly>
<!-- Ativo -->
Ativo:
<select name="ativo">
<?php
if ($computadorVO->getAtivo() != "") {
$ativo = $computadorVO->getAtivo();
if ($ativo == "Sim") {
echo "<option value=\"$ativo\" selected>$ativo</option>";
echo "<option value=\"Não\">Não</option>";
                                } else {
                                    echo "<option value=\"Sim\">Sim</option>";
                                    echo "<option value=\"$ativo\" selected>$ativo</option>";
                                }
} else {
echo "<option value=\"Sim\" selected>Sim</option>";
echo "<option value=\"Não\">Não</option>";
}
?>
</select>
</td>
</tr>
<!--nome computador-->
<tr>
<td width="250" align="right">Nome Computador:</td>
<td colspan="2"><input type="text" required name="nomeComputador" id= "nomeComputador" value="<?php echo $computadorVO->getNomeComputador(); ?>" size="40" maxlength="30"></td>
</tr>
<!--numero de serie-->
<tr>
<td width="250" align="right">Número de Série:</td>
<td><input type="text" name="numeroSerie" size="40" value="<?php echo $computadorVO->getNumeroSerie(); ?>" maxlength="30"></td>
</tr>
<!--Marca -->
<tr>
<td width="250" align="right">Marca:</td>
<td colspan="4">
<select name="idMarca">
<?php
if ($computadorVO->getIdMarca() != "") {
$idMarca = $computadorVO->getIdMarca();
} else {
$idMarca = $marcaVO->getIdMarca();
}
$sql = "SELECT * FROM marca_computador";
$rs = mysql_query($sql);
while ($conteudo = mysql_fetch_array($rs)) {
if ($idMarca == $conteudo["idMarca"]) {
echo "<option value=\"$conteudo[idMarca]\" selected>$conteudo[marca]</option>";
} else {
echo "<option value=\"$conteudo[idMarca]\" >$conteudo[marca]</option>";
}
}
?>
</select>
<!--tipoComputador-->
Tipo:
<select name="idTipoComputador">
<?php
if ($computadorVO->getIdTipoComputador() != "") {
$idTipoComp = $computadorVO->getIdTipoComputador();
} else {
$idTipoComp = $tipoComputadorVO->getIdTipoComputador();
}
$sql = "SELECT * FROM tipo_computador";
$rs = mysql_query($sql);
while ($conteudo = mysql_fetch_array($rs)) {
if ($idTipoComp == $conteudo["idTipoComputador"]) {
echo " <option value=\"$conteudo[idTipoComputador]\" selected>$conteudo[nomeTipoComputador]</option>";
} else {
echo " <option value=\"$conteudo[idTipoComputador]\" >$conteudo[nomeTipoComputador]</option>";
}
}
?>
</select>
</td>
</tr>
<!--sistema operacional -->
<tr>
<td width="250" align="right">Sistema Operacional:</td>
<td colspan="4">
<select name="idSistemaOperacional">
<?php
if ($computadorVO->getIdSistemaOperacional() != "") {
$idSistemaOperacional = $computadorVO->getIdSistemaOperacional();
} else {
$idSistemaOperacional = $sistemaOperacionalVO->getIdSistemaOperacional();
}
$sql = "SELECT * FROM sistema_operacional";
$rs = mysql_query($sql);
while ($conteudo = mysql_fetch_array($rs)) {
if ($idSistemaOperacional == $conteudo["idSistemaOperacional"]) {
echo "<option value=\"$conteudo[idSistemaOperacional]\" selected>$conteudo[sistemaOperacional]</option>";
} else {
echo "<option value=\"$conteudo[idSistemaOperacional]\">$conteudo[sistemaOperacional]</option>";
}
}
?>
</select>
<!--memoria-->
Memória:
<select name="idMemoria">
<?php
if ($computadorVO->getIdMemoria() != "") {
$idMemoria = $computadorVO->getIdMemoria();
} else {
$idMemoria = $memoriaVO->getIdMemoria();
}
$sql = "SELECT * FROM memoria ";
$rs = mysql_query($sql);
while ($conteudo = mysql_fetch_array($rs)) {
if ($idMemoria == $conteudo["idMemoria"]) {
echo "<option value=\"$conteudo[idMemoria]\" selected>$conteudo[memoria]</option>";
} else {
echo "<option value=\"$conteudo[idMemoria]\">$conteudo[memoria]</option>";
}
}
?>
</select>
</td>
</tr>
<!--processador-->
<tr>
<td width="250" align="right">Processador:</td>
<td colspan="4">
<select name="idProcessador">
<?php
if ($computadorVO->getIdProcessador()) {
$idProcessador = $computadorVO->getIdProcessador();
} else {
$idProcessador = $processadorVO->getIdProcessador();
}
$sql = "SELECT * FROM processador";
$rs = mysql_query($sql);
while ($conteudo = mysql_fetch_array($rs)) {
if ($idProcessador == $conteudo["idProcessador"]) {
echo "<option value=\"$conteudo[idProcessador]\" selected>$conteudo[processador]</option>";
} else {
echo "<option value=\"$conteudo[idProcessador]\">$conteudo[processador]</option>";
}
}
?>
</select>
                </td>
            </tr>
<!-- observacao -->
<tr>
<td align="right">Observação:</td>
<td colspan="4">
<textarea name="observacao" id="observacao" rows="4" cols="50" maxlength="250"><?php echo $computadorVO->getObservacao(); ?></textarea>
</td>
</tr>
<!--Botao salvar e cancelar-->
<tr>
<td colspan="4" align="center">
<input type="submit" name="submit" value="Salvar">
</td>
</tr>
</table>
</form>
<!--Formatação com dataTable-->
<script src="media/js/jquery.js"></script>
<script src="media/js/jquery.dataTables.min.js"></script>
<link rel="stylesheet" href="media/css/jquery.dataTables.css">
<script type="text/javascript">
$(document).ready(function () {
$('#tabela1').dataTable({
"scrollY": "300px",
"scrollCollapse": true,
"paging": false,
"oLanguage": {
"sUrl": "media/pt-br.txt"
}
});
});
</script>
<div style="width: 1150px; margin: auto;">
<div id="demo">
<table cellpadding="0" cellspacing="0" border="0" class="display" id="tabela1" align="center">
<thead>
<tr>
<th>Código</th>
<th>Nome Computador</th>
<th>Marca</th>
<th>Tipo</th>
<th>Sistema Op.</th>
<th>Processador</th>
<th>Memoria</th>
<th>Data</th>
<th>Ativo</th>
<th>Editar</th>
</tr>
</thead>
<?php
$computadorDAO = new computadorDAO();
$listaComp = $computadorDAO->getAll();
$marcaDAO = new marcaDAO();
$listaMarca = $marcaDAO->getAll();
$tipoComputadorDAO = new tipoComputadorDAO();
$listaTipos = $tipoComputadorDAO->getAll();
$sistemaOperacionalDAO = new sistemaOperacionalDAO();
$listaSO = $sistemaOperacionalDAO->getAll();
$processadorDAO = new processadorDAO();
$listaProcessador = $processadorDAO->getAll();
$memoriaDAO = new memoriaDAO();
$listaMemoria = $memoriaDAO->getAll();
for ($i = 0; $i < sizeof($listaComp); $i++) {
$dadosComp = $listaComp[$i];
//CONVERTE A DATA P/ EXIBIR
$dataExibe = implode("/", array_reverse(explode("-", $dadosComp->getData())));
for ($x = 0; $x < sizeof($listaMarca); $x++) {
$dadosMarca = $listaMarca[$x];
if ($dadosMarca->getIdMarca() == $dadosComp->getIdMarca()) {
$nomeMarca = $dadosMarca->getMarca();
}
}
for ($z = 0; $z < sizeof($listaTipos); $z++) {
$dadosTipo = $listaTipos[$z];
if ($dadosTipo->getIdTipoComputador() == $dadosComp->getIdTipoComputador()) {
$nomeTipo = $dadosTipo->getNomeTipoComputador();
}
}
for ($q = 0; $q < sizeof($listaSO); $q++) {
$dadosSO = $listaSO[$q];
if ($dadosSO->getIdSistemaOperacional() == $dadosComp->getIdSistemaOperacional()) {
$nomeSO = $dadosSO->getSistemaOperacional();
}
}
for ($w = 0; $w < sizeof($listaProcessador); $w++) {
$dadosProc = $listaProcessador[$w];
if ($dadosProc->getIdProcessador() == $dadosComp->getIdProcessador()) {
$nomeProc = $dadosProc->getProcessador();
}
}
for ($r = 0; $r < sizeof($listaMemoria); $r++) {
$dadosMem = $listaMemoria[$r];
if ($dadosMem->getIdMemoria() == $dadosComp->getIdMemoria()) {
$nomeMem = $dadosMem->getMemoria();
}
}
echo "
<tr>
<td align=\"center\" class=\"textoDataTable\">" . $dadosComp->getIdComputador() . "</td>
<td align=\"center\" class=\"textoDataTable\">" . $dadosComp->getNomeComputador() . "</td>
<td align=\"center\" class=\"textoDataTable\">" . $nomeMarca . "</td>
<td align=\"center\" class=\"textoDataTable\">" . $nomeTipo . "</td>
<td align=\"center\" class=\"textoDataTable\">" . $nomeSO . "</td>
<td align=\"center\" class=\"textoDataTable\">" . $nomeProc . "</td>
<td align=\"center\" class=\"textoDataTable\">" . $nomeMem . "</td>
<td align=\"center\" class=\"textoDataTable\">" . $dataExibe . "</td>
<td align=\"center\" class=\"textoDataTable\">" . $dadosComp->getAtivo() . "</td>
<td align=\"center\"><a href=\"?red=cadastroComputadores&par=" . $dadosComp->getIdComputador() . "\"><img src=\"img/lapis.png\"></a></td>
</tr>
";
}
?>
</table></div></div>
</body>
</html>
<file_sep>/DAO/tempoDAO.class.php
<?php
class tempoDAO{
//inserir
public function insert(tempoVO $objVO){
$sql = sprintf("INSERT INTO tempo (horarioInicial, horarioFinal, tempoGastoMin) VALUES ('%s','%s','%s')",
$objVO->getHorarioInicial(),
$objVO->getHorarioFinal(),
$objVO->getTempoGastoMin()
);
mysql_query($sql);//echo $sql."<br>";
$objVO->setIdTempo(mysql_insert_id());
return $objVO;
}
}
?>
<file_sep>/VO/categoriaVO.class.php
<?php
class categoriaVO{
//atributos
private $idCategoria = null;
private $categoria = null;
//construtor
public function categoriaVO(){
}
//get set
//idCategoria
public function getIdCategoria(){
return $this->idCategoria;
}
public function setIdCategoria($idCategoria){
$this->idCategoria = $idCategoria;
}
//categoria
public function getCategoria(){
return $this->categoria;
}
public function setCategoria($categoria){
$this->categoria = $categoria;
}
}
?>
<file_sep>/VO/statusVO.class.php
<?php
class statusVO{
//atributos
private $idStatus = null;
private $status = null;
//construtor
public function statusVO(){
}
//get e set
//idStatus
public function getIdStatus(){
return $this->idStatus;
}
public function setIdStatus($idStatus){
$this->idStatus = $idStatus;
}
//status
public function getStatus(){
return $this->status;
}
public function setStatus($status){
$this->status = $status;
}
}
?>
<file_sep>/VO/memoriaVO.class.php
<?php
class memoriaVO{
//atributos
private $idMemoria = null;
private $memoria = null;
//construtor
public function memoriaVO(){
}
//get set
//idMemoria
public function getIdMemoria(){
return $this->idMemoria;
}
public function setIdMemoria($idMemoria){
$this->idMemoria = $idMemoria;
}
//memoria
public function getMemoria(){
return $this->memoria;
}
public function setMemoria($memoria){
$this->memoria = $memoria;
}
}
?>
<file_sep>/DAO/computadorDAO.class.php
<?php
class computadorDAO {
//inserir
public function insert(computadorVO $objVO) {
$sql = sprintf("INSERT INTO computador (nomeComputador, numeroSerie, idMarca, idTipoComputador, idSistemaOperacional, idProcessador, idMemoria, ativo, data, observacao) VALUES ('%s','%s','%s','%s','%s','%s','%s','%s','%s','%s')",
$objVO->getNomeComputador(),
$objVO->getNumeroSerie(),
$objVO->getIdMarca(),
$objVO->getIdTipoComputador(),
$objVO->getIdSistemaOperacional(),
$objVO->getIdProcessador(),
$objVO->getIdMemoria(),
$objVO->getAtivo(),
$objVO->getData(),
$objVO->getObservacao()
);
mysql_query($sql);
$objVO->setIdComputador(mysql_insert_id());
return $objVO;
}
//update
public function update(computadorVO $objVO) {
$sql = sprintf("UPDATE computador SET nomeComputador = '%s', numeroSerie = '%s', idMarca = '%s', idTipoComputador = '%s', idSistemaOperacional = '%s', idProcessador = '%s', idMemoria = '%s', ativo = '%s', data = '%s', observacao = '%s' WHERE idComputador = '%s'",
$objVO->getNomeComputador(),
$objVO->getNumeroSerie(),
$objVO->getIdMarca(),
$objVO->getIdTipoComputador(),
$objVO->getIdSistemaOperacional(),
$objVO->getIdProcessador(),
$objVO->getIdMemoria(),
$objVO->getAtivo(),
$objVO->getData(),
$objVO->getObservacao(),
$objVO->getIdComputador()
);
mysql_query($sql);
}
//retorna os dados para popular o dataTable
public function getAll() {
$objVO = new computadorVO();
$retorno = array();
$sql = "SELECT * FROM computador";
$rs = mysql_query($sql);
while ($conteudo = mysql_fetch_array($rs)) {
$objVO->setIdComputador($conteudo["idComputador"]);
$objVO->setNomeComputador($conteudo["nomeComputador"]);
$objVO->setNumeroSerie($conteudo["numeroSerie"]);
$objVO->setIdMarca($conteudo["idMarca"]);
$objVO->setIdTipoComputador($conteudo["idTipoComputador"]);
$objVO->setIdSistemaOperacional($conteudo["idSistemaOperacional"]);
$objVO->setIdProcessador($conteudo["idProcessador"]);
$objVO->setIdMemoria($conteudo["idMemoria"]);
$objVO->setAtivo($conteudo["ativo"]);
$objVO->setData($conteudo["data"]);
$objVO->setObservacao($conteudo["observacao"]);
$retorno[] = clone $objVO;
}
return $retorno;
}
public function getById($idComp){
        $objVO = new computadorVO();
        $return = null; // avoids an undefined variable when no row matches the id
$sql = sprintf('SELECT * FROM computador WHERE idComputador = "%s"', $idComp);
$rs = mysql_query($sql);
while($conteudo = mysql_fetch_array($rs)){
$objVO->setIdComputador($conteudo["idComputador"]);
$objVO->setNomeComputador($conteudo["nomeComputador"]);
$objVO->setNumeroSerie($conteudo["numeroSerie"]);
$objVO->setIdMarca($conteudo["idMarca"]);
$objVO->setIdTipoComputador($conteudo["idTipoComputador"]);
$objVO->setIdSistemaOperacional($conteudo["idSistemaOperacional"]);
$objVO->setIdProcessador($conteudo["idProcessador"]);
$objVO->setIdMemoria($conteudo["idMemoria"]);
$objVO->setAtivo($conteudo["ativo"]);
$objVO->setData($conteudo["data"]);
$objVO->setObservacao($conteudo["observacao"]);
$return = clone $objVO;
}
return $return;
}
}//fim da classe
?>
<file_sep>/VO/atendimentoCompletoVO.class.php
<?php
class atendimentoCompletoVO{
//atributos
private $idAtendimentoCompleto = null;
private $idAtendimentoTempo = null;
private $idCliente = null;
private $nomeFuncCliente = null;
private $idCategoria = null;
private $idSubCategoria = null;
private $descricao = null;
private $solucao = null;
private $observacao = null;
private $idStatus = null;
//get e set
//idAtendimentoCompleto
public function getIdAtendimentoCompleto(){
return $this->idAtendimentoCompleto;
}
public function setIdAtendimentoCompleto($idAtendimentoCompleto){
$this->idAtendimentoCompleto = $idAtendimentoCompleto;
}
//idAtendimentoTempo
public function getIdAtendimentoTempo(){
return $this->idAtendimentoTempo;
}
public function setIdAtendimentoTempo($idAtendimentoTempo){
$this->idAtendimentoTempo = $idAtendimentoTempo;
}
//idCliente
public function getIdCliente(){
return $this->idCliente;
}
public function setIdCliente($idCliente){
$this->idCliente = $idCliente;
}
//nomeFuncCliente
public function getNomeFuncCliente(){
return $this->nomeFuncCliente;
}
public function setNomeFuncCliente($nomeFuncCliente){
$this->nomeFuncCliente = $nomeFuncCliente;
}
//idCategoria
public function getIdCategoria(){
return $this->idCategoria;
}
public function setIdCategoria($idCategoria){
$this->idCategoria = $idCategoria;
}
//idSubCategoria
public function getIdSubCategoria(){
return $this->idSubCategoria;
}
public function setIdSubCategoria($idSubCategoria){
$this->idSubCategoria = $idSubCategoria;
}
//descricao
public function getDescricao(){
return $this->descricao;
}
public function setDescricao($descricao){
$this->descricao = $descricao;
}
//solucao
public function getSolucao(){
return $this->solucao;
}
public function setSolucao($solucao){
$this->solucao = $solucao;
}
//observacao
public function getObservacao(){
return $this->observacao;
}
public function setObservacao($observacao){
$this->observacao = $observacao;
}
//idStatus
public function getIdStatus(){
return $this->idStatus;
}
public function setIdStatus($idStatus){
$this->idStatus = $idStatus;
}
}
?>
<file_sep>/VO/tempoVO.class.php
<?php
class tempoVO{
//atributos
private $idTempo = null;
private $horarioInicial = null;
private $horarioFinal = null;
private $tempoGastoMin = null;
//construtor
public function tempoVO(){
}
//get set
//idTempo
public function getIdTempo(){
return $this->idTempo;
}
public function setIdTempo($idTempo){
$this->idTempo = $idTempo;
}
//horarioInicial
public function getHorarioInicial(){
return $this->horarioInicial;
}
public function setHorarioInicial($horarioInicial){
$this->horarioInicial = $horarioInicial;
}
//horarioFinal
public function getHorarioFinal(){
return $this->horarioFinal;
}
public function setHorarioFinal($horarioFinal){
$this->horarioFinal = $horarioFinal;
}
//tempoGastoMin
public function getTempoGastoMin(){
return $this->tempoGastoMin;
}
public function setTempoGastoMin($tempoGastoMin){
$this->tempoGastoMin = $tempoGastoMin;
}
}
?>
<file_sep>/DAO/categoriaDocDAO.class.php
<?php
class categoriaDocDAO {
public function getAll(){
$objVO = new categoriaDocVO();
$retorno = array();
$sql = "SELECT * FROM categoria_doc";
$rs = mysql_query($sql);
while($conteudo = mysql_fetch_array($rs)){
$objVO->setIdCategoriaDoc($conteudo["idCategoriaDoc"]);
$objVO->setNomeCategoriaDoc($conteudo["nomeCategoriaDoc"]);
$retorno[] = clone $objVO;
}
return $retorno;
}
public function getById($id){
        $objVO = new categoriaDocVO();
        $retorno = null; // avoids an undefined variable when no row matches the id
$sql = sprintf('SELECT * FROM categoria_doc WHERE idCategoriaDoc = "%s"',$id);
$rs = mysql_query($sql);
while($conteudo = mysql_fetch_array($rs)){
$objVO->setIdCategoriaDoc($conteudo["idCategoriaDoc"]);
$objVO->setNomeCategoriaDoc($conteudo["nomeCategoriaDoc"]);
$retorno = clone $objVO;
}
return $retorno;
}
public function insert(categoriaDocVO $objVO) {
$sql = sprintf("INSERT INTO categoria_doc (nomeCategoriaDoc) VALUES ('%s')",
$objVO->getNomeCategoriaDoc()
);
mysql_query($sql);
$objVO->setIdCategoriaDoc(mysql_insert_id());
return $objVO;
}
public function update(categoriaDocVO $objVO) {
$sql = sprintf("UPDATE categoria_doc SET nomeCategoriaDoc = '%s' WHERE idCategoriaDoc = '%s'",
$objVO->getNomeCategoriaDoc(),
$objVO->getIdCategoriaDoc()
);
mysql_query($sql);
}
}
<file_sep>/VO/documentoVO.class.php
<?php
class documentoVO {
private $idDocumento = null;
private $data = null;
private $idFuncionario = null;
private $tipoDocumento = null;
private $idCategoriaDoc = null;
private $departamento = null;
private $ac = null;
private $descricaoDoc = null;
private $caminhoAnexoArquivo = null;
//idDocumento
public function getIdDocumento(){
return $this->idDocumento;
}
public function setIdDocumento($idDocumento){
$this->idDocumento = $idDocumento;
}
//data
public function getData(){
return $this->data;
}
public function setData($data){
$this->data = $data;
}
//idFuncionario
public function getIdFuncionario(){
return $this->idFuncionario;
}
public function setIdFuncionario($idFuncionario){
$this->idFuncionario = $idFuncionario;
}
//tipoDocumento
public function getTipoDocumento(){
return $this->tipoDocumento;
}
public function setTipoDocumento($tipoDocumento){
$this->tipoDocumento = $tipoDocumento;
}
//idCategoriaDoc
public function getIdCategoriaDoc(){
return $this->idCategoriaDoc;
}
public function setIdCategoriaDoc($idCategoriaDoc){
$this->idCategoriaDoc = $idCategoriaDoc;
}
//departamento
public function getDepartamento(){
return $this->departamento;
}
public function setDepartamento($departamento){
$this->departamento = $departamento;
}
//ac
public function getAc(){
return $this->ac;
}
public function setAc($ac){
$this->ac = $ac;
}
//descricaoDoc
public function getDescricaoDoc(){
return $this->descricaoDoc;
}
public function setDescricaoDoc($descricaoDoc){
$this->descricaoDoc = $descricaoDoc;
}
//caminhoAnexoArquivo
public function getCaminhoAnexoArquivo(){
return $this->caminhoAnexoArquivo;
}
public function setCaminhoAnexoArquivo($caminhoAnexoArquivo){
$this->caminhoAnexoArquivo = $caminhoAnexoArquivo;
}
}
?><file_sep>/DAO/processadorDAO.class.php
<?php
class processadorDAO{
public function getAll(){
$objVO = new processadorVO();
$retorno = array();
$sql = "SELECT * FROM processador";
$rs = mysql_query($sql);
while($conteudo = mysql_fetch_array($rs)){
$objVO->setIdProcessador($conteudo["idProcessador"]);
$objVO->setProcessador($conteudo["processador"]);
$retorno[] = clone $objVO;
}
return $retorno;
}
}//fim da classe
?>
<file_sep>/subCategorias.php
<?php
include "inc/JSON.php";
$json = new Services_JSON();
$data = array();
if(isset($_REQUEST["idCategoria"])){
//categoria inceres
if($_REQUEST["idCategoria"] == 1){
$data[0]['id'] = 1;
$data[0]['nome'] = 'Grade Amostral';
$data[1]['id'] = 2;
$data[1]['nome'] = 'Processamento';
$data[2]['id'] = 3;
$data[2]['nome'] = 'Importação Resultados de Solo';
$data[3]['id'] = 4;
$data[3]['nome'] = 'Recomendação';
$data[4]['id'] = 5;
$data[4]['nome'] = 'Apresentação Demo';
$data[5]['id'] = 6;
$data[5]['nome'] = 'Relatório';
$data[6]['id'] = 7;
$data[6]['nome'] = 'Arquivos do Controlador';
$data[7]['id'] = 8;
$data[7]['nome'] = 'Importação de Arquivo';
$data[8]['id'] = 9;
$data[8]['nome'] = 'Administração do Sistema';
//categoria desenvolvimento
}else if($_REQUEST["idCategoria"] == 2){
$data[0]['id'] = 10;
$data[0]['nome'] = 'Manuais';
$data[1]['id'] = 11;
$data[1]['nome'] = 'Vídeos';
$data[2]['id'] = 12;
$data[2]['nome'] = 'Sistema de Atendimento';
$data[3]['id'] = 13;
$data[3]['nome'] = 'Outros';
//categoria prestação de serviço
}else if($_REQUEST["idCategoria"] == 3){
$data[0]['id'] = 14;
$data[0]['nome'] = 'Instalação de Software';
$data[1]['id'] = 15;
$data[1]['nome'] = 'Manutenção de Computador';
$data[2]['id'] = 16;
$data[2]['nome'] = 'Orientação em Aplicativo';
$data[3]['id'] = 17;
$data[3]['nome'] = 'Conversão de Arquivo';
$data[4]['id'] = 18;
$data[4]['nome'] = 'Outros';
}
}
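// Example response (derived from the arrays above, documentation only): for idCategoria=3 the
// script echoes a JSON array such as
//   [{"id":14,"nome":"Instalação de Software"},{"id":15,"nome":"Manutenção de Computador"}, ...]
// (Services_JSON may escape accented characters as \uXXXX). The front-end is assumed to request
// this script via AJAX passing idCategoria; that calling code is not part of this file.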
echo $json->encode($data);<file_sep>/clientes.php
<?php
error_reporting(0);
session_start();
if (empty($_SESSION["usuario"])) {
session_destroy();
header("Location: index.php");
}
//Conexao
include_once './Conexao/conexao.php';
//VO
include_once './VO/tipoClienteVO.class.php';
include_once './VO/clienteVO.class.php';
include_once './VO/estadoVO.class.php';
//DAO
include_once './DAO/clienteDAO.class.php';
include_once './DAO/tipoClienteDAO.class.php';
//OBJ VO
$tipoClienteVO = new tipoClienteVO();
$clienteVO = new clienteVO();
$data = date("d/m/Y"); //data e hora atual
$dataBd = implode("-", array_reverse(explode("/", $data))); //converte a data p/ salvar no bd
$idCli = $_GET["par"]; //recupera o parametro do id
if ($idCli == 0) {
//novo cadastro de computador
} else {
//buscar o para atualizar
$clienteDAO = new clienteDAO();
$clienteVO = $clienteDAO->getById($idCli);
}
if (isset($_REQUEST["submit"]) && $_REQUEST["submit"] == "Salvar") {
//seta os dados
$clienteVO->setNomeCliente($_REQUEST["nomeCliente"]);
$clienteVO->setEmail($_REQUEST["email"]);
$clienteVO->setContato($_REQUEST["contato"]);
$clienteVO->setTelefone($_REQUEST["telefone"]);
$clienteVO->setCelular($_REQUEST["celular"]);
$clienteVO->setEndereco($_REQUEST["endereco"]);
$clienteVO->setCep($_REQUEST["cep"]);
$clienteVO->setCidade($_REQUEST["cidade"]);
$clienteVO->setIdEstado($_REQUEST["idEstado"]);
$clienteVO->setIdTipoCliente($_REQUEST["idTipoCliente"]);
$clienteVO->setAtivo($_REQUEST["ativo"]);
$clienteVO->setApagri($_REQUEST["apagri"]);
$clienteVO->setData($dataBd); //data no formato bd
$clienteDAO = new clienteDAO();
//verifica o codigo
echo "<script type=\"text/javascript\" src=\"javascript/funcoes.js\"></script>";
if (isset($_REQUEST["idCliente"]) && $_REQUEST["idCliente"] == "") {
//novo cadastro
$clienteDAO->insert($clienteVO);
$clienteVO = new clienteVO();
echo "<script>msg(1)</script>";
} elseif (isset($_REQUEST["idCliente"]) && $_REQUEST["idCliente"] != "") {
//atualizacao
$clienteVO->setIdCliente($_REQUEST["idCliente"]);
$clienteDAO->update($clienteVO);
$clienteVO = new clienteVO();
echo "<script>msg(2)</script>";
}
}//fim do salvar
?>
<html>
<head>
<meta charset="UTF-8">
<link rel="stylesheet" type="text/css" href="css/fontes.css">
<title></title>
<!--conf mensagens de erro-->
<style type="text/css">
div.error {
font-family: verdana;
font-size: 14px;
font-weight: bold;
color: red;
padding: 2px;
}
</style>
<script type="text/javascript" language="javascript" src="media/js/jquery.min.js"></script>
<script type="text/javascript" language="javascript" src="media/js/jquery.validate.js"></script>
<script type="text/javascript" src="media/js/jquery.meio.mask.js" charset="utf-8"></script>
<script type="text/javascript">
jQuery(function ($) {
$('input[type="text"]').setMask();
});
</script>
</head>
<body><br><br><br><br>
<form action="" method="post" id="meu_form">
<table border="0" align="center" cellpadding="15" cellspacing="15">
<!-- Titulo -->
<tr>
<td colspan="2" class="titulo" align="center">Cadastro de Clientes</td>
</tr>
<!-- Codigo-->
<tr>
<td align="right">Código:</td>
<td><input type="text" name="idCliente" id="idCliente" value="<?php echo $clienteVO->getIdCliente(); ?>" class="readonly" readonly>
Data: <input type="text" name="data" value="<?php echo $data; ?>" id="data" class="readonly" readonly size="12">
</td>
</tr>
<!-- Nome -->
<tr>
<td align="right">Nome:</td>
                <td><input type="text" required name="nomeCliente" id="nomeCliente" value="<?php echo $clienteVO->getNomeCliente(); ?>" maxlength="50" size="50"></td>
</tr>
<!-- Email -->
<tr>
<td align="right">Email:</td>
                <td><input type="email" required name="email" id="email" value="<?php echo $clienteVO->getEmail(); ?>" maxlength="50" size="50"></td>
</tr>
<!-- Contato -->
<tr>
<td align="right">Contato:</td>
<td>
<input type="text" name="contato" id="contato" value="<?php echo $clienteVO->getContato(); ?>" maxlength="30" size="30">
Cliente APagri <select name="apagri">
<?php
if ($clienteVO->getApagri() == 1) {
echo "<option value=\"1\" selected>Sim</option>";
echo "<option value=\"2\">Não</option>";
                            } else if ($clienteVO->getApagri() == 2) {
echo "<option value=\"1\">Sim</option>";
echo "<option value=\"2\" selected>Não</option>";
} else {
echo "<option value=\"1\">Sim</option>";
echo "<option value=\"2\" selected>Não</option>";
}
?>
</select>
</td>
</tr>
<!--telefone e celular-->
<tr>
<td colspan="2">
Telefone: <input type="text" alt="phone" name="telefone" id="telefone" placeholder="(__) ____-____" value="<?php echo $clienteVO->getTelefone(); ?>" size="15">
Celular: <input type="text" alt="phone" name="celular" id="celular" placeholder="(__) ____-____" value="<?php echo $clienteVO->getCelular(); ?>" size="15">
</td>
</tr>
<!--endereco-->
<tr>
<td align="right">Endereço:</td>
<td><input type="text" name="endereco" id="endereco" placeholder="Ex.: R: Alto da Floresta, 234, Centro" value="<?php echo $clienteVO->getEndereco(); ?>" size="60" maxlength="50"></td>
</tr>
<!-- cep, cidade, estado -->
<tr>
<td colspan="2">
CEP: <input type="text" alt="cep" name="cep" id="cep" placeholder="13424-155" value="<?php echo $clienteVO->getCep(); ?>" size="11">
Cidade: <input type="text" name="cidade" id="cidade" placeholder="Piracicaba" value="<?php echo $clienteVO->getCidade(); ?>">
Estado:
<select name="idEstado">
<?php
$sql = "SELECT * FROM estado";
$rs = mysql_query($sql);
while ($conteudo = mysql_fetch_array($rs)) {
//recupera o id
$idEstado = $clienteVO->getIdEstado();
if ($idEstado == $conteudo["idEstado"]) {
echo "<option value=\"$conteudo[idEstado]\" selected>$conteudo[estado]</option>";
} else {
echo "<option value=\"$conteudo[idEstado]\">$conteudo[estado]</option>";
}
}
?>
</select>
</td>
</tr>
<!-- TipoCliente -->
<tr>
<td align="right">Tipo Cliente:</td>
<td>
<select name="idTipoCliente">
<?php
$sql = "SELECT * FROM tipo_cliente";
$rs = mysql_query($sql);
while ($conteudo = mysql_fetch_array($rs)) {
$idTipoCliente = $clienteVO->getIdTipoCliente();
if ($idTipoCliente == $conteudo["idTipoCliente"]) {
echo "<option value=\"$conteudo[idTipoCliente]\" selected>$conteudo[nomeTipoCliente]</option>";
} else {
echo "<option value=\"$conteudo[idTipoCliente]\">$conteudo[nomeTipoCliente]</option>";
}
}
?>
</select>
<!--Ativo-->
Ativo:
<select name="ativo">
<?php
if ($clienteVO->getAtivo() == 1) {
echo "<option value=\"1\" selected>Sim</option>";
echo "<option value=\"2\">Não</option>";
} else if ($clienteVO->getAtivo() == 2) {
echo "<option value=\"1\">Sim</option>";
echo "<option value=\"2\" selected>Não</option>";
} else {
echo "<option value=\"1\" selected>Sim</option>";
echo "<option value=\"2\">Não</option>";
}
?>
</select>
<input type="submit" name="submit" value="Salvar">
<?php echo "<a href=\"downloadListaEmailClientes.php\">Download da Lista de Emails </a>"; ?>
</td>
</tr>
</table>
</form>
<!--Formatação com dataTable-->
<script src="media/js/jquery.js"></script>
<script src="media/js/jquery.dataTables.min.js"></script>
<link rel="stylesheet" href="media/css/jquery.dataTables.css">
<script type="text/javascript">
$(document).ready(function () {
$('#tabela1').dataTable({
"scrollY": "300px",
"scrollCollapse": true,
"paging": false,
"oLanguage": {
"sUrl": "media/pt-br.txt"
}
});
});
</script>
<div style="width: 1100px; margin: auto;">
<div id="demo">
<table cellpadding="0" cellspacing="0" border="0" class="display" id="tabela1" align="center">
<thead>
<tr>
<th>Código</th>
<th>Nome</th>
<th>Telefone</th>
<th>Celular</th>
<th>Email</th>
<th>Contato</th>
<th>Tipo</th>
<th>Ativo</th>
<th>Editar</th>
</tr>
</thead>
<?php
$clienteDAO = new clienteDAO();
$listaClientes = $clienteDAO->getAll(1);
$tipoClienteDAO = new tipoClienteDAO();
$listaTipos = $tipoClienteDAO->getAll();
for ($i = 0; $i < sizeof($listaClientes); $i++) {
$dadosClientes = $listaClientes[$i];
for ($z = 0; $z < sizeof($listaTipos); $z++) {
$dadosTipos = $listaTipos[$z];
if ($dadosTipos->getIdTipoCliente() == $dadosClientes->getIdTipoCliente()) {
$nomeTipoCliente = $dadosTipos->getNomeTipoCliente();
}
}
$ativo = $dadosClientes->getAtivo();
if ($ativo == 1) {
$nomeAtivo = "Sim";
} elseif ($ativo == 2) {
$nomeAtivo = "Não";
}
echo "
<tr>
<td align=\"center\" class=\"textoDataTable\">" . $dadosClientes->getIdCliente() . "</td>
<td align=\"center\" class=\"textoDataTable\">" . $dadosClientes->getNomeCliente() . "</td>
<td align=\"center\" class=\"textoDataTable\">" . $dadosClientes->getTelefone() . "</td>
<td align=\"center\" class=\"textoDataTable\">" . $dadosClientes->getCelular() . "</td>
<td align=\"center\" class=\"textoDataTable\">" . $dadosClientes->getEmail() . "</td>
<td align=\"center\" class=\"textoDataTable\">" . $dadosClientes->getContato() . "</td>
<td align=\"center\" class=\"textoDataTable\">" . $nomeTipoCliente . "</td>
<td align=\"center\" class=\"textoDataTable\">" . $nomeAtivo . "</td>
<td><a href=\"?red=cadastroClientes&par=" . $dadosClientes->getIdCliente() . "\"><img src=\"img/lapis.png\"></a></td>
</tr>
";
}
?>
</table>
</div>
</div>
</body>
</html>
<file_sep>/downloadListaEmailClientes.php
<?php
include_once './Conexao/conexao.php';
include_once './VO/clienteVO.class.php';
include_once './DAO/clienteDAO.class.php';
$arquivo = "./img/relatorios/lista.txt";
if (file_exists($arquivo)) {
unlink($arquivo);
}
//cria o arquivo e abre
$arquivo = "./img/relatorios/lista.txt";
$abreArquivo = fopen($arquivo, "w");
if($abreArquivo == FALSE){
echo "Erro ao abrir o arquivo!!!";
exit;
}
$clienteVO = new clienteVO();
$clienteDAO = new clienteDAO();
$lista = $clienteDAO->getAll(2);
for ($i = 0; $i < sizeof($lista); $i++) {
$listaEmail = $lista[$i];
$email = $listaEmail->getEmail();
$conteudo = "$email;";
$grava = fwrite($abreArquivo, $conteudo);
}
fclose($abreArquivo);
$arquivoNome = 'lista_emails.txt'; // nome do arquivo que será enviado p/ download
$arquivoLocal = './img/relatorios/' . $arquivoNome; // caminho absoluto do arquivo
//download do arquivo
if (file_exists($arquivo)) {
header('Content-Description: File Transfer');
header('Content-Type: application/octet-stream');
header('Content-Disposition: attachment; filename='.basename($arquivo));
header('Expires: 0');
header('Cache-Control: must-revalidate');
header('Pragma: public');
header('Content-Length: ' . filesize($arquivo));
readfile($arquivo);
exit;
}<file_sep>/site.php
<?php
session_start();
if (empty($_SESSION["usuario"])) {
session_destroy();
header("Location: index.php");
}
?>
<html>
<head>
<meta charset="UTF-8">
<title></title>
<link rel="stylesheet" type="text/css" href="css/estilo.css">
<link rel="stylesheet" type="text/css" href="css/menu.css">
</head>
<body>
<div id="superior">
<!--pagina de login -->
<?php
include_once './index.php';
?>
</div>
<div id="menu">
<!--pagina de menu -->
<ul>
<!--menu com sub menu Atendimento-->
<li>
<a>Atendimento</a>
<ul>
<li><a href="?red=iniciar&par=0">Iniciar</a></li>
<li><a href="?red=pesquisarAtendimentos">Pesquisar</a></li>
</ul>
</li><!--fim-->
<!--menu com sub menu Consultar-->
<li>
<a>Consultar</a>
<ul>
<li><a href="?red=funcComp&par=0">Computador - Funcionário</a></li>
<!--<li><a href="?red=dadosAcesso&par=0">Dados Acesso</a></li>-->
</ul>
</li>
<!--menu com sub menu Cadastro-->
<li>
<a>Cadastro</a>
<ul>
<li><a href="?red=cadastroClientes&par=0">Clientes</a></li>
<li><a href="?red=cadastroFuncionarios&par=0">Funcionários</a></li>
<li><a href="?red=cadastroUnidades">Unidades</a></li>
<li><a href="?red=cadastroComputadores&par=0">Computadores</a></li>
</ul>
</li>
<!--relatorios-->
<li>
<a>Relatórios</a>
<ul>
<li><a href="?red=relatFunc">Funcionários</a></li>
<li><a href="?red=atendGeral">Atendimentos Geral</a></li>
<li><a href="?red=atendDetalhado">Atendimento Detalhado</a></li>
</ul>
</li>
<!--documentos-->
<li>
<a>Documentos</a>
<ul>
<li><a href="?red=documentos&par=0">Cadastro</a></li>
<li><a href="?red=categorias&par=0">Categorias</a></li>
</ul>
</li>
</ul><!--fim do menu-->
</div>
<div id="conteudo">
<!--pagina de conteudo -->
<?php
$red = NULL;
if (isset($_REQUEST['red'])) {
$red = $_REQUEST['red']; //sessão
}
$par = NULL;
if (isset($_REQUEST['par'])) {
$par = $_REQUEST['par']; //sessão
}
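// Routing example (documentation only): the menu links above build URLs like
//   site.php?red=cadastroClientes&par=0  -> includes clientes.php with par=0 (new record)
//   site.php?red=cadastroClientes&par=7  -> includes clientes.php and loads cliente 7 for editing
// par=7 is an illustrative id; the mapping itself follows from the if/elseif chain below.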
//primeiro item do menu ATENDIMENTO
if ($red == "iniciar" && $par != "") {
include_once './atendimentoCompleto.php';
}elseif ($red == "pesquisarAtendimentos") {
include_once './pesquisaAtendimento.php';
}
//segundo item do menu CONSULTAR
elseif ($red == "funcComp" && $par != "") {
include_once './func_comp.php';
} elseif ($red == "dadosAcesso" && $par != "") {
include_once './loginCliente.php';
}
//terceiro item do menu CADASTRO
elseif ($red == "cadastroClientes" && $par != "") {
include_once './clientes.php';
} elseif ($red == "cadastroFuncionarios" && $par != "") {
include_once './funcionarios.php';
} elseif ($red == "cadastroUnidades" && $par != "") {
include_once './unidades.php';
} elseif ($red == "cadastroUnidades") {
include_once './unidades.php';
} elseif ($red == "cadastroComputadores" && $par != "") {
include_once './computadores.php';
}
//quarto item relatorios
elseif ($red == "relatFunc") {
include_once './relatorioFuncionarios.php';
}elseif ($red == "atendGeral") {
include_once './relatorioGeralAtendimento.php';
}elseif ($red == "atendDetalhado") {
include_once './relatorioDetalhadoAtendimento.php';
}
//quinto item documentos
elseif ($red == "documentos" && $par != "") {
include_once './documento.php';
} elseif ($red == "categorias" && $par != "") {
include_once './categoriaDoc.php';
}elseif ($red == "listaDoc" && $par != 0) {
include_once './devolucaoDoc.php';
}elseif ($red == "pdfDoc" && $par != 0) {//envia p página de relatorio e depois faz o download
include_once './relatorioDoc.php';
}elseif ($red == "pesqDevDoc") {//envia p página de relatorio e depois faz o download
include_once './pesquisaDevolucaoDoc.php';
}
?>
</div>
<div id="rodape">
<!--pagina de rodape -->
</div>
</body>
</html>
<file_sep>/index.php
<html>
<head>
<meta charset="UTF-8">
<link rel="stylesheet" type="text/css" href="css/menu.css">
<title></title>
</head>
<body>
<form action="verifica.php" method="post">
<table border="0" align="center" cellpading="10" cellspacing="10">
<tr>
<td><img src="img/inceres.jpg"></td>
<td> Usuario: <input type="text" name="usuario" id="usuario"></td>
<td>Senha: <input type="password" name="senha" id="senha"></td>
<td> <input type="submit" value="OK"></td>
<?php
echo "<td> <a href=\"logout.php\">Sair</a></td>";
?>
</tr>
</table>
</form>
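    <!--
    O formulário acima envia usuario/senha para verifica.php (não exibido neste trecho). Esboço
    hipotético, apenas ilustrativo, do que esse script presumivelmente faz: validar as credenciais
    na tabela usuario e preencher $_SESSION["usuario"], que é verificada em site.php e sessao.php.
    Algo na linha de:

        session_start();
        $rs = mysql_query(sprintf("SELECT usuario FROM usuario WHERE usuario = '%s' AND senha = '%s'",
            $_POST["usuario"], $_POST["senha"]));
        if (mysql_num_rows($rs) == 1) { $_SESSION["usuario"] = $_POST["usuario"]; }
        header("Location: site.php");
    -->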
</body>
</html>
<file_sep>/VO/tipoAtendimentoVO.class.php
<?php
class tipoAtendimentoVO{
//atributos
private $idTipoAtendimento = null;
private $tipo = null;
//construtor
public function tipoAtendimentoVO(){
}
//get e set
//idTipo
public function getIdTipoAtendimento(){
return $this->idTipoAtendimento;
}
public function setIdTipoAtendimento($idTipoAtendimento){
$this->idTipoAtendimento = $idTipoAtendimento;
}
//tipo
public function getTipo(){
return $this->tipo;
}
public function setTipo($tipo){
$this->tipo = $tipo;
}
}
?>
<file_sep>/DAO/tipoComputadorDAO.class.php
<?php
class tipoComputadorDAO{
public function getAll(){
$objVO = new tipoComputadorVO();
$retorno = array();
$sql = "SELECT * FROM tipo_computador";
$rs = mysql_query($sql);
while($conteudo = mysql_fetch_array($rs)){
$objVO->setIdTipoComputador($conteudo["idTipoComputador"]);
$objVO->setNomeTipoComputador($conteudo["nomeTipoComputador"]);
$retorno[] = clone $objVO;
}
return $retorno;
}
}//fim
?>
<file_sep>/pdfDocumento.php
<?php
include_once './sessao.php';
ini_set('default_charset', 'UTF-8');
include_once './Conexao/conexao.php';
include_once("fpdf13/fpdf.php");
define('FPDF_FONTPATH', './fpdf13/font/');
//recupera o codigo
$idDocumento = $_REQUEST["idDocumento"];
//busca os dados
$sql = "SELECT d.idDocumento, cd.nomeCategoriaDoc, d.ac, d.descricaoDoc FROM documento AS d INNER JOIN
categoria_doc AS cd ON cd.idCategoriaDoc = d.idCategoriaDoc WHERE d.idDocumento = $idDocumento";
$rs = mysql_query($sql);
while ($conteudo = mysql_fetch_array($rs)) {
$categoria = $conteudo["nomeCategoriaDoc"];
$ac = $conteudo["ac"];
$descricaoDocumentos = $conteudo["descricaoDoc"];
}
$categF = utf8_decode ($categoria);
$acF = utf8_decode($ac);
$descDocF = utf8_decode($descricaoDocumentos);
//Novo documento PDF com orientação P - Retrato (Picture) que pode ser também L - Paisagem (Landscape)
$pdf = new FPDF('P', 'cm', 'A4');
$pdf->Open();
$pdf->AddPage();
$pdf->SetFont('arial', 'B', 10);
$tituloRel = utf8_decode("APAGRI CONSULTORIA AGRONÔMICA COMÉRCIO E LOCAÇÃO DE EQUIPAMENTOS AGRÍCOLAS LTDA");
$pdf->SetMargins(2, 0);
$pdf->Cell(20, 1.5, $tituloRel, "B", 1, 'C');
$pdf->SetFont('arial', '', 10); // Definindo Fonte
$pdf->SetMargins(2, 0);
//PROTOCOLO 1
$protocolo = utf8_decode("PROTOCOLO DE ENTREGA DE MALOTE Nº ");
$pdf->Cell(10, 0.7, $protocolo . " " . $idDocumento, "B", 1); //largura, altura, titulo, borda 1-sim 0-não, 0, alinhamento
$pdf->Cell(3, 0.7, "PARA: " . $categF, "B", 1);
$pdf->Cell(3, 0.7, "A/C: " . $acF, "B", 1);
$pdf->Cell(10, 0.7, "DATA: ____/____/______", "B", 1);
$pdf->Cell(10, 0.7, "ASSINATURA: _________________________", "B", 1);
$pdf->Cell(100, 0.7, "__________________________________________________________________________________________", "B", 1);
//PROTOCOLO 2
$protocolo = utf8_decode("PROTOCOLO DE ENTREGA DE MALOTE Nº ");
$pdf->SetFont('arial', 'B', 10);
$pdf->SetMargins(2, 0);
$pdf->Cell(18, 1.5, $tituloRel, "B", 1, 'C');
$pdf->SetFont('arial', '', 10); // Definindo Fonte
$pdf->Cell(10, 0.7, $protocolo . " " . $idDocumento, "B", 1); //largura, altura, titulo, borda 1-sim 0-não, 0, alinhamento
$pdf->Cell(3, 0.7, "PARA: " . $categF, "B", 1);
$pdf->Cell(3, 0.7, "A/C: " . $acF, "B", 1);
$descricaoDoc = utf8_decode("DESCRIÇÃO DOS DOCUMENTOS ENTREGUES");
$pdf->Cell(10, 0.7, $descricaoDoc, "B", 1);
//escrever o conteudo
$pdf->SetTextColor(255,0,0);
$pdf->MultiCell(19, 0.5, $descDocF, "",1);
$pdf->SetTextColor(0,0,0);
$pdf->SetXY(2, 14); //controla o espaço
$pdf->Cell(10, 0.7, "DATA: ____/____/______", "B", 1);
$pdf->Cell(10, 0.7, "ASSINATURA: _________________________", "B", 1);
$viaRemetente = utf8_decode("VIA REMETENTE, DEVOLVER ASSINADO E DATADO APÓS A CONFERÊNCIA");
$pdf->Cell(10, 0.7, $viaRemetente, "B", 1);
$pdf->Cell(100, 0.7, "__________________________________________________________________________________________", "B", 1);
//PROTOCOLO 3
$pdf->SetFont('arial', 'B', 10);
$pdf->SetMargins(2, 0);
$pdf->Cell(18, 1.5, $tituloRel, "B", 1, 'C');
$pdf->SetFont('arial', '', 10); // Definindo Fonte
$pdf->Cell(10, 0.7, $protocolo . " " . $idDocumento, "B", 1); //largura, altura, titulo, borda 1-sim 0-não, 0, alinhamento
$pdf->Cell(3, 0.7, "PARA: " . $categF, "B", 1);
$pdf->Cell(3, 0.7, "A/C: " . $acF, "B", 1);
$pdf->Cell(10, 0.7, $descricaoDoc, "B", 1);
//escrever o conteudo
$pdf->SetTextColor(255,0,0);
$pdf->MultiCell(19, 0.5, $descDocF, "",1);
$pdf->SetTextColor(0,0,0);
$pdf->SetXY(2, 25); //controla o espaço
$pdf->Cell(10, 0.7, "DATA: ____/____/______", "B", 1);
$pdf->Cell(10, 0.7, "ASSINATURA: _________________________", "B", 1);
$viaDestinatario = utf8_decode("VIA DESTINATÁRIO");
$pdf->Cell(10, 0.7, $viaDestinatario, "B", 1);
$string = "protocolo_".$idDocumento;
$pdf->Output($string.".pdf","D");
?>
<file_sep>/VO/funcionarioVO.class.php
<?php
class funcionarioVO{
//atributos
private $idFuncionario = null;
private $nomeFuncionario = null;
private $email = null;
private $idUnidade = null;
private $telefone1 = null;
private $telefone2 = null;
private $skype = null;
private $ativo = null;
private $idDepartamento = null;
//get set
//idFuncionario
public function getIdFuncionario(){
return $this->idFuncionario;
}
public function setIdFuncionario($idFuncionario){
$this->idFuncionario = $idFuncionario;
}
//nomeFuncionario
public function getNomeFuncionario(){
return $this->nomeFuncionario;
}
public function setNomeFuncionario($nomeFuncionario){
$this->nomeFuncionario = $nomeFuncionario;
}
//email
public function getEmail(){
return $this->email;
}
public function setEmail($email){
$this->email = $email;
}
//idUnidade
public function getIdUnidade(){
return $this->idUnidade;
}
public function setIdUnidade($idUnidade){
$this->idUnidade = $idUnidade;
}
//telefone1
public function getTelefone1(){
return $this->telefone1;
}
public function setTelefone1($telefone1){
$this->telefone1 = $telefone1;
}
//telefone2
public function getTelefone2(){
return $this->telefone2;
}
public function setTelefone2($telefone2){
$this->telefone2 = $telefone2;
}
//skype
public function getSkype(){
return $this->skype;
}
public function setSkype($skype){
$this->skype = $skype;
}
//ativo
public function getAtivo(){
return $this->ativo;
}
public function setAtivo($ativo){
$this->ativo = $ativo;
}
public function getIdDepartamento() {
return $this->idDepartamento;
}
public function setIdDepartamento($idDepartamento){
$this->idDepartamento = $idDepartamento;
}
}// fim da classe
?>
<file_sep>/relatorioFuncionarios.php
<?php
include_once './sessao.php';
?>
<html>
<head>
<meta charset="UTF-8">
<title></title>
</head>
<body>
<form action="pdfFunc.php" method="post">
<br><br><br>
<table align ="center" border="0">
<tr>
<td>
<?php
echo "<a href=\"pdfFunc.php\" target=\"_blank\">Clique para baixar a lista de Funcionários</a>";
?>
</td>
</tr>
</table>
</form>
</body>
</html>
<file_sep>/categoriaDoc.php
<?php
include_once './sessao.php';
//Conexao
include_once './Conexao/conexao.php';
//VO
include_once './VO/categoriaDocVO.class.php';
//DAO
include_once './DAO/categoriaDocDAO.class.php';
//OBJVO
$categoriaDocVO = new categoriaDocVO();
$idCatDoc = $_GET["par"]; //recupera o parametro do id
if ($idCatDoc == 0) {
//novo cadastro de computador
} else {
//buscar o para atualizar
$categoriaDocDAO = new categoriaDocDAO();
$categoriaDocVO = $categoriaDocDAO->getById($idCatDoc);
}
echo "<script type=\"text/javascript\" src=\"javascript/funcoes.js\"></script>";
if (isset($_REQUEST["submit"]) && $_REQUEST["submit"] == "Salvar") {
//seta os campos
$categoriaDocVO->setNomeCategoriaDoc($_REQUEST["nomeCategoriaDoc"]);
if (isset($_REQUEST["idCategoriaDoc"]) && $_REQUEST["idCategoriaDoc"] == "") {
$categoriaDocDAO = new categoriaDocDAO();
$categoriaDocDAO->insert($categoriaDocVO);
$categoriaDocVO = new categoriaDocVO();
echo "<script>msg(1)</script>";
} else {
$categoriaDocVO->setIdCategoriaDoc($_REQUEST["idCategoriaDoc"]);
$categoriaDocDAO->update($categoriaDocVO);
$categoriaDocVO = new categoriaDocVO();
echo "<script>msg(2)</script>";
}
} elseif (isset($_REQUEST["submit"]) && $_REQUEST["submit"] == "Cancelar") {
header("Location: site.php");
exit;
}
?>
<html>
<head><br><br><br><br>
<meta charset="UTF-8">
<link rel="stylesheet" type="text/css" href="css/fontes.css">
<script language="javascript" type="text/javascript" src="media/js/jquery.js"></script>
<script language="javascript" type="text/javascript" src="media/js/jquery.validate.js"></script>
<title></title>
</head>
<body>
<form action="#" method="post" id="categoria">
<table border="0" align="center" cellpadding="15" cellspacing="15">
<!-- Titulo -->
<tr>
<td colspan="4" class="titulo" align="center">Cadastro de Categorias</td>
</tr>
<!-- Codigo-->
<tr>
<td align="right">Código:</td>
<td>
<input type="text" name="idCategoriaDoc" id="idCategoriaDoc" value="<?php echo $categoriaDocVO->getIdCategoriaDoc() ?>" class="readonly" readonly>
</td>
</tr>
<!-- nomeCategoria -->
<tr>
<td>Nome da Categoria:</td>
<td><input type="text" name="nomeCategoriaDoc" required id="nomeCategoriaDoc" value="<?php echo $categoriaDocVO->getNomeCategoriaDoc(); ?>" maxlength="50" size="60"></td>
</tr>
<!-- botoes -->
<tr>
<td colspan="4" align="center">
<input type="submit" name="submit" value="Salvar">
</td>
</tr>
</table>
</form>
<!--Formatação com dataTable-->
<script src="media/js/jquery.js"></script>
<script src="media/js/jquery.dataTables.min.js"></script>
<link rel="stylesheet" href="media/css/jquery.dataTables.css">
<script type="text/javascript">
$(document).ready(function () {
$('#tabela1').dataTable({
"scrollY": "300px",
"scrollCollapse": true,
"paging": false,
"oLanguage": {
"sUrl": "media/pt-br.txt"
}
});
});
</script>
<div style="width: 750px; margin: auto;">
<div id="demo">
<table cellpadding="0" cellspacing="0" border="0" class="display" id="tabela1" align="center">
<thead>
<tr>
<th>Código</th>
<th>Nome</th>
<th>Editar</th>
</tr>
</thead>
<?php
$categoriaDocDAO = new categoriaDocDAO();
$listaCategoriaDoc = $categoriaDocDAO->getAll();
for ($i = 0; $i < sizeof($listaCategoriaDoc); $i++) {
$dadosCategDoc = $listaCategoriaDoc[$i];
echo "
<tr>
<td align=\"center\" class=\"textoDataTable\">" . $dadosCategDoc->getIdCategoriaDoc() . "</td>
<td align=\"center\" class=\"textoDataTable\">" . $dadosCategDoc->getNomeCategoriaDoc() . "</td>
<td align=\"center\"><a href=\"?red=categorias&par=" . $dadosCategDoc->getIdCategoriaDoc() . "\"><img src=\"img/lapis.png\"></a></td>
</tr>
";
}
?>
</table></div></div>
</body>
</html>
<file_sep>/VO/processadorVO.class.php
<?php
class processadorVO{
//atributos
private $idProcessador = null;
private $processador = null;
//construtor
public function processadorVO(){
}
//get set
//idProcessador
public function getIdProcessador(){
return $this->idProcessador;
}
public function setIdProcessador($idProcessador){
$this->idProcessador = $idProcessador;
}
//processador
public function getProcessador(){
return $this->processador;
}
public function setProcessador($processador){
$this->processador = $processador;
}
}
?>
<file_sep>/VO/atendimentoVO.class.php
<?php
class atendimentoVO{
//atributos
private $idAtendimento = null;
private $data = null;
private $idTipoAtendimento = null;
private $idUsuario = null;
//get e set
//idAtendimento
public function getIdAtendimento(){
return $this->idAtendimento;
}
public function setIdAtendimento($idAtendimento){
$this->idAtendimento = $idAtendimento;
}
//data
public function getData(){
return $this->data;
}
public function setData($data){
$this->data = $data;
}
//idTipo
public function getIdTipoAtendimento(){
return $this->idTipoAtendimento;
}
public function setIdTipoAtendimento($idTipoAtendimento){
$this->idTipoAtendimento = $idTipoAtendimento;
}
//idUsuario
public function getIdUsuario(){
return $this->idUsuario;
}
public function setIdUsuario($idUsuario){
$this->idUsuario = $idUsuario;
}
}
?>
<file_sep>/DAO/sistemaOperacionalDAO.class.php
<?php
class sistemaOperacionalDAO{
public function getAll(){
$objVO = new sistemaOperacionalVO();
$retorno = array();
$sql = "SELECT * FROM sistema_operacional";
$rs = mysql_query($sql);
while ($conteudo = mysql_fetch_array($rs)){
$objVO->setIdSistemaOperacional($conteudo["idSistemaOperacional"]);
$objVO->setSistemaOperacional($conteudo["sistemaOperacional"]);
$retorno[] = clone $objVO;
}
return $retorno;
}
}//fim da classe
?>
<file_sep>/devolucaoDoc.php
<?php
include_once './sessao.php';
//Conexao
include_once './Conexao/conexao.php';
//VO
include_once './VO/devolucaoDocVO.class.php';
//DAO
include_once './DAO/devolucaoDocDAO.class.php';
//OBJVO
$devolucaoDocVO = new devolucaoDocVO();
$data = date("d/m/Y"); //data e hora atual
$dataBd = implode("-", array_reverse(explode("/", $data))); //converte a data p/ salvar no bd
//recupera o id
$idDevolucao = $_GET["par"];
//tratamento dos botoes
if (isset($_REQUEST["submit"]) && $_REQUEST["submit"] == "Finalizar") {
//seta os campos
$devolucaoDocVO->setIdDocumento($_REQUEST["idDocumento"]);
$devolucaoDocVO->setObservacoes($_REQUEST["observacoes"]);
$devolucaoDocVO->setData($dataBd);
$devolucaoDocDAO = new devolucaoDocDAO();
$devolucaoDocDAO->insert($devolucaoDocVO);
$devolucaoDocVO = new devolucaoDocVO();
header("Location: site.php");
exit;
} elseif (isset($_REQUEST["submit"]) && $_REQUEST["submit"] == "Cancelar") {
$devolucaoDocVO = new devolucaoDocVO();
header("Location: site.php");
exit;
}
?>
<html>
<head><br><br><br><br>
<meta charset="UTF-8">
<link rel="stylesheet" type="text/css" href="css/fontes.css">
<script language="javascript" type="text/javascript" src="media/js/jquery.js"></script>
<script language="javascript" type="text/javascript" src="media/js/jquery.validate.js"></script>
<script type="text/javascript" src="media/js/jquery-1.2.6.pack.js" charset="utf-8"></script>
<script type="text/javascript" src="media/js/jquery.maskedinput-1.1.4.pack.js" charset="utf-8"></script>
<title></title>
</head>
<body>
<form action="" method="post">
<table border="0" align="center" cellpadding="15" cellspacing="15">
<!-- Titulo -->
<tr>
<td colspan="4" class="titulo" align="center">Devolução de Documentos</td>
</tr>
<tr>
<!--codigo-->
<td align="right">Código:</td>
<td><input type="text" name="idDocumento" id="idDocumento" size="15" maxlength="11" value="<?php echo $idDevolucao; ?>" class="readonly" readonly>
<!--data-->
Data:
<input type="text" name="data" id="data" size="15" maxlength="10" value="<?php echo $data; ?>" class="readonly" readonly></td>
</tr>
<!-- observacoes -->
<tr>
<td>Observações:</td>
<td><textarea name="observacoes" id="observacoes" rows="2" cols="55" maxlength="100"></textarea></td>
</tr>
<!-- botoes -->
<tr>
<td colspan="4" align="center">
<input type="submit" name="submit" value="Finalizar">
<input type="submit" name="submit" value="Cancelar">
</td>
</tr>
</table>
</form>
</body>
</html>
<file_sep>/VO/atendimentoTempoVO.class.php
<?php
class atendimentoTempoVO{
//atributos
private $idAtendimentoTempo = null;
private $idAtendimento = null;
private $idTempo;
//get set
//idAtendimentoTempo
public function getIdAtendimentoTempo(){
return $this->idAtendimentoTempo;
}
public function setIdAtendimentoTempo($idAtendimentoTempo){
$this->idAtendimentoTempo = $idAtendimentoTempo;
}
//idAtendimento
public function getIdAtendimento(){
return $this->idAtendimento;
}
public function setIdAtendimento($idAtendimento){
$this->idAtendimento = $idAtendimento;
}
//idTempo
public function getIdTempo(){
return $this->idTempo;
}
public function setIdTempo($idTempo){
$this->idTempo = $idTempo;
}
}
?>
<file_sep>/DAO/memoriaDAO.class.php
<?php
class memoriaDAO{
public function getAll(){
$objVO = new memoriaVO();
$retorno = array();
$sql = "SELECT * FROM memoria";
$rs = mysql_query($sql);
while($conteudo = mysql_fetch_array($rs)){
$objVO->setIdMemoria($conteudo["idMemoria"]);
$objVO->setMemoria($conteudo["memoria"]);
$retorno[] = clone $objVO;
}
return $retorno;
}
}//fim da classe
?><file_sep>/Conexao/conexao.php
<?php
$servidor = "mysql.apagri.com.br";
$usuario = "apagri02";
$senha = "<PASSWORD>";
$bd = "apagri02";
$conexao = mysql_connect($servidor, $usuario, $senha);
$bancoDados = mysql_select_db($bd, $conexao);
mysql_set_charset('UTF8', $conexao);
mysql_query("SET NAMES 'utf8'");
mysql_query('SET character_set_connection=utf8');
mysql_query('SET character_set_client=utf8');
mysql_query('SET character_set_results=utf8');
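//Esboço opcional (não ativado) de verificação da conexão, caso se queira interromper cedo em caso de falha:
//if (!$conexao || !$bancoDados) {
//    die("Erro ao conectar ao banco de dados: " . mysql_error());
//}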
<file_sep>/VO/sistemaOperacionalVO.class.php
<?php
class sistemaOperacionalVO{
//atributos
private $idSistemaOperacional = null;
private $sistemaOperacional = null;
//construtor
public function sistemaOperacionalVO(){
}
//get set
//idSistemaOperacional
public function getIdSistemaOperacional(){
return $this->idSistemaOperacional;
}
public function setIdSistemaOperacional($idSistemaOperacional){
$this->idSistemaOperacional = $idSistemaOperacional;
}
//sistemaOperacional
public function getSistemaOperacional(){
return $this->sistemaOperacional;
}
public function setSistemaOperacional($sistemaOperacional){
$this->sistemaOperacional = $sistemaOperacional;
}
}
?>
<file_sep>/VO/departamentoFuncVO.class.php
<?php
class departamentoFuncVO {
private $idDepartamento;
private $nomeDepartamento;
//get e set
//idDepartamento
public function getIdDepartamento() {
return $this->idDepartamento;
}
public function setIdDepartamento($idDepartamento) {
$this->idDepartamento = $idDepartamento;
}
//nomeDepartamento
public function getNomeDepartamento() {
return $this->nomeDepartamento;
}
public function setNomeDepartamento($nomeDepartamento) {
$this->nomeDepartamento = $nomeDepartamento;
}
}
<file_sep>/loginCliente.php
<?php
include_once 'Conexao/conexao.php';
include_once './VO/loginClienteVO.class.php';
include_once './VO/clienteVO.class.php';
include_once './DAO/loginClienteDAO.class.php';
include_once './DAO/clienteDAO.class.php';
$clienteVO = new clienteVO();
$loginClienteVO = new loginClienteVO();
//recupera o parametro do id
$idLogin = $_GET["par"];
if ($idLogin == 0) {
//novo cadastro
} else {
//buscar os dados
$loginClienteDAO = new loginClienteDAO();
$loginClienteVO = $loginClienteDAO->getById($idLogin);
}
if (isset($_REQUEST["submit"]) && $_REQUEST["submit"] == "Salvar") {
//seta os dados
$loginClienteVO->setIdCliente($_REQUEST["idCliente"]);
$loginClienteVO->setLogin($_REQUEST["login"]);
$loginClienteVO->setSenha($_REQUEST["senha"]);
$loginClienteVO->setAtivo($_REQUEST["ativo"]);
$loginClienteDAO = new loginClienteDAO();
//verifica o codigo
echo "<script type=\"text/javascript\" src=\"javascript/funcoes.js\"></script>";
if (isset($_REQUEST["idLoginCliente"]) && $_REQUEST["idLoginCliente"] == "") {
//novo cadastro
$loginClienteDAO->insert($loginClienteVO);
$loginClienteVO = new loginClienteVO();
echo "<script>msg(1)</script>";
} else {
//atualiza
$loginClienteVO->setIdLoginCliente($_REQUEST["idLoginCliente"]);
$loginClienteDAO->update($loginClienteVO);
$loginClienteVO = new loginClienteVO();
echo "<script>msg(2)</script>";
}
}
?>
<html>
<head><br><br><br><br>
<meta charset="UTF-8">
<link rel="stylesheet" type="text/css" href="css/fontes.css">
<title></title>
</head>
<body>
<form action="" method="post" id="funcionarios">
<table border="0" align="center" cellpadding="15" cellspacing="15">
<!-- Titulo -->
<tr>
<td colspan="2" class="titulo" align="center">Cadastro de Acessos</td>
</tr>
<!-- Codigo-->
<tr>
<td align="right">Código:</td>
<td>
<input type="text" name="idLoginCliente" id="idLoginCliente" value="<?php echo $loginClienteVO->getIdLoginCliente(); ?>" class="readonly" readonly>
</td>
</tr>
<!-- cliente-->
<tr>
<td>Selecione:</td>
<td>
<select name="idCliente">
<?php
if ($loginClienteVO->getIdCliente() != "") {
$idCliente = $loginClienteVO->getIdCliente();
} else {
$idCliente = $clienteVO->getIdCliente();
}
$sql = "SELECT * FROM cliente";
$rs = mysql_query($sql);
while ($conteudo = mysql_fetch_array($rs)) {
if ($idCliente == $conteudo["idCliente"]) {
echo "<option value=\"$conteudo[idCliente]\" selected>$conteudo[nomeCliente]</option>";
} else {
echo "<option value=\"$conteudo[idCliente]\">$conteudo[nomeCliente]</option>";
}
}
?>
</select>
</td>
</tr>
<!-- login -->
<tr>
<td>Login:</td>
<td><input type="text" name="login" id="login" required value="<?php echo $loginClienteVO->getLogin(); ?>" placeholder="nome do usuario" maxlength="25" size="25"></td>
</tr>
<!-- senha -->
<tr>
<td>Senha:</td>
<td><input type="text" name="senha" id="senha" required value="<?php echo $loginClienteVO->getSenha(); ?>" placeholder="senha do usuario" maxlength="25" size="25"></td>
</tr>
<!--Ativo-->
<tr>
<td>Ativo:</td>
<td>
<select name="ativo">
<?php
if ($loginClienteVO->getAtivo() == 1) {
echo "<option value=\"1\" selected>Sim</option>";
echo "<option value=\"2\">Não</option>";
} else if ($loginClienteVO->getAtivo() == 2) {
echo "<option value=\"1\">Sim</option>";
echo "<option value=\"2\" selected>Não</option>";
} else {
echo "<option value=\"1\" selected>Sim</option>";
echo "<option value=\"2\">Não</option>";
}
?>
</select>
<input type="submit" name="submit" value="Salvar">
<input type="submit" name="submit" value="Salvar">
<?php echo "<a href=\"downloadListaDadosAcesso.php\">Download da Lista </a>"; ?>
</td>
</tr>
</table>
</form>
<!--Formatação com dataTable-->
<script src="media/js/jquery.js"></script>
<script src="media/js/jquery.dataTables.min.js"></script>
<link rel="stylesheet" href="media/css/jquery.dataTables.css">
<script type="text/javascript">
$(document).ready(function () {
$('#tabela1').dataTable({
"scrollY": "300px",
"scrollCollapse": true,
"paging": false,
"oLanguage": {
"sUrl": "media/pt-br.txt"
}
});
});
</script>
<div style="width: 800px; margin: auto;">
<div id="demo">
<table cellpadding="0" cellspacing="0" border="0" class="display" id="tabela1" align="center">
<thead>
<tr>
<th>Código</th>
<th>Cliente</th>
<th>Usuário</th>
<th>Senha</th>
<th>Ativo</th>
<th>Editar</th>
</tr>
</thead>
<?php
$loginClienteDAO = new loginClienteDAO();
$listaLogin = $loginClienteDAO->getAll();
$clienteDAO = new clienteDAO();
$listaClientes = $clienteDAO->getAll(1);
for ($i = 0; $i < sizeof($listaLogin); $i++) {
$dadosLogin = $listaLogin[$i];
for ($z = 0; $z < sizeof($listaClientes); $z++) {
$dadosClientes = $listaClientes[$z];
if ($dadosClientes->getIdCliente() == $dadosLogin->getIdCliente()) {
$nomeCliente = $dadosClientes->getNomeCliente();
}
}
$ativo = $dadosLogin->getAtivo();
if ($ativo == 1) {
$nomeAtivo = "Sim";
} elseif ($ativo == 2) {
$nomeAtivo = "Não";
}
echo "
<tr>
<td align=\"center\" class=\"textoDataTable\">" . $dadosLogin->getIdLoginCliente() . "</td>
<td align=\"center\" class=\"textoDataTable\">" . $nomeCliente . "</td>
<td align=\"center\" class=\"textoDataTable\">" . $dadosLogin->getLogin() . "</td>
<td align=\"center\" class=\"textoDataTable\">" . $dadosLogin->getSenha() . "</td>
<td align=\"center\" class=\"textoDataTable\">" . $nomeAtivo . "</td>
<td align=\"center\"><a href=\"?red=dadosAcesso&par=" . $dadosLogin->getIdLoginCliente() . "\"><img src=\"img/lapis.png\"></a></td>
</tr>
";
}
?>
</table>
</div>
</div>
</body>
</html><file_sep>/VO/tipoComputadorVO.class.php
<?php
class tipoComputadorVO{
//atributos
private $idTipoComputador = null;
private $nomeTipoComputador = null;
//construtor
public function tipoComputadorVO(){
}
//get set
//idTipoComputador
public function getIdTipoComputador(){
return $this->idTipoComputador;
}
public function setIdTipoComputador($idTipoComputador){
$this->idTipoComputador = $idTipoComputador;
}
//nomeTipoComp
public function getNomeTipoComputador(){
return $this->nomeTipoComputador;
}
public function setNomeTipoComputador($nomeTipoComputador){
$this->nomeTipoComputador = $nomeTipoComputador;
}
}
?><file_sep>/VO/computadorVO.class.php
<?php
class computadorVO{
//atributos
private $idComputador = null;
private $nomeComputador = null;
private $numeroSerie = null;
private $idMarca = null;
private $idTipoComputador = null;
private $idSistemaOperacional = null;
private $idProcessador = null;
private $idMemoria = null;
private $ativo = null;
private $data = null;
private $observacao = null;
//get set
//idComputador
public function getIdComputador(){
return $this->idComputador;
}
public function setIdComputador($idComputador){
$this->idComputador = $idComputador;
}
//nomeComputador
public function getNomeComputador(){
return $this->nomeComputador;
}
public function setNomeComputador($nomeComputador){
$this->nomeComputador = $nomeComputador;
}
//numeroSerie
public function getNumeroSerie(){
return $this->numeroSerie;
}
public function setNumeroSerie($numeroSerie){
$this->numeroSerie = $numeroSerie;
}
//idMarca
public function getIdMarca(){
return $this->idMarca;
}
public function setIdMarca($idMarca){
$this->idMarca = $idMarca;
}
//idTipoComputador
public function getIdTipoComputador(){
return $this->idTipoComputador;
}
public function setIdTipoComputador($idTipoComputador){
$this->idTipoComputador = $idTipoComputador;
}
//idSistemaOperacional
public function getIdSistemaOperacional(){
return $this->idSistemaOperacional;
}
public function setIdSistemaOperacional($idSistemaOperacional){
$this->idSistemaOperacional = $idSistemaOperacional;
}
//idProcessador
public function getIdProcessador(){
return $this->idProcessador;
}
public function setIdProcessador($idProcessador){
$this->idProcessador = $idProcessador;
}
//idMemoria
public function getIdMemoria(){
return $this->idMemoria;
}
public function setIdMemoria($idMemoria){
$this->idMemoria = $idMemoria;
}
//ativo
public function getAtivo(){
return $this->ativo;
}
public function setAtivo($ativo){
$this->ativo = $ativo;
}
//data
public function getData(){
return $this->data;
}
public function setData($data){
$this->data = $data;
}
//observacao
public function getObservacao(){
return $this->observacao;
}
public function setObservacao($observacao){
$this->observacao = $observacao;
}
}//fim da classe
?>
<file_sep>/DAO/marcaDAO.class.php
<?php
class marcaDAO{
public function getAll(){
$objVO = new marcaVO();
$retorno = array();
$sql = "SELECT * FROM marca_computador";
$rs = mysql_query($sql);
while ($conteudo = mysql_fetch_array($rs)){
$objVO->setIdMarca($conteudo["idMarca"]);
$objVO->setMarca($conteudo["marca"]);
$retorno[] = clone $objVO;
}
return $retorno;
}
}//fim da classe
?>
<file_sep>/VO/estadoVO.class.php
<?php
class estadoVO{
//atributos
private $idEstado = null;
private $estado = null;
//construtor
public function estadoVO(){
}
//get e set
//idEstado
public function getIdEstado(){
return $this->idEstado;
}
public function setIdEstado($idEstado){
$this->idEstado = $idEstado;
}
//estado
public function getEstado(){
return $this->estado;
}
public function setEstado($estado){
$this->estado = $estado;
}
}
?><file_sep>/DAO/tipoAtendimentoDAO.class.php
<?php
class tipoAtendimentoDAO{
public function getAll(){
$objVO = new tipoAtendimentoVO();
$retorno = array();
$sql = "SELECT * FROM tipo_atendimento";
$rs = mysql_query($sql);
while($conteudo = mysql_fetch_array($rs)){
$objVO->setIdTipoAtendimento($conteudo["idTipoAtendimento"]);
$objVO->setTipo($conteudo["tipo"]);
$retorno[] = clone $objVO;
}
return $retorno;
}
}
?>
<file_sep>/VO/loginClienteVO.class.php
<?php
class loginClienteVO{
//atributos
private $idLoginCliente = null;
private $idCliente = null;
private $login = null;
private $senha = null;
private $ativo = null;
/**
* @return null
*/
public function getIdLoginCliente()
{
return $this->idLoginCliente;
}
/**
* @param null $idLoginCliente
*/
public function setIdLoginCliente($idLoginCliente)
{
$this->idLoginCliente = $idLoginCliente;
}
/**
* @return null
*/
public function getIdCliente()
{
return $this->idCliente;
}
/**
* @param null $idCliente
*/
public function setIdCliente($idCliente)
{
$this->idCliente = $idCliente;
}
/**
* @return null
*/
public function getLogin()
{
return $this->login;
}
/**
* @param null $login
*/
public function setLogin($login)
{
$this->login = $login;
}
/**
* @return null
*/
public function getSenha()
{
return $this->senha;
}
/**
* @param null $senha
*/
public function setSenha($senha)
{
$this->senha = $senha;
}
/**
* @return null
*/
public function getAtivo()
{
return $this->ativo;
}
/**
* @param null $ativo
*/
public function setAtivo($ativo)
{
$this->ativo = $ativo;
}
}//fim da classe
<file_sep>/DAO/atendimentoDAO.class.php
<?php
class atendimentoDAO {
//inserir
public function insert(atendimentoVO $objVO) {
$sql = sprintf("INSERT INTO atendimento (data, idTipoAtendimento, idUsuario) VALUES ('%s','%s','%s')",
$objVO->getData(),
$objVO->getIdTipoAtendimento(),
$objVO->getIdUsuario()
);
mysql_query($sql); //echo $sql."<br>";
$objVO->setIdAtendimento(mysql_insert_id());
return $objVO;
}
public function getAll() {
$objVO = new atendimentoVO();
$retorno = array();
$sql = "SELECT * FROM atendimento";
$rs = mysql_query($sql);
while ($conteudo = mysql_fetch_array($rs)) {
$objVO->setIdAtendimento($conteudo["idAtendimento"]);
$objVO->setData($conteudo["data"]);
$objVO->setIdTipoAtendimento($conteudo["idTipoAtendimento"]);
$objVO->setIdUsuario($conteudo["idUsuario"]);
$retorno[] = clone $objVO;
}
return $retorno;
}
public function getById($id) {
$objVO = new atendimentoVO();
$sql = sprintf('SELECT * FROM atendimento WHERE idAtendimento = "%s"', $id
);
$rs = mysql_query($sql);
while ($conteudo = mysql_fetch_array($rs)) {
$objVO->setIdAtendimento($conteudo["idAtendimento"]);
$objVO->setData($conteudo["data"]);
$objVO->setIdTipoAtendimento($conteudo["idTipoAtendimento"]);
$objVO->setIdUsuario($conteudo["idUsuario"]);
$return = clone $objVO;
}
return $return;
}
}
?>
<file_sep>/relatorioDoc.php
<?php
include_once './sessao.php';
//recupera o codigo
$idDocumento = $_GET["par"];
?>
<html>
<head>
<meta charset="UTF-8">
<title></title>
</head>
<body>
<form action="pdfDocumento.php" method="get">
<br><br><br>
<table align ="center" border="0">
<tr>
<td>
<input type="submit" name="submit" value="Gerar Relatório">
<input type="hidden" name="idDocumento" id="idDocumento" value="<?php echo $idDocumento; ?>">
</td>
</tr>
</table>
</form>
</body>
</html>
<file_sep>/pesquisaDevolucaoDoc.php
<?php
include_once './sessao.php';
//Conexao
include_once './Conexao/conexao.php';
//VO
include_once './VO/documentoVO.class.php';
include_once './VO/funcionarioVO.class.php';
include_once './VO/categoriaDocVO.class.php';
include_once './VO/devolucaoDocVO.class.php';
//DAO
include_once './DAO/funcionarioDAO.class.php';
include_once './DAO/documentoDAO.class.php';
include_once './DAO/devolucaoDocDAO.class.php';
include_once './DAO/categoriaDocDAO.class.php';
//recupera o id
$idDevolucao = $_GET["par"];
?>
<html>
<head>
<meta charset="UTF-8">
<title></title>
<link rel="stylesheet" type="text/css" href="css/fontes.css">
<script language="javascript" type="text/javascript" src="media/js/jquery.js"></script>
<script type="text/javascript" language="javascript" src="media/js/jquery.dataTables.js"></script>
<script language="javascript" type="text/javascript" src="media/js/jquery.validate.js"></script>
<!--Formatação com dataTable-->
<script src="media/js/jquery.js"></script>
<script src="media/js/jquery.dataTables.min.js"></script>
<link rel="stylesheet" href="media/css/jquery.dataTables.css">
<script type="text/javascript">
$(document).ready(function () {
$('#tabela1').dataTable({
"scrollY": "300px",
"scrollCollapse": true,
"paging": false,
"oLanguage": {
"sUrl": "media/pt-br.txt"
}
});
});
</script>
    </head>
    <body><br><br><br><br>
    <form action="#" method="get">
<br><br>
<div style="width: 1050px; margin: auto;">
<div id="demo">
<table cellpadding="0" cellspacing="0" border="0" class="display" id="tabela1" align="center">
<thead>
<tr>
<th>Código</th>
<th>Cadastrado Em</th>
<th>Funcionario</th>
<th>Categoria</th>
<th>Observações</th>
<th>Data Dev.</th>
</tr>
</thead>
<?php
$documentoDAO = new documentoDAO();
$listaDocumentos = $documentoDAO->getAll($idDevolucao);
$devolucaoDocDAO = new devolucaoDocDAO();
$funcionarioDAO = new funcionarioDAO();
$listaFuncionarios = $funcionarioDAO->getAll();
$categoriaDocDAO = new categoriaDocDAO();
$listaCategDoc = $categoriaDocDAO->getAll();
for ($i = 0; $i < sizeof($listaDocumentos); $i++) {
$dadosDocumentos = $listaDocumentos[$i];
$listaDevolucaoDoc = $devolucaoDocDAO->getAll($dadosDocumentos->getIdDocumento());
$dataCadastro = implode("/", array_reverse(explode("-", $dadosDocumentos->getData())));
for ($z = 0; $z < sizeof($listaDevolucaoDoc); $z++) {
$dadosDevDoc = $listaDevolucaoDoc[$z];
$dataDev = implode("/", array_reverse(explode("-", $dadosDevDoc->getData())));
for ($w = 0; $w < sizeof($listaFuncionarios); $w++) {
$dadosFuncionarios = $listaFuncionarios[$w];
if($dadosFuncionarios->getIdFuncionario() == $dadosDocumentos->getIdFuncionario()){
$nomeFuncionario = $dadosFuncionarios->getNomeFuncionario();
}
}
for ($q = 0; $q < sizeof($listaCategDoc); $q++) {
$dadosCatDoc = $listaCategDoc[$q];
if($dadosCatDoc->getIdCategoriaDoc() == $dadosDocumentos->getIdCategoriaDoc()){
$nomeCatDoc = $dadosCatDoc->getNomeCategoriaDoc();
}
}
echo "
<tr>
<td align=\"center\" class=\"textoDataTable\">" . $dadosDocumentos->getIdDocumento() . "</td>
<td align=\"center\" class=\"textoDataTable\">" . $dataCadastro . "</td>
<td align=\"center\" class=\"textoDataTable\">" . $nomeFuncionario . "</td>
<td align=\"center\" class=\"textoDataTable\">" . $nomeCatDoc . "</td>
<td align=\"center\" class=\"textoDataTable\">" .$dadosDevDoc->getObservacoes(). "</td>
<td align=\"center\" class=\"textoDataTable\">" .$dataDev. "</td>
</tr>
";
                            }
                        }
?>
</table></div></div>
</form>
</body>
</html><file_sep>/atendimentoCompleto.php
<?php
include_once './sessao.php';
error_reporting(0);
//Conexao
include_once './Conexao/conexao.php';
//VO
include_once './VO/atendimentoVO.class.php';
include_once './VO/clienteVO.class.php';
include_once './VO/atendimentoCompletoVO.class.php';
include_once './VO/categoriaVO.class.php';
include_once './VO/subCategoriaVO.class.php';
include_once './VO/tipoAtendimentoVO.class.php';
include_once './VO/statusVO.class.php';
include_once './VO/tempoVO.class.php';
include_once './VO/atendimentoTempoVO.class.php';
//DAO
include_once './DAO/atendimentoDAO.class.php';
include_once './DAO/tempoDAO.class.php';
include_once './DAO/atendimentoCompletoDAO.class.php';
include_once './DAO/atendimentoTempoDAO.class.php';
//funcoes
include_once './funcoes.php';
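//calcularMinutos() vem de funcoes.php, que não aparece neste trecho. Abaixo, um esboço hipotético
//e meramente ilustrativo do cálculo assumido (horários "H:i:s" de um mesmo dia); o nome com sufixo
//"Esboco" é fictício e não é usado pelo sistema.
function calcularMinutosEsboco($inicial, $final) {
    //strtotime interpreta "H:i:s" relativo à data de hoje; a diferença em segundos vira minutos inteiros
    $diferencaSegundos = strtotime($final) - strtotime($inicial);
    return (int) floor($diferencaSegundos / 60);
}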
//Obj VO
$atendimentoVO = new atendimentoVO();
$clienteVO = new clienteVO();
$atendimentoCompletoVO = new atendimentoCompletoVO();
$categoriaVO = new categoriaVO();
$subCategoriaVO = new subCategoriaVO();
$tipoAtendimentoVO = new tipoAtendimentoVO();
$statusVO = new statusVO();
$atendimentoTempoVO = new atendimentoTempoVO();
$tempoVO = new tempoVO();
//$usuario = $_SESSION["usuario"];//recupera o usuario logado
$data = date("d/m/Y"); //data e hora atual
$dataBd = implode("-", array_reverse(explode("/", $data))); //converte a data p/ salvar no bd
$horario = date("H:i:s", time()); //recupera o horario atual
$id = $_GET["par"]; //recupera o parametro do id
//tratando o request do editar do dataTable
if ($id == 0) {
//iniciar atendimento novo
} else {
//atendimento que vai editar
$atendimentoCompletoDAO = new atendimentoCompletoDAO();
$atendimentoCompletoVO = $atendimentoCompletoDAO->getById($id);
$atendimentoDAO = new atendimentoDAO();
$atendimentoVO = $atendimentoDAO->getById($id);
}
//tratando o botao salvar
echo "<script type=\"text/javascript\" src=\"javascript/funcoes.js\"></script>";
if (isset($_REQUEST["submit"]) && $_REQUEST["submit"] == "Finalizar Atendimento") {
//verificar se existe o id
if (isset($_REQUEST["idAtendimento"]) && $_REQUEST["idAtendimento"] == "") {
//setar os campos para novo atendimento
//tabela atendimento (data, idTipoAtendimento, idUsuario)
//************************************************************************
$atendimentoVO->setData($dataBd);
$atendimentoVO->setIdTipoAtendimento($_REQUEST["idTipoAtendimento"]);
$atendimentoVO->setIdUsuario(1);
//cadastrar na tabela atendimento
$atendimentoDAO = new atendimentoDAO();
$atendimentoDAO->insert($atendimentoVO);
//************************************************************************
//tabela tempo (horarioInicial, horarioFinal, tempoGastoMin)
$tempoVO->setHorarioInicial($_REQUEST["horarioInicial"]);
$tempoVO->setHorarioFinal($horario);
//chama a função para calcular o tempo
$inicial = $_REQUEST["horarioInicial"];
$final = $horario;
$tempoGastoMin = calcularMinutos($inicial, $final);
$tempoVO->setTempoGastoMin($tempoGastoMin);
//cadastrar na tabela tempo
$tempoDAO = new tempoDAO();
$tempoDAO->insert($tempoVO);
//************************************************************************
//tabela atendimento_tempo (idAtendimento, idTempo)
$ultimoIdAtend = $atendimentoVO->getIdAtendimento(); //busca o ultimo idAtendimento
$ultimoIdTemp = $tempoVO->getIdTempo(); //busca o ultimo idTempo
//seta os campos da tabela atendimento_tempo
$atendimentoTempoVO->setIdAtendimento($ultimoIdAtend);
$atendimentoTempoVO->setIdTempo($ultimoIdTemp);
//cadastrar na tabela atendimentoTempo
$atendimentoTempoDAO = new atendimentoTempoDAO();
$atendimentoTempoDAO->insert($atendimentoTempoVO);
//********************************************************************************
//tabela atendimento_completo
//(idAtendimentoTempo, idCliente, nomeFuncCliente, idCategoria, idSubCategoria, descricao, solucao, observacao, idStatus)
$ultimoIdTempo = $tempoVO->getIdTempo(); //busca o ultimo idTempo
$atendimentoCompletoVO->setIdAtendimentoTempo($ultimoIdTempo);
$atendimentoCompletoVO->setIdCliente($_REQUEST["idCliente"]);
$atendimentoCompletoVO->setNomeFuncCliente($_REQUEST["nomeFuncCliente"]);
$atendimentoCompletoVO->setIdCategoria($_REQUEST["idCategoria"]);
$atendimentoCompletoVO->setIdSubCategoria($_REQUEST["idSubCategoria"]);
$atendimentoCompletoVO->setDescricao($_REQUEST["descricao"]);
$atendimentoCompletoVO->setSolucao($_REQUEST["solucao"]);
$atendimentoCompletoVO->setObservacao($_REQUEST["observacao"]);
$atendimentoCompletoVO->setIdStatus($_REQUEST["idStatus"]);
$atendimentoCompletoDAO = new atendimentoCompletoDAO();
$atendimentoCompletoDAO->insert($atendimentoCompletoVO);
$atendimentoCompletoVO = new atendimentoCompletoVO();
//echo "<script>msg(1)</script>";
header("Location: site.php");
exit;
} else {
//atualizacao
//************************************************************************
//tabela tempo (horarioInicial, horarioFinal, tempoGastoMin)
$tempoVO->setHorarioInicial($_REQUEST["horarioInicial"]);
$tempoVO->setHorarioFinal($horario);
//chama a função para calcular o tempo
$inicial = $_REQUEST["horarioInicial"];
$final = $horario;
$tempoGastoMin = calcularMinutos($inicial, $final);
$tempoVO->setTempoGastoMin($tempoGastoMin);
//cadastrar na tabela tempo
$tempoDAO = new tempoDAO();
$tempoDAO->insert($tempoVO);
//************************************************************************
//tabela atendimento_tempo (idAtendimento, idTempo)
$ultimoIdTemp2 = $tempoVO->getIdTempo(); //busca o ultimo idTempo
//seta os campos da tabela atendimento_tempo
$atendimentoTempoVO->setIdAtendimento($_REQUEST["idAtendimento"]);
$atendimentoTempoVO->setIdTempo($ultimoIdTemp2);
//cadastrar na tabela atendimentoTempo
$atendimentoTempoDAO = new atendimentoTempoDAO();
$atendimentoTempoDAO->insert($atendimentoTempoVO);
//************************************************************************
//tabela atendimento_completo
//(idAtendimentoTempo, idCliente, nomeFuncCliente, idCategoria, idSubCategoria, descricao, solucao, observacao, idStatus)
$atendimentoCompletoVO->setIdAtendimentoTempo($_REQUEST["idAtendimentoTempo"]);
$atendimentoCompletoVO->setIdCliente($_REQUEST["idCliente"]);
$atendimentoCompletoVO->setNomeFuncCliente($_REQUEST["nomeFuncCliente"]);
$atendimentoCompletoVO->setIdCategoria($_REQUEST["idCategoria"]);
$atendimentoCompletoVO->setIdSubCategoria($_REQUEST["idSubCategoria"]);
$atendimentoCompletoVO->setDescricao($_REQUEST["descricao"]);
$atendimentoCompletoVO->setSolucao($_REQUEST["solucao"]);
$atendimentoCompletoVO->setObservacao($_REQUEST["observacao"]);
$atendimentoCompletoVO->setIdStatus($_REQUEST["idStatus"]);
$atendimentoCompletoDAO = new atendimentoCompletoDAO();
$atendimentoCompletoDAO->update($atendimentoCompletoVO);
$atendimentoCompletoVO = new atendimentoCompletoVO();
//echo "<script>msg(2)</script>";
header("Location: site.php");
exit;
}
}
?>
<html>
<head>
<meta charset="UTF-8">
<link rel="stylesheet" type="text/css" href="css/fontes.css">
<script language="javascript" type="text/javascript" src="media/js/jquery.js"></script>
<script language="javascript" type="text/javascript" src="media/js/jquery.validate.js"></script>
<title></title>
</head><br><br><br><br>
<form action="" method="post" id="atendimentoApagri">
<table border="0" align="center" cellpadding="5" cellspacing="5">
<!--Titulo-->
<tr>
<td colspan="6" align="center" class="titulo">Atendimento</td>
</tr>
<!--Codigo-->
<tr>
<td width="150" align="right">Código:</td>
<td><input type="text" name="idAtendimento" id="idAtendimento" size="15" maxlength="11" class="readonly" readonly value="<?php echo $atendimentoCompletoVO->getIdAtendimentoCompleto(); ?>"></td>
<!--hidden para setar o idAtendimentoTempo-->
<input type="hidden" name="idAtendimentoTempo" id="idAtendimentoTempo" value="<?php echo $atendimentoCompletoVO->getIdAtendimentoTempo(); ?>">
<!--Data -->
<td width="150" align="right">Data:</td>
<td><input type="text" name="data" id="data" size="15" maxlength="10" class="readonly" value="<?php echo $data; ?>" readonly></td>
<!--Usuario
<td width="150" align="right">Usuário:</td>
<td><input type="text" name="idUsuario" id="idUsuario" size="15" maxlength="10" class="readonly" value="<?php echo $usuario; ?> " readonly></td>
-->
</tr>
<!--Tipo Atendimento -->
<tr>
<td width="150" align="right">Atendimento:</td>
<td>
<select name="idTipoAtendimento">
<?php
$sql = "";
if ($atendimentoVO->getIdTipoAtendimento() == null) {
$sql = "SELECT * FROM tipo_atendimento";
} else {
$idTipoA = $atendimentoVO->getIdTipoAtendimento();
$sql = "SELECT * FROM tipo_atendimento WHERE idTipoAtendimento = $idTipoA";
}
$rs = mysql_query($sql);
while ($conteudo = mysql_fetch_array($rs)) {
$idTipoAtendimento = $tipoAtendimentoVO->getIdTipoAtendimento();
if ($idTipoAtendimento == $conteudo["idTipoAtendimento"]) {
echo "<option value=\"$conteudo[idTipoAtendimento]\" selected>$conteudo[tipo]</option>";
} else {
echo "<option value=\"$conteudo[idTipoAtendimento]\">$conteudo[tipo]</option>";
}
}
?>
</select>
</td>
<!--horario inicial -->
<td width="150" align="right">Horário Inicial:</td>
<td><input type="text" name="horarioInicial" id="horarioInicial" size="15" maxlength="10" class="readonly" value="<?php echo $horario; ?> " readonly></td>
</tr>
</table>
<br>
<table border="0" align="center" cellpadding="5" cellspacing="5">
<tr>
<!--idCliente -->
<td width="150" align="right">Cliente:</td>
<td colspan="4">
<select name="idCliente">
<?php
$sql = "";
if ($atendimentoCompletoVO->getIdCliente() == "") {
$sql = "SELECT * FROM cliente";
} else {
$idClienteA = $atendimentoCompletoVO->getIdCliente();
$sql = "SELECT * FROM cliente WHERE idCliente = $idClienteA";
}
$rs = mysql_query($sql);
while ($conteudo = mysql_fetch_array($rs)) {
$idCliente = $clienteVO->getIdCliente();
if ($idCliente == $conteudo["idCliente"]) {
echo "<option value=\"$conteudo[idCliente]\" selected>$conteudo[nomeCliente]</option>";
} else {
echo "<option value=\"$conteudo[idCliente]\">$conteudo[nomeCliente]</option>";
}
}
?>
</select>
</td>
<!--nomeFuncCliente -->
<td width="150" align="right">Nome:</td>
<td colspan="4"><input type="text" required name="nomeFuncCliente" id="nomeFuncCliente" size="50" maxlength="50" value="<?php echo $atendimentoCompletoVO->getNomeFuncCliente(); ?>"></td>
</tr>
</table>
<br>
<table border="0" align="center" cellpadding="5" cellspacing="5">
<!--Categoria-->
<tr>
<td width="100" align="right">Categoria:</td>
<td>
<select name="idCategoria">
<?php
$sql = "";
if ($atendimentoCompletoVO->getIdCategoria() == "") {
$sql = "SELECT * FROM categoria_atendimento";
} else {
$idCatagoriaA = $atendimentoCompletoVO->getIdCategoria();
$sql = "SELECT * FROM categoria_atendimento WHERE idCategoria = $idCatagoriaA";
}
$rs = mysql_query($sql);
while ($conteudo = mysql_fetch_array($rs)) {
$idCategoria = $categoriaVO->getIdCategoria();
if ($idCategoria == $conteudo["idCategoria"]) {
echo "<option value=\"$conteudo[idCategoria]\" selected>$conteudo[nomeCategoria]</option>";
} else {
echo "<option value=\"$conteudo[idCategoria]\">$conteudo[nomeCategoria]</option>";
}
}
?>
                </select>
                </td>
                <!--SubCategoria-->
<td width="100" align="right">Sub Categoria:</td>
<td colspan="4">
<select name="idSubCategoria">
<?php
$sql = "";
if ($atendimentoCompletoVO->getIdSubCategoria() == "") {
$sql = "SELECT * FROM sub_categoria";
} else {
$idSubCategoriaA = $atendimentoCompletoVO->getIdSubCategoria();
$sql = "SELECT * FROM sub_categoria WHERE idSubcategoria = $idSubCategoriaA";
}
$rs = mysql_query($sql);
while ($conteudo = mysql_fetch_array($rs)) {
$idSubCategoria = $subCategoriaVO->getIdSubCategoria();
if ($idSubCategoria == $conteudo["idSubCategoria"]) {
echo "<option value=\"$conteudo[idSubCategoria]\" selected>$conteudo[nomeSubCategoria]</option>";
} else {
echo "<option value=\"$conteudo[idSubCategoria]\">$conteudo[nomeSubCategoria]</option>";
}
}
?>
</select>
</td>
</tr>
<tr>
<!--descricao -->
<td width="100" align="right">Descrição:</td>
<td colspan="4"><textarea name="descricao" required id="descricao" rows="6" cols="70" maxlength="250"><?php echo $atendimentoCompletoVO->getDescricao(); ?></textarea></td>
</tr>
<!--solucao -->
<tr>
<td width="100" align="right">Solução:</td>
<td colspan="4"><textarea name="solucao" required id="solucao" rows="6" cols="70" maxlength="250"><?php echo $atendimentoCompletoVO->getSolucao(); ?></textarea></td>
</tr>
<!--observacao -->
<tr>
<td width="100" align="right">Observação:</td>
<td colspan="4"><textarea name="observacao" id="observacao" rows="6" cols="70" maxlength="250"><?php echo $atendimentoCompletoVO->getObservacao(); ?></textarea></td>
</tr>
<!--status -->
<tr>
<td width="150" align="right">Status:</td>
<td>
<select name="idStatus">
<?php
if ($atendimentoCompletoVO->getIdStatus() != "") {
$idStatus = $atendimentoCompletoVO->getIdStatus();
} else {
$idStatus = $statusVO->getIdStatus();
}
$sql = "SELECT * FROM status";
$rs = mysql_query($sql);
while ($conteudo = mysql_fetch_array($rs)) {
if ($idStatus == $conteudo["idStatus"]) {
echo "<option value=\"$conteudo[idStatus]\" selected>$conteudo[status]</option>";
} else {
echo "<option value=\"$conteudo[idStatus]\">$conteudo[status]</option>";
}
}
?>
</select>
</td>
<!--Botao finalizar e cancelar-->
<td>
<input type="submit" name="submit" value="Finalizar Atendimento">
</td>
<input type="hidden" name="horarioInicialF" id="horarioInicialF" value="<?php echo $horario; ?>">
</tr>
</table>
</form>
<script>
$(function () {
$("select[name=idCategoria]").change(function () {
idCategoria = $(this).val();
if (idCategoria === '')
return false;
resetaCombo('idSubCategoria');
$.getJSON('subCategorias.php?idCategoria=' + idCategoria, function (data) {
// console.log(data);
var option = new Array();
$.each(data, function (i, obj) {
option[i] = document.createElement('option');
$(option[i]).attr({value: obj.id});
$(option[i]).append(obj.nome);
$("select[name='idSubCategoria']").append(option[i]);
});
});
});
});
function resetaCombo(el) {
$("select[name='" + el + "']").empty();
var option = document.createElement('option');
$(option).attr({value: ''});
$(option).append('--');
$("select[name='" + el + "']").append(option);
}
</script>
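<!--
Esboço hipotético (apenas ilustrativo) do endpoint subCategorias.php consumido pelo $.getJSON acima;
assume-se que ele devolve um array JSON de objetos {id, nome} filtrado por idCategoria, algo como:

    include_once './Conexao/conexao.php';
    $idCategoria = (int) $_GET['idCategoria'];
    $rs = mysql_query("SELECT idSubCategoria, nomeSubCategoria FROM sub_categoria WHERE idCategoria = $idCategoria");
    $saida = array();
    while ($linha = mysql_fetch_array($rs)) {
        $saida[] = array('id' => $linha['idSubCategoria'], 'nome' => $linha['nomeSubCategoria']);
    }
    echo json_encode($saida);
-->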
</body>
</html>
<file_sep>/VO/unidadeVO.class.php
<?php
class unidadeVO{
//atributos
private $idUnidade = null;
private $nomeUnidade = null;
private $idEstado = null;
private $ativo = null;
//get e set
//idUnidade
public function getIdUnidade(){
return $this->idUnidade;
}
public function setIdUnidade($idUnidade){
$this->idUnidade = $idUnidade;
}
//nomeUnidade
public function getNomeUnidade(){
return $this->nomeUnidade;
}
public function setNomeUnidade($nomeUnidade){
$this->nomeUnidade = $nomeUnidade;
}
//idEstado
public function getIdEstado(){
return $this->idEstado;
}
public function setIdEstado($idEstado){
$this->idEstado = $idEstado;
}
//ativo
public function getAtivo(){
return $this->ativo;
}
public function setAtivo($ativo){
$this->ativo = $ativo;
}
}
?>
<file_sep>/pdfDetalhadoAtendimento.php
<?php
ini_set('default_charset', 'UTF-8');
//jpGraph
include_once './jpgraph/src/jpgraph.php';
include_once './jpgraph/src/jpgraph_bar.php';
//fpdf
include_once './fpdf13/fpdf.php';
define('FPDF_FONTPATH', './fpdf13/font/');
//conexao
include_once './Conexao/conexao.php';
//funcoes
include_once './funcoes.php';
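//retira_acentos() vem de funcoes.php, que não aparece neste trecho. Esboço hipotético, apenas
//ilustrativo, da ideia (aproximar caracteres acentuados para ASCII nos rótulos do JpGraph);
//o nome com sufixo "Esboco" é fictício e não é usado pelo relatório.
function retiraAcentosEsboco($texto) {
    //iconv com //TRANSLIT tenta transliterar para ASCII; assume entrada em UTF-8
    $convertido = iconv('UTF-8', 'ASCII//TRANSLIT', $texto);
    return $convertido === false ? $texto : $convertido;
}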
//variáveis
$dataInicial = $_REQUEST['dataInicial'];
$dataFinal = $_REQUEST['dataFinal'];
$idAtendimento = $_REQUEST['idAtendimento'];
$dataI = implode("/", array_reverse(explode("-", $dataInicial)));
$dataF = implode("/", array_reverse(explode("-", $dataFinal)));
$nomeAtendimento = "";
if ($idAtendimento == 1) {
$nomeAtendimento = "Suporte";
} elseif ($idAtendimento == 2) {
$nomeAtendimento = "Desenvolvimento";
} elseif ($idAtendimento == 3) {
$nomeAtendimento = "Prestação Serviço";
}
$sql = "
SELECT a.idAtendimento, a.data, ca.nomeCategoria, sb.nomeSubCategoria, ta.tipo, SUM(t.tempoGastoMin) AS somaTempo FROM tipo_atendimento AS ta INNER JOIN
atendimento AS a ON a.idTipoAtendimento = ta.idTipoAtendimento INNER JOIN
atendimento_tempo AS at ON at.idAtendimento = a.idAtendimento INNER JOIN
atendimento_completo AS ac ON ac.idAtendimentoTempo = at.idAtendimentoTempo INNER JOIN
categoria_atendimento AS ca ON ca.idCategoria = ac.idCategoria INNER JOIN
sub_categoria AS sb ON sb.idSubCategoria = ac.idSubCategoria INNER JOIN
tempo AS t ON t.idTempo = at.idTempo
WHERE a.data >= '$dataInicial' AND a.data <= '$dataFinal' AND a.idTipoAtendimento = $idAtendimento GROUP BY sb.idSubCategoria;
";
$rs = mysql_query($sql);
$num_linhas = mysql_num_rows($rs);
//trata o caso em que a consulta não retorna registros (evita gerar um relatório vazio)
if ($num_linhas == 0) {
header("Location: site.php");
} else {
$datax = array(); //dados do eixo x do grafico
$datay = array(); //dados do eixo y do grafico
$i = 0;
while ($conteudo = mysql_fetch_array($rs)) {
//retorna os dados armazenado no arrays
$datax[$i] = $conteudo['nomeSubCategoria']; //dados do eixo x
$datay[$i] = $conteudo['somaTempo']; //dados do eixo y
$i++; //incremento
}
for ($i = 0; $i < sizeof($datax); $i++) {
$nomeCorreto[$i] = retira_acentos($datax[$i]);
}
//excluir grafico da pasta se existir
$caminhoImgExc = 'img/relatorios/detalhadoAtend.png';
if (file_exists($caminhoImgExc)) {
unlink($caminhoImgExc);
}
//*******************************************GERANDO GRAFICO
//conf tamanho do grafico
$graph = new Graph(1200, 800); //larg/altura
$graph->img->SetMargin(100, 0, 50, 150); //margem esq/dir, sup/inf
$graph->SetScale("textlin");
$graph->SetMarginColor("Lightblue");
$graph->SetShadow();
//conf. eixo y
$graph->yaxis->title->Set("Minutos"); //titulo
$graph->yaxis->SetTitleMargin(60);//ajusta a margin do eixo y
$graph->yaxis->title->SetFont(FF_ARIAL, FS_NORMAL, 16); //tipo da fonte e tamanho
$graph->xaxis->SetFont(FF_ARIAL, FS_NORMAL, 9);
$graph->yaxis->SetFont(FF_ARIAL, FS_NORMAL, 10); //tipo da fonte e tamanho
//dados eixo x
$graph->xaxis->SetTickLabels($nomeCorreto); //dados do eixo x
$graph->xaxis->SetLabelAngle(45); //angulo
//dados eixo y
$bplot = new BarPlot($datay); //dados do eixo y
$bplot->SetWidth(0.7); //espessura das barras do gráfico
$bplot->SetColor("white"); //cor
$graph->Add($bplot); //adiciona
$graph->Stroke('./img/relatorios/detalhadoAtend.png'); //cria o grafico
//***********************************************GERANDO PDF
//Novo documento PDF com orientação P - Retrato (Picture) que pode ser também L - Paisagem (Landscape)
$pdf = new FPDF('L', 'cm', 'A4');
$pdf->Open();
$pdf->AddPage();
$pdf->Image('img/apagri.jpg', 2.5, 1, 3.3, 1.6);
$pdf->Image('img/inceres.jpg', 23, 1, 3.3, 1.7);
$pdf->SetFont('arial', 'B', 18);
$tituloRel = utf8_decode("Relatório dos Atendimentos - " . $nomeAtendimento);
$pdf->Cell(25, 1.5, $tituloRel, "B", 1, 'C');
//Periodo
$pdf->SetFont('arial', '', 12); // Definindo Fonte
$pdf->SetMargins(0, 0);
$pdf->SetXY(17.5, 18);
$stringPeriodo = utf8_decode("Período: " . $dataI . " - " . $dataF);
$pdf->Cell(10, 0.8, $stringPeriodo, 0, 0, 'C');
$pdf->Image('img/relatorios/detalhadoAtend.png', 4.5, 3, 20, 15); //margem, tamanho
$pdf->Output("relatorio_atendimentos_".$nomeAtendimento."_" . $dataI . " - " . $dataF . ".pdf", "D");
$pdf->Close();
}<file_sep>/VO/marcaVO.class.php
<?php
class marcaVO{
//atributos
private $idMarca = null;
private $marca = null;
//contrutor
public function marcaVO(){
}
//get set
//idMarca
public function getIdMarca(){
return $this->idMarca;
}
public function setIdMarca($idMarca){
$this->idMarca = $idMarca;
}
//marca
public function getMarca(){
return $this->marca;
}
public function setMarca($marca){
$this->marca = $marca;
}
}
?>
<file_sep>/VO/clienteVO.class.php
<?php
class clienteVO {
private $idCliente = null;
private $nomeCliente = null;
private $email = null;
private $contato = null;
private $telefone = null;
private $celular = null;
private $endereco = null;
private $cidade = null;
private $idEstado = null;
private $cep = null;
private $idTipoCliente = null;
private $ativo = null;
private $data = null;
private $apagri = null;
//idCliente
public function getIdCliente() {
return $this->idCliente;
}
public function setIdCliente($idCliente) {
$this->idCliente = $idCliente;
}
//nomeCliente
public function getNomeCliente() {
return $this->nomeCliente;
}
public function setNomeCliente($nomeCliente) {
$this->nomeCliente = $nomeCliente;
}
//email
public function getEmail() {
return $this->email;
}
public function setEmail($email) {
$this->email = $email;
}
//contato
public function getContato() {
return $this->contato;
}
public function setContato($contato) {
$this->contato = $contato;
}
//telefone
public function getTelefone() {
return $this->telefone;
}
public function setTelefone($telefone) {
$this->telefone = $telefone;
}
//celular
public function getCelular() {
return $this->celular;
}
public function setCelular($celular) {
$this->celular = $celular;
}
//endereco
public function getEndereco() {
return $this->endereco;
}
public function setEndereco($endereco) {
$this->endereco = $endereco;
}
//cidade
public function getCidade() {
return $this->cidade;
}
public function setCidade($cidade) {
$this->cidade = $cidade;
}
//idEstado
public function getIdEstado(){
return $this->idEstado;
}
public function setIdEstado($idEstado){
$this->idEstado = $idEstado;
}
//cep
public function getCep() {
return $this->cep;
}
public function setCep($cep) {
$this->cep = $cep;
}
//idTipoCliente
public function getIdTipoCliente() {
return $this->idTipoCliente;
}
public function setIdTipoCliente($idTipoCliente) {
$this->idTipoCliente = $idTipoCliente;
}
//ativo
public function getAtivo() {
return $this->ativo;
}
public function setAtivo($ativo) {
$this->ativo = $ativo;
}
//data
public function getData() {
return $this->data;
}
public function setData($data) {
$this->data = $data;
}
//apagri
public function getApagri() {
return $this->apagri;
}
public function setApagri($apagri) {
$this->apagri = $apagri;
}
}
?>
<file_sep>/DAO/statusDAO.class.php
<?php
class statusDAO {
public function getAll(){
$objVO = new statusVO();
$retorno = array();
$sql = "SELECT * FROM status";
$rs = mysql_query($sql);
while($conteudo = mysql_fetch_array($rs)){
$objVO->setIdStatus($conteudo["idStatus"]);
$objVO->setStatus($conteudo["status"]);
$retorno[] = clone $objVO;
}
return $retorno;
}
}
?>
<file_sep>/DAO/loginClienteDAO.class.php
<?php
class loginClienteDAO{
//insert
public function insert(loginClienteVO $objVO) {
$sql = sprintf("INSERT INTO login_cliente (idCliente, login, senha, ativo)
VALUES ('%s','%s','%s','%s')",
$objVO->getIdCliente(),
$objVO->getLogin(),
$objVO->getSenha(),
$objVO->getAtivo()
);
mysql_query($sql);
$objVO->setIdLoginCliente(mysql_insert_id());
return $objVO;
}
//update
public function update(loginClienteVO $objVO) {
$sql = sprintf("UPDATE login_cliente SET idCliente = '%s', login = '%s',
senha = '%s', ativo = '%s' WHERE idLoginCliente = '%s'",
$objVO->getIdCliente(),
$objVO->getLogin(),
$objVO->getSenha(),
$objVO->getAtivo(),
$objVO->getIdLoginCliente()
);
mysql_query($sql);
}
public function getById($idLogin) {
        $objVO = new loginClienteVO();
        $retorno = null;
$sql = sprintf('SELECT * FROM login_cliente WHERE idLoginCliente = "%s"', $idLogin);
$rs = mysql_query($sql);
while ($conteudo = mysql_fetch_array($rs)) {
$objVO->setIdLoginCliente($conteudo["idLoginCliente"]);
$objVO->setIdCliente($conteudo["idCliente"]);
$objVO->setLogin($conteudo["login"]);
$objVO->setSenha($conteudo["senha"]);
$objVO->setAtivo($conteudo["ativo"]);
$retorno = clone $objVO;
}
return $retorno;
}
public function getAll() {
$objVO = new loginClienteVO();
$retorno = array();
$sql = "SELECT * FROM login_cliente ";
$rs = mysql_query($sql);
while ($conteudo = mysql_fetch_array($rs)) {
$objVO->setIdLoginCliente($conteudo["idLoginCliente"]);
$objVO->setIdCliente($conteudo["idCliente"]);
$objVO->setLogin($conteudo["login"]);
$objVO->setSenha($conteudo["senha"]);
$objVO->setAtivo($conteudo["ativo"]);
$retorno[] = clone $objVO;
}
return $retorno;
}
}
<file_sep>/DAO/usuarioDAO.class.php
<?php
class usuarioDAO{
//buscar id do usuario logado
public function retornaId($usuario){
        $id = null;
$sql = sprintf("SELECT idUsuario FROM usuario WHERE usuario = '%s'",$usuario);
$rs = mysql_query($sql);
$linhasAfetadas = mysql_num_rows($rs);
if($linhasAfetadas == 1){
while($conteudo = mysql_fetch_array($rs)){
$id = $conteudo["idUsuario"];
}
}
return $id;
}
public function getAll(){
$objVO = new usuarioVO();
$retorno = array();
$sql = "SELECT * FROM usuario";
$rs = mysql_query($sql);
while($conteudo = mysql_fetch_array($rs)){
$objVO->setIdUsuario($conteudo["idUsuario"]);
$objVO->setNome($conteudo["nomeUsuario"]);
$objVO->setUsuario($conteudo["usuario"]);
$objVO->setSenha($conteudo["senha"]);
$retorno[] = clone $objVO;
}
return $retorno;
}
}
?>
<file_sep>/VO/categoriaDocVO.class.php
<?php
class categoriaDocVO {
private $idCategoriaDoc = null;
private $nomeCategoriaDoc = null;
//idCategoriaDoc
public function getIdCategoriaDoc() {
return $this->idCategoriaDoc;
}
public function setIdCategoriaDoc($idCategoriaDoc) {
$this->idCategoriaDoc = $idCategoriaDoc;
}
//nomeCategoriaDoc
public function getNomeCategoriaDoc() {
return $this->nomeCategoriaDoc;
}
public function setNomeCategoriaDoc($nomeCategoriaDoc) {
$this->nomeCategoriaDoc = $nomeCategoriaDoc;
}
}
<file_sep>/documento.php
<?php
include_once './sessao.php';
include_once './Conexao/conexao.php';
//VO
include_once './VO/documentoVO.class.php';
include_once './VO/categoriaDocVO.class.php';
include_once './VO/funcionarioVO.class.php';
//DAO
include_once './DAO/documentoDAO.class.php';
include_once './DAO/funcionarioDAO.class.php';
include_once './DAO/categoriaDocDAO.class.php';
//OBJ VO
$documentoVO = new documentoVO();
$categoriaDocVO = new categoriaDocVO();
$funcionarioVO = new funcionarioVO();
//recupera o parametro do id
$idDoc = $_GET["par"];
if ($idDoc == 0) {
//novo cadastro
$data = date("d/m/Y"); //data e hora atual
$dataBd = implode("-", array_reverse(explode("/", $data))); //converte a data p/ salvar no bd
} else {
//buscar os dados
$documentoDAO = new documentoDAO();
$documentoVO = $documentoDAO->getById($idDoc);
}
echo "<script type=\"text/javascript\" src=\"javascript/funcoes.js\"></script>";
//tratando o botao salvar
if (isset($_REQUEST["submit"]) && $_REQUEST["submit"] == "Salvar") {
//seta os campos
$data = $_REQUEST["data"];
$dataBd = implode("-", array_reverse(explode("/", $data))); //converte a data p/ salvar no bd
$documentoVO->setData($dataBd);
$documentoVO->setIdFuncionario($_REQUEST["idFuncionario"]);
$documentoVO->setTipoDocumento($_REQUEST["tipoDocumento"]);
$documentoVO->setIdCategoriaDoc($_REQUEST["idCategoriaDoc"]);
$documentoVO->setDepartamento($_REQUEST["departamento"]);
$documentoVO->setAc($_REQUEST["ac"]);
$documentoVO->setDescricaoDoc($_REQUEST["descricaoDoc"]);
$documentoVO->setCaminhoAnexoArquivo("vazio"); //implementar futuramente
//verifica se existe o id
if (isset($_REQUEST["idDocumento"]) && $_REQUEST["idDocumento"] == "") {
//novo cadastro
$documentoDAO = new documentoDAO();
$documentoDAO->insert($documentoVO);
echo "<script>msg(1)</script>";
$documentoVO = new documentoVO();
} else {
//atualização
$documentoDAO = new documentoDAO();
$documentoVO->setIdDocumento($_REQUEST["idDocumento"]);
$documentoDAO->update($documentoVO);
echo "<script>msg(2)</script>";
$documentoVO = new documentoVO();
}
} elseif (isset($_REQUEST["submit"]) && $_REQUEST["submit"] == "Cancelar") {
header("Location: site.php");
exit;
}
?>
<html>
<head><br><br><br><br>
<meta charset="UTF-8">
<link rel="stylesheet" type="text/css" href="css/fontes.css">
<title></title>
</head>
<body>
<form action="" method="post" id="documento">
<table border="0" align="center" cellpadding="15" cellspacing="15">
<!-- Titulo -->
<tr>
<td colspan="4" class="titulo" align="center">Cadastro de Documentos</td>
</tr>
<tr>
<!--codigo-->
<td align="right">Código:</td>
<td><input type="text" name="idDocumento" id="idDocumento" size="15" maxlength="11" value="<?php echo $documentoVO->getIdDocumento(); ?>" class="readonly" readonly>
<!--data-->
Data:
<input type="text" name="data" id="data" size="15" maxlength="10" value="<?php
if ($idDoc != 0) {
$dataExibe = implode("/", array_reverse(explode("-", $documentoVO->getData())));
echo $dataExibe;
} else {
echo $data;
}
?>" class="readonly" readonly></td>
</tr>
<tr>
<td align="right">Cadastrado Por:</td>
<td>
<select name="idFuncionario">
<?php
$sql = "SELECT * FROM funcionario WHERE idDepartamento = 1 AND ativo = 1";
$rs = mysql_query($sql);
$idFuncionario = $documentoVO->getIdFuncionario();
while ($conteudo = mysql_fetch_array($rs)) {
if ($idFuncionario == $conteudo["idFuncionario"]) {
echo "<option value=\"$conteudo[idFuncionario]\" selected>$conteudo[nomeFuncionario]</option>";
} else {
echo "<option value=\"$conteudo[idFuncionario]\">$conteudo[nomeFuncionario]</option>";
}
}
?>
</select>
<!-- Tipo Documento -->
Tipo Doc.:
<select name="tipoDocumento">
<?php
if ($documentoVO->getTipoDocumento() == 1) {
echo "<option value=\"1\" selected>Enviado</option>";
echo "<option value=\"2\">Recebido</option>";
} elseif ($documentoVO->getTipoDocumento() == 2) {
echo "<option value=\"1\">Enviado</option>";
echo "<option value=\"2\" selected>Recebido</option>";
} else {
echo "<option value=\"1\" selected>Enviado</option>";
echo "<option value=\"2\">Recebido</option>";
}
?>
</select>
</td>
</tr>
<!-- categoria-->
<tr>
<td align="right">Categoria:</td>
<td>
<select name="idCategoriaDoc">
<?php
$sql = "SELECT * FROM categoria_doc";
$rs = mysql_query($sql);
$idCategoriaDoc = $documentoVO->getIdCategoriaDoc();
while ($conteudo = mysql_fetch_array($rs)) {
if ($idCategoriaDoc == $conteudo["idCategoriaDoc"]) {
echo "<option value=\"$conteudo[idCategoriaDoc]\" selected>$conteudo[nomeCategoriaDoc]</option>";
} else {
echo "<option value=\"$conteudo[idCategoriaDoc]\">$conteudo[nomeCategoriaDoc]</option>";
}
}
?>
</select>
</td>
</tr>
<!-- departamento-->
<tr>
<td align="right">Departamento:</td>
<td><input type="text" name="departamento" id="departamento" value="<?php echo $documentoVO->getDepartamento(); ?>"></td>
</tr>
<!-- ac -->
<tr>
<td align="right">A/C:</td>
<td><input type="text" name="ac" id="ac" value="<?php echo $documentoVO->getAc(); ?>"></td>
</tr>
<!-- descricao -->
<tr>
<td align="right">Descrição <br> Documentos:</td>
<td colspan="4"><textarea name="descricaoDoc" required id="descricaoDoc" rows="9" cols="50" maxlength="700"><?php echo $documentoVO->getDescricaoDoc(); ?></textarea></td>
</tr>
<!-- botoes -->
<tr>
<td colspan="4" align="center">
<input type="submit" name="submit" value="Salvar">
</td>
</tr>
</table>
</form>
<!--Formatação com dataTable-->
<script src="media/js/jquery.js"></script>
<script src="media/js/jquery.dataTables.min.js"></script>
<link rel="stylesheet" href="media/css/jquery.dataTables.css">
<script type="text/javascript">
$(document).ready(function () {
$('#tabela1').dataTable({
"scrollY": "300px",
"scrollCollapse": true,
"paging": false,
"oLanguage": {
"sUrl": "media/pt-br.txt"
}
});
});
</script>
<div style="width: 1150px; margin: auto;">
<div id="demo">
<table cellpadding="0" cellspacing="0" border="0" class="display" id="tabela1" align="center">
<thead>
<tr>
<th>Código</th>
<th>Tipo</th>
<th>Categoria</th>
<th>Descrição</th>
<th>Registrar Dev.</th>
                    <th>Visualizar Dev.</th>
<th>Baixar PDF</th>
<th>Editar</th>
</tr>
</thead>
<?php
$documentoDAO = new documentoDAO();
$listaDocumentos = $documentoDAO->getAll(0); //recebe 0 para listar tudo
$funcionarioDAO = new funcionarioDAO();
$listaFuncionarios = $funcionarioDAO->getAll();
$categoriaDocDAO = new categoriaDocDAO();
$listaCategDoc = $categoriaDocDAO->getAll();
for ($i = 0; $i < sizeof($listaDocumentos); $i++) {
$dadosDocumentos = $listaDocumentos[$i];
$dataCadastro = implode("/", array_reverse(explode("-", $dadosDocumentos->getData())));
for ($w = 0; $w < sizeof($listaFuncionarios); $w++) {
$dadosFuncionarios = $listaFuncionarios [$w];
if ($dadosFuncionarios->getIdFuncionario() == $dadosDocumentos->getIdFuncionario()) {
$nomeFuncionario = $dadosFuncionarios->getNomeFuncionario();
}
}
for ($q = 0; $q < sizeof($listaCategDoc); $q++) {
$dadosCatDoc = $listaCategDoc[$q];
if ($dadosCatDoc->getIdCategoriaDoc() == $dadosDocumentos->getIdCategoriaDoc()) {
$nomeCatDoc = $dadosCatDoc->getNomeCategoriaDoc();
}
}
//exibe o tipo de documento
$tipoDoc = "";
if ($dadosDocumentos->getTipoDocumento() == 1) {
$tipoDoc = "Enviado";
} elseif ($dadosDocumentos->getTipoDocumento() == 2) {
$tipoDoc = "Recebido";
}
//formata a descrição para exibir
$tamanhoString = strlen($dadosDocumentos->getDescricaoDoc());
$exibeString = "";
if ($tamanhoString > 50) {
$exibeString = substr($dadosDocumentos->getDescricaoDoc(), 0, 50);
} else {
$exibeString = $dadosDocumentos->getDescricaoDoc();
}
echo "
<tr>
<td align=\"center\" class=\"textoDataTable\">" . $dadosDocumentos->getIdDocumento() . "</td>
<td align=\"center\" class=\"textoDataTable\">" . $tipoDoc . "</td>
<td align=\"center\" class=\"textoDataTable\">" . $nomeCatDoc . "</td>
<td align=\"center\" class=\"textoDataTable\">" . $exibeString . "</td>
<td align=\"center\"><a href=\"?red=listaDoc&par=" . $dadosDocumentos->getIdDocumento() . "\"><img src=\"img/detalhes.png\"></a></td>
<td align=\"center\"><a href=\"?red=pesqDevDoc&par=" . $dadosDocumentos->getIdDocumento() . "\"><img src=\"img/view.png\"></a></td>
<td align=\"center\"><a href=\"?red=pdfDoc&par=" . $dadosDocumentos->getIdDocumento() . "\"><img src=\"img/pdf.png\"></a></td>
<td align=\"center\"><a href=\"?red=documentos&par=" . $dadosDocumentos->getIdDocumento() . "\"><img src=\"img/lapis.png\"></a></td>
</tr>
";
}
?>
</table>
</div>
</div>
</body>
</html>
<file_sep>/VO/funcCompVO.class.php
<?php
class funcCompVO {
private $idFuncComp = null;
private $idFuncionario = null;
private $idComputador = null;
private $data = null;
//get e set
//idFuncComp
public function getIdFuncComp() {
return $this->idFuncComp;
}
public function setIdFuncComp($idFuncComp) {
$this->idFuncComp = $idFuncComp;
}
//idFuncionario
public function getIdFuncionario() {
return $this->idFuncionario;
}
public function setIdFuncionario($idFuncionario) {
$this->idFuncionario = $idFuncionario;
}
//idComputador
public function getIdComputador() {
return $this->idComputador;
}
public function setIdComputador($idComputador) {
$this->idComputador = $idComputador;
}
//data
public function getData() {
return $this->data;
}
public function setData($data) {
$this->data = $data;
}
}
a34c4d8908912a9fdc1c38c5044a722657704acf | ["JavaScript", "PHP"] | 77 | PHP | moisesolimpio/SistemaAtendimentoV2 | 4815f049f4de683b31c6707890b2b6cde502089f | 2782ab9685faa9767e0f796bcab7fd7e350ed866 | refs/heads/main
<file_sep># 4Balls_Procedural
Using Processing to complete a 4 balls challenge in procedural programming format
<file_sep>import processing.core.PApplet;
public class fourBalls extends PApplet{
public static final int WIDTH = 640;
public static final int HEIGHT = 480;
public static final int DIAMETER = 10;
int ball1X=0;
int ball2X=0;
int ball3X=0;
int ball4X=0;
public static void main(String args[]){PApplet.main("fourBalls",args);}
@Override
public void settings() {
super.settings();
size(WIDTH, HEIGHT);
}
@Override
public void draw() {
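        // Note: there is no background() call here, so each frame draws on top of the
        // previous one and the balls leave visible trails as they move across the screen.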
drawCircle1(1,1);
drawCircle2(2,2);
drawCircle3(3,3);
drawCircle4(4,4);
}
private void drawCircle1(int y,int speed) {
ellipse(ball1X, (y * HEIGHT) / 5, DIAMETER, DIAMETER);
ball1X+=speed;
}
private void drawCircle2(int y,int speed) {
ellipse(ball2X, (y * HEIGHT) / 5, DIAMETER, DIAMETER);
ball2X+=speed;
}
private void drawCircle3(int y,int speed) {
ellipse(ball3X, (y * HEIGHT) / 5, DIAMETER, DIAMETER);
ball3X+=speed;
}
private void drawCircle4(int y,int speed) {
ellipse(ball4X, (y * HEIGHT) / 5, DIAMETER, DIAMETER);
ball4X+=speed;
}
}
8c27f1acdc6a6c0eecd7a82a4b74d987cc24cb55 | ["Markdown", "Java"] | 2 | Markdown | Suvarna221B/4Balls_Procedural | 8b5933e4313995fb9761609e36fa9a458c20d466 | caaabe0ed16ee87e24d876b8e2fb2f50a1951b04 | refs/heads/master
<repo_name>Eelis96/codeigniter-harjoitus<file_sep>/application/views/templates/header.php
<html>
<head>
        <title>Projekti</title>
<link rel="stylesheet" href="<?php echo base_url(); ?>/assets/css/bootstrap.min.css">
<link rel="stylesheet" href="<?php echo base_url(); ?>/assets/css/style.css">
</head>
<body>
<nav class="navbar navbar-expand-lg navbar-dark bg-primary">
<a class="navbar-brand" href="<?php echo base_url(); ?>">CI-Harjoitus</a>
<div class="collapse navbar-collapse" id="navbarColor01">
<ul class="nav navbar-nav">
<li class="nav-item">
<a class="nav-link" href="<?php echo base_url(); ?>">Etusivu</a>
</li>
<li class="nav-item">
<a class="nav-link" href="<?php echo base_url(); ?>about">Tietoa</a>
</li>
<li class="nav-item">
<a class="nav-link" href="<?php echo base_url(); ?>categories">Kategoriat</a>
</li>
<li class="nav-item">
<a class="nav-link" href="<?php echo base_url(); ?>posts">Viestit</a>
</li>
</ul>
<ul class="nav navbar-nav navbar-right">
<li>
<a class="nav-link" href="<?php echo base_url(); ?>posts/create">Lähetä Viesti</a>
</li>
<li>
<a class="nav-link" href="<?php echo base_url(); ?>categories/create">Luo Kategoria</a>
</li>
</ul>
</div>
</nav>
<div class="container">
5b959f07412f2a33ddda44da7437ea160c3f30a6 | ["PHP"] | 1 | PHP | Eelis96/codeigniter-harjoitus | ff6653991c7dd741590a4109f147ec87b68ee5be | 7cc2a21a60e45979da676af7392ce05162afc491 | refs/heads/master
<file_sep>angular.module('simpf').controller('simpfController', ['$scope', '$injector',
function ($scope, $injector) {
var $interval = $injector.get('$interval');
var simpfService = $injector.get('simpfService');
var stop;
var times = 0;
$scope.startWatchDogTimer = function () {
if (angular.isDefined(stop)) {
return;
}
stop = $interval(function () {
times++;
if (!simpfService.isActive()) {
return;
}
simpfService.ping()
.then(function checkResult(data) {
$scope.times = times * 5;
$scope.result = data.result;
if (data.result == 'FAILED') {
$scope.stopWatchDogTimer();
simpfService.logout();
}
})
['catch'](function () {
$scope.stopWatchDogTimer();
});
}, 5000);
};
$scope.stopWatchDogTimer = function () {
if (angular.isDefined(stop)) {
$interval.cancel(stop);
stop = undefined;
}
};
$scope.$on('$destroy', function () {
$scope.stopWatchDogTimer();
});
$scope.startWatchDogTimer();
}]);
<file_sep>package jp.neuroinf.sim;
abstract public class SimPFVmDatabase {
abstract public SimPFVmNode getVmNode(String session_id) throws SimPFException;
}
<file_sep># guacamole-auth-simpf
Simulation Platform Authentication Extension for Guacamole<file_sep>-- MySQL dump 10.16 Distrib 10.1.41-MariaDB, for Linux (x86_64)
--
-- Host: localhost Database: vmsession
-- ------------------------------------------------------
-- Server version 10.1.41-MariaDB
/*!40101 SET @OLD_CHARACTER_SET_CLIENT=@@CHARACTER_SET_CLIENT */;
/*!40101 SET @OLD_CHARACTER_SET_RESULTS=@@CHARACTER_SET_RESULTS */;
/*!40101 SET @OLD_COLLATION_CONNECTION=@@COLLATION_CONNECTION */;
/*!40101 SET NAMES utf8 */;
/*!40103 SET @OLD_TIME_ZONE=@@TIME_ZONE */;
/*!40103 SET TIME_ZONE='+00:00' */;
/*!40014 SET @OLD_UNIQUE_CHECKS=@@UNIQUE_CHECKS, UNIQUE_CHECKS=0 */;
/*!40014 SET @OLD_FOREIGN_KEY_CHECKS=@@FOREIGN_KEY_CHECKS, FOREIGN_KEY_CHECKS=0 */;
/*!40101 SET @OLD_SQL_MODE=@@SQL_MODE, SQL_MODE='NO_AUTO_VALUE_ON_ZERO' */;
/*!40111 SET @OLD_SQL_NOTES=@@SQL_NOTES, SQL_NOTES=0 */;
--
-- Table structure for table `connection`
--
DROP TABLE IF EXISTS `connection`;
/*!40101 SET @saved_cs_client = @@character_set_client */;
/*!40101 SET character_set_client = utf8 */;
CREATE TABLE `connection` (
`sid` varchar(32) NOT NULL,
`hostname` varchar(512) NOT NULL,
`protocol` varchar(32) NOT NULL,
`port` int(11) NOT NULL,
`password` varchar(256) NOT NULL,
`status` varchar(32) NOT NULL,
`timestamp` int(11) NOT NULL,
PRIMARY KEY (`sid`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4;
/*!40101 SET character_set_client = @saved_cs_client */;
--
-- Dumping data for table `connection`
--
LOCK TABLES `connection` WRITE;
/*!40000 ALTER TABLE `connection` DISABLE KEYS */;
/*!40000 ALTER TABLE `connection` ENABLE KEYS */;
UNLOCK TABLES;
--
-- Table structure for table `parameter`
--
DROP TABLE IF EXISTS `parameter`;
/*!40101 SET @saved_cs_client = @@character_set_client */;
/*!40101 SET character_set_client = utf8 */;
CREATE TABLE `parameter` (
`sid` varchar(32) NOT NULL,
`name` varchar(128) NOT NULL,
`value` varchar(4096) NOT NULL,
PRIMARY KEY (`sid`,`name`),
KEY `sid` (`sid`),
CONSTRAINT `parameter_ibfk_1` FOREIGN KEY (`sid`) REFERENCES `connection` (`sid`) ON DELETE CASCADE
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4;
/*!40101 SET character_set_client = @saved_cs_client */;
--
-- Dumping data for table `parameter`
--
LOCK TABLES `parameter` WRITE;
/*!40000 ALTER TABLE `parameter` DISABLE KEYS */;
/*!40000 ALTER TABLE `parameter` ENABLE KEYS */;
UNLOCK TABLES;
/*!40103 SET TIME_ZONE=@OLD_TIME_ZONE */;
/*!40101 SET SQL_MODE=@OLD_SQL_MODE */;
/*!40014 SET FOREIGN_KEY_CHECKS=@OLD_FOREIGN_KEY_CHECKS */;
/*!40014 SET UNIQUE_CHECKS=@OLD_UNIQUE_CHECKS */;
/*!40101 SET CHARACTER_SET_CLIENT=@OLD_CHARACTER_SET_CLIENT */;
/*!40101 SET CHARACTER_SET_RESULTS=@OLD_CHARACTER_SET_RESULTS */;
/*!40101 SET COLLATION_CONNECTION=@OLD_COLLATION_CONNECTION */;
/*!40111 SET SQL_NOTES=@OLD_SQL_NOTES */;
-- Dump completed on 2019-09-27 21:43:16
<file_sep><?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0
http://maven.apache.org/maven-v4_0_0.xsd">
<modelVersion>4.0.0</modelVersion>
<groupId>jp.neuroinf.sim</groupId>
<artifactId>guacamole-auth-simpf</artifactId>
<packaging>jar</packaging>
<version>1.4.0</version>
<name>guacamole-auth-simpf</name>
<url>https://sim.neuroinf.jp/</url>
<properties>
<project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
</properties>
<build>
<plugins>
<!-- Written for Java 1.8 -->
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-compiler-plugin</artifactId>
<version>3.3</version>
<configuration>
<source>1.8</source>
<target>1.8</target>
<compilerArgs>
<arg>-Xlint:all</arg>
<arg>-Werror</arg>
</compilerArgs>
<fork>true</fork>
</configuration>
</plugin>
<!-- Copy dependencies prior to packaging -->
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-dependency-plugin</artifactId>
<version>2.10</version>
<executions>
<execution>
<id>unpack-dependencies</id>
<phase>prepare-package</phase>
<goals>
<goal>unpack-dependencies</goal>
</goals>
<configuration>
<includeScope>runtime</includeScope>
<outputDirectory>${project.build.directory}/classes</outputDirectory>
</configuration>
</execution>
</executions>
</plugin>
<!-- Assembly plugin - for easy distribution -->
<plugin>
<artifactId>maven-assembly-plugin</artifactId>
<version>2.5.3</version>
<configuration>
<finalName>${project.artifactId}-${project.version}</finalName>
<appendAssemblyId>false</appendAssemblyId>
<descriptors>
<descriptor>src/main/assembly/dist.xml</descriptor>
</descriptors>
</configuration>
<executions>
<execution>
<id>make-dist-archive</id>
<phase>package</phase>
<goals>
<goal>single</goal>
</goals>
</execution>
</executions>
</plugin>
<!-- JS/CSS Minification Plugin -->
<plugin>
<groupId>com.samaxes.maven</groupId>
<artifactId>minify-maven-plugin</artifactId>
<version>1.7.5</version>
<executions>
<execution>
<id>default-cli</id>
<configuration>
<charset>UTF-8</charset>
<webappSourceDir>${basedir}/src/main/resources</webappSourceDir>
<webappTargetDir>${project.build.directory}/classes</webappTargetDir>
<cssSourceDir>/</cssSourceDir>
<cssTargetDir>/</cssTargetDir>
<cssFinalFile>simpf.css</cssFinalFile>
<cssSourceIncludes>
<cssSourceInclude>**/*.css</cssSourceInclude>
</cssSourceIncludes>
<jsSourceDir>/</jsSourceDir>
<jsTargetDir>/</jsTargetDir>
<jsFinalFile>simpf.js</jsFinalFile>
<jsSourceIncludes>
<jsSourceInclude>**/*.js</jsSourceInclude>
</jsSourceIncludes>
<!-- Do not minify and include tests -->
<jsSourceExcludes>
<jsSourceExclude>**/*.test.js</jsSourceExclude>
</jsSourceExcludes>
<jsEngine>CLOSURE</jsEngine>
<!-- Disable warnings for JSDoc annotations -->
<closureWarningLevels>
<misplacedTypeAnnotation>OFF</misplacedTypeAnnotation>
<nonStandardJsDocs>OFF</nonStandardJsDocs>
</closureWarningLevels>
</configuration>
<goals>
<goal>minify</goal>
</goals>
</execution>
</executions>
</plugin>
</plugins>
</build>
<dependencies>
<!-- Guacamole common API -->
<dependency>
<groupId>org.apache.guacamole</groupId>
<artifactId>guacamole-common</artifactId>
<version>1.4.0</version>
<scope>system</scope>
<systemPath>${basedir}/lib/guacamole-common-1.4.0.jar</systemPath>
</dependency>
<!-- Guacamole Extension API -->
<dependency>
<groupId>org.apache.guacamole</groupId>
<artifactId>guacamole-ext</artifactId>
<version>1.4.0</version>
<scope>system</scope>
<systemPath>${basedir}/lib/guacamole-ext-1.4.0.jar</systemPath>
</dependency>
<!-- Jersey - JAX-RS Implementation -->
<dependency>
<groupId>javax.ws.rs</groupId>
<artifactId>jsr311-api</artifactId>
<version>1.1.1</version>
<scope>provided</scope>
</dependency>
</dependencies>
</project>
<file_sep>package jp.neuroinf.sim;
import java.util.HashMap;
import org.apache.guacamole.protocol.GuacamoleConfiguration;
public class SimPFVmNode {
private String hostname;
private String protocol;
private int port;
private String password;
private HashMap<String, String> parameters;
public SimPFVmNode(String hostname, String protocol, int port, String password) {
this.hostname = hostname;
this.protocol = protocol;
this.port = port;
        this.password = password;
this.parameters = new HashMap<String, String>();
}
public final void setParameter(String name, String value) {
this.parameters.put(name, value);
}
public final String getProtocol() {
return this.protocol;
}
public final String getHostname() {
return this.hostname;
}
public final int getPort() {
return this.port;
}
public final String getPassword() {
return this.password;
}
public final GuacamoleConfiguration getConfiguration() {
GuacamoleConfiguration config = new GuacamoleConfiguration();
config.setProtocol(this.protocol);
config.setParameter("hostname", this.hostname);
config.setParameter("port", String.valueOf(this.port));
config.setParameter("password", this.password);
this.parameters.forEach((key, value) -> config.setParameter(key, value));
return config;
}
}
<file_sep>package jp.neuroinf.sim.rest;
import java.util.HashMap;
import java.util.Map;
import javax.ws.rs.GET;
import javax.ws.rs.Path;
import javax.ws.rs.Produces;
import javax.ws.rs.core.MediaType;
import org.apache.guacamole.GuacamoleException;
import org.apache.guacamole.environment.Environment;
import org.apache.guacamole.environment.LocalEnvironment;
import jp.neuroinf.sim.SimPFProperties;
@Produces(MediaType.APPLICATION_JSON)
public class SimPFRest {
private final String hostname;
private final String pingCommand;
public SimPFRest(String host) throws GuacamoleException {
Environment environment = LocalEnvironment.getInstance();
this.hostname = host;
this.pingCommand = environment.getRequiredProperty(SimPFProperties.PING_COMMAND);
}
@GET
@Path("isalive")
public Map<String, String> ping() {
final Map<String, String> ret = new HashMap<>();
boolean response = pingCommand();
ret.put("host", this.hostname);
ret.put("result", response ? "SUCCESS" : "FAILED");
return ret;
}
private boolean pingCommand() {
try {
String cmd = this.pingCommand + " " + this.hostname;
Process proc = Runtime.getRuntime().exec(cmd);
proc.waitFor();
return (proc.exitValue() == 0);
} catch (Exception e) {
return false;
}
}
}
<file_sep>package jp.neuroinf.sim;
import org.apache.guacamole.properties.StringGuacamoleProperty;
public class SimPFProperties {
/**
* this class should not be instantiated
*/
private SimPFProperties() {
}
/**
* mysql hostname
*/
public static final StringGuacamoleProperty MYSQL_HOSTNAME = new StringGuacamoleProperty() {
@Override
public String getName() {
return "simpf-mysql-hostname";
}
};
/**
* mysql port
*/
public static final StringGuacamoleProperty MYSQL_PORT = new StringGuacamoleProperty() {
@Override
public String getName() {
return "simpf-mysql-port";
}
};
/**
* mysql database
*/
public static final StringGuacamoleProperty MYSQL_DATABASE = new StringGuacamoleProperty() {
@Override
public String getName() {
return "simpf-mysql-database";
}
};
/**
* mysql username
*/
public static final StringGuacamoleProperty MYSQL_USERNAME = new StringGuacamoleProperty() {
@Override
public String getName() {
return "simpf-mysql-username";
}
};
/**
* mysql password
*/
public static final StringGuacamoleProperty MYSQL_PASSWORD = new StringGuacamoleProperty() {
@Override
public String getName() {
return "simpf-mysql-password";
}
};
/**
* ping command
*/
public static final StringGuacamoleProperty PING_COMMAND = new StringGuacamoleProperty() {
@Override
public String getName() {
return "simpf-ping-command";
}
};
}
2a5c482d6cb2569f2b08d9999290461aaeae6c3c | ["SQL", "JavaScript", "Markdown", "Maven POM", "Java"] | 8 | JavaScript | neuroinformatics/guacamole-auth-simpf | f817c3b012ca91f1a228850b39d00ac2c4af21b8 | f3c36b7beb74eae401af361a7f9e463d8d84b686 | refs/heads/master
<repo_name>lenincasco/ComunidadInformatica<file_sep>/server.js
var http = require('http'),
server = http.createServer()
server.listen(8080)
// Declaración normal de Servidor NodeJs
console.log('Servidor Escuchando en puerto 8080')
// Declaramos io como instancia de socket.io escuchando al servidor que creamos
var io = require('socket.io').listen(server)
// Cantidad de usuarios conectados
var ursConectados = 0
// Para almacenar los usuarios que se registren
var usuarios = []
// Para almacenar los mensajes que se envien
var mensajes = []
// Evento que se produce cuando se conecta un cliente al servidor
io.sockets.on('connection', function(socket) {
console.log('Nueva Socket Conectada ' + socket.id)
ursConectados++
// Emitir a todos la cantidad de usuarios conectados
io.sockets.emit('actualizarCantidad', ursConectados)
// Con este for recorremos cada uno de los mensajes enviados
// y los emitimos a la socket que se acaba de conectar
for(var ind = 0; ind < mensajes.length; ind++) {
var usr = mensajes[ind].usuario
var msj = mensajes[ind].mensaje
var time = mensajes[ind].timestamp
// emitir usuario y mensaje a la socket conectada
socket.emit('msjCon', msj, usr, time)
}
// Cuando se de el evento 'nombre' recibiremos el nombre del cliente
// y lo almacenamos en 'usuarios' y tambien en la propiedad username de la socket
socket.on('nombre', function(nombre) {
for (var i = 0; i < usuarios.length; i++) {
if (usuarios[i] === nombre) {
delete usuarios[i]
}
}
usuarios = usuarios.filter(Boolean)
usuarios.push(nombre)
socket.username = nombre
})
// Evento que devuelve el nombre de la socket que pregunta.
socket.on('getNombre', function() {
nombre = socket.username
socket.emit('usuario', socket.username)
console.log('nombre: ' + socket.username)
})
// Evento que recibe un mensaje y el usuario que lo envia
// guardamos el mensaje y actualizamos el nombre del usuario
// Emitimos a todas las sockets el mensaje y el usuario que lo envio
socket.on('mensaje', function(mensaje, usuario) {
m = {
usuario: usuario,
mensaje: mensaje,
timestamp: (new Date()).getTime()
}
mensajes.push(m)
usuario = socket.username
time = m.timestamp
io.sockets.emit('mensaje', mensaje, usuario, time)
})
socket.on('disconnect', function() {
ursConectados--
io.sockets.emit('actualizarCantidad', ursConectados)
console.log('Quedan Conectados' + ursConectados)
})
})<file_sep>/js/client.js
// funciones utiles
util = {
// mensajes sin leer
unread: 0,
focus: true,
// agregar ceros al inicio
zeroPad: function (digits, n) {
n = n.toString()
while (n.length < digits)
n = '0' + n
return n
},
salir: function() {
var nombre = $('#nom')
var logout = $('#salir')
if (localStorage.userName) {
localStorage.removeItem('userName')
nombre.val('')
nombre.attr('disabled', false)
nombre.focus()
logout.hide()
}
},
timeString: function (date) {
if (date == null) {
// si el tiempo es nulo, usar el tiempo actual
date = new Date();
} else if ((date instanceof Date) === false) {
// si es un timestamp, se imterpreta
date = new Date(date);
}
var minutes = date.getMinutes().toString()
      var hours = date.getHours()
      var txt = hours >= 12 ? 'PM' : 'AM'
      // convertir el valor de 24 horas a reloj de 12 horas (0 y 12 se muestran como 12)
      hours = hours % 12
      if (hours === 0) {
        hours = 12
      }
      return this.zeroPad(2, hours) + ":" + this.zeroPad(2, minutes) + ' ' + txt
},
isBlank: function(text) {
var blank = /^\s*$/
return (text.match(blank) !== null)
},
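    // Escapa '&' y '<' para que el texto del usuario no pueda inyectar HTML,
    // y luego reemplaza los emoticones conocidos por elementos <span> con estilo.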
renderMessage: function(mensaje) {
mensaje = mensaje.replace(/&(?!\s)|</g, function (s) {
if (s == '&') {
return '&'
} else {
return '<'
}
})
return mensaje.replace(/(:\)|:\(|:p|:P|:D|:o|:O|;\)|8\)|B\||>:\(|:\/|:'\(|3:\)|o:\)|O:\)|:\*|<3|\^_\^|-_-|o.O|>.<|:v|:V|:3|\(y\)|\(Y\))/g, '<span title="$1" class="emoticon"></span>')
},
updateTitle: function() {
if (this.unread) {
document.title = '(' + this.unread.toString() + ') chat'
} else {
document.title = 'Chat'
}
}
}
// Cuando el documento este listo
$(function() {
// Nos conectamos a nuestro servidor Nodejs
var socket = io.connect('http://localhost:8080')
// Almacenar nombre del usuario
var user
// Obtenemos el nom para el campo donde va el nombre (id="nom")
var nombre = $('#nom')
// Obtenemos el input mensaje que es el campo del mensaje (id="mensaje")
var mensaje = $('#mensaje')
var logout = $('#salir')
$('#salir').on('click', function() {
util.salir()
})
// detectar el blur y el focus en el window
$(window).on('blur', function() {
util.focus = false
util.updateTitle()
})
$(window).on('focus', function() {
util.focus = true
util.unread = 0
util.updateTitle()
})
// si ya se habia conectado y por alguna razon recargo la pagina volvemos a poner su usario
// el cual esta almacenado localmente
// Comprobamos si no es null, para que no nos ponga un objeto nulo en el campo nombre
if (localStorage.userName && localStorage.userName !== 'null') {
nombre.val(localStorage.userName)
user = localStorage.userName
socket.emit('nombre', user)
nombre.attr('disabled', true)
mensaje.focus()
logout.show()
}
// Cuando pierda el foco el campo nombre
nombre.on('focusout', function() {
// Validar si se ha escrito algo
if ($(this).val()) {
// se desabilita el campo para no cambiar el nombre de suario
$(this).attr('disabled', true)
// Si se escribio
// almacenamos localmente el nombre del usuario
user = nombre.val()
// Hacemos un llamado al servidor con la funcion 'nombre' y le pasamos el nombre
socket.emit('nombre', user)
// Una vez logueado mostrar el boton de salir
logout.show()
// si en local no tenemos almacenado el nombre de usuario, se almacena
if (!localStorage.userName) {
localStorage.userName = user
}
}
})
// Cuando obtenga el enfoque el campo mensaje
mensaje.on('focus', function() {
// Comprobamos si ya tenemos un nombre en el campo
if(!nombre.val()) {
nombre.focus()
}
})
//Cuando se da enter la caja de mensaje
$('#mensaje').on('keyup', function(e) {
if(e.which === 13) {
e.preventDefault()
// Si el campo nombre no esta vacio
if (nombre.val() && !util.isBlank(mensaje.val())) {
// Enviamos el mensaje al servidor por la funcion 'mensaje'
socket.emit('mensaje', mensaje.val(), user)
mensaje.val('')
}
}
})
//Cuando se de el evento mensaje
socket.on('mensaje', function(mensaje, usuario, time) {
if(!util.focus) util.unread++
util.updateTitle()
var sonido = document.getElementById('pop')
sonido.play()
$('#messages').prepend('\
<li>\
<div class="avatar">\
<a href="http://twitter.com/' + usuario + '" title="@' + usuario + '" target="_blank"><img src="https://api.twitter.com/1/users/profile_image?screen_name=' + usuario + '&size=normal" alt="@' + usuario + '" height="48" width="48"></a>\
</div>\
<div class="text">\
<a href="http://twitter.com/' + usuario + '" title="@' + usuario + '" target="_blank">@' + usuario + '</a>\
<time>' + util.timeString(time) + '</time>\
<p>' + util.renderMessage(mensaje) + '</p>\
</div>\
</li>')
})
//Este evento hace lo mismo que la funcion mensaje pero, se da cuando se conecta un usuario nuevo
socket.on('msjCon', function(mensaje, usuario, time) {
$('#messages').prepend('\
<li>\ <div class="avatar">\
<a href="http://twitter.com/' + usuario + '" title="@' + usuario + '" target="_blank"><img src="https://api.twitter.com/1/users/profile_image?screen_name=' + usuario + '&size=normal" alt="@' + usuario + '" height="48" width="48"></a>\
</div>\
<div class="text">\
<a href="http://twitter.com/' + usuario + '" title="@' + usuario + '" target="_blank">@' + usuario + '</a>\
<time>' + util.timeString(time) + '</time>\
<p>' + util.renderMessage(mensaje) + '</p>\
</div>\
</li>')
})
socket.on('actualizarCantidad', function(cantidad) {
$('#cantU').text(cantidad)
})
})
e6485ec7f2000568e1195e38f6e583de9f655f74 | ["JavaScript"] | 2 | JavaScript | lenincasco/ComunidadInformatica | 3a3609ebd7364423fd763932aa76bdee74b49bec | 2d156f06b036ec2e328954f2eeb54194b0c6948c | refs/heads/master
<repo_name>jordenh/DE2VTT<file_sep>/csrc/utilities.h
#ifndef UTILITIES_H_
#define UTILITIES_H_
#define MAX_TOKENS 100
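// Wire-protocol command codes: each command travels as a single byte, and the
// enum order must stay in sync with the Android-side Command enum and the
// numeric constants used in middleman.py (e.g. SEND_MAP = 2, DISCONNECT_DEV = 11).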
typedef enum command{
CONNECT,
DISCONNECT,
SEND_MAP,
SEND_TOKEN,
GET_DM,
RELEASE_DM,
MOVE_TOKEN,
HANDSHAKE,
PASS_MSG,
UPDATE_ALIAS,
OUTPUT_TOKEN_INFO,
DISCONNECT_DEV,
REMOVE_TOKEN,
GET_DM_ID
} command;
char * IntToCharBuf(unsigned int inputInt, unsigned int numChars);
#endif /* UTILITIES_H_ */
<file_sep>/javasrc/src/org/ubc/de2vtt/fragments/WINGFragment.java
package org.ubc.de2vtt.fragments;
import java.util.LinkedList;
import java.util.List;
import org.ubc.de2vtt.comm.Command;
import org.ubc.de2vtt.comm.Received;
import android.app.Fragment;
public abstract class WINGFragment extends Fragment {
private Command[] accept;
public abstract boolean passReceived(Received r);
public enum FragDrawerId {
TableTopFragDrawerId,
ManageTokenFragDrawerId,
GameConfigFragDrawerId,
SendImageFragDrawerId,
PassMessageFragDrawerId,
BulletinFragDrawerId,
DieRollFragDrawerId,
ConnectionFragDrawerId
}
public Command[] commandsAccepted() {
if (accept != null) {
return accept.clone();
} else {
return new Command[0];
}
}
/**
* This method must be called before commandAccepted
*/
protected List<Command> setAcceptedCommands(Command... cmds) {
List<Command> l = new LinkedList<Command>();
for (Command c : cmds) {
l.add(c);
		}
		// remember the accepted commands so commandsAccepted() can report them
		accept = cmds.clone();
		return l;
}
}
<file_sep>/csrc/timer.c
#include <stdio.h>
#include <stdlib.h>
#include "io.h"
#include "sys/alt_timestamp.h"
#include "sys/alt_alarm.h"
#include "system.h"
#include "timer.h"
/* Ask gord why "freq = alt_timestamp_freq();" isnt working?
// Test code from lab
void timer_test(void) {
int freq;
int cycles;
float duration;
int ticks_start;
int ticks_end;
int ticks_per_s;
int ticks_duration;
int timer_period;
int status;
int done;
printf("Timers\n");
printf(" Sys Clock Timer\n");
ticks_per_s = alt_ticks_per_second();
printf("Tick Freq: %d\n", ticks_per_s);
printf(" Recording starting ticks\n");
ticks_start = alt_nticks();
printf(" Sleeping for 5 seconds\n");
usleep(5000000);
printf(" Recording ending ticks\n");
ticks_end = alt_nticks();
ticks_duration = ticks_end -ticks_start;
duration = (float) ticks_duration / (float) ticks_per_s;
printf(" The program slept for %d ticks (%f seconds)\n\n", ticks_duration,
duration);
printf(" Timestamp Timer\n");
freq = alt_timestamp_freq();
printf(" CPU Freq: %d\n", freq);
printf(" Resetting Timestamp timer\n");
alt_timestamp_start();
printf(" ...Timing the print of this statement...\n");
cycles = alt_timestamp();
duration = (float) cycles / (float) freq;
printf(" It took %d cycles (%f seconds) to print the statement\n\n",
cycles, duration);
printf(" Hardware-Only Timer\n");
printf(" Setting timer period to 5 seconds.\n");
timer_period = 5 * CLOCK_FREQ;
// Setting the period registers must be done in 2 steps as they are only 16 bits wide
IOWR_16DIRECT(HAL_SYSTEM_TIMER_BASE, 8, timer_period & 0xFFFF); // less significant word
IOWR_16DIRECT(HAL_SYSTEM_TIMER_BASE,12, timer_period >> 16); // more significant word
printf(" Stopping Timer\n");
status = IORD_16DIRECT(HAL_SYSTEM_TIMER_BASE, 0); // read status registers
// Write the control registers
if(status & 0x2) {
IOWR_16DIRECT(HAL_SYSTEM_TIMER_BASE, 4, 1 << 3); // stop the timer if it was started
}
printf(" Starting Timer\n");
IOWR_16DIRECT(HAL_SYSTEM_TIMER_BASE, 4, 1 << 2); // start the timer
printf(" Waiting for timer to expire...\n");
done = 0;
while(! done) {
status = IORD_16DIRECT(HAL_SYSTEM_TIMER_BASE, 0); // read status registers
done = status & 0x1;
}
printf(" 5 seconds timer is done\n");
} */
void initHardwareTimer() {
setHardwareTimerPeriod(CLOCK_FREQ/30);
}
// Period is in clock cycles
// ie 5 seconds would be 5 * CLOCK_FREQ
void setHardwareTimerPeriod(int period)
{
IOWR_16DIRECT(HAL_SYSTEM_TIMER_BASE, 8, period & 0xFFFF); // less significant word
IOWR_16DIRECT(HAL_SYSTEM_TIMER_BASE,12, period >> 16); // more significant word
}
int isHardwareTimerRunning(void)
{
int status = IORD_16DIRECT(HAL_SYSTEM_TIMER_BASE, 0);
return status & 0x2; // not totally sure on this
}
int hasHardwareTimerExpired(void)
{
return IORD_16DIRECT(HAL_SYSTEM_TIMER_BASE, 0);
}
void startHardwareTimer(void)
{
IOWR_16DIRECT(HAL_SYSTEM_TIMER_BASE, 4, 1 << 2);
}
<file_sep>/csrc/bmp.h
#ifndef __BMP_H__
#define __BMP_H__
#include<stdio.h>
#include<stdlib.h>
#include "vga.h"
#include "sd_card.h"
#include "utilities.h"
#define BYTES_PER_PIXEL 3
#define DE2_BYTES_PER_PIXEL 2
typedef struct {
unsigned short int type; /* Magic identifier */
unsigned int size; /* File size in bytes */
unsigned short int reserved1, reserved2;
unsigned int offset; /* Offset to image data, bytes */
} HEADER;
typedef struct {
unsigned int size; /* Header size in bytes */
int width,height; /* Width and height of image */
unsigned short int planes; /* Number of color planes */
unsigned short int bits; /* Bits per pixel */
unsigned int compression; /* Compression type */
unsigned int imagesize; /* Image size in bytes */
int xresolution,yresolution; /* Pixels per meter */
unsigned int ncolors; /* Number of colors */
unsigned int importantcolors; /* Important colors */
} INFOHEADER;
typedef struct {
HEADER header;
INFOHEADER infoheader;
short int *color;
} BMP;
void parseBmp(char *fileName, BMP *bmp);
void drawBmp(BMP *bmp, int x, int y);
void eraseBmp(BMP *bmp, int x, int y);
void receiveToken (char *buffer, BMP *bmp);
void receiveTokenPixArr (unsigned char *buffer, BMP *bmp);
unsigned char readByteChar(char * buffer);
short int readWordChar(char * buffer);
int readDWordChar(char * buffer);
#endif
<file_sep>/csrc/map.c
#include "map.h"
BMP map;
int allocatedMap = 0;
void receiveMap(unsigned char *buffer) {
if(allocatedMap) {
free(map.color);
} else {
allocatedMap = 1;
}
receiveTokenPixArr(buffer, &map);
}
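//purpose: redraw the rectangular region (x, y, width, height) from the stored map
// bitmap, or fill it with black if no map has been received yet (used to erase tokens)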
void partialMapReDraw(int x, int y, int width, int height) {
int i, j, offset;
short int color;
if(allocatedMap) {
for(i = 0; i < height; i++) {
offset = (y + i) * map.infoheader.width + x;
for(j = 0; j < width; j++) {
color = map.color[offset + j];
drawPixelFast(x+j, y+i, color);
}
}
} else {
for(i = 0; i < height; i++) {
for(j = 0; j < width; j++) {
drawPixelFast(x+j, y+i, 0);
}
}
}
}
void drawMap() {
drawBmp(&map, 0, 0);
drawAllTokens();
}
<file_sep>/csrc/command.c
#include "command.h"
extern BMP map;
extern token * tokenArr;
extern int loadedTokenCnt;
extern char dmID;
//inputs: a message to be decoded and executed
//output: -1 for an error, 0 otherwise
//purpose: Take in a raw message from middleman and execute the required functions depending on what the input command is.
int executeCmd(msg * currentMsg) {
if(currentMsg == NULL) {
return -1;
}
unsigned int nextCmd = currentMsg->cmd;
unsigned char byteInfo;
msg * rspnsMsg;
switch ((command)nextCmd) {
case CONNECT:
//not implemented
break;
case DISCONNECT:
//not implemented
break;
case SEND_MAP:
		//Android sends map to DE2 - needs to be received, stored and drawn
printf("Entering send SEND_MAP\n");
if(loadedTokenCnt < MAX_TOKENS){
receiveMap(currentMsg->buffer);
drawMap();
} else {
printf("Error when Android sending map!\n");
return -1;
}
break;
case SEND_TOKEN:
		//Android sends token to DE2 - needs to be received, stored as a token, and drawn
// Then others need to be notified of new token
printf("Entering send Token\n");
//obtain free address in token array
token *newTok = allocateToken();
if(newTok){
newTok->ownerID = currentMsg->androidID;
receiveTokenPixArr(currentMsg->buffer, &(newTok->bmp));
drawBmp(&newTok->bmp, newTok->x, newTok->y);
} else {
printf("Error when Android sending token!\n");
return -1;
}
// respond with token ID
rspnsMsg = createSendTokenResponsesMsg(currentMsg, newTok);
sendMessage(rspnsMsg);
free(rspnsMsg->buffer);
free(rspnsMsg);
alertUsersOfTokenInfo(currentMsg, newTok->tokenID);
break;
case GET_DM:
//Android attempts to get DM and new DM information is sent to all Android Users
printf("In get_dm\n");
//only allow DM to be taken if it is not already taken
if (dmID == 0) {
dmID = currentMsg->androidID;
printf("New DM: %x\n", dmID);
} else {
printf("DM not available - player %x currently has it\n", dmID);
}
sendAllUsersDMID(dmID);
printf("DM id %x\n", dmID);
break;
case RELEASE_DM:
		//Android attempts to release DM and new DM information is sent to all Android Users
printf("In release_dm\n");
//only the DM can release their status
if (dmID == currentMsg->androidID)
{
dmID = 0;
}
sendAllUsersDMID(dmID);
printf("DM id %x\n", dmID);
break;
case GET_DM_ID:
//All Android users get DM information
printf("In test_get_dm\n");
sendAllUsersDMID(dmID);
printf("DM id %x\n", dmID);
break;
case MOVE_TOKEN:
//Android moves token on DE2 - needs structure to be updated, and redrawn
// Then others need to be notified of new token position
printf("In move_token\n");
handleMoveTokenMsg(currentMsg);
alertUsersOfTokenInfo(currentMsg, currentMsg->buffer[0]);
break;
case HANDSHAKE:
//return identical message to Android that was recieved
printf("In hand_shake command\n");
sendMessage(currentMsg);
break;
case PASS_MSG:
//Pass message between Android users
printf("In Pass_msg command statement\n");
passMsg(currentMsg);
break;
case UPDATE_ALIAS:
//Update the user's alias to a new string
printf("In Update_Alias\n");
updateConnUserAlias(currentMsg);
alertUsersNewUser(currentMsg);
break;
case OUTPUT_TOKEN_INFO:
//This is a java side command - the DE2 will update all connected androids that a token has moved.
printf("In Output_Token_Info\n");
break;
case DISCONNECT_DEV:
//A device disconnected, so cleanup everything it owns and alert all other users that this player,
// and their tokens no longer exist
printf("In DISCONNECT_DEV\n");
if (dmID == currentMsg->androidID) {
dmID = 0;
sendAllUsersDMID(dmID);
}
alertUsersOfUserDC(currentMsg); // removes their alias
removeTokensOfOneUser(currentMsg, REMOVEALLVAL); // removes all references to DC'd players tokens from all other users.
removeTokenFromUser(currentMsg->androidID);
clearUserInfo(currentMsg);
alt_up_char_buffer_clear(char_buffer); // refresh buffer.
break;
case REMOVE_TOKEN:
//Android removes token from DE2 - needs to be cleaned up
// Then others need to be notified of token removed from game
printf("In Remove_Token");
byteInfo = *(currentMsg->buffer); // first byte in buffer is Token_ID;
removeTokensOfOneUser(currentMsg, byteInfo);
removeToken(byteInfo);
break;
default:
printf("Error, invalid command received on DE2!");
break;
}
return 0;
}
<file_sep>/pysrc/middleman.py
#!/usr/bin/python
import sys
import atexit
import serial
import select
import socket
import signal
import queue
import threading
import time
HOST = ''
PORT = 50002
def open_serial():
if 'linux' in sys.platform:
dev = '/dev/ttyUSB'
elif 'win' in sys.platform:
dev = '\\.\COM'
dev += input("Enter the serial port #:")
try:
ser = serial.Serial(port = dev,
baudrate = 115200,
bytesize = 8,
parity = "N",
stopbits = 1)
if not ser.isOpen():
ser.open()
except serial.SerialException as e:
print(e)
sys.exit()
return ser
def serial_loopback():
ser = open_serial()
while True:
length = ser.read()
ser.write(length)
print("length: ", ord(length))
data = ser.read(ord(length))
print(data.decode())
def tcp_loopback():
print("Host ip addr:")
print(socket.gethostbyname(socket.gethostname()), "\n")
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.bind((HOST, PORT))
sock.listen(5)
conn, addr = sock.accept()
print("Connection Address", addr, "\n")
while True:
data = conn.recv(1024)
if not data: break
print("data: ", data)
print("received data length: ", len(data))
sent = conn.send(data)
print("sent length: ", sent)
def tcp_serial():
id_count = 0
conn_map = {}
tcp_send_queues = []
uart_send_queue = queue.Queue()
ser = open_serial()
ser_thread = threading.Thread(target = serial_worker, args = (ser, tcp_send_queues, uart_send_queue))
ser_thread.daemon = True
ser_thread.start()
print("Host ip addr:")
print(socket.gethostbyname(socket.gethostname()), "\n")
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.bind((HOST, PORT))
sock.listen(5)
while True:
conn, addr = sock.accept()
if addr[0] in conn_map.keys():
conn_id = conn_map[addr[0]]
tcp_send_queue = tcp_send_queues[conn_id - 1]
else:
id_count+=1
conn_id = id_count
conn_map[addr[0]] = conn_id
tcp_send_queue = queue.Queue()
tcp_send_queues.append(tcp_send_queue)
print("Connection Id:", conn_id, " Connection Address", addr, "\n")
t = threading.Thread(target = tcp_worker, args = (conn, conn_id, tcp_send_queue, uart_send_queue, tcp_send_queues))
t.daemon = True
t.start()
def tcp_worker(conn, conn_id, tcp_send_queue, uart_send_queue, tcp_send_queues):
try:
oldLen = 0
while True:
(sread, swrite, sexec) = select.select([conn], [], [], 0)
if sread:
msgLen = 0
data = b''
for i in reversed(range(0, 4)):
tmp=conn.recv(1)
msgLen = (msgLen + (ord(tmp) * (1 << i * 8)))
data += tmp
#command is 1 byte
data += conn.recv(1)
#5 is for command length, and 4 bytes of message length info
while len(data) < (msgLen + 5):
oldLen = len(data)
data += conn.recv(msgLen - len(data) + 5)
print("received ", len(data), " data of ", (msgLen + 5), " so far!")
if oldLen == len(data):
break;
if not data: break
#Broadcast map to all android devices
if (data[4] == 2):
for queue in tcp_send_queues:
queue.put(data)
#Append connection id to data
data = chr(conn_id).encode() + data
print("data: ", data)
uart_send_queue.put(data)
if not tcp_send_queue.empty():
print("actually sending data via TCP to android")
data = tcp_send_queue.get()
conn.send(data)
except Exception as e:
print(e)
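        # Command value 11 matches DISCONNECT_DEV in csrc/utilities.h: when a TCP
        # client drops, tell the DE2 to clean up that player's tokens and state.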
REMOVE_ALL_TOKEN = 11
data = chr(0).encode() + chr(0).encode() + chr(0).encode() + chr(0).encode() + chr(REMOVE_ALL_TOKEN).encode()
data = chr(conn_id).encode() + data
print("data: ", data)
uart_send_queue.put(data)
def serial_worker(ser, tcp_send_queues, uart_send_queue):
ready = False
while True:
if ser.inWaiting() > 0:
conn_id = ord(ser.read())
if conn_id == 0:
print("DE2 is ready to receive data.")
ready = True
else:
print("Connection Id: " + str(conn_id))
msgLen = 0
x = b''
for i in reversed(range(0, 4)):
tmp=ser.read(1)
x+= tmp
msgLen = (msgLen + (ord(tmp) * (1 << i * 8)))
print("length: ", str(msgLen))
#Data includes the command in this code (+1)
data = ser.read(msgLen + 1)
print(data)
#Push data to correct tcp queue
tcp_send_queues[conn_id - 1].put(x + data)
if (not uart_send_queue.empty()) and ready:
print("Sending data through the serial port.")
data = uart_send_queue.get()
ser.write(data)
ready = False
def main():
print("""Welcome to Middleman
This program allows you to transmit data between serial and TCP.
The program supports three modes:
0. Serial Loopback
1. TCP Loopback
2. Serial <-> TCP
""")
while True:
try:
usr_input = int(input("Select mode (0,1,2):"))
if usr_input in [0,1,2]:
print("")
break
else:
print("Invalid mode selection")
except ValueError:
print("Please enter a valid integer")
if usr_input == 0:
serial_loopback()
elif usr_input == 1:
while True:
try:
tcp_loopback()
except:
print("Caught exception in tcp loopback.")
else:
tcp_serial()
if __name__ == '__main__':
try:
main()
except KeyboardInterrupt:
sys.exit("\nUser keyboard interrupt")
<file_sep>/javasrc/src/org/ubc/de2vtt/tabletop/TableTopOnTouchListener.java
package org.ubc.de2vtt.tabletop;
import org.ubc.de2vtt.comm.Command;
import org.ubc.de2vtt.comm.Message;
import org.ubc.de2vtt.comm.Messenger;
import org.ubc.de2vtt.comm.sendables.SendableMove;
import org.ubc.de2vtt.token.Token;
import android.view.MotionEvent;
import android.view.View;
import android.widget.RelativeLayout;
public class TableTopOnTouchListener implements View.OnTouchListener {
private Token mTok;
private int prev_x, prev_y;
private int mFragmentHeight, mFragmentWidth;
public TableTopOnTouchListener (Token tok, int fragmentWidth, int fragmentHeight) {
mTok = tok;
mFragmentHeight = fragmentHeight;
mFragmentWidth = fragmentWidth;
}
@Override
public boolean onTouch(View v, MotionEvent event) {
final int x, y, dx, dy;
RelativeLayout.LayoutParams params = (RelativeLayout.LayoutParams) v.getLayoutParams();
switch (event.getAction()) {
case MotionEvent.ACTION_DOWN:
prev_x = (int) event.getRawX();
prev_y = (int) event.getRawY();
break;
case MotionEvent.ACTION_MOVE:
x = (int) event.getRawX();
y = (int) event.getRawY();
dx = x - prev_x;
dy = y - prev_y;
if ((params.topMargin + dy + params.height) < mFragmentHeight && (params.leftMargin + dx + params.width) < mFragmentWidth
&& (params.topMargin + dy) > 0 && (params.leftMargin + dx) > 0) {
prev_x = x;
prev_y = y;
params.leftMargin += dx;
params.topMargin += dy;
v.setLayoutParams(params);
}
break;
case MotionEvent.ACTION_UP:
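			// The phone layout is rotated relative to the DE2 display: the token's vertical
			// offset maps to the board's x axis, and its horizontal offset (measured from the
			// right edge) maps to the board's y axis, both sent as 0..1 ratios scaled by
			// Token.SCREEN_WIDTH/SCREEN_HEIGHT below.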
float y_ratio = ((float) (mFragmentWidth - params.leftMargin - params.width))/((float) mFragmentWidth);
float x_ratio = ((float) params.topMargin)/((float) mFragmentHeight);
mTok.move(x_ratio,y_ratio);
SendableMove mv = new SendableMove(mTok.getId(), (int) (mTok.getX() * Token.SCREEN_WIDTH),
(int) (mTok.getY() * Token.SCREEN_HEIGHT));
Messenger m = Messenger.GetSharedInstance();
Message msg = new Message(Command.MOVE_TOKEN, mv);
m.send(msg);
break;
default:
break;
}
return true;
}
}<file_sep>/csrc/bmp.c
#include "bmp.h"
//purpose: parse a BMP file and package in a BMP structure
void parseBmp (char *fileName, BMP *bmp) {
int i, j, k;
char b, g, r;
int pixels, rowOffset, offset;
short int fh;
fh = openFile(fileName);
bmp->header.type = readWord(fh);
bmp->header.size = readDWord(fh);
bmp->header.reserved1 = readWord(fh);
bmp->header.reserved2 = readWord(fh);
bmp->header.offset = readDWord(fh);
bmp->infoheader.size = readDWord(fh);
bmp->infoheader.width = readDWord(fh);
bmp->infoheader.height = readDWord(fh);
bmp->infoheader.planes = readWord(fh);
bmp->infoheader.bits = readWord(fh);
bmp->infoheader.compression = readDWord(fh);
bmp->infoheader.imagesize = readDWord(fh);
bmp->infoheader.xresolution = readDWord(fh);
bmp->infoheader.yresolution = readDWord(fh);
bmp->infoheader.ncolors = readDWord(fh);
bmp->infoheader.importantcolors = readDWord(fh);
pixels = bmp->infoheader.width * bmp->infoheader.height;
bmp->color = malloc(BYTES_PER_PIXEL * pixels);
for(i = 0; i < bmp->infoheader.height; i++) {
rowOffset = i*bmp->infoheader.width;
for(j = 0; j < bmp->infoheader.width; j++ ){
offset = pixels - rowOffset - j - 1;
			b = (readByte(fh) & 0xF8) >> 3;   // keep the top 5 bits of blue
			g = (readByte(fh) & 0xFC) >> 2;   // keep the top 6 bits of green
			r = (readByte(fh) & 0xF8) >> 3;   // keep the top 5 bits of red
			//Filter out the pink (magenta) transparency pixels
			if(b == 0x1F && g == 0 && r == 0x1F) {
bmp->color[offset] = 0x0;
} else {
bmp->color[offset] = (r << 11) | (g << 5) | b;
}
}
if((BYTES_PER_PIXEL*bmp->infoheader.width) % 4 != 0) {
for (k = 0; k < (4 - ((BYTES_PER_PIXEL*bmp->infoheader.width) % 4)); k++) {
readByte(fh);
}
}
}
closeFile(fh);
}
//purpose: Receive a token in WING from a given bmp buffer and package in a BMP struct
void receiveToken (char *buffer, BMP *bmp) {
int i, j, k;
char b, g, r;
int pixels, rowOffset, offset;
bmp->header.type = readWordChar(buffer);
buffer += 2;
bmp->header.size = readDWordChar(buffer);
buffer += 4;
bmp->header.reserved1 = readWordChar(buffer);
buffer += 2;
bmp->header.reserved2 = readWordChar(buffer);
buffer += 2;
bmp->header.offset = readDWordChar(buffer);
buffer += 4;
bmp->infoheader.size = readDWordChar(buffer);
buffer += 4;
bmp->infoheader.width = readDWordChar(buffer);
buffer += 4;
bmp->infoheader.height = readDWordChar(buffer);
buffer += 4;
bmp->infoheader.planes = readWordChar(buffer);
buffer += 2;
bmp->infoheader.bits = readWordChar(buffer);
buffer += 2;
bmp->infoheader.compression = readDWordChar(buffer);
buffer += 4;
bmp->infoheader.imagesize = readDWordChar(buffer);
buffer += 4;
bmp->infoheader.xresolution = readDWordChar(buffer);
buffer += 4;
bmp->infoheader.yresolution = readDWordChar(buffer);
buffer += 4;
bmp->infoheader.ncolors = readDWordChar(buffer);
buffer += 4;
bmp->infoheader.importantcolors = readDWordChar(buffer);
buffer += 4;
pixels = bmp->infoheader.width * bmp->infoheader.height;
bmp->color = malloc(BYTES_PER_PIXEL * pixels);
for(i = 0; i < bmp->infoheader.height; i++) {
rowOffset = i*bmp->infoheader.width;
for(j = 0; j < bmp->infoheader.width; j++ ){
offset = pixels - rowOffset - j - 1;
			b = (readByteChar(buffer++) & 0xF8) >> 3;   // keep the top 5 bits of blue
			g = (readByteChar(buffer++) & 0xFC) >> 2;   // keep the top 6 bits of green
			r = (readByteChar(buffer++) & 0xF8) >> 3;   // keep the top 5 bits of red
			//Filter out the pink (magenta) transparency pixels
			if(b == 0x1F && g == 0 && r == 0x1F) {
bmp->color[offset] = 0x0;
} else {
bmp->color[offset] = (r << 11) | (g << 5) | b;
}
}
if((BYTES_PER_PIXEL*bmp->infoheader.width) % 4 != 0) {
for (k = 0; k < (4 - ((BYTES_PER_PIXEL*bmp->infoheader.width) % 4)); k++) {
readByteChar(buffer++);
}
}
}
}
//purpose: Receive a token in WING from a given pixel array buffer and package in a BMP struct
// This function makes use of knowing how the pixel arrays are created on the Android devices
// (sent with xxxx/yyyyy/actualPixelArrayIn565Format)
void receiveTokenPixArr (unsigned char *buffer, BMP *bmp) {
unsigned char sizeArr[4];
int i, j;
char byte1, byte2;
int pixels, rowOffset, offset;
unsigned int cursor = 0;
bmp->infoheader.width = 0;
bmp->infoheader.height = 0;
//obtain width
for(i = ((sizeof(sizeArr) / sizeof(sizeArr[0])) - 1); i >= 0; i--) {
sizeArr[i] = buffer[cursor++];
printf("received: sizeArr[i] %d\n", sizeArr[i]);
bmp->infoheader.width += (0xFF & sizeArr[i]) << i*8;
}
//obtain height
for(i = ((sizeof(sizeArr) / sizeof(sizeArr[0])) - 1); i >= 0; i--) {
sizeArr[i] = buffer[cursor++];
printf("received: sizeArr[i] %d\n", sizeArr[i]);
bmp->infoheader.height += (0xFF & sizeArr[i]) << i*8;
}
pixels = bmp->infoheader.width * bmp->infoheader.height;
printf("pixels set to: %d\n", pixels);
bmp->color = malloc(BYTES_PER_PIXEL * pixels);
if(bmp->color) {
for(i = 0; i < bmp->infoheader.height; i++) {
rowOffset = i * bmp->infoheader.width;
for(j = 0; j < bmp->infoheader.width; j++ ){
offset = rowOffset + j;
byte1 = buffer[cursor++];
byte2 = buffer[cursor++];
bmp->color[offset] = ((byte1 << 8) & 0xFF00) | (byte2 & 0xFF);
}
}
} else {
printf("Error, didn't allocate memory for token color\n");
}
}
//return a byte from a character buffer
unsigned char readByteChar(char * buffer) {
return *buffer;
}
//return 2 bytes in a short int from a character buffer
short int readWordChar(char * buffer) {
short int byte1, byte2;
byte1 = (short int)(*buffer);
byte2 = (short int)(*(buffer+1));
return ((unsigned short int)byte1 << 8) | ((unsigned short int)byte2 & 0x00FF);
}
//return a 4 bytes in an int from a character buffer
int readDWordChar(char * buffer) {
short int byte1, byte2, byte3, byte4;
byte1 = (short int)(*buffer);
byte2 = (short int)(*(buffer+1));
byte3 = (short int)(*(buffer+2));
byte4 = (short int)(*(buffer+3));
return ((unsigned short int)byte1 << 24) | ((unsigned short int)byte2 << 16) | ((unsigned short int)byte3 << 8) | (unsigned short int)byte4;
}
//draw a BMP at a given x/y location on the monitor, making use of drawPixelFast
void drawBmp (BMP *bmp, int x, int y) {
int i,j;
int offset;
for(i = 0; i < bmp->infoheader.height; i++) {
if(y + i < SCREEN_HEIGHT && y + i > 0) {
offset = i * bmp->infoheader.width;
for(j = 0; j < bmp->infoheader.width; j++){
if(x + j >= SCREEN_WIDTH || x + j <= 0)
continue;
drawPixelFast(x + j, y + i, bmp->color[offset +j]);
}
}
}
}
//draw black over a given BMP at a given x/y on the monitor, making use of drawPixelFast.
void eraseBmp (BMP *bmp, int x, int y) {
int i,j;
int offset;
for(i = 0; i < bmp->infoheader.height; i++) {
if(y + i < SCREEN_HEIGHT && y + i > 0) {
offset = i * bmp->infoheader.width;
for(j = 0; j < bmp->infoheader.width; j++){
if(x + j >= SCREEN_WIDTH || x + j <= 0)
continue;
drawPixelFast(x + j, y + i, 0);
}
}
}
}
<file_sep>/README.md
DE2VTT
======
Note - to obtain our personally tailored version of middleman, type:
git submodule init
followed by
git submodule update
in order to obtain and update the code in the repository for "Middleman"
Make sure to download pyserial from https://pypi.python.org/pypi/pyserial
1) download the tar
2) unzip it using 7zip
3) find the unpacked folder and run the command:
python setup.py install
That should be it
D&D Virtual Table Top for DE2
<file_sep>/javasrc/src/org/ubc/de2vtt/token/TokenManager.java
package org.ubc.de2vtt.token;
import java.util.ArrayList;
import java.util.HashSet;
import java.util.LinkedList;
import java.util.List;
import java.util.Queue;
import java.util.Set;
import org.ubc.de2vtt.SharedPreferencesManager;
import android.graphics.Bitmap;
import android.graphics.Color;
import android.os.AsyncTask;
import android.util.Log;
import android.util.SparseArray;
public class TokenManager {
private static final String TAG = TokenManager.class.getSimpleName();
private static final String TOKENS_KEY = "tokens";
private static final int[] COLORS = {Color.CYAN, Color.MAGENTA, Color.GREEN, Color.YELLOW, Color.BLUE, Color.WHITE};
static TokenManager sharedInstance;
private SparseArray<Token> localTokenList;
private SparseArray<Token> remoteTokenList;
private Queue<Bitmap> sendBmps;
private SparseArray<Bitmap> colorMap;
public static TokenManager getSharedInstance() {
if (sharedInstance == null) {
sharedInstance = new TokenManager();
}
return sharedInstance;
}
protected TokenManager() {
localTokenList = new SparseArray<Token>();
remoteTokenList = new SparseArray<Token>();
sendBmps = new LinkedList<Bitmap>();
colorMap = new SparseArray<Bitmap>();
}
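// Add a token to the manager. Local tokens take the next queued bitmap if one is
// available; remote tokens (and any token still missing a bitmap) fall back to a
// solid per-player color bitmap.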
public void add(Token tok) {
if (tok.isLocal()) {
if (!sendBmps.isEmpty()) {
tok.setBmp(sendBmps.remove());
} else {
Log.e(TAG, "Added local token without a bitmap.");
setColorBitmap(tok);
}
localTokenList.append(tok.getId(), tok);
} else {
setColorBitmap(tok);
remoteTokenList.append(tok.getId(), tok);
}
if (tok.getBitmap() == null) {
setColorBitmap(tok);
}
}
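// Attach a 1x1 RGB_565 bitmap in the player's assigned color, reusing a cached
// bitmap when that color has been created before.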
private void setColorBitmap(Token tok) {
int playerID = tok.getPlayerID();
int[] color = new int[1];
color[0] = COLORS[playerID % COLORS.length];
if (colorMap.get(color[0]) == null) {
// new color
Bitmap bmp = Bitmap.createBitmap(color, 1, 1, Bitmap.Config.RGB_565);
colorMap.put(color[0], bmp);
tok.setBmp(bmp);
} else {
Bitmap b = colorMap.get(color[0]);
tok.setBmp(b);
}
}
public void remove(Token tok) {
localTokenList.remove(tok.getId());
remoteTokenList.remove(tok.getId());
}
public void resetTokenManager() {
localTokenList = new SparseArray<Token>();
remoteTokenList = new SparseArray<Token>();
sendBmps = new LinkedList<Bitmap>();
}
public void save() {
new TokenSave().execute();
}
public void queueBitmap(Bitmap bmp) {
sendBmps.add(bmp);
}
private class TokenSave extends AsyncTask<Void, Integer, Void> {
@Override
protected Void doInBackground(Void... params) {
SharedPreferencesManager man = SharedPreferencesManager.getSharedInstance();
Set<String> s = new HashSet<String>();
int key = 0;
for (int i = 0; i < localTokenList.size(); i++) {
key = localTokenList.keyAt(i);
Token t = localTokenList.get(key);
s.add(t.encode());
}
man.putStringSet(TOKENS_KEY, s);
return null;
}
}
public void load() {
new TokenLoad().execute();
}
// TODO: token ids need to be assigned by the DE2 each session
private class TokenLoad extends AsyncTask<Void, Integer, Void> {
@Override
protected Void doInBackground(Void... params) {
SharedPreferencesManager man = SharedPreferencesManager.getSharedInstance();
Set<String> s = man.getStringSet(TOKENS_KEY);
String[] tokens = s.toArray(new String[0]);
for (int i = 0; i < tokens.length; i++) {
Token t = new Token(tokens[i]);
localTokenList.append(t.getId(), t);
}
return null;
}
}
// NOTE: token received from DE2 can be used to move a token, no need for a move object
public void move(Token tok) {
if (tok.isLocal() || ownToken(tok)) {
Token toMove = localTokenList.get(tok.getId());
toMove.move(tok.getX(), tok.getY());
} else {
if (remoteTokenList.get(tok.getId()) == null) {
// Token being added
add(tok);
}
else {
// Token being moved
remoteTokenList.put(tok.getId(), tok);
}
}
if (tok.getBitmap() == null) {
setColorBitmap(tok);
}
}
public boolean ownToken(Token tok) {
return localTokenList.get(tok.getId()) != null;
}
public int sizeLocal() {
return localTokenList.size();
}
public int sizeAll() {
return localTokenList.size() + remoteTokenList.size();
}
public int getLocalKey(int i) {
return localTokenList.keyAt(i);
}
public Token getLocal(int i) {
return localTokenList.get(i);
}
public int getRemoteKey(int i) {
return remoteTokenList.keyAt(i);
}
public Token getRemote(int i) {
return remoteTokenList.get(i);
}
/**
*
* @return list of ALL tokens
*/
public List<Token> getList() {
List<Token> l = new ArrayList<Token>();
addElementsToList(localTokenList, l);
addElementsToList(remoteTokenList, l);
return l;
}
public List<Token> getLocalList() {
List<Token> l = new ArrayList<Token>();
addElementsToList(localTokenList, l);
return l;
}
public List<Token> getRemoteList() {
List<Token> l = new ArrayList<Token>();
addElementsToList(remoteTokenList, l);
return l;
}
private void addElementsToList(SparseArray<Token> a, List<Token> l) {
for (int i = 0; i < a.size(); i++) {
int key = a.keyAt(i);
Token t = a.get(key);
l.add(t);
}
}
public Token getTokenById(int id) {
List<Token> l = getList();
for (Token t : l) {
if (t.getId() == id) {
return t;
}
}
return null;
}
}
<file_sep>/javasrc/src/org/ubc/de2vtt/comm/Command.java
package org.ubc.de2vtt.comm;
import org.ubc.de2vtt.exceptions.InvalidCommandException;
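// Commands shared with the DE2: each value carries the single command byte used
// on the wire, and Convert() maps a received byte back to its enum value.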
public enum Command {
CONNECT((byte)0),
DISCONNECT((byte)1),
SEND_MAP((byte)2),
SEND_TOKEN((byte)3),
GET_DM((byte)4),
RELEASE_DM((byte)5),
MOVE_TOKEN((byte)6),
HANDSHAKE((byte)7),
PASS_MSG((byte)8),
UPDATE_ALIAS((byte)9),
OUTPUT_TOKEN_INFO((byte)10),
DISCONNECT_DEV((byte)11),
REMOVE_TOKEN((byte)12),
GET_DM_ID((byte)13)
;
public byte code;
private Command(byte c) {
code = c;
}
public static Command Convert(byte b) {
//return Command.values()[b];
switch (b) {
case (byte)0:
return CONNECT;
case (byte)1:
return DISCONNECT;
case (byte)2:
return SEND_MAP;
case (byte)3:
return SEND_TOKEN;
case (byte)4:
return GET_DM;
case (byte)5:
return RELEASE_DM;
case (byte)6:
return MOVE_TOKEN;
case (byte)7:
return HANDSHAKE;
case (byte)8:
return PASS_MSG;
case (byte)9:
return UPDATE_ALIAS;
case (byte)10:
return OUTPUT_TOKEN_INFO;
case (byte)11:
return DISCONNECT_DEV;
case (byte)12:
return REMOVE_TOKEN;
case (byte)13:
return GET_DM_ID;
default:
throw new InvalidCommandException();
}
}
}
<file_sep>/csrc/main.c
#include <stdio.h>
#include <stdlib.h>
#include "audio.h"
#include "timer.h"
#include "sd_card.h"
#include "vga.h"
#include "input.h"
#include "message.h"
#include "command.h"
#include "token.h"
#include "io.h"
#include "system.h"
#include "altera_nios2_qsys_irq.h"
#include "sys/alt_irq.h"
int init() {
if (openSdCard() == -1) {
printf("Error: Failed to open sd card\n");
return -1;
} else {
printf("Opened SD card\n");
}
initVga();
setupAudio(); //stub for audio, it is functional, but not used in WING currently
setupMessage();
initTokens();
initHardwareTimer();
return 0;
}
int main() {
msg msg_m;
msg_m.buffer = NULL;
int statusInt;
if (init() == -1)
return -1;
//startHardwareTimer();
//Loop as a command center, continually receiving and executing commands
while (1) {
if(msg_m.buffer != NULL) {
free(msg_m.buffer);
msg_m.buffer = NULL;
}
//overlay user Aliases and IDs on screen
drawUserIDs();
printf("Obtaining message\n");
getMessage(&msg_m);
printf("Executing message command\n");
statusInt = executeCmd(&msg_m);
if(statusInt == -1) {
printf("error occurred in executing Command.\n");
} else {
printf("Completed message command\n");
}
//stub for audio, it is functional, but not used in WING currently
/*if (hasHardwareTimerExpired() == 1) {
startHardwareTimer();
handleKeyInput();
handleSwitchInput();
//playEpicMusic();
//Check if message to receive?
}*/
}
return 0;
}
<file_sep>/csrc/command.h
#ifndef COMMAND_H_
#define COMMAND_H_
#include "message.h"
#include "bmp.h"
#include "map.h"
#include "token.h"
#include "utilities.h"
int executeCmd(msg *m);
#endif /* COMMAND_H_ */
<file_sep>/javasrc/src/org/ubc/de2vtt/comm/Messenger.java
package org.ubc.de2vtt.comm;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.net.Socket;
import java.nio.ByteBuffer;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import java.util.concurrent.locks.ReentrantLock;
import org.ubc.de2vtt.Disconnect;
import org.ubc.de2vtt.SharedPreferencesManager;
import org.ubc.de2vtt.comm.sendables.SendableString;
import org.ubc.de2vtt.fragments.ConnectionFragment;
import android.os.AsyncTask;
import android.util.Log;
// Singleton class used to send/receive messages via middleman
// Most code copied or adapted from platform tutorial 2
public class Messenger {
static final String TAG = Messenger.class.getSimpleName();
private Socket mSocket;
private String ip;
private String port;
static Messenger mSharedInstance;
static ReentrantLock mutex = new ReentrantLock(true);
public static Messenger GetSharedInstance() {
if (mSharedInstance == null) {
mSharedInstance = new Messenger();
connectWithPrevValues();
}
return mSharedInstance;
}
protected Messenger() {
mSocket = null;
ip = null;
port = null;
}
private static void connectWithPrevValues() {
SharedPreferencesManager man = SharedPreferencesManager.getSharedInstance();
mSharedInstance.ip = man.getString(ConnectionFragment.SHARED_PREFS_IP, null);
mSharedInstance.port = man.getString(ConnectionFragment.SHARED_PREFS_PORT, null);
if (mSharedInstance.ip != null && mSharedInstance.port != null) {
mSharedInstance.openSocket(mSharedInstance.ip, mSharedInstance.port);
}
}
public synchronized void resetSocket() {
// if (ip == null || port == null) {
// Log.e(TAG, "Unable to reset null socket.");
// }
// else {
// if (isConnected()) {
// closeSocket();
// }
// openSocket(ip, port);
// }
}
public synchronized void openSocket(String ip, Integer port) {
openSocket(ip, port.toString());
}
public synchronized void openSocket(String ip, String port) {
// Make sure the socket is not already opened
if (isConnected()) {
Log.e(TAG, "Socket already open");
return;
}
this.ip = ip;
this.port = port;
new SocketConnector().execute(this.ip, this.port);
}
public synchronized void closeSocket() {
if (isConnected()) {
try {
mSocket.getOutputStream().close();
mSocket.close();
} catch (IOException e) {
if(!isConnected()) {
Disconnect.removeSessionData();
Log.v(TAG, "Disconnect code hit");
}
e.printStackTrace();
}
} else {
Log.v(TAG, "Attempt to close non-open socket.");
}
}
public synchronized boolean isConnected() {
return mSocket != null && mSocket.isConnected() && !mSocket.isClosed();
}
public void sendStringMessage(String str, Command cmd) {
SendableString sendStr = new SendableString(str);
Message msg = new Message(cmd, sendStr);
send(msg);
}
public void send(Message msg) {
new SocketSender().execute(msg);
}
private class SocketSender extends AsyncTask<Message, Integer, Void> {
@Override
protected Void doInBackground(Message... msg) {
mutex.lock();
try {
Thread.sleep(msg[0].getDelay());
sendMessage(msg[0]);
} catch (InterruptedException e) {
if(!isConnected()) {
Disconnect.removeSessionData();
Log.v(TAG, "Disconnect code hit");
}
e.printStackTrace();
}
finally {
mutex.unlock();
}
return null;
}
private void sendMessage(Message msg) {
byte buf[] = msg.GetArrayToSend();
if (!isConnected()) {
resetSocket();
}
if (isConnected()) {
try {
OutputStream out = mSocket.getOutputStream();
Log.v(TAG, "Sending " + buf.length + " bytes.");
try {
out.write(buf, 0, buf.length);
out.flush();
Log.v(TAG, "Send complete.");
} catch (IOException e) {
if(!isConnected()) {
Disconnect.removeSessionData();
Log.v(TAG, "Disconnect code hit");
}
e.printStackTrace();
}
} catch (IOException e) {
if(!isConnected()) {
Disconnect.removeSessionData();
Log.v(TAG, "Disconnect code hit");
}
e.printStackTrace();
}
} else {
Log.v(TAG, "Attempt to send without opening socket.");
}
}
}
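// Run the receive on a background task and wait up to 3 seconds for the result;
// on timeout, interruption, or execution failure, resetSocket() is invoked and
// null is returned.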
public Received receive() {
SocketReceiver task = (SocketReceiver) new SocketReceiver();
task.execute();
Received r = null;
try {
r = task.get(3000, TimeUnit.MILLISECONDS);
} catch (TimeoutException e) {
if(!isConnected()) {
Disconnect.removeSessionData();
Log.v(TAG, "Disconnect code hit");
}
Log.e(TAG, "Receive timed out.");
resetSocket();
//r = attemptReceiveRecovery(r);
//e.printStackTrace();
} catch (InterruptedException e) {
if(!isConnected()) {
Disconnect.removeSessionData();
Log.v(TAG, "Disconnect code hit");
}
resetSocket();
Log.e(TAG, "Receive interrupted.");
//e.printStackTrace();
} catch (ExecutionException e) {
if(!isConnected()) {
Disconnect.removeSessionData();
Log.v(TAG, "Disconnect code hit");
}
resetSocket();
Log.e(TAG, "Receive computation mucked up.");
//e.printStackTrace();
}
return r;
}
private class SocketReceiver extends AsyncTask<Void, Integer, Received> {
@Override
protected Received doInBackground(Void... i) {
mutex.lock();
Received r = null;
try {
//Log.v(TAG, "Trying receive");
r = receiveMessage();
}
finally {
mutex.unlock();
}
return r;
}
public Received receiveMessage() {
Received rcv = null;
if (isConnected()) {
try {
rcv = getMessage(rcv);
} catch (IOException e) {
if(!isConnected()) {
Disconnect.removeSessionData();
Log.v(TAG, "Disconnect code hit");
}
Log.e(TAG, "IOException on receive.");
}
} else {
Log.e(TAG, "Attempt to receive message from non-open socket.");
}
return rcv;
}
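// Frame format from the middleman: a 4-byte big-endian argument length followed
// by len + 1 further bytes (command plus arguments); the whole frame, length
// included, is wrapped in a Received for parsing.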
private Received getMessage(Received rcv) throws IOException {
InputStream in = mSocket.getInputStream();
byte buf[] = null;
byte lenBuf[] = new byte[4];
// See if any bytes are available from the Middleman
int bytes_avail = in.available();
int read = 0;
if (bytes_avail > 0) {
// If so, find how long the args are
in.read(lenBuf, 0, 4);
for (byte b : lenBuf) {
String s = String.format("0x%x", b);
Log.v(TAG, s);
}
ByteBuffer bb = ByteBuffer.wrap(lenBuf);
int len = bb.getInt();
if (len < 0) {
Log.e(TAG, "Received negative length.");
return null;
}
Log.v(TAG, "Length is: " + len);
buf = new byte[len + 4 + 1]; // length and command
System.arraycopy(lenBuf, 0, buf, 0, 4);
read = in.read(buf, 4, len + 1);
while (read < len + 1) {
read += in.read(buf, 4 + read, (len + 1) - read);
}
} else {
//Log.v(TAG, "Nothing to receive.");
return null;
}
Log.v(TAG, "Received " + buf.length + " bytes");
if (buf.length > 4) {
rcv = new Received(buf);
}
return rcv;
}
}
// Used by SocketConnect to set the socket once the connection occurs async
public synchronized void setSocket(Socket sock) {
mSocket = sock;
}
public static boolean readyToSend() {
return GetSharedInstance().isConnected();
}
}
<file_sep>/javasrc/src/org/ubc/de2vtt/exceptions/IncorrectCommandDatumException.java
package org.ubc.de2vtt.exceptions;
public class IncorrectCommandDatumException extends RuntimeException {
/**
*
*/
private static final long serialVersionUID = -8021872837409712522L;
}
<file_sep>/csrc/audio.h
#ifndef AUDIO_H
#define AUDIO_H
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include "system.h"
#include "timer.h"
#include "sd_card.h"
#include "altera_up_avalon_audio_and_video_config.h"
#include "altera_up_avalon_audio.h"
#include "alt_types.h"
#include "sys/alt_irq.h"
#include <stdbool.h>
#define PI 3.14159265359
#define NONE 0
#define LASER 1
#define PLAYER_DEATH 2
#define SHARK_DEATH 3
#define THEME 4
#define EPICMUSIC 5
#define FAIL -1
#define PLAYBUFFER_LEN 128
#define SONG_BUFFER_LEN 100000
#define SONG_MIN 0x100
#define SONG_MAX 0x919060
struct audioInfo{
unsigned int *mainBuffer;
unsigned int *volumeBuffer;
unsigned int bufferLength;
unsigned int playedWords;
bool active;
volatile unsigned int *playCursor;
};
void setupAudio(void);
void audioTest(void);
void readWavFile(char *wavFileName, struct audioInfo *info);
void playAudioMono(int length);
void playLaser(void);
void playPlayerDeath(void);
void playSharkDeath(void);
void playTheme(void);
void playPlayerDeath(void);
int setupAudioInterrupt(alt_up_audio_dev *audio, volatile int somethingForIrq);
void playAudio(unsigned int *leftBuffer, int leftLength, unsigned int *rightBuffer, int rightLength);
void loadLaser(void);
void loadPlayerDeath(void);
void loadSharkDeath(void);
void loadTheme(void);
void updateAudioWithVolume(char switchValues);
void changeBufferVolume(struct audioInfo *, char switchValues);
void stopTheme(void);
//Functions for project 2
void initializeEpicSong(void);
void playEpicMusic(void);
#endif
<file_sep>/csrc/input.h
#ifndef INPUT_H_
#define INPUT_H_
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include "message.h"
#include "io.h"
#include "system.h"
#include "altera_nios2_qsys_irq.h"
#include "sys/alt_irq.h"
void handleKeyInput(void);
void handleSwitchInput(void);
#endif /* INPUT_H_ */
<file_sep>/csrc/token.h
#ifndef TOKEN_H_
#define TOKEN_H_
#include "bmp.h"
#include "message.h"
#include "utilities.h"
#define REMOVEALLVAL -1
typedef struct token {
BMP bmp;
unsigned int x;
unsigned int y;
unsigned int tokenID;
unsigned int ownerID;
} token;
void initTokens(void);
token * allocateToken(void);
void removeTokenMsg(msg * rmvMsg);
void removeTokenFromUser(unsigned int ownerID);
void removeToken(unsigned int tokenID);
void drawAllTokens(void);
void redrawOverlappedTokens(int tokenIndex);
void handleMoveTokenMsg(msg * moveMsg);
void moveToken(unsigned int tokenID, int x, int y);
msg * createSendTokenResponsesMsg(msg * initialMsg, token * curTok);
void alertUsersOfTokenInfo(msg * currentMsg, int tokenID);
void removeTokensOfOneUser(msg * currentMsg, int tokenID);
#endif /* TOKEN_H_ */
<file_sep>/csrc/timer.h
#ifndef TIMER_H
#define TIMER_H
// More info about the hardware timer can be found in the embedded ip guide pages 28-6 and 28-7
#define CLOCK_FREQ 50000000
void timer_test(void);
void initHardwareTimer();
void setHardwareTimerPeriod(int period);
int isHardwareTimerRunning(void);
int hasHardwareTimerExpired(void);
void startHardwareTimer(void);
#endif
<file_sep>/csrc/vga.h
#ifndef VGA_H
#define VGA_H
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "io.h"
#include "altera_up_avalon_video_character_buffer_with_dma.h"
#include "altera_up_avalon_video_pixel_buffer_dma.h"
#define SCREEN_WIDTH 340
#define SCREEN_HEIGHT 240
#define SCREEN_CHAR_WIDTH 80
alt_up_pixel_buffer_dma_dev *pixel_buffer;
alt_up_char_buffer_dev *char_buffer;
extern int connUserIDs[];
extern char * connUserAlias[];
void initVga();
void clearScreen();
void drawPixel(int x, int y, int color);
void drawPixelFast(unsigned int x, unsigned int y, unsigned int color);
void drawLine(int x0, int y0, int x1, int y1, int color);
void printLine(char *str, int x, int y);
void drawBox(int x0, int y0, int x1, int y1, int color);
int convert24BitRgbTo16(unsigned int rgb24bit);
void drawUserIDs(void);
#endif
<file_sep>/javasrc/src/org/ubc/de2vtt/fragments/PlaceholderFragment.java
package org.ubc.de2vtt.fragments;
import org.ubc.de2vtt.comm.Received;
public class PlaceholderFragment extends WINGFragment {
@Override
public boolean passReceived(Received r) {
// TODO Auto-generated method stub
return false;
}
}
<file_sep>/pysrc/setup.py
from cx_Freeze import setup, Executable
setup(
name = "Middleman",
version = "1.0",
description = "A simple tcp to serial middle man",
executables = [Executable("middleman.py")]
)
<file_sep>/javasrc/src/org/ubc/de2vtt/comm/receivers/SingleReceiver.java
package org.ubc.de2vtt.comm.receivers;
import org.ubc.de2vtt.comm.ReceiveTask;
public class SingleReceiver extends Receiver {
public SingleReceiver(ReceiveTask task) {
super();
timer.schedule(task, 100);
}
}
<file_sep>/csrc/map.h
#ifndef __MAP_H__
#define __MAP_H__
#include "vga.h"
#include "bmp.h"
#include "token.h"
extern BMP map;
void receiveMap();
void partialMapReDraw(int x, int y, int width, int height);
void drawMap();
#endif
<file_sep>/javasrc/src/org/ubc/de2vtt/MainActivity.java
package org.ubc.de2vtt;
import java.util.Locale;
import org.ubc.de2vtt.bulletin.Bulletin;
import org.ubc.de2vtt.bulletin.BulletinManager;
import org.ubc.de2vtt.comm.Command;
import org.ubc.de2vtt.comm.Mailbox;
import org.ubc.de2vtt.comm.Message;
import org.ubc.de2vtt.comm.Messenger;
import org.ubc.de2vtt.comm.Received;
import org.ubc.de2vtt.comm.sendables.SendableNull;
import org.ubc.de2vtt.fragments.*;
import org.ubc.de2vtt.notifications.notifications;
import org.ubc.de2vtt.token.Token;
import org.ubc.de2vtt.token.TokenManager;
import org.ubc.de2vtt.users.DMManager;
import org.ubc.de2vtt.users.UserManager;
import android.app.Activity;
import android.app.FragmentManager;
import android.content.Context;
import android.content.Intent;
import android.content.res.Configuration;
import android.graphics.Bitmap;
import android.os.Bundle;
import android.os.StrictMode;
import android.support.v4.app.ActionBarDrawerToggle;
import android.support.v4.widget.DrawerLayout;
import android.util.Log;
import android.util.SparseArray;
import android.view.Menu;
import android.view.MenuItem;
import android.view.View;
import android.view.inputmethod.InputMethodManager;
import android.widget.AdapterView;
import android.widget.ArrayAdapter;
import android.widget.ListView;
import android.widget.Toast;
public class MainActivity extends Activity {
private static final String TAG = MainActivity.class.getSimpleName();
private static Context mContext;
private String[] mDrawerItems;
private DrawerLayout mDrawerLayout;
private ListView mDrawerList;
private ActionBarDrawerToggle mDrawerToggle;
private String mTitle;
private WINGFragment activeFragment;
@Override
protected void onCreate(Bundle savedInstanceState) {
super.onCreate(savedInstanceState);
mTitle = "WING";
// This call will result in better error messages if you
// try to do things in the wrong thread.
// From tutorial 2
StrictMode.setThreadPolicy(new StrictMode.ThreadPolicy.Builder()
.detectDiskReads().detectDiskWrites().detectNetwork()
.penaltyLog().build());
setContentView(R.layout.activity_main);
// For later static access
mContext = getApplicationContext();
setupDrawerList();
setupDrawerToggle();
// Set the drawer toggle as the DrawerListener
mDrawerLayout.setDrawerListener(mDrawerToggle);
getActionBar().setDisplayHomeAsUpEnabled(true);
getActionBar().setHomeButtonEnabled(true);
Mailbox.getSharedInstance(this);
// Attempt to connect
Messenger messenger = Messenger.GetSharedInstance();
Message msg = new Message(Command.GET_DM_ID,
SendableNull.GetSharedInstance());
messenger.send(msg);
}
private void setupDrawerList() {
mDrawerItems = getResources().getStringArray(R.array.app_drawer_array);
mDrawerLayout = (DrawerLayout) findViewById(R.id.linear_layout);
mDrawerList = (ListView) findViewById(R.id.left_drawer);
mDrawerList.setAdapter(new ArrayAdapter<String>(this,
R.layout.drawer_list_item, mDrawerItems));
mDrawerList.setOnItemClickListener(new DrawerItemClickListener());
}
private void setupDrawerToggle() {
mDrawerToggle = new ActionBarDrawerToggle(this, mDrawerLayout,
R.drawable.ic_drawer, R.string.drawer_open,
R.string.drawer_close) {
/** Called when a drawer has settled in a completely closed state. */
public void onDrawerClosed(View view) {
getActionBar().setTitle(mTitle);
invalidateOptionsMenu(); // creates call to
// onPrepareOptionsMenu()
}
/** Called when a drawer has settled in a completely open state. */
public void onDrawerOpened(View drawerView) {
getActionBar().setTitle(R.string.app_name);
invalidateOptionsMenu(); // creates call to
// onPrepareOptionsMenu()
// close keyboard
InputMethodManager inputManager = (InputMethodManager) getSystemService(Context.INPUT_METHOD_SERVICE);
inputManager.hideSoftInputFromWindow(getCurrentFocus()
.getWindowToken(), InputMethodManager.HIDE_NOT_ALWAYS);
}
};
}
@Override
public boolean onCreateOptionsMenu(Menu menu) {
// Inflate the menu; this adds items to the action bar if it is present.
// getMenuInflater().inflate(R.menu.main, menu);
// return true;
return false;
}
@Override
protected void onPostCreate(Bundle savedInstanceState) {
super.onPostCreate(savedInstanceState);
Intent current = getIntent();
Bundle b = current.getExtras();
if (b != null) {
int fragment = b.getInt("fragment_sel");
switchFragment(fragment);
} else {
switchFragment(0);
}
// Sync the toggle state after onRestoreInstanceState has occurred.
mDrawerToggle.syncState();
Intent intent = getIntent();
try {
String action = intent.getAction();
Log.v(TAG, "OnCreate: intent action " + action);
if (action != null) {
action = action.toUpperCase(Locale.CANADA);
if (action.equalsIgnoreCase(mContext.getResources().getString(
R.string.in_msg_notification))) {
notifications.removeNotify(
mContext,
mContext.getResources().getString(
R.string.in_msg_notification));
switchFragment(WINGFragment.FragDrawerId.BulletinFragDrawerId
.ordinal()); // hard coded to switch to bulletin
// board!
}
} else {
Log.v(TAG, "Oncreate: Intent was null");
}
} catch (Exception e) {
Log.e(TAG, "Problem consuming action from intent", e);
}
}
@Override
public void onConfigurationChanged(Configuration newConfig) {
super.onConfigurationChanged(newConfig);
mDrawerToggle.onConfigurationChanged(newConfig);
}
@Override
public boolean onOptionsItemSelected(MenuItem item) {
// Pass the event to ActionBarDrawerToggle, if it returns
// true, then it has handled the app icon touch event
if (mDrawerToggle.onOptionsItemSelected(item)) {
return true;
}
// Handle your other action bar items...
return super.onOptionsItemSelected(item);
}
@Override
public void onPause() {
super.onPause();
}
private class DrawerItemClickListener implements
ListView.OnItemClickListener {
@Override
public void onItemClick(AdapterView<?> parent, View view, int position,
long id) {
switchFragment(position);
}
}
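// Replace the content frame with the fragment matching the drawer selection and
// update the drawer highlight and action bar title.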
public void switchFragment(int position) {
Log.v(TAG, "Switching fragments.");
WINGFragment fragment = new PlaceholderFragment();
Bundle args = new Bundle();
fragment.setArguments(args);
switch (WINGFragment.FragDrawerId.values()[position]) {
case TableTopFragDrawerId:
fragment = new TableTopFragment();
break;
case ManageTokenFragDrawerId:
fragment = new TokenManagerFragment();
break;
case GameConfigFragDrawerId:
fragment = new GameConfigFragment();
break;
case SendImageFragDrawerId:
fragment = new SendImageFragment();
break;
case PassMessageFragDrawerId:
fragment = new PassMessageFragment();
break;
case BulletinFragDrawerId:
fragment = new BulletinFragment();
break;
case DieRollFragDrawerId:
fragment = new DieRollFragment();
break;
case ConnectionFragDrawerId:
fragment = new ConnectionFragment();
break;
}
activeFragment = fragment;
FragmentManager fragmentManager = getFragmentManager();
fragmentManager.beginTransaction()
.replace(R.id.content_frame, fragment).commit();
mDrawerList.setItemChecked(position, true);
setTitle(mDrawerItems[position]);
mTitle = mDrawerItems[position];
mDrawerLayout.closeDrawer(mDrawerList);
}
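// Central dispatch for data arriving from the DE2: update the matching manager
// (tokens, bulletins, users, DM, map) and forward the message to the active
// fragment when it is one that displays this kind of data.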
public synchronized void onReceiveData(Received rcv) {
Log.v(TAG, "Received data.");
Token t;
TokenManager tm;
BulletinManager bm;
DMManager dmm;
Bulletin b;
if (rcv == null) {
return;
}
switch (rcv.getCommand()) {
case OUTPUT_TOKEN_INFO:
Log.v(TAG, "Moving other player's token.");
tm = TokenManager.getSharedInstance();
t = new Token(rcv);
tm.move(t);
Log.v(TAG, "x: " + t.getX() + " y: " + t.getY());
if (activeFragment instanceof TableTopFragment) {
// signal fragment that a token moved
activeFragment.passReceived(rcv);
}
break;
case SEND_TOKEN:
Log.v(TAG, "Receiving token.");
tm = TokenManager.getSharedInstance();
t = new Token(rcv);
tm.add(t);
if (activeFragment instanceof TokenManagerFragment
|| activeFragment instanceof TableTopFragment) {
// signal fragment that there is a new token
activeFragment.passReceived(rcv);
}
break;
case REMOVE_TOKEN:
Log.v(TAG, "Removing token.");
tm = TokenManager.getSharedInstance();
t = new Token(rcv);
tm.remove(t);
if (activeFragment instanceof TableTopFragment) {
activeFragment.passReceived(null);
}
break;
case PASS_MSG:
Log.v(TAG, "Receiving a bulletin.");
bm = BulletinManager.getSharedInstance();
b = new Bulletin(rcv);
bm.add(b);
if (activeFragment instanceof BulletinFragment) {
// Notify of new bulletin
activeFragment.passReceived(rcv);
} else {
notifications.notify(mContext, mContext.getResources()
.getString(R.string.in_msg_notification));
}
break;
case UPDATE_ALIAS:
Log.v(TAG, "Updating Alias List.");
UserManager um = UserManager.getSharedInstance();
dmm = DMManager.getSharedInstance();
um.handleUpdateAlias(rcv);
dmm.updateDMAlias();
break;
case SEND_MAP:
Log.v(TAG, "Receiving a map.");
Bitmap bmp = rcv.DataToBitmap();
TableTopFragment.setMap(bmp);
break;
case GET_DM_ID:
Log.v(TAG, "Updating DM id");
dmm = DMManager.getSharedInstance();
dmm.handleGetDMId(rcv);
if (activeFragment instanceof GameConfigFragment) {
// Notify of new bulletin
activeFragment.passReceived(rcv);
}
break;
default:
// signal active fragment
if (!activeFragment.passReceived(rcv)) {
Log.e(TAG, "Failed to pass message to fragment.");
}
break;
}
}
public boolean acceptCommand(Command cmd) {
// should be based on active fragment
return false;
}
@Override
public void setTitle(CharSequence title) {
getActionBar().setTitle(title);
}
static public Context getAppContext() {
return mContext;
}
}
<file_sep>/csrc/sd_card.c
#include <stdio.h>
#include <stdlib.h>
#include "sd_card.h"
int openSdCard(){
sdDev = alt_up_sd_card_open_dev("/dev/Altera_UP_SD_Card_Avalon_Interface_0");
if(sdDev != NULL && alt_up_sd_card_is_FAT16()){
return 0;
} else {
return -1;
}
}
short int openFile(char *fileName) {
return alt_up_sd_card_fopen(fileName, 0);
}
int closeFile(short int fh) {
return alt_up_sd_card_fclose(fh);
}
unsigned char readByte(short int fh) {
return (unsigned char) alt_up_sd_card_read(fh);
}
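//purpose: read two bytes from the SD card and assemble them little-endian (low byte first), returning -1 if either read fails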
short int readWord(short int fh) {
short int byte1, byte2;
byte1 = alt_up_sd_card_read(fh);
byte2 = alt_up_sd_card_read(fh);
if (byte1 == -1 || byte2 == -1)
return -1;
return ((unsigned short int)byte2 << 8) | ((unsigned short int)byte1 & 0x00FF);
}
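//purpose: read four bytes from the SD card and assemble them little-endian, returning -1 if any read fails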
int readDWord(short int fh) {
short int byte1, byte2, byte3, byte4;
byte1 = alt_up_sd_card_read(fh);
byte2 = alt_up_sd_card_read(fh);
byte3 = alt_up_sd_card_read(fh);
byte4 = alt_up_sd_card_read(fh);
if (byte1 == -1 || byte2 == -1 || byte3 == -1 || byte4 == -1)
return -1;
return ((unsigned short int)byte4 << 24) | ((unsigned short int)byte3 << 16) | ((unsigned short int)byte2 << 8) | (unsigned short int)byte1;
}
unsigned int getWavFileLength(char *fileName) { // TBD: This function MUST be fixed - it currently returns an incorrect value, because -1 values don't always mean the file is done.
unsigned int fileLength = 0;
short int fileHandle = openFile(fileName);
if (fileHandle == -1) {
printf("Error occurred, unable to open file in 'getFileLength' with name: %s", fileName);
}
readPastWavHeader(fileHandle); // TBD: to get the true wav file length, the header possibly should not be skipped
short int wordRead = readWord(fileHandle);
//unsigned char firstByte = 0x0000FFFF | wordRead;
//unsigned char secondByte = 0x0000FFFF | (wordRead >> 8);
while ((short int)wordRead >= 0) {
//printf("%c", (unsigned char)byteRead);
fileLength += 2;
wordRead = readWord(fileHandle);
}
if ((short int)wordRead <= -1) {
printf("Error reading bytes from %s\n", fileName);
}
closeFile(fileHandle);
return fileLength;
}
void readPastWavHeader(short int handle) {
int i;
for (i = 0; i < 44; i++) {
readByte(handle);
}
}
<file_sep>/csrc/token.c
#include "token.h"
token tokenArr[MAX_TOKENS];
int loadedTokenCnt = 0;
//purpose: initialization code
void initTokens(void) {
int i;
for(i = 0; i < MAX_TOKENS; i++) {
if(tokenArr[i].bmp.color) free(tokenArr[i].bmp.color);
tokenArr[i].ownerID = 0;
tokenArr[i].tokenID = 0;
tokenArr[i].x = 0;
tokenArr[i].y = 0;
}
loadedTokenCnt = 0;
}
//purpose: return the address of a preallocated token in the token array of MAX_TOKENS size
token * allocateToken(void) {
int i;
for(i = 0; i < MAX_TOKENS; i++) {
if(tokenArr[i].tokenID == 0) {
tokenArr[i].tokenID = i + 1;
loadedTokenCnt++;
return &(tokenArr[i]);
}
}
return NULL;
}
//purpose: given a msg with the command "removeToken", perform all actions necessary to remove that users token
// ie clean up it's BMP
void removeTokenMsg(msg * rmvMsg){
unsigned int ownerID = (unsigned int)(*(rmvMsg->buffer));
removeTokenFromUser(ownerID);
}
//purpose: remove all tokens from one user (cleanup BMPs)
void removeTokenFromUser(unsigned int ownerID) {
int i;
for(i = 0; i < MAX_TOKENS; i++) {
if(tokenArr[i].ownerID == ownerID) {
partialMapReDraw(tokenArr[i].x, tokenArr[i].y, tokenArr[i].bmp.infoheader.width, tokenArr[i].bmp.infoheader.height);
tokenArr[i].tokenID = 0;
tokenArr[i].ownerID = 0;
tokenArr[i].x = 0;
tokenArr[i].y = 0;
if(tokenArr[i].bmp.color) free(tokenArr[i].bmp.color);
loadedTokenCnt--;
}
}
}
//purpose: remove one token from WING (cleanup the BMP)
void removeToken(unsigned int tokenID) {
int i;
for(i = 0; i < MAX_TOKENS; i++) {
if(tokenArr[i].tokenID == tokenID) {
partialMapReDraw(tokenArr[i].x, tokenArr[i].y, tokenArr[i].bmp.infoheader.width, tokenArr[i].bmp.infoheader.height);
tokenArr[i].tokenID = 0;
tokenArr[i].ownerID = 0;
tokenArr[i].x = 0;
tokenArr[i].y = 0;
if(tokenArr[i].bmp.color) free(tokenArr[i].bmp.color);
loadedTokenCnt--;
}
}
}
//draw tokens over map
void drawAllTokens(void) {
int i;
for (i = 0; i < MAX_TOKENS; i++) {
if(tokenArr[i].tokenID) {
drawBmp(&tokenArr[i].bmp, tokenArr[i].x, tokenArr[i].y);
}
}
}
//ensure moved tokens that were overlapped get redrawn
void redrawOverlappedTokens(int tokenIndex) {
int i;
for (i = 0; i < MAX_TOKENS; i++) {
if (i != tokenIndex) {
if((int) tokenArr[i].x >= ((int) tokenArr[tokenIndex].x - 20) && tokenArr[i].x <= (tokenArr[tokenIndex].x + 20)) {
if((int) tokenArr[i].y >= ((int) tokenArr[tokenIndex].y - 20) && tokenArr[i].y <= (tokenArr[tokenIndex].y + 20)) {
drawBmp(&tokenArr[i].bmp, tokenArr[i].x, tokenArr[i].y);
}
}
}
}
}
//update token information given a moveToken message
void handleMoveTokenMsg(msg * moveMsg){
unsigned int tokenID = (unsigned int)(*(moveMsg->buffer));
unsigned int x1 = (unsigned int)(*(moveMsg->buffer + 1));
unsigned int x0 = (unsigned int)(*(moveMsg->buffer + 2));
unsigned int y1 = (unsigned int)(*(moveMsg->buffer + 3));
unsigned int y0 = (unsigned int)(*(moveMsg->buffer + 4));
unsigned int x = x1*255 + x0;
unsigned int y = y1*255 + y0;
moveToken(tokenID, x, y);
}
//actually update the token information and redraw the BMP
void moveToken(unsigned int tokenID, int x, int y) {
int i;
for (i = 0; i < MAX_TOKENS; i++) {
if(tokenArr[i].tokenID == tokenID) {
partialMapReDraw(tokenArr[i].x, tokenArr[i].y, tokenArr[i].bmp.infoheader.width, tokenArr[i].bmp.infoheader.height);
redrawOverlappedTokens(i);
tokenArr[i].x = x;
tokenArr[i].y = y;
drawBmp(&tokenArr[i].bmp, tokenArr[i].x, tokenArr[i].y);
break;
}
}
}
//generate the msg struct required to respond to a sendMsg command (given the communication specs of WING)
msg * createSendTokenResponsesMsg(msg * initialMsg, token * curTok) {
int i;
msg *responseMsg = malloc(sizeof(msg));
responseMsg->androidID = initialMsg->androidID;
responseMsg->cmd = initialMsg->cmd;
responseMsg->len = 5;
responseMsg->buffer = malloc(responseMsg->len * sizeof(char));
responseMsg->buffer[0] = (unsigned char)curTok->tokenID;
printf("tokenID = %d ", curTok->tokenID);
char * x2Char = IntToCharBuf((unsigned int)curTok->x, 2);
char * y2Char = IntToCharBuf((unsigned int)curTok->y, 2);
for(i = 0; i < 2; i++) {
responseMsg->buffer[1+i] = x2Char[i];
}
for(i = 0; i < 2; i++) {
responseMsg->buffer[3+i] = y2Char[i];
}
free(x2Char);
free(y2Char);
return responseMsg;
}
//OUTPUT_TOKEN_INFO from DE2 to Android - used to alert Android users that a token was moved/added/deleted by another player.
void alertUsersOfTokenInfo(msg * currentMsg, int tokenID) {
int i;
msg alertMsg;
alertMsg.androidID = 0;
alertMsg.cmd = (unsigned int)OUTPUT_TOKEN_INFO;
alertMsg.len = 6;
//This maps the token ID, owner ID, and x,y of the token that was moved, to the correct
//buffer location for the message to be sent out.
alertMsg.buffer = malloc(sizeof(char) * 6);
alertMsg.buffer[0] = tokenID; // TokenID
alertMsg.buffer[1] = currentMsg->androidID; // Owner of Token's ID
if((command)currentMsg->cmd == SEND_TOKEN) {
printf("In alertUsersOfTokenInfo, setting x/y to initial vals\n");
alertMsg.buffer[2] = 0; //Initial x,y = 0,0
alertMsg.buffer[3] = 0;
alertMsg.buffer[4] = 0;
alertMsg.buffer[5] = 0;
} else {
printf("In alertUsersOfTokenInfo, setting x/y to moved values");
alertMsg.buffer[2] = currentMsg->buffer[1]; // Token x1
alertMsg.buffer[3] = currentMsg->buffer[2]; // Token x0
alertMsg.buffer[4] = currentMsg->buffer[3]; // Token y1
alertMsg.buffer[5] = currentMsg->buffer[4]; // Token y0
}
for(i = 0; i < NUM_USERS; i++) {
if((currentMsg->androidID != connUserIDs[i]) && (connUserIDs[i] != 0)) {
printf("in alertUsersOfTokenMove - sending to id %d about movement of %d's token\n", connUserIDs[i], currentMsg->androidID);
alertMsg.androidID = connUserIDs[i]; // id of who is to receive the alert.
sendMessage(&alertMsg);
}
}
free(alertMsg.buffer);
}
//send all currently active Token information to a new user
void alertUserOfAllTokens(msg * currentMsg) {
int i;
msg alertMsg;
alertMsg.androidID = currentMsg->androidID; //send back to the user who is joining WING
alertMsg.cmd = (unsigned int)OUTPUT_TOKEN_INFO;
alertMsg.len = 6;
//This maps the token ID, owner ID, and x,y of the token that was moved, to the correct
//buffer location for the message to be sent out.
alertMsg.buffer = malloc(sizeof(char) * 6);
for(i = 0; i < MAX_TOKENS; i++) {
if(tokenArr[i].tokenID != 0) {
alertMsg.buffer[0] = tokenArr[i].tokenID; // TokenID
alertMsg.buffer[1] = tokenArr[i].ownerID; // Owner of Token's ID
alertMsg.buffer[2] = (unsigned char)(tokenArr[i].x / 255); // Token x1
alertMsg.buffer[3] = (unsigned char)(tokenArr[i].x % 255); // Token x0
alertMsg.buffer[4] = (unsigned char)(tokenArr[i].y / 255); // Token y1
alertMsg.buffer[5] = (unsigned char)(tokenArr[i].y % 255); // Token y0
printf("in alertUserOfAllTokens - sending to id %d about token %d\n", alertMsg.androidID, tokenArr[i].tokenID);
sendMessage(&alertMsg);
}
}
free(alertMsg.buffer);
}
//notify all other users that tokens of one user should be removed.
//If tokenIDRemove == -1, then removes all tokens, else removes only tokenID
void removeTokensOfOneUser(msg * currentMsg, int tokenID) {
int i,j;
msg alertMsg;
alertMsg.androidID = 0;
alertMsg.cmd = (unsigned int)REMOVE_TOKEN;
alertMsg.len = 6;
//This maps the token ID, owner ID, and mock x,y of the token that was moved, to the correct
//buffer location for the message to be sent out.
alertMsg.buffer = malloc(sizeof(char) * 6);
alertMsg.buffer[1] = currentMsg->androidID; // Owner of Token's ID
for(i = 0; i < NUM_USERS; i++) {
if((currentMsg->androidID != connUserIDs[i]) && (connUserIDs[i] != 0)) {
printf("in removeTokensOfOneUser - sending to id %d about removal of tokens from %d\n", connUserIDs[i], currentMsg->androidID);
alertMsg.androidID = connUserIDs[i];
//alert user of all of the tokens to be removed.
for(j = 0; j < MAX_TOKENS; j++) {
alertMsg.buffer[0] = tokenArr[j].tokenID;
if(tokenArr[j].ownerID == currentMsg->androidID) {
if(tokenID == -1 || tokenID == tokenArr[j].tokenID) {
printf("removing %d's token %d\n", tokenArr[j].ownerID, tokenArr[j].tokenID);
sendMessage(&alertMsg);
}
}
}
}
}
free(alertMsg.buffer);
}
<file_sep>/javasrc/src/org/ubc/de2vtt/fragments/PassMessageFragment.java
package org.ubc.de2vtt.fragments;
import java.util.ArrayList;
import java.util.List;
import org.ubc.de2vtt.R;
import org.ubc.de2vtt.comm.Command;
import org.ubc.de2vtt.comm.Messenger;
import org.ubc.de2vtt.comm.Received;
import org.ubc.de2vtt.users.User;
import org.ubc.de2vtt.users.UserManager;
import android.app.Activity;
import android.os.Bundle;
import android.view.LayoutInflater;
import android.view.View;
import android.view.View.OnClickListener;
import android.view.ViewGroup;
import android.widget.AdapterView;
import android.widget.AdapterView.OnItemSelectedListener;
import android.widget.ArrayAdapter;
import android.widget.Button;
import android.widget.EditText;
import android.widget.Spinner;
//Fragment that allows users to write private messages, choose the recipient via a spinner, and send them off.
public class PassMessageFragment extends WINGFragment {
private static final String TAG = PassMessageFragment.class.getSimpleName();
protected View mParentView;
private Activity mActivity;
private Messenger mMessenger = Messenger.GetSharedInstance();
private User mDestination;
public View onCreateView(LayoutInflater inflater, ViewGroup container,
Bundle savedInstanceState) {
mParentView = inflater.inflate(R.layout.fragment_passmessage, container, false);
setupSpinner();
setupOnClickListeners();
updateButtonState();
mActivity = this.getActivity();
setAcceptedCommands(Command.PASS_MSG);
return mParentView;
}
@Override
public void onPause() {
super.onPause();
}
private void setupSpinner() {
Spinner rcvrSpinner = (Spinner) mParentView.findViewById(R.id.rcvrSpinner);
// Obtain list of currently connected phone IDs (names), as well as title for main screen. TBD - add more
List<String> rcvrIDs = new ArrayList<String>();
UserManager UM = UserManager.getSharedInstance();
for(int i = 0; i < UM.count(); i++) {
rcvrIDs.add(UM.getAtIndex(i).getAlias() + " " + UM.getAtIndex(i).getID());
}
// Create an ArrayAdapter using the string array and a default spinner layout
ArrayAdapter<String> adapter = new ArrayAdapter<String>
(this.getActivity(), android.R.layout.simple_spinner_item, rcvrIDs);
// Specify the layout to use when the list of choices appears
adapter.setDropDownViewResource(android.R.layout.simple_spinner_dropdown_item);
// Apply the adapter to the spinner
rcvrSpinner.setAdapter(adapter);
rcvrSpinner.setOnItemSelectedListener(new OnItemSelectedListener() {
@Override
public void onItemSelected(AdapterView<?> parent, View v,
int position, long arg3) {
UserManager um = UserManager.getSharedInstance();
mDestination = um.getAtIndex(position);
}
@Override
public void onNothingSelected(AdapterView<?> arg0) {
}
});
}
private void setupOnClickListeners() {
Button sendTokBtn = (Button) mParentView.findViewById(R.id.btnSendMessage);
sendTokBtn.setOnClickListener(new OnClickListener() {
@Override
public void onClick(View v) {
passMsg();
}
});
}
private void updateButtonState() {
boolean canSend = Messenger.readyToSend();
Button sendMsgBtn = (Button) mParentView.findViewById(R.id.btnSendMessage);
sendMsgBtn.setEnabled(canSend);
}
public void passMsg() {
EditText et = (EditText)mParentView.findViewById(R.id.sendMsg);
Spinner sp = (Spinner)mParentView.findViewById(R.id.rcvrSpinner);
String msg = "\0";
msg += et.getText().toString() + '\0';
byte[] strBytes = msg.getBytes();
strBytes[0] = (byte)mDestination.getID();
msg = new String(strBytes);
mMessenger.sendStringMessage(msg, Command.PASS_MSG);
}
@Override
public boolean passReceived(Received r) {
mActivity.runOnUiThread(new Runnable() {
public void run() {
updateButtonState();
setupSpinner();
}
});
return false;
}
}
<file_sep>/javasrc/src/org/ubc/de2vtt/comm/sendables/SendableMove.java
package org.ubc.de2vtt.comm.sendables;
import java.nio.ByteBuffer;
public class SendableMove implements Sendable {
private int tokenID;
private int x,y;
public SendableMove(int tokenID, int x, int y) {
this.tokenID = tokenID;
this.x = x;
this.y = y;
}
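// Pack the move as 5 bytes: the token ID, then x and y as 2-byte big-endian
// values (the low 16 bits of each int).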
@Override
public byte[] ToByteArray() {
byte[] data = new byte[5];
data[0] = (byte) tokenID;
byte xBuf[] = ByteBuffer.allocate(4).putInt(x).array();
System.arraycopy(xBuf, 2, data, 1, 2);
byte yBuf[] = ByteBuffer.allocate(4).putInt(y).array();
System.arraycopy(yBuf, 2, data, 3, 2);
return data;
}
}
<file_sep>/csrc/input.c
#include "input.h"
msg interMsg;
extern FILE* uart;
//purpose: handle key input from the DE2 - this can be set up to call specific functions when keys are
//pressed, released, or held down.
void handleKeyInput(void){
static char keyInput;
static short int edgeDetect0 = 0;
static short int edgeDetect1 = 0;
static short int edgeDetect2 = 0;
static short int edgeDetect3 = 0;
keyInput = IORD_8DIRECT(KEYS_BASE, 0);
char key0 = keyInput & 0x01;
char key1 = keyInput & 0x02;
char key2 = keyInput & 0x04;
char key3 = keyInput & 0x08;
//functionality for keys being held
if(key1) {
} else if (key2) {
} else {
}
//functionality for keys being pressed.
if (!key0 && (edgeDetect0 == 0)) {
edgeDetect0 = 1;
} else if (key0 && (edgeDetect0 == 1)) {
edgeDetect0 = 0;
if(interMsg.buffer != NULL) {
free(interMsg.buffer);
}
//interMsg = *getMessage();
}
if (!key1 && (edgeDetect1 == 0)) {
edgeDetect1 = 1;
} else if (key1 && (edgeDetect1 == 1)) {
edgeDetect1 = 0;
//Temporary Hard coded stuff:
// interMsg.androidID = 2;
// interMsg.len = 5;
// interMsg.buffer = "hello";
if(interMsg.buffer != NULL) {
sendMessage(&interMsg);
}
}
if (!key2 && (edgeDetect2 == 0)) {
edgeDetect2 = 1;
} else if (key2 && (edgeDetect2 == 1)) {
edgeDetect2 = 0;
//free(interMsg.buffer);
if(fgetc(uart) == EOF) {
printf("reading EOF with key2.\n");
} else {
printf("no EOF.\n");
}
}
if (!key3 && (edgeDetect3 == 0)) {
edgeDetect3 = 1;
} else if (key3 && (edgeDetect3 == 1)) {
edgeDetect3 = 0;
printf("printing in key3 to ser\n");
fputc((unsigned char) 1, uart);
}
}
//purpose: handle switch input from the DE2 - this can be set up to call specific functions when keys are switched,
//or held in position
void handleSwitchInput(void){
static char SWInput;
static short int prevSwitchState = 0;
SWInput = IORD_8DIRECT(SWITCHES_BASE, 0);
if ((SWInput & 0x80) != 0) {
if(prevSwitchState == 0){
}
prevSwitchState = 1;
} else {
if(prevSwitchState == 1){
}
prevSwitchState = 0;
}
}
<file_sep>/csrc/vga.c
#include "vga.h"
char dmID;
void initVga() {
pixel_buffer = alt_up_pixel_buffer_dma_open_dev("/dev/pixel_buffer_dma");
alt_up_pixel_buffer_dma_change_back_buffer_address(pixel_buffer, PIXEL_BUFFER_BASE);
alt_up_pixel_buffer_dma_swap_buffers(pixel_buffer);
while(alt_up_pixel_buffer_dma_check_swap_buffers_status(pixel_buffer));
char_buffer = alt_up_char_buffer_open_dev("/dev/char_drawer");
alt_up_char_buffer_init(char_buffer);
clearScreen();
}
void clearScreen() {
alt_up_char_buffer_clear(char_buffer);
alt_up_pixel_buffer_dma_clear_screen(pixel_buffer, 0);
}
void drawPixel(int x, int y, int color) {
alt_up_pixel_buffer_dma_draw(pixel_buffer, color, x, y);
}
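//purpose: write a 16-bit color directly into the pixel buffer memory, skipping the driver call;
// the word address is built from the masked and shifted x/y coordinates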
void drawPixelFast(unsigned int x, unsigned int y, unsigned int color) {
unsigned int addr = 0;
addr |= ((x & pixel_buffer->x_coord_mask) << pixel_buffer->x_coord_offset);
addr |= ((y & pixel_buffer->y_coord_mask) << pixel_buffer->y_coord_offset);
IOWR_16DIRECT(pixel_buffer->buffer_start_address, addr, color);
}
void drawLine(int x0, int y0, int x1, int y1, int color) {
if (y0 == y1) {
alt_up_pixel_buffer_dma_draw_hline(pixel_buffer, x0, x1, y0, color, 0);
} else if (x0 == x1) {
alt_up_pixel_buffer_dma_draw_vline(pixel_buffer, x0, y0, y1, color, 0);
} else {
alt_up_pixel_buffer_dma_draw_line(pixel_buffer, x0, y0, x1, y1, color, 0);
}
}
void printLine(char *str, int x, int y) {
alt_up_char_buffer_string(char_buffer, str, x, y);
}
void drawBox(int x0, int y0, int x1, int y1, int color) {
alt_up_pixel_buffer_dma_draw_box(pixel_buffer, x0, y0, x1, y1, color, 0);
}
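//purpose: convert a 24-bit RGB888 color to 16-bit RGB565 by keeping the top 5/6/5 bits of each channel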
int convert24BitRgbTo16(unsigned int rgb24bit) {
unsigned int R8bit = (rgb24bit >> 16) & 0xFF;
unsigned int G8bit = (rgb24bit >> 8) & 0xFF;
unsigned int B8bit = (rgb24bit) & 0xFF;
unsigned int R5bit = R8bit >> 3 & 0x1F;
unsigned int G6bit = G8bit >> 2 & 0x3F;
unsigned int B5bit = B8bit >> 3 & 0x1F;
return (R5bit << 11 | G6bit << 5 | B5bit);
}
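//purpose: list the connected user aliases and IDs in the top-right corner of the character buffer,
// marking the DM with a '*'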
void drawUserIDs(void) {
int i, xPos;
char cArr[2] = {'-', '\0'};
char cDMArr[2] = {'*', '\0'};
for(i = 0; i < 5; i++) {
//TBD - make a "constants" h file
if (dmID && (dmID == connUserIDs[i]))
{
xPos = (SCREEN_CHAR_WIDTH - 6 - strlen(connUserAlias[i]));
alt_up_char_buffer_string(char_buffer, cDMArr , xPos, i + 1);
}
xPos = (SCREEN_CHAR_WIDTH - 4 - strlen(connUserAlias[i]));
alt_up_char_buffer_string(char_buffer, connUserAlias[i] , xPos, i + 1);
xPos = (SCREEN_CHAR_WIDTH - 2);
cArr[0] = (connUserIDs[i] % 10) + '0'; // unique IDs for 0-9
alt_up_char_buffer_string(char_buffer, cArr , xPos, i + 1);
}
char nameStr[5] = {'N', 'a', 'm', 'e', '\0'};
char IDStr[3] = {'I', 'D', '\0'};
alt_up_char_buffer_string(char_buffer, nameStr , SCREEN_CHAR_WIDTH - strlen(nameStr) - strlen(IDStr) - 2, 0);
alt_up_char_buffer_string(char_buffer, IDStr , SCREEN_CHAR_WIDTH - strlen(IDStr) - 1, 0);
}
<file_sep>/csrc/audio.c
#include "audio.h"
//Note: This file is NOT USED in WING currently - it is a stub kept in case audio development is resumed in the future.
// It is left over from EECE 381 project 1.
#ifdef ALT_ENHANCED_INTERRUPT_API_PRESENT
static void playSoundISR(void* isr_context);
#else
static void playSoundISR(void* isr_context, alt_u32 id);
#endif
const char *CONFIG_NAME = "/dev/audio_and_video_config_0";
const char *AUDIO_NAME = "/dev/audio_0";
alt_up_audio_dev *audio = NULL;
alt_up_av_config_dev *config = NULL;
int DEBUG = 0;
volatile short int status;
volatile short int loaded;
volatile unsigned int *playCursor;
unsigned int audioFileWordLength;
unsigned int playedWords;
struct audioInfo epicSong;
const char *epicSongName = "mEpic.wav";
struct audioInfo laser;
struct audioInfo playerDeath;
struct audioInfo sharkDeath;
struct audioInfo theme;
static unsigned int playBuffer[PLAYBUFFER_LEN];
struct audioInfo *sounds[5];
unsigned short int numSounds = 5;
volatile int somethingForIrq;
int loadMoreEpicMusic(int fullSongIndex, int loadByteCount, int startIndex);
void setupAudio()
{
bool error = false;
config = (alt_up_av_config_dev *)alt_up_av_config_open_dev(CONFIG_NAME);
if (config == NULL) {
printf("Error: audio video config could not be opened.\n");
error = true;
}
while (!alt_up_av_config_read_ready(config)) {
}
audio = (alt_up_audio_dev *)alt_up_audio_open_dev(AUDIO_NAME);
if (config == NULL) {
printf("Error: audio codec could not be opened.\n");
error = true;
} else if (DEBUG == 1) {
printf("Successfully opened audio codec.\n");
}
int interruptStatus = setupAudioInterrupt(audio, somethingForIrq);
if (interruptStatus < 0) {
printf("Error: audio interrupt could not be setup.\n");
error = true;
} else if (DEBUG == 1) {
printf("Successfully setup audio interrupts.\n");
}
initializeEpicSong();
sounds[4] = &epicSong;
/*loadLaser();
loadPlayerDeath();
loadSharkDeath();
loadTheme();
printf("finished loading sound buffers.\n");
sounds[0] = &laser;
sounds[1] = &sharkDeath;
sounds[2] = &playerDeath;
sounds[3] = &theme; */
status = NONE;
if (DEBUG == 1 && error == false) {
printf("Successfully setup sound.\n");
}
}
int setupAudioInterrupt(alt_up_audio_dev *audio, volatile int somethingForIrq)
{
// Need to disable both audio interrupts before setting them up
// otherwise you get stuck in them when they are setup
alt_up_audio_disable_read_interrupt(audio);
alt_up_audio_disable_write_interrupt(audio);
void *irqInt = (void*)&somethingForIrq;
#ifdef ALT_ENHANCED_INTERRUPT_API_PRESENT
return alt_ic_isr_register(AUDIO_0_IRQ_INTERRUPT_CONTROLLER_ID, AUDIO_0_IRQ, playSoundISR, irqInt, 0x0);
#else
return alt_irq_register(AUDIO_0_IRQ, irqInt, playSoundISR);
#endif
}
void initializeEpicSong() {
epicSong.bufferLength = SONG_BUFFER_LEN;
epicSong.mainBuffer = malloc(sizeof(unsigned int) * SONG_BUFFER_LEN);
epicSong.volumeBuffer = malloc(sizeof(unsigned int) * SONG_BUFFER_LEN);
loadMoreEpicMusic(0, (epicSong.bufferLength / 2), 0);
}
void loadLaser() {
laser.bufferLength = 38384;
readWavFile("laserii.wav", &laser);
}
void loadPlayerDeath() {
playerDeath.bufferLength = 0x0000DAFF / 2;
readWavFile("pdie.wav", &playerDeath);
}
void loadSharkDeath() {
sharkDeath.bufferLength = 0x0000DAFF / 2;
readWavFile("sdie.wav", &sharkDeath);
}
void loadTheme() {
theme.bufferLength = 0x00063E00 / 2;
readWavFile("theme.wav", &theme);
}
void playAudioMono(int length) {
if (DEBUG == 1) {
printf("Playing audio.\n");
}
alt_up_audio_write_fifo(audio, (unsigned int *)playBuffer, length, ALT_UP_AUDIO_LEFT);
alt_up_audio_write_fifo(audio, (unsigned int *)playBuffer, length, ALT_UP_AUDIO_RIGHT);
}
void playAudio(unsigned int *leftBuffer, int leftLength, unsigned int *rightBuffer, int rightLength) {
int leftWritten = alt_up_audio_write_fifo(audio, leftBuffer, leftLength, ALT_UP_AUDIO_LEFT);
if (DEBUG == 1) {
printf("Wrote %d to left audio FIFO. with value\n", leftWritten);
}
int rightWritten = alt_up_audio_write_fifo(audio, rightBuffer, rightLength, ALT_UP_AUDIO_RIGHT);
if (DEBUG == 1) {
printf("Wrote %d to right audio FIFO.\n", rightWritten);
}
}
void readWavFile(char *wavFileName, struct audioInfo *info) {
info->mainBuffer = malloc(sizeof(unsigned int) * info->bufferLength);
info->volumeBuffer = malloc(sizeof(unsigned int) * info->bufferLength);
if (info->mainBuffer == NULL || info->volumeBuffer == NULL) {
printf("Error: insufficient memory to load audio file into memory.\n");
}
short int fileHandle = openFile(wavFileName);
if (fileHandle == -1) {
printf("Error opening %s\n", wavFileName);
return;
}
readPastWavHeader(fileHandle);
unsigned int i;
unsigned int word;
for (i = 0; i < info->bufferLength; i++) {
word = readWord(fileHandle);
info->mainBuffer[i] = word;
info->volumeBuffer[i] = word;
//printf("0x%x ", (int)word > 0 ? word : -1 * word);
}
//printf("reached EOF\n");
closeFile(fileHandle);
return;
}
void playSound(struct audioInfo *info) {
//printf("Playing sound with info at 0x%x.\n", info);
info->active = true;
info->playedWords = 0;
info->playCursor = info->volumeBuffer;
alt_up_audio_enable_write_interrupt(audio);
}
void playLaser(void) {
playSound(&laser);
}
void playPlayerDeath(void) {
playSound(&playerDeath);
}
void playSharkDeath(void) {
playSound(&sharkDeath);
}
void playTheme(void) {
playSound(&theme);
status = THEME;
if (theme.active != true) {
printf("Error: theme was not set to active in play theme.\n");
}
}
void stopTheme(void) {
status = NONE;
}
void playEpicMusic(void){
static int fullSongIndex = 500000; //done in the initialization -- ugly constant, sorry. it's half of 100000
static int loadedFirstHalf = 1;
static int loadedSecondHalf = 0;
int loadCount;
if(loadedFirstHalf == 0 ) { //&& (epicSong.playedWords > (epicSong.bufferLength / 2))) {
loadCount = loadMoreEpicMusic(fullSongIndex, (epicSong.bufferLength / 2), 0);
if(loadCount != -1) {
fullSongIndex += loadCount;
loadedFirstHalf = 1;
loadedSecondHalf = 0;
} else {
fullSongIndex = 0;
}
} else if (loadedSecondHalf == 0 ) { // && (epicSong.playedWords < (epicSong.bufferLength / 2))) {
loadCount = loadMoreEpicMusic(fullSongIndex, (epicSong.bufferLength / 2), (epicSong.bufferLength / 2));
if(loadCount != -1) {
fullSongIndex += loadCount;
loadedFirstHalf = 0;
loadedSecondHalf = 1;
} else {
fullSongIndex = 0;
}
}
playSound(&epicSong);
status = EPICMUSIC;
}
int loadMoreEpicMusic(int fullSongIndex, int loadByteCount, int startIndex) {
static short int fileHandle;
static int songMax = SONG_MAX;
if(fullSongIndex == 0) {
fileHandle = openFile(epicSongName);
} else if(fullSongIndex >= songMax) {
closeFile(fileHandle);
return -1;
}
if (fileHandle == -1) {
printf("Error opening %s\n", epicSongName);
return -1;
}
readPastWavHeader(fileHandle);
unsigned int i;
unsigned int word;
for (i = startIndex; i < startIndex + loadByteCount; i++) {
word = readWord(fileHandle);
epicSong.mainBuffer[i] = word;
epicSong.volumeBuffer[i] = word;
//printf("0x%x ", (int)word > 0 ? word : -1 * word);
}
//printf("reached EOF\n");
return loadByteCount;
}
void clearPlayBuffer(void) {
int i;
for (i = 0; i < PLAYBUFFER_LEN; i++) {
playBuffer[i] = 0;
}
}
void addToPlayBuffer(unsigned int *buffer, unsigned int len) {
int i;
for (i = 0; i < len; i++) {
playBuffer[i] += buffer[i];
}
}
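/*
 * Note (added): addChunkToPlayBuffer() mixes up to `free` words of one sound into
 * the shared playBuffer by summation and advances that sound's playCursor. When
 * the remaining data is shorter than `free`, the sound is either rewound (the
 * looping theme) or marked inactive, as handled below.
 */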
void addChunkToPlayBuffer(struct audioInfo *info, unsigned int free) {
unsigned int len, i;
unsigned int end = (unsigned)(info->volumeBuffer) + (2 * info->bufferLength);
if (((int)info->playCursor + free >= end) ||
(info->playedWords + free) >= info->bufferLength) {
// Last chunk to play
len = end - (int)info->playCursor;
if (status == THEME && info == &theme) {
info->playCursor = info->volumeBuffer;
info->playedWords = 0;
} else {
info->active = false;
}
} else {
len = free;
}
len = len > free ? free : len;
info->playedWords += len;
for (i = 0; i < len; i++) {
playBuffer[i] += info->playCursor[i];
}
info->playCursor += len;
}
#ifdef ALT_ENHANCED_INTERRUPT_API_PRESENT
static void playSoundISR(void* isr_context) {
#else
static void playSoundISR(void* isr_context, alt_u32 id) {
#endif
int i;
unsigned int free = alt_up_audio_write_fifo_space(audio, ALT_UP_AUDIO_LEFT);
bool atLeastOneActive = false;
clearPlayBuffer();
if (free >= 1) {
for (i = 0; i < numSounds; i++) {
if (sounds[i]->active == true) {
//printf("Sounds %d is active.\n", i);
atLeastOneActive = true;
addChunkToPlayBuffer(sounds[i], free);
}
}
playAudioMono(free);
}
if (atLeastOneActive == false) {
//printf("Disabling audio interrupt.\n");
alt_up_audio_disable_write_interrupt(audio);
}
}
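/*
 * Note (added): summary of the switch-to-volume mapping implemented in
 * changeBufferVolume() below (only the low four switch bits are used):
 *   0x00, 0x01 -> attenuate: samples shifted right by (2 - value)
 *   0x03       -> unity gain: samples copied unchanged
 *   0x07, 0x0F -> amplify: samples shifted left by ((value % 7) + 1)
 *   all others -> mute (samples set to 0)
 */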
void changeBufferVolume(struct audioInfo *currentAudioInfo, char switchValues) {
int i;
short volumeKeys = switchValues & 0x0F;
if(volumeKeys == 0x00 || volumeKeys == 0x01) {
short shiftLength = 2 - volumeKeys;
for(i = 0; i < currentAudioInfo->bufferLength; i++) {
*(currentAudioInfo->volumeBuffer + i) = *(currentAudioInfo->mainBuffer + i) >> shiftLength;
}
} else if(volumeKeys == 0x07 || volumeKeys == 0x0F) {
short shiftLength = (volumeKeys % 7) + 1;
for(i = 0; i < currentAudioInfo->bufferLength; i++) {
*(currentAudioInfo->volumeBuffer + i) = *(currentAudioInfo->mainBuffer + i) << shiftLength;
}
} else if(volumeKeys == 0x03) {
for(i = 0; i < currentAudioInfo->bufferLength; i++) {
*(currentAudioInfo->volumeBuffer + i) = *(currentAudioInfo->mainBuffer + i);
}
} else {
for(i = 0; i < currentAudioInfo->bufferLength; i++) {
*(currentAudioInfo->volumeBuffer + i) = 0; //mute on all other combos
}
}
}
void updateAudioWithVolume(char switchValues) {
int i = 0;
for (i = 0; i < numSounds; i++) {
if (sounds[i]->active == true) {
changeBufferVolume(sounds[i], switchValues);
}
}
}
<file_sep>/csrc/message.c
#include "message.h"
//Note: This file serves two purposes:
//1) It sets up functions that are used to receive, package, and send messages
//
//2) It has a lot of functions that deal with the WING user information (ID/Alias, etc)
// This also includes functions that can be called to send out specific messages back
// to the android side to notify all users of DE2 updates (tokens added, players added/removed, etc)
int connUserIDs[NUM_USERS] = {0,0,0,0,0};
char * connUserAlias[NUM_USERS];
FILE* uart;
//initialization code for the serial UART and the user alias array data structure
void setupMessage(void) {
int i;
for(i = 0; i < NUM_USERS; i++) {
if(connUserAlias[i] == NULL) {
connUserAlias[i] = malloc(sizeof(char) * MAX_ALIAS_SIZE);
}
*connUserAlias[i] = '\0';
}
//printf("UART Initialization\n");
uart = fopen("/dev/uart_0", "r+");
if(uart == NULL) {
printf("ERROR - uart not opened properly");
}
}
// checks if the ID is saved in the connUserIDs array, and returns true if it exists, false otherwise.
unsigned int isIDSaved(msg * inMsg) {
int i;
for(i = 0; i < NUM_USERS ; i++){
if(inMsg->androidID == connUserIDs[i]){
printf("android %d sending to DE2 already in system\n", connUserIDs[i]);
return 1;
}
}
return 0;
}
// stores an ID in the connUserIDs array, if room is available. Returns 1 if added, 0 otherwise.
unsigned int storeNewID(int ID) {
int i;
for(i = 0; i < NUM_USERS ; i++){
if(connUserIDs[i] == 0) {
printf("DE2 communicating with new android - ID %d\n", ID);
connUserIDs[i] = ID;
return 1;
}
}
return 0;
}
// if the command is UPDATE_ALIAS, the stored alias is set from the message buffer; otherwise a default "playerN" alias is assigned.
unsigned int updateConnUserAlias(msg * inMsg) {
int i;
char buf[MAX_ALIAS_SIZE];
for(i = 0; i < NUM_USERS; i ++) {
if(connUserIDs[i] == inMsg->androidID) {
if(inMsg->cmd == (unsigned int)UPDATE_ALIAS) {
strncpy(connUserAlias[i], (char*)inMsg->buffer, (MAX_ALIAS_SIZE - 1));
} else {
sprintf(buf, "player%d", i);
connUserAlias[i] = strncpy(connUserAlias[i], buf, (MAX_ALIAS_SIZE - 1)); //strlen(buf));//
}
connUserAlias[i][MAX_ALIAS_SIZE - 1] = '\0'; // enforce last byte to be null character, to avoid overflow
alt_up_char_buffer_clear(char_buffer);
return 1;
}
}
return 0;
}
//purpose: Alert all current Android users of a user change (addition or alias update)
void alertUsersNewUser(msg * currentMsg) {
int i;
msg alertMsg;
alertMsg.androidID = 0;
alertMsg.buffer = malloc(sizeof(char) * MAX_ALIAS_SIZE);
alertMsg.cmd = (unsigned int)UPDATE_ALIAS;
alertMsg.len = MAX_ALIAS_SIZE;//currentMsg->len; // correct?
//printf("in alertUsersNewUser, alerting of new user: %d\n", currentMsg->androidID);
for(i = 0; i < NUM_USERS; i++) {
if(currentMsg->androidID == connUserIDs[i]) {
*(unsigned char*)alertMsg.buffer = (unsigned char)connUserIDs[i];
strncpy((char*)(alertMsg.buffer + 1), connUserAlias[i], MAX_ALIAS_SIZE - 1);
alertMsg.buffer[MAX_ALIAS_SIZE - 1] = '\0';
}
}
for(i = 0; i < NUM_USERS; i++) {
if((currentMsg->androidID != connUserIDs[i]) && (connUserIDs[i] != 0)) {
alertMsg.androidID = connUserIDs[i];
printf("about to send string: %s to %d in alertUsersNewUser\n", (char*)alertMsg.buffer, connUserIDs[i]);
sendMessage(&alertMsg);
}
}
free(alertMsg.buffer);
}
//purpose: Alert one user about all of the other active Android user's information
// To be used when a phone connects, when others are already connected.
void alertUserAllUsers(msg * currentMsg) {
int i;
msg alertMsg;
alertMsg.androidID = 0;
alertMsg.buffer = malloc(sizeof(char) * MAX_ALIAS_SIZE);
alertMsg.cmd = (unsigned int)UPDATE_ALIAS;
alertMsg.len = MAX_ALIAS_SIZE;//currentMsg->len; // correct? TBD
//printf("in alertUserAllUsers, alerting new user: %d\n", currentMsg->androidID);
alertMsg.androidID = currentMsg->androidID;
for(i = 0; i < NUM_USERS; i++) {
if((currentMsg->androidID != connUserIDs[i]) && (connUserIDs[i] != 0)) {
*(unsigned char *)alertMsg.buffer = (unsigned char)connUserIDs[i];
strncpy((char*)(alertMsg.buffer + 1), connUserAlias[i], MAX_ALIAS_SIZE - 1);
alertMsg.buffer[MAX_ALIAS_SIZE - 1] = '\0';
printf("about to send string: %s in alertUserAlUsers\n", (char*)alertMsg.buffer);
sendMessage(&alertMsg);
}
}
free(alertMsg.buffer);
}
//purpose: Alert all users that one user has disconnected, so that the Android clients remove
// all associated user information.
void alertUsersOfUserDC(msg * currentMsg) {
int i, j;
msg alertMsg;
alertMsg.androidID = 0;
alertMsg.cmd = (unsigned int)UPDATE_ALIAS;
alertMsg.len = 2; // size of buffer for ID and 0.
alertMsg.buffer = malloc(2); //message of null indicates that android should remove the user from their memory.
alertMsg.buffer[0] = currentMsg->androidID;
alertMsg.buffer[1] = '\0';
for(i = 0; i < NUM_USERS; i++) {
if((currentMsg->androidID != connUserIDs[i]) && (connUserIDs[i] != 0)) {
printf("in alertUsersOfUserDC - sending to id %d about DC of %d", connUserIDs[i], currentMsg->androidID);
alertMsg.androidID = connUserIDs[i];
sendMessage(&alertMsg);
}
}
free(alertMsg.buffer);
}
//purpose: cleanup the alias array when a user leaves WING
void clearUserInfo(msg * currentMsg) {
int i;
for(i = 0; i < NUM_USERS; i++) {
if(currentMsg->androidID == connUserIDs[i]) {
*connUserAlias[i] = '\0';
connUserIDs[i] = 0;
}
}
}
//purpose: receive a message from the UART and package it into a msg struct
// This function allocates room for the msg buffer
void getMessage(msg * inMsg){
int i;
unsigned char msgLen[4];
inMsg->len = 0;
//Middleman will only send a message to the DE2 if the first byte it receives is a zero
fputc('\0', uart);
//obtain android ID
do {
inMsg->androidID = (int) fgetc(uart);
} while(inMsg->androidID == EOF || inMsg->androidID == '\n');
printf("I got msg from ID %d\n", inMsg->androidID);
//obtain length (4 bytes)
for(i = ((sizeof(msgLen) / sizeof(msgLen[0])) - 1); i >= 0; i--) {
//printf("about to fgetc\n");
msgLen[i] = fgetc(uart);
//printf("received: msgLen[i] %d\n", msgLen[i]);
inMsg->len += (0xFF & msgLen[i]) << i*8;
}
inMsg->cmd = (unsigned int) fgetc(uart);
printf("About to receive %d characters, from cmd %d:\n", inMsg->len, inMsg->cmd);
int tmp;
inMsg->buffer = malloc(inMsg->len * sizeof(char));
if(inMsg->buffer) {
tmp = fread(inMsg->buffer, sizeof(char), inMsg->len, uart);
printf("num bytes read from serial stream: %d\n", tmp);
} else {
printf("Error, input Msg buffer not able to be allocated\n");
}
if(isIDSaved(inMsg) == 0) {
if (storeNewID(inMsg->androidID) == 0)
printf("Error adding Android ID, ID array full\n");
else {
updateConnUserAlias(inMsg);
alertUsersNewUser(inMsg); //alert current users of new user
alertUserAllUsers(inMsg); //alert new user of all current users
alertUserOfAllTokens(inMsg); //alert new user of all active tokens
}
}
// for(i = 0; i < inMsg->len; i++) {
// printf("%c", *(inMsg->buffer + i));
// }
printf("\n");
return;
}
//purpose: sends the msg struct in the correct order to the UART given our communication protocol
//requires: pre-allocated char buffer
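/*
 * Wire format (added, illustrative summary of the framing used by getMessage()
 * and sendMessage() in this file):
 *   byte 0      android ID
 *   bytes 1-4   payload length, most significant byte first
 *   byte 5      command code
 *   bytes 6..   payload (len bytes)
 * e.g. a 3-byte payload "abc" with command 2 for android 1 is framed as
 *   01 00 00 00 03 02 'a' 'b' 'c'
 */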
void sendMessage(msg * sendMsg){
int i;
unsigned char msgLen[4];
if(sendMsg->buffer == NULL) {
printf("Error in sendMessage, buffer is null!");
return;
} else if(uart == NULL) {
printf("Error in sendMessage, uart is null!");
return;
}
// Start with the android ID, since we are interfacing many androids
fputc((unsigned char) sendMsg->androidID, uart);
printf("starting to send message, with length: %d\n", sendMsg->len);
// Then send the number of bytes in our message
for(i = ((sizeof(msgLen) / sizeof(msgLen[0])) - 1); i >= 0; i--) {
msgLen[i] = (sendMsg->len >> i*8) & (0xFF);
//printf("msgLen[i] = %d\n", msgLen[i]);
fputc(msgLen[i], uart);
}
fputc(sendMsg->cmd, uart);
// Now send the actual message to the Middleman
fwrite(sendMsg->buffer, sizeof(char), sendMsg->len, uart);
fflush(uart);
}
//purpose: alert all Android users about the ID of the DM
void sendAllUsersDMID(char dmID) {
msg * rspnsMsg;
rspnsMsg = malloc(sizeof(msg));
rspnsMsg->androidID = 0;
rspnsMsg->buffer = malloc(sizeof(char));
*(rspnsMsg->buffer) = dmID;
rspnsMsg->cmd = GET_DM_ID;
rspnsMsg->len = 1;
int i;
for(i = 0; i < NUM_USERS; i++) {
if(connUserIDs[i] != 0) {
rspnsMsg->androidID = connUserIDs[i];
sendMessage(rspnsMsg);
}
}
alt_up_char_buffer_clear(char_buffer);
free(rspnsMsg->buffer);
free(rspnsMsg);
}
//purpose: Pass a message along to the correct recipient
void passMsg(msg * passMsg) {
printf("in passMsg\n");
if(passMsg->buffer == NULL || uart == NULL){
printf("Error in sendMessage, buffer or uart is null!");
return;
}
char * msgString = (char *)passMsg->buffer;
unsigned int yPos = 2;
unsigned int xPos;
unsigned int sendID = (unsigned int)(*(msgString));
if( sendID == 0) {
printf("about to write to screen!\n");
alt_up_char_buffer_clear(char_buffer);
xPos = (SCREEN_CHAR_WIDTH / 2) - (int)(strlen(msgString) / 2);
alt_up_char_buffer_string(char_buffer, msgString , xPos, yPos);
} else if( (sendID == connUserIDs[0]) ||
( sendID == connUserIDs[1]) ||
( sendID == connUserIDs[2]) ||
( sendID == connUserIDs[3]) ||
( sendID == connUserIDs[4]) ) {
*(msgString) = passMsg->androidID; // byte 6 is now the ID of the device that sent the message.
passMsg->androidID = sendID;
sendMessage(passMsg);
} else {
printf("Error, tried to pass message to non-existant device!\n");
}
}
<file_sep>/javasrc/src/org/ubc/de2vtt/comm/sendables/SendableNull.java
package org.ubc.de2vtt.comm.sendables;
public class SendableNull implements Sendable {
// This is a sendable object for commands that do not require arguments
private static SendableNull instance;
protected SendableNull() {
}
public static SendableNull GetSharedInstance() {
if (instance == null) {
instance = new SendableNull();
}
return instance;
}
@Override
public byte[] ToByteArray() {
return new byte[0];
}
}
<file_sep>/javasrc/src/org/ubc/de2vtt/comm/Message.java
package org.ubc.de2vtt.comm;
import java.nio.ByteBuffer;
import org.ubc.de2vtt.comm.sendables.Sendable;
public class Message {
private static String TAG = Message.class.getSimpleName();
private Command cmd;
private Sendable send;
private Direction dir;
private int delay = 0;
public Message(Command cmd, Sendable send) {
this.cmd = cmd;
this.send = send;
this.dir = Direction.OUT;
}
public Message(Command cmd) {
this.cmd = cmd;
this.send = null;
this.dir = Direction.OUT;
}
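// Note (added): GetArrayToSend serialises this message as a 4-byte big-endian
// payload length, then the one-byte command code, then the payload itself.
// For a 3-byte payload and command code 0x07 the result is
// {0, 0, 0, 3, 0x07, b0, b1, b2}.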
public byte[] GetArrayToSend() {
byte[] args = send.ToByteArray();
int sendLen = args.length + 5;
//int neededSize = 1024 - (sendLen % 1024);
byte[] ret = new byte[sendLen];
// if (ret.length % 1024 != 0) {
// Log.e(TAG, "Incorrect buff size.");
// }
// bytes 0-3 are length of command data
byte lenBuf[] = ByteBuffer.allocate(4).putInt(args.length).array();
System.arraycopy(lenBuf, 0, ret, 0, lenBuf.length);
// byte 4 is the command
ret[4] = cmd.code;
// bytes 5 and beyond are the command data
System.arraycopy(args, 0, ret, 5, args.length);
return ret;
}
public Direction GetDirection() {
return dir;
}
public void setDelay(int d) {
delay = d;
}
public int getDelay() {
return delay;
}
public enum Direction {
IN, OUT;
}
}
<file_sep>/csrc/message.h
#ifndef MESSAGE_H_
#define MESSAGE_H_
#include "system.h"
#include "vga.h"
#include "utilities.h"
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#define NUM_USERS 5
#define MAX_ALIAS_SIZE 40
//extern token tokenArr[MAX_TOKENS];
extern int loadedTokenCnt;
struct message {
unsigned int androidID;
unsigned int len;
unsigned int cmd;
unsigned char * buffer; // max 124 bytes
};
typedef struct message msg;
void setupMessage(void);
unsigned int isIDSaved(msg * inMsg);
unsigned int storeNewID(int ID);
unsigned int updateConnUserAlias(msg * inMsg);
void alertUsersNewUser(msg * currentMsg);
void alertUserAllUsers(msg * currentMsg);
void alertUsersOfUserDC(msg * currentMsg);
void alertUsersOfTokenInfo(msg * currentMsg, int tokenID);
void clearUserInfo(msg * currentMsg);
void getMessage(msg * inMsg);
void sendMessage(msg * sendMsg);
void sendAllUsersDMID(char dmID);
void passMsg(msg * passMsg);
#endif /* MESSAGE_H_ */
<file_sep>/csrc/sd_card.h
#ifndef __SD_CARD_H__
#define __SD_CARD_H__
#include "Altera_UP_SD_Card_Avalon_Interface.h"
alt_up_sd_card_dev *sdDev;
int openSdCard();
short int openFile(char *fileName);
int closeFile(short int fh);
unsigned char readByte(short int fh);
short int readWord(short int fh);
int readDWord(short int fh);
unsigned int getWavFileLength(char *fileName);
void readPastWavHeader(short int handle);
#endif
<file_sep>/javasrc/src/org/ubc/de2vtt/fragments/SendImageFragment.java
package org.ubc.de2vtt.fragments;
import org.ubc.de2vtt.R;
import org.ubc.de2vtt.comm.Command;
import org.ubc.de2vtt.comm.Message;
import org.ubc.de2vtt.comm.Messenger;
import org.ubc.de2vtt.comm.Received;
import org.ubc.de2vtt.comm.sendables.SendableBitmap;
import org.ubc.de2vtt.token.TokenManager;
import org.ubc.de2vtt.users.DMManager;
import android.app.Activity;
import android.app.ProgressDialog;
import android.content.Intent;
import android.database.Cursor;
import android.graphics.Bitmap;
import android.graphics.BitmapFactory;
import android.net.Uri;
import android.os.AsyncTask;
import android.os.Bundle;
import android.provider.MediaStore;
import android.util.Log;
import android.view.LayoutInflater;
import android.view.View;
import android.view.View.OnClickListener;
import android.view.ViewGroup;
import android.widget.Button;
import android.widget.ImageView;
import android.widget.ImageView.ScaleType;
public class SendImageFragment extends WINGFragment {
private static final String TAG = SendImageFragment.class.getSimpleName();
private static final int TOKEN_X = 20;
private static final int TOKEN_Y = 20;
private static final int MAP_X = 340;
private static final int MAP_Y = 240;
protected View mParentView;
private static final int REQUEST_CODE = 1;
private Bitmap bitmap;
private Uri selectedImage;
private ProgressDialog progress;
public View onCreateView(LayoutInflater inflater, ViewGroup container,
Bundle savedInstanceState) {
mParentView = inflater.inflate(R.layout.fragment_sendimage, container, false);
setupOnClickListeners();
if (bitmap != null) {
ImageView imageView = (ImageView) mParentView.findViewById(R.id.imgView);
imageView.setImageBitmap(bitmap);
imageView.setScaleType(ScaleType.FIT_XY);
}
DMManager dmm = DMManager.getSharedInstance();
if (!dmm.isUserDM()) {
Button sendManBtn = (Button) mParentView.findViewById(R.id.btnSendMap);
sendManBtn.setVisibility(View.GONE);
}
updateButtonState();
return mParentView;
}
private void setupOnClickListeners() {
Button pickBtn = (Button) mParentView.findViewById(R.id.btnSelectImage);
pickBtn.setOnClickListener(new OnClickListener() {
@Override
public void onClick(View v) {
pickImage(v);
}
});
Button sendTokBtn = (Button) mParentView.findViewById(R.id.btnSendToken);
sendTokBtn.setOnClickListener(new OnClickListener() {
@Override
public void onClick(View v) {
sendToken();
}
});
Button sendMapBtn = (Button) mParentView.findViewById(R.id.btnSendMap);
sendMapBtn.setOnClickListener(new OnClickListener() {
@Override
public void onClick(View v) {
sendMap();
}
});
}
private void updateButtonState() {
boolean canSend = Messenger.readyToSend();
Button sendMapBtn = (Button) mParentView.findViewById(R.id.btnSendMap);
sendMapBtn.setEnabled(canSend);
Button sendTokBtn = (Button) mParentView.findViewById(R.id.btnSendToken);
sendTokBtn.setEnabled(canSend);
if (bitmap == null) {
sendMapBtn.setEnabled(false);
sendTokBtn.setEnabled(false);
}
}
public void pickImage(View view) {
Intent intent = new Intent();
intent.setType("image/*");
intent.setAction(Intent.ACTION_GET_CONTENT);
startActivityForResult(Intent.createChooser(intent,"Select Image"), REQUEST_CODE);
}
public void sendToken() {
sendImage(Command.SEND_TOKEN, TOKEN_X, TOKEN_Y);
}
public void sendMap() {
sendImage(Command.SEND_MAP, MAP_X, MAP_Y);
}
public void sendImage(Command cmd, int x, int y) {
if (cmd == Command.SEND_MAP) {
if (bitmap != null) {
Bitmap scaled = Bitmap.createScaledBitmap(bitmap, x, y, false);
SendableBitmap bmp = new SendableBitmap(scaled.copy(Bitmap.Config.ARGB_8888, false));
Message msg = new Message(cmd, bmp);
Messenger messenger = Messenger.GetSharedInstance();
messenger.send(msg);
updateButtonState();
} else {
Log.v(TAG, "Attempt to send null bitmap.");
}
} else if(cmd == Command.SEND_TOKEN) {
if (bitmap != null) {
Bitmap scaled = Bitmap.createScaledBitmap(bitmap, x, y, false);
SendableBitmap bmp = new SendableBitmap(scaled.copy(Bitmap.Config.ARGB_8888, false));
Message msg = new Message(cmd, bmp);
Messenger messenger = Messenger.GetSharedInstance();
TokenManager m = TokenManager.getSharedInstance();
m.queueBitmap(scaled);
messenger.send(msg);
updateButtonState();
} else {
Log.v(TAG, "Attempt to send null bitmap.");
}
} else {
Log.v(TAG, "Attempt to send image with invalid command.");
}
}
@Override
public void onPause() {
super.onPause();
}
@Override
public void onActivityResult(int requestCode, int resultCode, Intent data) {
super.onActivityResult(requestCode, resultCode, data);
Log.v(TAG, "onActivityResult entered.");
if (requestCode == REQUEST_CODE && resultCode == Activity.RESULT_OK && null != data) {
progress = new ProgressDialog(getActivity());
progress.setTitle("Loading");
progress.setMessage("Loading your image...");
progress.show();
selectedImage = data.getData();
String[] filePathColumn = { MediaStore.Images.Media.DATA };
Cursor cursor = getActivity().getContentResolver().query(selectedImage, filePathColumn, null, null, null);
cursor.moveToFirst();
int columnIndex = cursor.getColumnIndex(filePathColumn[0]);
String picturePath = cursor.getString(columnIndex);
cursor.close();
ImageView imageView = (ImageView) mParentView.findViewById(R.id.imgView);
imageView.setImageResource(R.drawable.black);
ThumbnailSetter ts = new ThumbnailSetter();
ts.execute(picturePath);
}
Log.v(TAG, "onActivityResult finished.");
}
private class ThumbnailSetter extends AsyncTask<String, Void, Bitmap> {
@Override
protected Bitmap doInBackground(String... params) {
String picturePath = params[0];
Bitmap bmp = BitmapFactory.decodeFile(picturePath);
return Bitmap.createScaledBitmap(bmp, 500, 500, false);
}
@Override
protected void onPostExecute(Bitmap b) {
Log.v(TAG, "Setting image bmp");
bitmap = b;
ImageView imageView = (ImageView) mParentView.findViewById(R.id.imgView);
imageView.setImageBitmap(b);
progress.dismiss();
updateButtonState();
}
}
@Override
public boolean passReceived(Received r) {
Log.e(TAG, "Received message from Mailbox via MainActivity");
return false;
}
}
<file_sep>/csrc/utilities.c
#include "utilities.h"
char dmID = 0;
//return a character array given an input integer and the size of the array to create
char * IntToCharBuf(unsigned int inputInt, unsigned int numChars) {
char * charBuf = malloc(numChars * sizeof(char));
int i;
if(charBuf) {
for(i = (numChars - 1); i >= 0; i--) {
charBuf[i] = (inputInt >> i*8) & (0xFF);
}
}
return charBuf;
}
| fb8a5bb3cea91d4ea0a8baebbe7c37887a33f7c0 | ["Java", "C", "Python", "Markdown"] | 40 | C | jordenh/DE2VTT | ed47704920cb157c6e4bf9abd116f6d21815766c | 67c87d82b90ec54274b3a102029c8aa61443ae82 | refs/heads/master |
<repo_name>Pooleyo/peakfinder<file_sep>/peakrunner/in_peakrunner.py
root_directory = "/media/ajp560/data/Documents/codes/peakfinder.py/peakrunner"
subdirectory_prefix = "uncompressed_300K_"
subdirectory_suffix = ""
subdirectory_variable_list = ["10x10x10", "20x20x20", "30x30x30", "40x40x40", "50x50x50", "60x60x60", "70x70x70", "80x80x80", "90x90x90", "100x100x100"]
peakfinder_src_location = "/media/ajp560/data/Documents/codes/peakfinder.py/src"
bash_script_filename = "peakrunner.sh"
current_lammps_directory = ""
lammps_prefix = ""
lammps_suffix = ""
lammps_variable_list = []<file_sep>/development/fit_to_peak_centres.py
def run(run_soh, raw_pos_est, pos_est, gsqr_est, compression_ratio, source_name, N_atoms, mass, a_lattice,
k_steps_find_centre_1D, k_steps_find_centre_3D, timestep, soh_command, plot):
import units as un
import copy
print "Fitting to peak centres..."
centre_guess_3DFT = copy.deepcopy(pos_est) # This list will contain the first guesses of peak centres gained from
# the three 1DFTs performed below. It is used as a first guess of the centre for the final 3DFT.
fitted_pos_est = copy.deepcopy(pos_est) # This list contains the final peak centres, determined from the final
# 3DFT. These are used as the input for peak centres when calculating the intensity of a full peak.
compressed_pos_est, compressed_gsqr_est = un.apply_compression_ratio_to_pos_est(pos_est, gsqr_est, compression_ratio)
offset = un.calc_k_offset_with_N_atoms(N_atoms)
for i, pos in enumerate(compressed_pos_est):
k_start = un.find_simple_k_start(pos, offset)
k_stop = un.find_simple_k_stop(pos, offset)
kx_start, kx_stop, ky_start, ky_stop, kz_start, kz_stop = un.calc_lineout_k_start_stop_along_xyz(k_start,
k_stop,
pos)
peak_str = un.make_peak_str(raw_pos_est[i])
input_file_location = un.determine_rough_soh_input_file_location(peak_str, "find_centre_kx_1DFT.in")
un.write_soh_input_1DFT(source_name, input_file_location, "find_centre_kx_" + peak_str, mass, a_lattice, kx_start, kx_stop, k_steps_find_centre_1D)
input_file_location = un.determine_rough_soh_input_file_location(peak_str, "find_centre_ky_1DFT.in")
un.write_soh_input_1DFT(source_name, input_file_location, "find_centre_ky_" + peak_str, mass, a_lattice, ky_start, ky_stop, k_steps_find_centre_1D)
input_file_location = un.determine_rough_soh_input_file_location(peak_str, "find_centre_kz_1DFT.in")
un.write_soh_input_1DFT(source_name, input_file_location, "find_centre_kz_" + peak_str, mass, a_lattice, kz_start, kz_stop, k_steps_find_centre_1D)
if run_soh is True:
for i, pos in enumerate(compressed_pos_est):
peak_str = un.make_peak_str(raw_pos_est[i])
input_file_location = un.determine_rough_soh_input_file_location(peak_str, "find_centre_kx_1DFT.in")
un.run_soh(input_file_location, soh_command)
un.move_soh_rough_output_to_peak_folder(peak_str, "find_centre_kx_" + peak_str, source_name, timestep)
input_file_location = un.determine_rough_soh_input_file_location(peak_str, "find_centre_ky_1DFT.in")
un.run_soh(input_file_location, soh_command)
un.move_soh_rough_output_to_peak_folder(peak_str, "find_centre_ky_" + peak_str, source_name, timestep)
input_file_location = un.determine_rough_soh_input_file_location(peak_str, "find_centre_kz_1DFT.in")
un.run_soh(input_file_location, soh_command)
un.move_soh_rough_output_to_peak_folder(peak_str, "find_centre_kz_" + peak_str, source_name, timestep)
for i, pos in enumerate(compressed_pos_est):
peak_str = un.make_peak_str(raw_pos_est[i])
appended_string = "find_centre_kx_" + peak_str
soh_output_file_location = un.determine_rough_soh_output_file_location(peak_str, source_name, timestep,
appended_string)
soh_output = un.read_from_soh_output(soh_output_file_location)
k_max = un.find_point_of_max_height(soh_output)
kx_centre = k_max[0]
if plot is True:
un.plot_pygnuplot(soh_output[0], soh_output[3], "./data/" + peak_str + "/find_centre_kx.png",
"./data/" + peak_str + "/find_centre_kx.dat")
appended_string = "find_centre_ky_" + peak_str
soh_output_file_location = un.determine_rough_soh_output_file_location(peak_str, source_name, timestep,
appended_string)
soh_output = un.read_from_soh_output(soh_output_file_location)
k_max = un.find_point_of_max_height(soh_output)
ky_centre = k_max[1]
if plot is True:
un.plot_pygnuplot(soh_output[1], soh_output[3], "./data/" + peak_str + "/find_centre_ky.png",
"./data/" + peak_str + "/find_centre_ky.dat")
appended_string = "find_centre_kz_" + peak_str
soh_output_file_location = un.determine_rough_soh_output_file_location(peak_str, source_name, timestep,
appended_string)
soh_output = un.read_from_soh_output(soh_output_file_location)
k_max = un.find_point_of_max_height(soh_output)
kz_centre = k_max[2]
if plot is True:
un.plot_pygnuplot(soh_output[2], soh_output[3], "./data/" + peak_str + "/find_centre_kz.png",
"./data/" + peak_str + "/find_centre_kz.dat")
centre_guess_3DFT[i] = [kx_centre, ky_centre, kz_centre]
# Now that the initial guesses have been determined using the 1DFTs above, a 3DFT is performed to get as close as
# possible to the centre of the peak.
dk = un.calc_dk_from_offset(offset, k_steps_find_centre_1D, k_steps_find_centre_1D, k_steps_find_centre_1D)
for i, pos in enumerate(centre_guess_3DFT):
k_start = un.find_simple_k_start(pos, dk)
k_stop = un.find_simple_k_stop(pos, dk)
peak_str = un.make_peak_str(raw_pos_est[i])
input_file_location = un.determine_rough_soh_input_file_location(peak_str, "find_centre_3DFT.in")
un.write_soh_input_3DFT(source_name, input_file_location, "find_centre_3DFT_" + peak_str, mass, a_lattice,
k_steps_find_centre_3D, k_start, k_stop)
if run_soh is True:
for i, pos in enumerate(centre_guess_3DFT):
peak_str = un.make_peak_str(raw_pos_est[i])
input_file_location = un.determine_rough_soh_input_file_location(peak_str, "find_centre_3DFT.in")
un.run_soh(input_file_location, soh_command)
un.move_soh_rough_output_to_peak_folder(peak_str, "find_centre_3DFT_" + peak_str, source_name, timestep)
for i, pos in enumerate(centre_guess_3DFT):
peak_str = un.make_peak_str(raw_pos_est[i])
appended_string = "find_centre_3DFT_" + peak_str
soh_output_file_location = un.determine_rough_soh_output_file_location(peak_str, source_name, timestep,
appended_string)
soh_output = un.read_from_soh_output(soh_output_file_location)
k_max = un.find_point_of_max_height(soh_output)
fitted_pos_est[i] = k_max
return fitted_pos_est
<file_sep>/development/calc_md_temperature.py
def run(lammps_file_location, user_input_temperature, temperature_dimensionality, atomic_mass, velocity_columns, number_velocity_bins):
import units as un
import numpy as np
print "Calculating temperature from MD velocities..."
md_temperature, velocity_squared = un.calc_MD_temperature(lammps_file_location, user_input_temperature, temperature_dimensionality, atomic_mass, velocity_columns)
speed = np.sqrt(velocity_squared)
histogram = un.bin_values(number_velocity_bins, speed)
populations = list(histogram[0])
total_population = float(sum(populations))
normalised_populations = [0] * len(populations)
for i, pop in enumerate(populations):
normalised_populations[i] = pop / total_population
bins = list(histogram[1])
del(bins[-1])
max_speed = np.max(speed)
boltzmann_probability_list, boltzmann_speed_list = un.calc_maxwell_boltzmann_velocity_distribution(max_speed, number_velocity_bins, normalised_populations)
un.plot_velocity_distribution(boltzmann_probability_list, boltzmann_speed_list, normalised_populations, bins)
print "Temperature is: " + str(md_temperature) + " K"
return md_temperature
<file_sep>/development/path_1_static_peakfinding.py
def run():
import inpkfd as ip
import select_peak_positions
import build_datafile_structure
import use_static_peakfinding_for_3DFT
import calc_peak_intensities
import calc_debye_waller
import write_output_files
import plot_debye_waller
import plot_peaks
import find_compression_ratio
import apply_compression_ratio
import calc_md_temperature
import logging as log
log.info("Path %s started.\n", __name__)
current_md_temperature = ip.temperature
if ip.calc_md_temperature_from_dump_file is True:
current_md_temperature = calc_md_temperature.run(ip.source_name, ip.temperature, ip.calculated_temperature_dimensionality, ip.mass, ip.velocity_columns, ip.number_velocity_bins)
raw_pos_est, raw_gsqr_est = select_peak_positions.run(ip.gsqr_max, ip.negative_k, ip.remove_000, ip.crystal_type)
current_pos_est = raw_pos_est
current_gsqr_est = raw_gsqr_est
peak_str = build_datafile_structure.run(raw_pos_est)
compression_ratio = find_compression_ratio.run(ip.run_soh, ip.uncompressed_peak_positions,
ip.compression_ratio_undershoot,
ip.compression_ratio_overshoot, ip.source_name, ip.mass,
ip.a_lattice,
ip.lineout_k_steps, ip.timestep, ip.soh_command)
compressed_pos_est, compressed_gsqr_est = apply_compression_ratio.run(current_pos_est, current_gsqr_est,
compression_ratio)
current_pos_est = compressed_pos_est
current_gsqr_est = compressed_gsqr_est
use_static_peakfinding_for_3DFT.run(current_pos_est, raw_pos_est, ip.source_name, ip.timestep, ip.mass, ip.a_lattice, ip.N_atoms, ip.k_steps,
ip.run_soh, ip.num_cores)
peak_centre, integrated_intensity = calc_peak_intensities.run(raw_pos_est, ip.source_name, ip.timestep)
debye_temperature, temperature, model_debye_temperatures, gsqr_per_angstrom, ln_intensity, slope = calc_debye_waller.run(
peak_centre, integrated_intensity, ip.a_lattice, ip.mass,
current_md_temperature, ip.uncompressed_debye_temperature,
ip.single_term_model_gamma_0_values,
ip.single_term_model_exponent_values,
ip.triple_term_model_gamma_0_values,
ip.triple_term_model_constants, compression_ratio, ip.polynomial_coeff)
write_output_files.run(debye_temperature, temperature, model_debye_temperatures, raw_pos_est, peak_centre,
gsqr_per_angstrom, integrated_intensity, ln_intensity, slope)
plot_debye_waller.run(gsqr_per_angstrom, ln_intensity, raw_pos_est, current_md_temperature, ip.mass, ip.uncompressed_peak_positions)
if ip.make_final_peak_plots is True:
plot_peaks.run(peak_str, peak_centre, ip.source_name, ip.timestep)
log.info("Path %s finished.\n", __name__)
return
<file_sep>/development/plot_debye_waller.py
def run(gsqr, ln_intensity, raw_pos_est, temperature, mass, plotting_directions):
import units as un
print "Plotting intensity vs. G^2 for all peaks, and in kx, ky, kz..."
filename = "data/ln_intensity_vs_gsqr_per_angstrom.png"
x_label = "$|G^2| (\AA^{-2})$"
y_label = "$ln(I)$ (arb.)"
plot_title = "$ln(I)$ vs. $|G^2|$"
normalised_ln_I = ln_intensity - max(ln_intensity)
fit_slope, fit_constant = un.calc_line_slope_and_constant(gsqr, normalised_ln_I)
un.plot_debye_waller(gsqr, normalised_ln_I, filename, x_label, y_label, plot_title, fit_slope, fit_constant)
direction_plot_filenames = ["data/parallel_x.png", "data/parallel_y.png", "data/parallel_z.png"]
direction_result_filenames = ["data/parallel_x.pkfd", "data/parallel_y.pkfd", "data/parallel_z.pkfd"]
for i, direction in enumerate(plotting_directions):
current_peak_list = []
current_intensity_list = []
for j, pos in enumerate(raw_pos_est):
if un.find_if_vectors_parallel(direction, pos) is True:
current_peak_list.append(gsqr[j])
current_intensity_list.append(ln_intensity[j])
else:
pass
un.plot_matplotlib(current_peak_list, current_intensity_list, direction_plot_filenames[i], x_label, y_label, plot_title)
if len(current_peak_list) != 0 and len(current_intensity_list) != 0:
slope, constant = un.calc_line_slope_and_constant(current_peak_list, current_intensity_list)
debye_waller_constant = un.calc_debye_waller_constant(mass)
debye_temperature_xrd = un.calc_debye_temperature_xrd(temperature, slope, debye_waller_constant)
un.write_directional_temperatures_to_file(debye_temperature_xrd, temperature, direction_result_filenames[i])
else:
print "No peaks found along " + str(direction)
return
<file_sep>/test/test_units.py
def run(unit_name, test_input, expected_result):
import importlib
exec("from units import " + unit_name)
string_test_input = str(test_input)
string_test_input = string_test_input[1:-1]
exec("actual_result = " + str(unit_name) + "(" + string_test_input + ")")
if actual_result == expected_result:
print "Pass\t\t" + "UNIT\t\t" + unit_name
else:
print "####FAIL####\t\t" + "UNIT\t\t" + unit_name
return
<file_sep>/src/plot_debye_waller.py
def run(gsqr, ln_intensity, pos_est, temperature, mass):
import units as un
print "Plotting intensity vs. G^2 for all peaks, and in kx, ky, kz..."
filename = "ln_intensity_vs_gsqr_per_angstrom.png"
x_label = "$G^2$ (A$^-2$)"
y_label = "ln(I) (arb.)"
plot_title = "Intensity vs. G$^2$"
un.plot_matplotlib(gsqr, ln_intensity, filename, x_label, y_label, plot_title)
plotting_directions = [[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]]
direction_plot_filenames = ["parallel_x.png", "parallel_y.png", "parallel_z.png"]
direction_result_filenames = ["parallel_x.pkfd", "parallel_y.pkfd", "parallel_z.pkfd"]
for i, direction in enumerate(plotting_directions):
current_peak_list = []
current_intensity_list = []
for j, pos in enumerate(pos_est):
if un.find_if_vectors_parallel(direction, pos) is True:
current_peak_list.append(gsqr[j])
current_intensity_list.append(ln_intensity[j])
else:
pass
un.plot_matplotlib(current_peak_list, current_intensity_list, direction_plot_filenames[i], x_label, y_label, plot_title)
slope, constant = un.calc_line_slope_and_constant(current_peak_list, current_intensity_list)
debye_waller_constant = un.calc_debye_waller_constant(mass)
debye_temperature_xrd = un.calc_debye_temperature_xrd(temperature, slope, debye_waller_constant)
un.write_temperatures_to_file(debye_temperature_xrd, temperature, direction_result_filenames[i])
return
<file_sep>/test/oldddd/peakfinder.py
# This is a new version of peakfinder (started on 11/10/16) that restructures the old version.
# The old version had become difficult to use because it was written with little coding experience.
# The idea with this version is to restructure the code into functions.
import module as mod
run_soh = True
make_plots = True
source = "equilibrate_10000.atom"
a_lattice = 3.615 # In Angstroms.
mass = 63.55 # In g/mol.
timestep = 10000 # Only used for file locations.
del_kx = 1/10.0
del_ky = 1/10.0
del_kz = 1/10.0
k_steps = 11
k_steps_accurate = 1e3 + 1
t0, tpy0 = mod.startwatch()
gsqr_est, pos_est = mod.make_fcc(gsqr_max = 30, negative_k = True, remove_000 = True)
rot_pos_est = mod.enforce_rotation_111(pos_est = pos_est)
#source_cut, atom_count = mod.cut_atoms(source = source, xlo = 0.0, xhi = 1.0, ylo = 0.0, yhi = 1.0, zlo = 0.0, zhi = 1.0)
md_temperature_2d, md_temperature_3d = mod.get_md_temperature(source = source, mass = mass, piston_velocity = 0.0)
pos_est_compressed, gsqr_est_compressed, compression_factor = mod.compensate_for_compression(source = source, rotated_to_111 = True, initial_hkl_pos_est = pos_est, run_soh = run_soh, k_steps = 1001, pos_est = rot_pos_est, a_lattice = a_lattice, mass = mass, show_plot = False, timestep = timestep)
mod.get_peak_intensities(source = source, pos_est = pos_est_compressed, initial_hkl_pos_est = pos_est, compression_factor = compression_factor, a_lattice = a_lattice, mass = mass, del_kx = del_kx, del_ky = del_ky, del_kz = del_kz, k_steps = k_steps, k_steps_accurate = k_steps_accurate, run_soh = run_soh, timestep = timestep)
pos_integrated, gsqr_integrated, ln_complex_intensity_integrated, ln_norm_complex_intensity_integrated, ln_simple_intensity_integrated, ln_norm_simple_intensity_integrated = mod.get_ln_intensity(pos_est = pos_est_compressed, initial_hkl_pos_est = pos_est, source = source, miller_pos_est = pos_est, show_plot = False, timestep = timestep, a_lattice = a_lattice, del_kx = del_kx, del_ky = del_ky, del_kz = del_kz, k_steps = k_steps, compression_factor = compression_factor, make_plots = make_plots)
slope_ln_complex_intensity_integrated_vs_gsqr, constant_ln_complex_intensity_vs_gsqr = mod.get_slope_ln_intensity_vs_gsqr(gsqr = gsqr_integrated, ln_intensity = ln_complex_intensity_integrated)
slope_ln_simple_intensity_integrated_vs_gsqr, constant_ln_simple_intensity_vs_gsqr = mod.get_slope_ln_intensity_vs_gsqr(gsqr = gsqr_integrated, ln_intensity = ln_simple_intensity_integrated)
debye_temperature = mod.calc_debye_temperature(slope_ln_intensity_vs_gsqr = slope_ln_complex_intensity_integrated_vs_gsqr, mass = mass, md_temperature = md_temperature_2d)
temperature_est_simple_sum, central_temperature_simple_sum = mod.calc_temperature_xrd(slope_ln_intensity_vs_gsqr = slope_ln_simple_intensity_integrated_vs_gsqr, constant_ln_intensity_vs_gsqr = constant_ln_simple_intensity_vs_gsqr, gruneisen_uncompressed = 1.98, a_lattice = a_lattice, compression_factor = compression_factor, mass = mass, pos = pos_integrated, gsqr = gsqr_integrated, uncompressed_pos_est = pos_est, uncompressed_gsqr_est = gsqr_est, plot_name = "ln_I_vs_Gsqr.png", show_plot = False, ln_intensity = ln_simple_intensity_integrated, md_temperature_3d = md_temperature_3d, md_temperature_2d = md_temperature_2d, debye_temperature_uncompressed = 319.059756455)
temperature_est_complex_integration, central_temperature_complex_integration = mod.calc_temperature_xrd(slope_ln_intensity_vs_gsqr = slope_ln_complex_intensity_integrated_vs_gsqr, constant_ln_intensity_vs_gsqr = constant_ln_complex_intensity_vs_gsqr, gruneisen_uncompressed = 1.98, a_lattice = a_lattice, compression_factor = compression_factor, mass = mass, pos = pos_integrated, gsqr = gsqr_integrated, uncompressed_pos_est = pos_est, uncompressed_gsqr_est = gsqr_est, plot_name = "ln_I_vs_Gsqr.png", show_plot = False, ln_intensity = ln_complex_intensity_integrated, md_temperature_3d = md_temperature_3d, md_temperature_2d = md_temperature_2d, debye_temperature_uncompressed = 319.059756455)
mod.profile_peaks(source = source, timestep = timestep, initial_hkl_pos_est = pos_est, make_plots = make_plots)
mod.checkout(xrd_temperatures = [central_temperature_simple_sum, central_temperature_complex_integration], xrd_temperature_labels = ["Temperature by summing\t", "Temperature by integrating"], md_temperatures = [md_temperature_2d, md_temperature_3d], md_temperature_labels = ["2D","3D"])
mod.stopwatch(t0, tpy0)
<file_sep>/development/use_dynamic_peakfinding_for_3DFT.py
def run(current_pos_est, pos_est, source_name, timestep, mass, a_lattice, k_steps, run_soh, k_start, k_stop, soh_command):
import units as un
import logging as log
log.debug("Brick %s started.\n", __name__)
print "Performing accurate 3DFT of each peak..."
for i, pos in enumerate(current_pos_est):
peak_str = un.make_peak_str(pos_est[i])
input_file_location = un.determine_accurate_soh_input_file_location(peak_str)
un.write_soh_input_3DFT(source_name, input_file_location, peak_str, mass, a_lattice, k_steps, k_start[i], k_stop[i])
if run_soh is True:
for i in pos_est:
peak_str = un.make_peak_str(i)
input_file_location = un.determine_accurate_soh_input_file_location(peak_str)
un.run_soh(input_file_location, soh_command)
un.move_soh_accurate_output_to_peak_folder(peak_str, source_name, timestep)
log.debug("Brick %s finished.\n", __name__)
return
<file_sep>/development/units.py
import logging as log
def get_time():
import time
localtime = time.localtime()
t0 = time.time()
return t0, localtime
def build_all_k_values(gsqr_max, negative_k):
# Builds all possible combinations of integers, up to the k-value given by sqrt(gsqr_max), rounded up. It only includes combinations that give gsqr <= gsqr_max. Bool negative_k can be used to include/exclude negative k-values.
import numpy as np
import math
k_max = int(math.ceil(np.sqrt(gsqr_max)))
if negative_k == True:
k_values = range(-k_max, k_max + 1)
elif negative_k == False:
k_values = range(k_max + 1)
else:
print "Incorrect entry:\nnegative_k must be of bool type.\n"
exit()
pos = []
for i in k_values:
for j in k_values:
for k in k_values:
if i**2 + j**2 + k**2 <= gsqr_max:
pos.append([i, j, k])
else:
pass
return pos
def remove_fcc_forbidden_reflections(old_pos):
# Removes pos_est values that are forbidden in fcc crystals. Diffraction is allowed at positions where the k-values are all even or all odd.
new_pos = []
for i in range(len(old_pos)):
if old_pos[i][0] % 2 == 1 and old_pos[i][1] % 2 == 1 and old_pos[i][2] % 2 == 1:
new_pos.append(old_pos[i])
elif old_pos[i][0] % 2 == 0 and old_pos[i][1] % 2 == 0 and old_pos[i][2] % 2 == 0:
new_pos.append(old_pos[i])
else:
pass
return new_pos
def remove_bcc_forbidden_reflections(old_pos):
# Removes pos_est values that are forbidden in bcc crystals. Diffraction is allowed at positions where h + k + l is even.
new_pos = []
for i in range(len(old_pos)):
if (old_pos[i][0] + old_pos[i][1] + old_pos[i][2]) % 2 == 1:
continue
elif (old_pos[i][0] + old_pos[i][1] + old_pos[i][2]) % 2 == 0:
new_pos.append(old_pos[i])
else:
pass
return new_pos
def remove_000(old_pos):
# Removes [0, 0, 0] from pos.
new_pos = []
for i in old_pos:
if i != [0, 0, 0]:
new_pos.append(i)
return new_pos
def get_gsqr_values(pos):
# Calculates the value of G^2 for each position in pos.
gsqr = []
for i in pos:
current_gsqr = (i[0] ** 2) + (i[1] ** 2) + (i[2] ** 2)
gsqr.append(current_gsqr)
return gsqr
def build_datafile_structure(pos):
import os
peak_str = []
for i in pos:
current_peak_str = str(i[0]) + str(i[1]) + str(i[2])
peak_str.append(current_peak_str)
if not os.path.exists("data/" + current_peak_str):
os.makedirs("data/" + current_peak_str)
return peak_str
def make_lineout_directory():
import os
os.makedirs("data/lineouts")
return
def calc_k_offset_with_N_atoms(N_atoms):
offset = [1.0/N_atoms[0], 1.0/N_atoms[1], 1.0/N_atoms[2]]
return offset
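# Note (added, interpretation): an offset of 1/N_i in reciprocal-lattice units is
# presumably used because a crystal that is N_i cells wide has finite-size fringes
# with zeros at multiples of 1/N_i around each Bragg peak, so scanning +/- 1/N_i
# about the estimated centre spans the central maximum of the peak.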
def calc_dk_from_offset(offset, kx_steps, ky_steps, kz_steps):
import numpy as np
dk = [2 * offset[0]/(kx_steps - 1), 2 * offset[1]/(ky_steps - 1), 2 * offset[2]/(kz_steps - 1)]
return dk
def convert_to_per_angstrom(element, a_lattice):
import numpy as np
element = np.asarray(element)
converted_element = element * ( (2 * np.pi) / a_lattice )
converted_element = list(converted_element)
return converted_element
def make_peak_str(i):
peak_str = str(i[0]) + str(i[1]) + str(i[2])
return peak_str
def find_simple_k_start(pos_element, offset):
k_start = [pos_element[0] - offset[0], pos_element[1] - offset[1], pos_element[2] - offset[2]]
return k_start
def find_simple_k_stop(pos_element, offset):
k_stop = [pos_element[0] + offset[0], pos_element[1] + offset[1], pos_element[2] + offset[2]]
return k_stop
def calc_lineout_k_start_stop(centre, under_shoot, over_shoot):
k_start = [centre[0] * under_shoot, centre[1] * under_shoot, centre[2] * under_shoot]
k_stop = [centre[0] * over_shoot, centre[1] * over_shoot, centre[2] * over_shoot]
return k_start, k_stop
def calc_lineout_k_start_stop_along_xyz(k_start_3D, k_stop_3D, centre):
# Given two 3D 'k_start' and 'k_stop' points, this function will return three pairs of k_start values required to
# perform a 1DFT lineout through x, y, and z.
kx_start = list([k_start_3D[0], centre[1], centre[2]])
kx_stop = list([k_stop_3D[0], centre[1], centre[2]])
ky_start = list([centre[0], k_start_3D[1], centre[2]])
ky_stop = list([centre[0], k_stop_3D[1], centre[2]])
kz_start = list([centre[0], centre[1], k_start_3D[2]])
kz_stop = list([centre[0], centre[1], k_stop_3D[2]])
return kx_start, kx_stop, ky_start, ky_stop, kz_start, kz_stop
def calc_peak_edge_k_start_stop(predicted_peak_centre, under_shoot, over_shoot):
k_start = [predicted_peak_centre[0] - under_shoot, predicted_peak_centre[1] - under_shoot, predicted_peak_centre[2] - under_shoot]
k_stop = [predicted_peak_centre[0] + over_shoot, predicted_peak_centre[1] + over_shoot, predicted_peak_centre[2] + over_shoot]
return k_start, k_stop
def determine_soh_compression_finding_input_file_location(direction):
import os
cwd = os.getcwd()
input_file_location = cwd + "/data/lineouts/" + direction + "_lineout.in"
return input_file_location
def determine_soh_edge_finding_input_file_location(direction, peak_str):
import os
cwd = os.getcwd()
input_file_location = cwd + "/data/" + peak_str + "/" + direction + "_lineout.in"
return input_file_location
def determine_accurate_soh_input_file_location(peak_str):
import os
cwd = os.getcwd()
input_file_location = cwd + "/data/" + peak_str + "/" + peak_str + ".in"
return input_file_location
def determine_rough_soh_input_file_location(peak_str, filename):
import os
cwd = os.getcwd()
input_file_location = cwd + "/data/" + peak_str + "/" + filename
return input_file_location
def determine_rough_lineout_soh_input_file_location(peak_str):
import os
cwd = os.getcwd()
input_file_location = cwd + "/data/" + peak_str + "/find_edge_" + peak_str + ".in"
return input_file_location
def write_soh_input_1DFT(source_name, file_destination, appended_string, mass, a_lattice, k_start, k_stop, k_steps):
import os
cwd = os.getcwd()
source_location = cwd + "/lammps/" + source_name
string_to_write = ("VERBOSE 0"
+ "\nFILE_TYPE lammps-multi"
+ "\nDATA_FILE " + str(source_location)
+ "\nAPPEND_FILE_NAME " + str(appended_string)
+ "\nPLOT_OUTPUT pdf"
+ "\nCOORDS_SCALED"
+ "\nSET_MASS " + str(mass)
+ "\nSET_A_CELL " + str(a_lattice)
+ "\nCALC_1D_FT"
+ "\nSET_K_START " + str(k_start[0]) + " " + str(k_start[1]) + " " + str(k_start[2]) + " "
+ "\nSET_K_STOP " + str(k_stop[0]) + " " + str(k_stop[1]) + " " + str(k_stop[2]) + " "
+ "\nSET_NK " + str(k_steps)
+ "\n"
)
f = open(file_destination, "w")
f.write(string_to_write)
f.close()
return string_to_write
def write_soh_input_3DFT(source_name, file_destination, appended_string, mass, a_lattice, k_steps, k_start, k_stop):
import os
cwd = os.getcwd()
source_location = cwd + "/lammps/" + source_name
string_to_write = ("VERBOSE 0"
+ "\nFILE_TYPE lammps-multi"
+ "\nDATA_FILE " + str(source_location)
+ "\nAPPEND_FILE_NAME " + str(appended_string)
+ "\nPLOT_OUTPUT pdf"
+ "\nCOORDS_SCALED"
+ "\nSET_MASS " + str(mass)
+ "\nSET_A_CELL " + str(a_lattice)
+ "\nCALC_3D_FT"
+ "\nSET_KX " + str(k_start[0]) + " " + str(k_stop[0]) + " " + str(k_steps)
+ "\nSET_KY " + str(k_start[1]) + " " + str(k_stop[1]) + " " + str(k_steps)
+ "\nSET_KZ " + str(k_start[2]) + " " + str(k_stop[2]) + " " + str(k_steps)
+ "\n"
)
f = open(file_destination, "w")
f.write(string_to_write)
f.close()
return string_to_write
def run_soh(input_file_location, soh_command):
import subprocess
shell_command = soh_command + input_file_location + " >/dev/null"
subprocess.call(shell_command, shell=True)
return
def move_soh_accurate_output_to_peak_folder(peak_str, source_name, timestep):
import shutil
origin = "./lammps/" + source_name + "." + timestep + "." + peak_str + ".ft"
destination = "./data/" + peak_str + "/"
shutil.move(origin, destination)
return
def move_soh_rough_output_to_peak_folder(peak_str, appended_string, source_name, timestep):
import shutil
origin = "./lammps/" + source_name + "." + timestep + "." + appended_string + ".ft"
destination = "./data/" + peak_str + "/"
shutil.move(origin, destination)
return
def move_soh_output_to_lineout_folder(lineout_str, source_name, timestep):
import shutil
origin = "./lammps/" + source_name + "." + timestep + ".lineout_" + lineout_str + ".ft"
destination = "./data/lineouts/"
shutil.move(origin, destination)
return
def move_plot_output_to_peak_folder(direction, peak_str):
import shutil
origin = direction + ".png"
destination = "./data/" + peak_str + "/"
shutil.move(origin, destination)
return
def determine_accurate_soh_output_file_location(peak_str, source_name, timestep):
import os
cwd = os.getcwd()
output_file_location = cwd + "/data/" + peak_str + "/" + source_name + "." + timestep + "." + peak_str + ".ft"
return output_file_location
def determine_rough_soh_output_file_location(peak_str, source_name, timestep, appended_string):
import os
cwd = os.getcwd()
output_file_location = cwd + "/data/" + peak_str + "/" + source_name + "." + timestep + "." + appended_string + ".ft"
return output_file_location
def determine_soh_1DFT_output_file_location(direction_str, source_name, timestep):
import os
cwd = os.getcwd()
output_file_location = cwd + "/data/lineouts/" + source_name + "." + timestep + ".lineout_" + direction_str + ".ft"
return output_file_location
def determine_soh_edge_finding_output_file_location(peak_str, direction_str, source_name, timestep):
import os
cwd = os.getcwd()
output_file_location = cwd + "/data/" + peak_str + "/" + source_name + "." + timestep + "." + peak_str + "_find_edges_" + direction_str + ".ft"
return output_file_location
def determine_edge_finding_soh_output_file_location(direction_str, source_name, timestep):
import os
cwd = os.getcwd()
output_file_location = cwd + "/data/lineouts/" + source_name + "." + timestep + ".lineout_" + direction_str + ".ft"
return output_file_location
def read_from_soh_output(filename):
import pandas as pd
data = pd.read_csv(filename, delimiter=" ")
kx = data.T.iloc[0].values.tolist() # This list is generated by Transposing the pandas dataFrame object (labelled
# "data"), using iloc to select a column by index, turning it into a numpy array, and then turning the array into a
# list.
ky = data.T.iloc[1].values.tolist()
kz = data.T.iloc[2].values.tolist()
intensity = data.T.iloc[5].values.tolist()
soh_output = [kx, ky, kz, intensity]
return soh_output
def find_point_of_max_height(soh_output):
import numpy as np
max_height_index = np.argmax(soh_output[3])
point_of_max_height = [soh_output[0][max_height_index], soh_output[1][max_height_index], soh_output[2][max_height_index]]
return point_of_max_height
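# Note (added): calc_dvol() and calc_integrated_intensity() below approximate the
# integral of the 3DFT intensity over the sampled k-space box as a Riemann sum,
# sum(intensity) * dkx * dky * dkz, with the grid spacings recovered from the k
# extents assuming a cubic grid of len(soh_output[0])**(1/3) points per axis.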
def calc_dvol(soh_output):
k_step = len(soh_output[0]) ** (1.0/3.0)
dkx = ( max(soh_output[0]) - min(soh_output[0]) ) / (k_step - 1)
dky = ( max(soh_output[1]) - min(soh_output[1]) ) / (k_step - 1)
dkz = ( max(soh_output[2]) - min(soh_output[2]) ) / (k_step - 1)
dvol = dkx * dky * dkz
return dvol
def calc_integrated_intensity(soh_output, dvol):
intensity_sum = sum(soh_output[3])
integrated_intensity = dvol * intensity_sum
return integrated_intensity
def get_ln_intensity(intensity):
import numpy as np
ln_intensity = np.log(intensity)
return ln_intensity
def calc_line_slope_and_constant(x, y):
import numpy as np
slope, constant = np.polyfit(x, y, 1, cov=False)
return slope, constant
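# Note (added): the Debye-Waller relation as used in this code is that the slope of
# ln(I) vs |G|^2 (G in A^-1) equals -C * T / theta_D^2, with
# C = 3 h^2 N_A * 1e20 / (4 pi^2 (m * 1e-3) k) and m the molar mass in g/mol.
# calc_debye_waller_constant() returns C, calc_debye_temperature_xrd() inverts the
# relation as theta_D = sqrt(C * T / |slope|), and calc_temperature_xrd() further
# down returns T = theta_D^2 * |slope| / C.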
def calc_debye_waller_constant(m):
from scipy.constants import h, N_A, k, pi
debye_waller_constant = (10 ** 20) * 3 * (h ** 2) * N_A / (4 * (pi ** 2) * m * (10 ** -3) * k)
return debye_waller_constant
def calc_debye_temperature_xrd(temperature, slope, debye_waller_constant):
import numpy as np
debye_temperature = np.sqrt(debye_waller_constant * temperature * abs( 1.0 / slope ))
return debye_temperature
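# Note (added): the single-term model below evaluates
#   theta_D(V) = theta_D(V0) * exp[ -(gamma_0 / (q * V0**q)) * (V**q - V0**q) ],
# which is what integrating gamma(V) = gamma_0 * (V / V0)**q with
# gamma = -d ln(theta_D) / d ln(V) gives; here q is the 'exponent' argument,
# V0 the initial (uncompressed) volume and V the final (compressed) volume.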
def calc_debye_temperature_from_single_term_gruneisen_model(debye_temperautre_300K_uncompressed, initial_volume, final_volume, gamma_uncompressed, exponent):
import numpy as np
# See <NAME> PHYSICAL REVIEW B 78, 014109 (2008) for the source of this equation.
exponent_term = - (gamma_uncompressed / (exponent * (initial_volume ** exponent))) * ((final_volume ** exponent) - (initial_volume ** exponent))
correction_factor = np.exp(exponent_term)
model_debye_temperature = debye_temperautre_300K_uncompressed * correction_factor
return model_debye_temperature
def calc_debye_temperature_from_triple_term_gruneisen_model(debye_temperature_300K_uncompressed, initial_volume, final_volume, gamma_uncompressed, constants):
import numpy as np
# See <NAME>phy PHYSICAL REVIEW B 78, 014109 (2008) for the source of this equation.
constant_term_1 = gamma_uncompressed - constants[0] + constants[1] - constants[2]
volume_term_1 = np.log(final_volume) - np.log(initial_volume)
constant_term_2 = - constants[0] + 2 * constants[1] - 3 * constants[2]
volume_term_2 = initial_volume * ((1 / final_volume) - (1 / initial_volume))
constant_term_3 = - (constants[1] / 2.0) + (3 * constants[2] / 2.0)
volume_term_3 = (initial_volume ** 2) * ((1 / (final_volume ** 2)) - (1 / (initial_volume ** 2)))
constant_term_4 = - constants[2] / 3.0
volume_term_4 = (initial_volume ** 3) * ((1 / (final_volume ** 3)) - (1 / (initial_volume ** 3)))
exponent_term = (constant_term_1 * volume_term_1) + (constant_term_2 * volume_term_2) + (constant_term_3 * volume_term_3) + (constant_term_4 * volume_term_4)
correction_factor = np.exp(- exponent_term)
model_debye_temperature = debye_temperature_300K_uncompressed * correction_factor
return model_debye_temperature
def calc_volume_lattice_units(a_lattice, compression_factors):
volume = (a_lattice ** 3) * (compression_factors[0] * compression_factors[1] * compression_factors[2])
return volume
def calc_temperature_xrd(debye_temperature, slope, debye_waller_constant):
temperature = (debye_temperature ** 2) * abs(slope) * (1.0 / debye_waller_constant)
return temperature
def plot_matplotlib(x, y, filename, x_label, y_label, plot_title):
import matplotlib.pyplot as plt
plt.scatter(x, y)
plt.rcParams.update({'font.size': 17})
plt.xlabel(x_label)
plt.ylabel(y_label)
plt.title(plot_title)
plt.tight_layout()
plt.savefig(filename)
plt.close()
return
def plot_debye_waller(x, y, filename, x_label, y_label, plot_title, slope, constant):
import matplotlib.pyplot as plt
line_x = [0.0, max(x)]
line_y = [constant, constant + slope*max(x)]
plt.scatter(x, y, label='peak intensities')
plt.plot(line_x, line_y, label='line fit')
plt.rcParams.update({'font.size': 16})
plt.xlabel(x_label)
plt.ylabel(y_label)
plt.title(plot_title)
plt.legend()
plt.tight_layout()
plt.savefig(filename)
plt.close()
return
def plot_pyqtgraph(x, y, filename):
import pyqtgraph as pg
import pyqtgraph.exporters
class MyPlotClass():
def __init__(self):
self.windowplt = pg.plot()
self.windowplt.win.hide()
def savePlot(self, x, y, filename):
self.windowplt.plot(x, y)
exporter = pg.exporters.ImageExporter(self.windowplt.plotItem)
exporter.params.param('width').setValue(256, blockSignal=exporter.widthChanged)
exporter.params.param('height').setValue(256, blockSignal=exporter.heightChanged)
exporter.export(filename)
save_plot = MyPlotClass()
save_plot.savePlot(x, y, filename)
return
def plot_pygnuplot(x, y, filename, data_filename):
import PyGnuplot as gnu
gnu.s([x,y], data_filename)
gnu.c('set terminal pngcairo size 350,262 enhanced font "Verdana,10"')
gnu.c('set output "' + filename + '"')
gnu.c('plot "' + data_filename + '" pt 1 ps 0.5')
return
def find_line_data_from_3DFT(constant_axes, variable_axis, centre_point, soh_output):
constant_value_0 = centre_point[constant_axes[0]]
constant_value_1 = centre_point[constant_axes[1]]
line_points = []
line_intensity = []
for i, intensity in enumerate(soh_output[3]):
if soh_output[constant_axes[0]][i] == constant_value_0 and soh_output[constant_axes[1]][i] == constant_value_1:
line_k = soh_output[variable_axis][i]
line_points.append(line_k)
line_intensity.append(intensity)
return line_points, line_intensity
def write_temperatures_to_file(slope, debye_temperature, temperature, model_debye_temperatures, filename_temperatures):
f = open(filename_temperatures, "w")
f.write(
"Slope of ln(I) vs. G^2\t\t" + str(slope) + "\n"
"XRD Debye temperature (using slope of ln(I) vs. G^2 and temperature calculated from MD)\t\t" + str(debye_temperature) + "\n"
"Debye temperature as modelled, using calculated compression\t\t" + str(model_debye_temperatures) + "\n"
"Temperature (using slope of ln(I) vs. G^2 and models of Debye temperature)\t\t" + str(temperature)
)
f.close()
return
def write_directional_temperatures_to_file(debye_temperature, temperature, filename_temperatures):
f = open(filename_temperatures, "w")
f.write(
"XRD Debye temperature (using slope of ln(I) vs. G^2 and temperature calculated from MD)\t\t\t\t" + str(debye_temperature) + "\n"
"Temperature from MD\t\t\t\t\t\t" + str(temperature)
)
f.close()
return
def write_peak_intensities_to_file(pos_est, peak_centre, gsqr, integrated_intensity, ln_intensity, filename):
header_string = "ln(I) G^2 peak peak_centre integrated_intensity\n"
f = open(filename, "w")
f.write(header_string)
for i, pos in enumerate(pos_est):
f.write("%s %s %s %s %s\n" % (ln_intensity[i], gsqr[i], pos, peak_centre[i], integrated_intensity[i]))
f.close()
return
def find_if_vectors_parallel(v_1, v_2):
import numpy as np
import math
length_1 = np.linalg.norm(v_1)
length_2 = np.linalg.norm(v_2)
if length_1 < 0.001:
result = False
elif length_2 < 0.001:
result = False
else:
normalised_1 = v_1 / length_1
normalised_2 = v_2 / length_2
dot_prod = np.dot(normalised_1, normalised_2)
if math.isnan(dot_prod) is True:
result = False
        elif abs(dot_prod - 1.0) < 0.001:  # tolerance check; int(dot_prod) truncates values such as 0.9999 to 0 and misses parallel vectors
result = True
else:
result = False
return result
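# Illustrative usage of find_if_vectors_parallel (values chosen for the example only):
#     find_if_vectors_parallel([1.0, 0.0, 0.0], [2.0, 0.0, 0.0])   # True, same direction
#     find_if_vectors_parallel([1.0, 0.0, 0.0], [0.0, 1.0, 0.0])   # False, orthogonal
#     find_if_vectors_parallel([1.0, 0.0, 0.0], [-1.0, 0.0, 0.0])  # False, anti-parallel (dot product of -1)
# Vectors with a norm below the 0.001 tolerance are always reported as not parallel.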
def calc_compression_ratio(compressed_k, uncompressed_k):
compression_ratio = compressed_k/uncompressed_k
return compression_ratio
def apply_compression_ratio_to_pos_est(pos_est, gsqr_est, compression_ratio):
import copy
compressed_pos_est = copy.deepcopy(pos_est)
compressed_gsqr_est = copy.deepcopy(gsqr_est)
for i, pos in enumerate(compressed_pos_est):
for j, compression in enumerate(compression_ratio):
pos[j] = pos[j] * compression
compressed_gsqr_est[i] = (pos[0] ** 2) + (pos[1] ** 2) + (pos[2] ** 2)
return compressed_pos_est, compressed_gsqr_est
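# Worked example (illustration only): with compression_ratio = [1.0, 1.0, 1.2], a peak
# estimate of [2, 0, 0] is unchanged, while [0, 0, 2] is scaled to [0.0, 0.0, 2.4]
# with a compressed G^2 of 5.76.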
def find_k_start_stop_for_peak_from_first_minima(k_data, intensity):
centre_index = len(k_data)/2
for i, k in enumerate(k_data):
if centre_index - i == 0:
print "Couldn't find intensity minimum for k_start."
exit()
intensity_diff = intensity[centre_index - i] - intensity[centre_index - i - 1]
if intensity_diff >= 0.0:
continue
elif intensity_diff < 0.0:
k_start = k_data[centre_index - i]
break
for i, k in enumerate(k_data):
if centre_index + i == (len(k_data) - 1):
print "Couldn't find intensity minimum for k_stop."
exit()
intensity_diff = intensity[centre_index + i] - intensity[centre_index + i + 1]
if intensity_diff >= 0.0:
continue
elif intensity_diff <= 0.0:
k_stop = k_data[centre_index + i]
break
return k_start, k_stop
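# Sketch of the behaviour on a toy lineout (illustration only): with
#     k_data    = [0, 1, 2, 3, 4, 5, 6]
#     intensity = [1, 2, 1, 9, 1, 2, 1]
# the walk starts at the centre index (the peak at k = 3) and moves outwards until the
# intensity stops falling, returning (k_start, k_stop) = (2, 4), the first minima on
# either side of the peak.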
def calc_overstepped_k_start_k_stop(pos, undershoot, overshoot):
k_start = [0.0, 0.0, 0.0]
k_stop = [0.0, 0.0, 0.0]
for i, k in enumerate(pos):
k_start[i] = k - undershoot[i]
k_stop[i] = k + overshoot[i]
return k_start, k_stop
def calc_MD_temperature(lammps_file_location, user_input_temperature, temperature_dimensionality, atomic_mass, velocity_columns):
import numpy as np
from scipy.constants import k
try:
vx, vy, vz = np.loadtxt("lammps/" + lammps_file_location, skiprows=9, usecols=(velocity_columns[0], velocity_columns[1], velocity_columns[2]), unpack=True)
number_of_atoms = len(vx)
velocity_squared = [0] * number_of_atoms
        if temperature_dimensionality == 2:
for i in range(len(vx)):
velocity_squared[i] = (vx[i] ** 2) + (vy[i] ** 2)
        elif temperature_dimensionality == 3:
for i in range(len(vx)):
velocity_squared[i] = (vx[i] ** 2) + (vy[i] ** 2) + (vz[i] ** 2)
velocity_squared_sum = sum(velocity_squared)
        MD_temperature = (1.660539040e-27 * 10000 * atomic_mass * velocity_squared_sum) / (temperature_dimensionality * number_of_atoms * k) # The number 1.660539040e-27 converts the atomic mass from amu to kg. The factor of 10000 converts the squared velocities from (Angstrom/ps)^2 to (m/s)^2, since 1 Angstrom/ps = 100 m/s.
except:
print "######### WARNING: calc_MD_temperature: Could not load values for velocity.\n\tThis could be due to " \
+ "the LAMMPS file not having enough columns. Check the LAMMPS file has the velocities set to column " \
+ str(velocity_columns[0]) + ", " + str(velocity_columns[1]) + ", and " \
+ str(velocity_columns[2]) + " (where 0 corresponds to the first column).\n\tThe user defined temperature" \
+ " will be used instead: " + str(user_input_temperature) + " K"
MD_temperature = user_input_temperature
velocity_squared = [0]
return MD_temperature, velocity_squared
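# Note on the temperature expression above (equipartition, written out for clarity):
#     T = m * sum(v_i^2) / (d * N * k_B)
# with m in kg, v in m/s, d = temperature_dimensionality and N = number_of_atoms.
# Rough sanity check with hypothetical numbers: for a ~63.5 amu atom (copper-like), a
# mean squared speed of ~11.8 (Angstrom/ps)^2 per atom in 3D gives
#     1.6605e-27 * 63.5 * 11.8e4 / (3 * 1.3807e-23) ~ 300 K.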
def bin_values(number_of_bins, list_to_bin):
import numpy as np
histogram = np.histogram(list_to_bin, number_of_bins)
return histogram
def calc_maxwell_boltzmann_velocity_distribution(max_speed, number_of_speeds_to_calculate, frequency):
import numpy as np
from scipy.optimize import curve_fit
def maxwell_boltzmann_probability_distribution_function(x, a):
p = np.sqrt(2/np.pi) * (x ** 2) * np.exp((-x ** 2)/(2 * a ** 2)) * (1.0/a ** 3)
return p
input_speed_list = np.linspace(0.0, max_speed, number_of_speeds_to_calculate)
    popt, pcov = curve_fit(maxwell_boltzmann_probability_distribution_function, input_speed_list, frequency)  # curve_fit returns (best-fit parameters, covariance)
p_mb = []
longer_speed_list = np.linspace(0.0, max_speed, 100)
for s in longer_speed_list:
        p_mb.append(maxwell_boltzmann_probability_distribution_function(s, popt[0]))
return p_mb, longer_speed_list
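# Minimal usage sketch (hypothetical variable names; the pairing with bin_values is an
# assumption about intended use, not something enforced by the code):
#     counts, bin_edges = bin_values(50, speeds)
#     density = counts / (float(sum(counts)) * (bin_edges[1] - bin_edges[0]))
#     p_mb, mb_speeds = calc_maxwell_boltzmann_velocity_distribution(max(speeds), 50, density)
# The frequency argument must have length number_of_speeds_to_calculate and should be
# normalised like a probability density; note the fit is evaluated on a linspace from
# 0 to max_speed, which only approximately matches the histogram bin centres.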
def plot_velocity_distribution(maxwell_boltzmann_probabilities, maxwell_boltzmann_speeds, md_populations, md_speeds):
import matplotlib.pyplot as plt
plt.plot(maxwell_boltzmann_speeds, maxwell_boltzmann_probabilities)
plt.scatter(md_speeds, md_populations)
plt.rcParams.update({'font.size': 17})
plt.title('MD Speed Distribution')
plt.xlabel('Speed ($\AA$ / ps)')
plt.ylabel('Frequency')
plt.tight_layout()
plt.savefig('speed_distribution_md_vs_boltzmann.png')
plt.close()
return
def plot_histogram(histogram, filename, x_label, y_label, plot_title):
import matplotlib.pyplot as plt
    counts, bin_edges = histogram  # np.histogram returns (counts, bin_edges), which plt.hist cannot plot directly
    plt.bar(bin_edges[:-1], counts, width=bin_edges[1:] - bin_edges[:-1])
plt.xlabel(x_label)
plt.ylabel(y_label)
plt.title(plot_title)
plt.savefig(filename)
plt.close()
return
def create_rotation_matrix_for_111_rotation():
import numpy as np
theta_x = np.pi / 2.0 - np.arctan(
1 / np.sqrt(2)) # Calculated by looking at a 111 vector which has been rotated by 45 degrees around the z-axis.
theta_z = np.pi / 4.0 # Rotate around z-axis by 45 degrees.
rot_x = np.array([[1, 0, 0], [0, np.cos(theta_x), -np.sin(theta_x)], [0, np.sin(theta_x), np.cos(
theta_x)]]) # creates the array which rotates the positions around the x-axis i.e. rotation matrix
rot_z = np.array([[np.cos(theta_z), -np.sin(theta_z), 0], [np.sin(theta_z), np.cos(theta_z), 0], [0, 0,1]])
# same as above, this time around z.
# Note that we won't create a y-rotation matrix since it isn't needed in this instance of 111.
return rot_x, rot_z
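# Quick check of the matrices above (illustration, with numpy imported as np): applying
# rot_z and then rot_x to the [1, 1, 1] direction leaves it parallel to the z-axis,
#     rot_x, rot_z = create_rotation_matrix_for_111_rotation()
#     np.dot(rot_x, np.dot(rot_z, [1.0, 1.0, 1.0]))   # ~ [0, 0, sqrt(3)]
# which is the same order of application used in rotate_pos_est_using_rotation_matrices below.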
def rotate_pos_est_using_rotation_matrices(pos_est, rot_x, rot_z):
import numpy as np
rot_pos_est = [0] * len(pos_est)
pos_est = np.asarray(pos_est) # Converts pos_est to an array so we can multiply it by our rotational matrices.
for i in range(len(pos_est)): # Loops over all peaks to populate the pos_est list with rotated peak positions.
new = np.dot(rot_z, pos_est[i]) # First matrix multiply the z-rotation with the original position estimate to
# get "new", an intermediate variable.
new_2 = np.dot(rot_x, new) # Then matrix multiply the x-rotation with "new" to get the array version of the
# rotated peak.
        rot_pos_est[i] = list(new_2) # Convert this to a list (for compatibility with the rest of the code).
return rot_pos_est
def triangulate_peak_centre_octant(soh_output):
kx, ky, kz, intensity = soh_output[0], soh_output[1], soh_output[2], soh_output[5]
return
def calc_debye_temp_from_MD_model(coeff, volume_ratio):
if len(coeff) == 4:
debye_temperature = (coeff[0] * (volume_ratio ** 3)) + (coeff[1] * (volume_ratio ** 2)) \
+ (coeff[2] * (volume_ratio)) + coeff[3]
elif len(coeff) == 6:
debye_temperature = (coeff[0] * (volume_ratio ** 5)) + (coeff[1] * (volume_ratio ** 4)) \
+ (coeff[2] * (volume_ratio ** 3)) + (coeff[3] * (volume_ratio ** 2)) \
+ (coeff[4] * (volume_ratio)) + coeff[5]
else:
print "############### MD Debye Model did not return a value; returning a value of 1.0 ###################"
debye_temperature = 1.0
return debye_temperature
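# Note: the cubic and quintic evaluations above are equivalent to
# np.polyval(coeff, volume_ratio) with the coefficients ordered from the highest power
# down to the constant term; np.polyval would express the same model more compactly if
# other polynomial orders are ever needed.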
<file_sep>/development/finalise.py
def run():
import logging as log
import shutil
shutil.move("log.pkfd", "data/log.pkfd")
shutil.move("speed_distribution_md_vs_boltzmann.png", "data/speed_distribution_md_vs_boltzmann.png")
log.info("Peakfinder finalised.")
print "Finished!"
return
<file_sep>/src/path_2_dynamic_peakfinding.py
def run():
import inpkfd as ip
import select_peak_positions
import build_datafile_structure
import use_soh_for_3DFT
import calc_peak_intensities
import calc_debye_waller
import write_output_files
import plot_debye_waller
import plot_peaks
import logging as log
log.info("Path %s started.\n", __name__)
pos_est, gsqr_est = select_peak_positions.run(ip.gsqr_max, ip.negative_k, ip.remove_000)
peak_str = build_datafile_structure.run(pos_est)
use_soh_for_3DFT.run(pos_est, ip.source_name, ip.timestep, ip.mass, ip.a_lattice, ip.N_atoms, ip.k_steps,
ip.run_soh, ip.num_cores)
peak_centre, integrated_intensity = calc_peak_intensities.run(pos_est, ip.source_name, ip.timestep)
debye_temperature, temperature, gsqr_per_angstrom, ln_intensity = calc_debye_waller.run(
peak_centre, integrated_intensity, ip.a_lattice, ip.mass,
ip.temperature, ip.uncompressed_debye_temperature,
ip.single_term_model_gamma_0_values,
ip.single_term_model_exponent_values,
ip.triple_term_model_gamma_0_values,
ip.triple_term_model_constants)
write_output_files.run(debye_temperature, temperature, pos_est, peak_centre, gsqr_per_angstrom, integrated_intensity, ln_intensity)
plot_debye_waller.run(gsqr_per_angstrom, ln_intensity, pos_est, ip.temperature, ip.mass)
if ip.make_final_peak_plots is True:
plot_peaks.run(peak_str, peak_centre, ip.source_name, ip.timestep)
log.info("Path %s finished.\n", __name__)
return
<file_sep>/development/write_output_files.py
def run(debye_temperature, temperature, model_debye_temperatures, pos_est, peak_centre, gsqr, integrated_intensity, ln_intensity, slope):
import units as un
filename_temperatures = "data/results.pkfd"
un.write_temperatures_to_file(slope, debye_temperature, temperature, model_debye_temperatures, filename_temperatures)
filename_peaks = "data/integrated_intensity.dat"
un.write_peak_intensities_to_file(pos_est, peak_centre, gsqr, integrated_intensity, ln_intensity, filename_peaks)
    return
<file_sep>/peakrunner/rewrite_inpkfd.py
def run(copy_location_list):
    import imp
    for copy_location in copy_location_list:
        # importlib.import_module expects a dotted module name, not a filesystem path,
        # so the inpkfd module is loaded directly from its file here instead.
        inpkfd_location = copy_location + "/inpkfd.py"
        inpkfd = imp.load_source("inpkfd", inpkfd_location)
print inpkfd
return
<file_sep>/test/use_soh_for_3DFT.py
def run(pos_est, source_location, mass, a_lattice, N_atoms, k_steps, run_soh):
import units as un
import logging as log
log.debug("Brick %s started.\n", __name__)
offset = un.calc_k_offset_with_N_atoms(N_atoms)
for i in pos_est:
peak_str = un.make_peak_str(i)
input_file_location = un.determine_accurate_soh_input_file_location(peak_str)
k_start = un.find_simple_k_start(i, offset)
k_stop = un.find_simple_k_stop(i, offset)
k_start = un.convert_to_per_angstrom(k_start, a_lattice)
k_stop = un.convert_to_per_angstrom(k_stop, a_lattice)
un.write_soh_input_3DFT(source_location, input_file_location, peak_str, mass, a_lattice, k_steps, k_start, k_stop)
if run_soh == True:
for i in pos_est:
            peak_str = un.make_peak_str(i)  # recompute for the current peak; previously the stale peak_str from the loop above was reused
            input_file_location = un.determine_accurate_soh_input_file_location(peak_str)
un.run_soh(input_file_location)
log.debug("Brick %s finished.\n", __name__)
return
<file_sep>/test/peakfinder.py
import inpkfd
import importlib
import initialise
import finalise
path = importlib.import_module(inpkfd.path) # Imports the path specified in inpkfd.
initialise.run()
path.run()
finalise.run()
<file_sep>/development/calc_debye_waller.py
def run(peak_centre, intensity, a_lattice, mass, temperature, uncompressed_debye_temperature, single_term_model_gamma_0_values, single_term_model_q_values, triple_term_model_gamma_0_values, triple_term_constant_values, compression_ratio, MD_debye_temperature_coefficients):
import units as un
import logging as log
log.debug("Brick %s started.\n", __name__)
print "Calculating Debye-Waller effect..."
ln_intensity = []
for i in intensity:
current_ln_intensity = un.get_ln_intensity(i)
ln_intensity.append(current_ln_intensity)
peak_centre_per_angstrom = []
for i in peak_centre:
g_per_angstrom = un.convert_to_per_angstrom(i, a_lattice)
peak_centre_per_angstrom.append(g_per_angstrom)
gsqr_per_angstrom = un.get_gsqr_values(peak_centre_per_angstrom)
slope, constant = un.calc_line_slope_and_constant(gsqr_per_angstrom, ln_intensity)
debye_waller_constant = un.calc_debye_waller_constant(mass)
debye_temperature_xrd = un.calc_debye_temperature_xrd(temperature, slope, debye_waller_constant)
initial_volume = un.calc_volume_lattice_units(a_lattice, [1.0, 1.0, 1.0])
compression_factors = [1.0/compression_ratio[0], 1.0/compression_ratio[1], 1.0/compression_ratio[2]]
final_volume = un.calc_volume_lattice_units(a_lattice, compression_factors)
volume_ratio = final_volume / initial_volume
model_debye_temperatures = []
MD_debye_temperature = un.calc_debye_temp_from_MD_model(MD_debye_temperature_coefficients, volume_ratio)
model_debye_temperatures.append(MD_debye_temperature)
for i in range(len(single_term_model_gamma_0_values)):
debye_temperature = un.calc_debye_temperature_from_single_term_gruneisen_model( \
uncompressed_debye_temperature, initial_volume, final_volume, single_term_model_gamma_0_values[i],
single_term_model_q_values[i])
model_debye_temperatures.append(debye_temperature)
for i in range(len(triple_term_model_gamma_0_values)):
debye_temperature = un.calc_debye_temperature_from_triple_term_gruneisen_model( \
uncompressed_debye_temperature, initial_volume, final_volume, triple_term_model_gamma_0_values[i],
triple_term_constant_values[i])
model_debye_temperatures.append(debye_temperature)
temperature_xrd = []
for theta in model_debye_temperatures:
current_temperature_xrd = un.calc_temperature_xrd(theta, slope, debye_waller_constant)
temperature_xrd.append(current_temperature_xrd)
log.debug("Brick %s finished.\n", __name__)
return debye_temperature_xrd, temperature_xrd, model_debye_temperatures, gsqr_per_angstrom, ln_intensity, slope
<file_sep>/development/apply_compression_ratio.py
def run(pos_est, gsqr_est, compression_ratio):
import units as un
compressed_pos_est, compressed_gsqr_est = un.apply_compression_ratio_to_pos_est(pos_est, gsqr_est,
compression_ratio)
    return compressed_pos_est, compressed_gsqr_est
<file_sep>/test/finalise.py
def run():
import units as un
import logging as log
log.info("Peakfinder finalised.")
return
<file_sep>/development/path_2_dynamic_peakfinding.py
def run():
import inpkfd as ip
import select_peak_positions
import build_datafile_structure
import use_dynamic_peakfinding_for_3DFT
import calc_peak_intensities
import calc_debye_waller
import write_output_files
import plot_debye_waller
import plot_peaks
import find_compression_ratio
import fit_to_peak_centres
import overstep_peak_edges
import calc_md_temperature
import logging as log
log.info("Path %s started.\n", __name__)
current_md_temperature = ip.temperature
if ip.calc_md_temperature_from_dump_file is True:
current_md_temperature = calc_md_temperature.run(ip.source_name, ip.temperature, ip.calculated_temperature_dimensionality, ip.mass, ip.velocity_columns, ip.number_velocity_bins)
raw_pos_est, raw_gsqr_est = select_peak_positions.run(ip.gsqr_max, ip.negative_k, ip.remove_000, ip.crystal_type)
current_pos_est = raw_pos_est
current_gsqr_est = raw_gsqr_est
peak_str = build_datafile_structure.run(current_pos_est)
compression_ratio = find_compression_ratio.run(ip.run_soh, ip.uncompressed_peak_positions, ip.compression_ratio_undershoot,
ip.compression_ratio_overshoot, ip.source_name, ip.mass, ip.a_lattice,
ip.lineout_k_steps, ip.timestep, ip.soh_command)
fitted_pos_est = fit_to_peak_centres.run(ip.run_soh, raw_pos_est, current_pos_est, current_gsqr_est,
compression_ratio, ip.source_name, ip.N_atoms,
ip.mass, ip.a_lattice, ip.k_steps_find_centre_1DFT,
ip.k_steps_find_centre_3DFT, ip.timestep, ip.soh_command,
ip.make_plots_peak_centre_fit)
current_pos_est = fitted_pos_est
k_start_accurate, k_stop_accurate = overstep_peak_edges.run(current_pos_est, ip.peak_edge_undershoot, ip.peak_edge_overshoot)
use_dynamic_peakfinding_for_3DFT.run(current_pos_est, raw_pos_est, ip.source_name, ip.timestep, ip.mass, ip.a_lattice,
ip.k_steps, ip.run_soh, k_start_accurate, k_stop_accurate, ip.soh_command)
peak_centre, integrated_intensity = calc_peak_intensities.run(raw_pos_est, ip.source_name, ip.timestep)
debye_temperature, xrd_temperature, model_debye_temperatures, gsqr_per_angstrom, ln_intensity, slope = calc_debye_waller.run(
peak_centre, integrated_intensity, ip.a_lattice, ip.mass,
current_md_temperature, ip.uncompressed_debye_temperature,
ip.single_term_model_gamma_0_values,
ip.single_term_model_exponent_values,
ip.triple_term_model_gamma_0_values,
ip.triple_term_model_constants, compression_ratio, ip.polynomial_coeff)
write_output_files.run(debye_temperature, xrd_temperature, model_debye_temperatures, current_pos_est, peak_centre,
gsqr_per_angstrom, integrated_intensity, ln_intensity, slope)
plot_debye_waller.run(gsqr_per_angstrom, ln_intensity, raw_pos_est, current_md_temperature, ip.mass, ip.uncompressed_peak_positions)
if ip.make_final_peak_plots is True:
plot_peaks.run(peak_str, peak_centre, ip.source_name, ip.timestep)
log.info("Path %s finished.\n", __name__)
return
<file_sep>/test/oldddd/testpeakfinder.py
import testmodule
# Unit tests.
testmodule.TestLoadData()
testmodule.TestFindPeakCentre() # Must come after TestLoadData.
testmodule.TestFindOrthogonalLineout() # Must come after TestLoadData.
testmodule.TestFindIntensityMinima1D()
testmodule.TestBuildIntensityVolume()
testmodule.TestGetCompressedGruneisenParameterModel1()
testmodule.TestGetDebyeTemperatureFromGruneisenParameter()
<file_sep>/test/path_1_stat.py
def run():
import select_peak_positions
import build_datafile_structure
import use_soh_for_3DFT
import inpkfd as ip
import logging as log
log.info("Path %s started.\n", __name__)
pos_est, gsqr_est = select_peak_positions.run(ip.gsqr_max, ip.negative_k, ip.remove_000)
build_datafile_structure.run(pos_est)
use_soh_for_3DFT.run(pos_est, ip.source_location, ip.mass, ip.a_lattice, ip.N_atoms, ip.k_steps, ip.run_soh)
#calc_peak_intensities.run()
#calc_debye_waller.run()
#plot.run()
log.info("Path %s finished.\n", __name__)
return
<file_sep>/development/plot_peaks.py
def run(peak_str, peak_centre, source_name, timestep):
import units as un
import numpy as np
print "Plotting each peak in kx, ky, kz, and |G^2|..."
directions = ["kx", "ky", "kz"]
constant_axes = [[1,2], [0,2], [0,1]]
variable_axes = [0, 1, 2]
for i, current_peak_str in enumerate(peak_str):
data_filename = un.determine_accurate_soh_output_file_location(current_peak_str, source_name, timestep)
soh_output = un.read_from_soh_output(data_filename)
centre_point = peak_centre[i]
# This for loop plots the peak through kx, ky, and kz.
for j, direction in enumerate(directions):
k, intensity = un.find_line_data_from_3DFT(constant_axes[j], variable_axes[j], centre_point, soh_output)
plot_data_filename = "./data/" + current_peak_str + "/" + direction + ".dat"
plot_filename = "./data/" + current_peak_str + "/" + direction + ".png"
un.plot_pygnuplot(k, intensity, plot_filename, plot_data_filename)
# This section plots every point in the 3DFT as intensity vs. G^2.
gsqr = list((np.array(soh_output[0])**2) + (np.array(soh_output[1])**2) + (np.array(soh_output[2])**2))
intensity = soh_output[3]
plot_data_filename = "./data/" + current_peak_str + "/I_vs_gsqr.dat"
plot_filename = "./data/" + current_peak_str + "/I_vs_gsqr_" + current_peak_str + ".png"
un.plot_pygnuplot(gsqr, intensity, plot_filename, plot_data_filename)
return
<file_sep>/src/finalise.py
def run():
import logging as log
log.info("Peakfinder finalised.")
print "Finished!"
return
<file_sep>/peakrunner/write_bash_script.py
def run(bash_script_filename, peakfinder_directory_list):
from subprocess import call
f = open(bash_script_filename, 'w')
f.write("#!/bin/bash\n")
for peakfinder_directory in peakfinder_directory_list:
cd_line = "cd " + peakfinder_directory + "\n"
f.write(cd_line)
python_line = "python peakfinder.py" + "\n"
f.write(python_line)
call("chmod 755 " + bash_script_filename, shell=True)
return
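# For reference, a script generated for two directories would look roughly like this
# (paths below are placeholders):
#     #!/bin/bash
#     cd /path/to/peakfinder_run_1
#     python peakfinder.py
#     cd /path/to/peakfinder_run_2
#     python peakfinder.py
# Note that each cd is relative to wherever the previous one left the shell unless
# absolute paths are supplied in peakfinder_directory_list.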
<file_sep>/src/plot_peaks.py
def run(peak_str, peak_centre, source_name, timestep):
import units as un
print "Plotting each individual peak in kx, ky, and kz..."
directions = ["kx", "ky", "kz"]
constant_axes = [[1,2], [0,2], [0,1]]
variable_axes = [0, 1, 2]
for i, current_peak_str in enumerate(peak_str):
data_filename = un.determine_accurate_soh_output_file_location(current_peak_str, source_name, timestep)
soh_output = un.read_from_soh_output(data_filename)
centre_point = peak_centre[i]
for j, direction in enumerate(directions):
k, intensity = un.find_line_data_from_3DFT(constant_axes[j], variable_axes[j], centre_point, soh_output)
plot_data_filename = "./data/" + current_peak_str + "/" + direction + ".dat"
plot_filename = "./data/" + current_peak_str + "/" + direction + ".png"
un.plot_pygnuplot(k, intensity, plot_filename, plot_data_filename)
return
<file_sep>/src/select_peak_positions.py
def run(gsqr_max, negative_k, remove_000):
import units as un
import logging as log
log.info("Brick %s started.\n", __name__)
print "Selecting peak positions..."
pos_est = un.build_all_k_values(gsqr_max, negative_k)
pos_est = un.remove_fcc_forbidden_reflections(pos_est)
if remove_000 == True:
pos_est = un.remove_000(pos_est)
gsqr_est = un.get_gsqr_values(pos_est)
log.info("Brick %s finished.\n", __name__)
return pos_est, gsqr_est
<file_sep>/test/oldddd/module.py
#Each function should have its own ability to write to the log file.
# I also want it to save plots of all the potentially interesting data.
########################################################################
# Opens a time log of the run.
def startwatch():
import time
start_time = time.localtime()
tpy0 = time.clock()
t0 = time.time()
t = open('time.pkfd', 'w')
t.write("Peakfinder started at " + str(start_time[3]) + ":" + str(start_time[4]) + ":" + str(start_time[5]) + " " + str(start_time[2]) + "/" + str(start_time[1]) + "/" + str(start_time[0]) + "\n")
t.close()
return t0, tpy0
########################################################################
# Closes the time log of the run.
def stopwatch(t0, tpy0):
import time
stop_time = time.localtime()
tpyf = time.clock()
tf = time.time()
tpyt = tpyf - tpy0
tt = tf - t0
hours = int(tt/3600)
minutes = int((tt - (hours * 3600))/60)
seconds = int(tt - (hours * 3600) - (minutes * 60))
time_elapsed = [hours, minutes, seconds]
t = open('time.pkfd', 'a')
t.write("\n\nPeakfinder finished at " + str(stop_time[3]) + ":" + str(stop_time[4]) + ":" + str(stop_time[5]) + " " + str(stop_time[2]) + "/" + str(stop_time[1]) + "/" + str(stop_time[0]) + "\n")
t.write("\nPeakfinder took " + str(tt) + " s (or " + str(time_elapsed[0]) + "h " + str(time_elapsed[1]) + "m " + str(time_elapsed[2]) + "s) to complete.")
t.write("\n\nThe python portion took " + str(tpyt) + " s.")
t.close()
print "\nPeakfinder took " + str(tt) + " s (or " + str(time_elapsed[0]) + "h " + str(time_elapsed[1]) + "m " + str(time_elapsed[2]) + "s) to complete."
return
########################################################################
#This function creates bcc positions. It takes as input range_num (int), negative_k (bool), and remove_000 (bool). As output it creates gsqr_est (list) and pos_est (list).
def make_bcc(range_num, negative_k, remove_000):
# Variables in this function:
# negative_k -> this bool determines whether negative values of hkl are included.
# x_est/y_est/z_est -> these are sets of integers that make up the estimates of each coordinate in reciprocal space.
    # range_num -> this integer determines how far into reciprocal space the peak positions are estimated.
# pos_est -> this list contains all the k coordinates of each reciprocal peak.
# gsqr_est -> this list contains all the G^2 values for each peak contained in pos_est
# h/k/l -> these are iteration variables used to cycle through each value of x_est/y_est/z_est.
# remove_000 -> this bool determines whether the 000 peak is precluded from the list of pos_est.
    # gsqr_temp -> this temporary variable is used to hold the h^2 + k^2 + l^2 calculation needed to create G^2, before being added to gsqr_est.
# i -> used as a looping variable to write the log file and print to the console.
if negative_k == True:
x_est = range(-range_num+1, range_num)
y_est = range(-range_num+1, range_num)
z_est = range(-range_num+1, range_num)
elif negative_k == False:
x_est = range(0, range_num)
y_est = range(0, range_num)
z_est = range(0, range_num)
pos_est = [] #This list will have our k coordinates for each peak.
gsqr_est = [] #This list will have the G^2 values for each peak.
for h in x_est:
for k in y_est:
for l in z_est:
if remove_000 == False:
#Here the positions are only accepted if h + k + l are even. If that is not the case, they are not entered into the pos_est list.
if (h + k + l) % 2 == 0:
gsqr_temp = (h * h) + (k * k) + (l * l)
gsqr_est.append(gsqr_temp)
pos_est.append([h, k, l])
else:
pass
elif remove_000 == True:
#Here the positions are only accepted if h + k + l are even. If h = k = l = 0, the peak will not be written. If these conditions are not met the peak is not entered into the pos_est list.
if (h + k + l) % 2 == 0 and abs(h) + abs(k) + abs(l) != 0:
gsqr_temp = (h * h) + (k * k) + (l * l)
gsqr_est.append(gsqr_temp)
pos_est.append([h, k, l])
else:
pass
for i in range(len(pos_est)):
print "\nPeak " + str(i+1) + " of " + str(len(pos_est)) + " estimated: " + str(pos_est[i]) + " with G^2 = " + str(gsqr_est[i])
#This part makes the log entry for the function.
f = open("log.pkfd", "w")
f.write("\nFunction make_bcc called with input:\n"
"range_num = " + str(range_num) + "\n"
"negative_k = " + str(negative_k) + "\n"
"remove_000 = " + str(remove_000) + "\n"
"\nFunction make_bcc returned:\n")
for i in range(len(pos_est)):
f.write( "Peak " + str(i+1) + " of " + str(len(pos_est)) + " estimated: " + str(pos_est[i]) + " with G^2 = " + str(gsqr_est[i]) + "\n")
f.close()
return gsqr_est, pos_est
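# Illustration of the bcc selection rule above (h + k + l even): with range_num = 3 and
# negative_k = False, accepted reflections include [0, 1, 1], [1, 1, 0], [2, 0, 0],
# [1, 1, 2] and [2, 2, 0], while e.g. [1, 0, 0] and [1, 1, 1] are rejected.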
####################################################################
#This function creates fcc positions. It takes as input gsqr_max (int), negative_k (bool), and remove_000 (bool). As output it creates gsqr_est (list) and pos_est (list).
def make_fcc(gsqr_max, negative_k, remove_000):
# Variables in this function:
# negative_k -> this bool determines whether negative values of hkl are included.
# x_est/y_est/z_est -> these are sets of integers that make up the estimates of each coordinate in reciprocal space.
    # range_num -> this integer determines how far into reciprocal space the peak positions are estimated.
# pos_est -> this list contains all the k coordinates of each reciprocal peak (NOT in units of A^-1).
# gsqr_est -> this list contains all the G^2 values for each peak contained in pos_est (note that this is NOT in units of A^-2)
# h/k/l -> these are iteration variables used to cycle through each value of x_est/y_est/z_est.
# remove_000 -> this bool determines whether the 000 peak is precluded from the list of pos_est.
    # gsqr_temp -> this temporary variable is used to hold the h^2 + k^2 + l^2 calculation needed to create G^2, before being added to gsqr_est.
# i -> used as a looping variable to write the log file and print to the console.
import numpy as np
import time
t0 = time.time()
range_num = int(np.sqrt(gsqr_max) + 1.0)
if negative_k == True:
x_est = range(-range_num+1, range_num)
y_est = range(-range_num+1, range_num)
z_est = range(-range_num+1, range_num)
else:
x_est = range(range_num)
y_est = range(range_num)
z_est = range(range_num)
pos_est = [] # This list will have our k coordinates for each peak.
gsqr_est = [] # This list will have the G^2 values for each peak.
for i in x_est:
for j in y_est:
for k in z_est:
#The values for i j k are only selected if they are all even or all odd. If remove_000 is true there is an extra condition that makes sure 000 is not included.
if remove_000 == True:
if i % 2 == 0 and j % 2 == 0 and k % 2 == 0 and abs(i) + abs(j) + abs(k) != 0:
l = (i * i) + (j * j) + (k * k)
gsqr_est.append(l)
pos_est.append([i, j, k])
elif i % 2 == 1 and j % 2 == 1 and k % 2 == 1:
l = (i * i) + (j * j) + (k * k)
gsqr_est.append(l)
pos_est.append([i, j, k])
else:
pass
#This part is triggered if we want to keep the 000 peak.
elif remove_000 == False:
if i % 2 == 0 and j % 2 == 0 and k % 2 == 0:
l = (i * i) + (j * j) + (k * k)
gsqr_est.append(l)
pos_est.append([i, j, k])
elif i % 2 == 1 and j % 2 == 1 and k % 2 == 1:
l = (i * i) + (j * j) + (k * k)
gsqr_est.append(l)
pos_est.append([i, j, k])
else:
pass
i = 0
print "Removing peaks with too large gsqr..."
while i + 1 <= len(pos_est):
if gsqr_est[i] <= gsqr_max:
i += 1
continue
else:
del gsqr_est[i]
del pos_est[i]
continue
# This section prints to the console.
print "\nPeaks estimated for a fcc structure:"
for i in range(len(pos_est)):
print "Peak " + str(i+1) + " of " + str(len(pos_est)) + ": " + str(pos_est[i]) + " with G^2 = " + str(gsqr_est[i])
#This part makes the log entry for the function.
f = open("log.pkfd", "w")
f.write("\nFunction make_fcc called with input:\n"
"range_num = " + str(range_num) + "\n"
"negative_k = " + str(negative_k) + "\n"
"remove_000 = " + str(remove_000) + "\n"
"\nFunction make_fcc returned:\n")
for i in range(len(pos_est)):
f.write( "Peak " + str(i+1) + " of " + str(len(pos_est)) + " estimated: " + str(pos_est[i]) + " with G^2 = " + str(gsqr_est[i]) + "\n")
f.close()
t1 = time.time()
tt = t1 - t0
time_elapsed = time.localtime()
t = open('time.pkfd', 'a')
t.write("\nmod.make_fcc took \t\t\t\t" + str(tt) + " s to complete.")
return gsqr_est, pos_est
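# Illustration of the fcc selection rule above (h, k, l all even or all odd): with
# gsqr_max = 12 and negative_k = False, surviving reflections include [1, 1, 1],
# [2, 0, 0], [2, 2, 0], [3, 1, 1] and [2, 2, 2], while mixed-parity sets such as
# [2, 1, 0] are removed, as are any peaks with h^2 + k^2 + l^2 > gsqr_max.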
##################################################################
# This function rotates the estimated pos_est such that the 111 direction is parallel to the z-direction. This is necessary since, in LAMMPS, compression along z is achieved by rotating the crystal in this way and then simply compressing in the z-direction. The result is that all of our atom positions are rotated, and so are the expected peak positions. It takes as input: pos_est (list).
def enforce_rotation_111(pos_est):
# Variables:
# pos_est -> estimated peak positions from, for example, the make_bcc function.
# theta_x -> variable used to calculate the rotation matrix around x-axis.
# theta_z -> variable used to calculate the rotation matrix around z-axis.
# rot_x -> rotation matrix about x-axis.
# rot_z -> rotation matrix about z-axis.
    # rot_pos_est -> contains the rotated position estimates.
# new, new_2 -> intermediate variable to hold the partially rotated pos_est.
import numpy as np
import time
t0 = time.time()
theta_x = np.pi/2.0 - np.arctan(1/np.sqrt(2)) # Calculated by looking at a 111 vector which has been rotated by 45 degrees around the z-axis.
theta_z = np.pi/4.0 # Rotate around z-axis by 45 degrees.
rot_x = np.array([[1,0,0], [0, np.cos(theta_x), -np.sin(theta_x)], [0, np.sin(theta_x), np.cos(theta_x)]]) # creates the array which rotates the positions around the x-axis i.e. rotation matrix
rot_z = np.array([[np.cos(theta_z), -np.sin(theta_z), 0], [np.sin(theta_z), np.cos(theta_z), 0], [0, 0, 1]]) # same as above, this time around z. Note that we won't create a y-rotation matrix since it isn't needed in this instance of 111.
rot_pos_est = [0] * len(pos_est)
pos_est = np.asarray(pos_est) # Converts pos_est to an array so we can multiply it by our rotational matrices.
print "\nPeak estimates rotated:"
for i in range(len(pos_est)): # Loops over all peaks to populate the pos_est list with rotated peak positions.
new = np.dot(rot_z, pos_est[i]) # First matrix multiply the z-rotation with the original position estimate to get "new", an intermediate variable.
new_2 = np.dot(rot_x, new) # Then matrix multiply the x-rotation with "new" to get the array version of the rotated peak.
        rot_pos_est[i] = list(new_2) # Convert this to a list (for compatibility with the rest of the code).
print "Peak " + str(i + 1) + " of " + str(len(pos_est)) + " at " + str(pos_est[i]) + " rotated to " + str(rot_pos_est[i])
#This part makes the log entry for the function.
f = open("log.pkfd", "a")
f.write("\n\nFunction enforce_rotation_111 called with input:\n"
"pos_est = (listed below with rotated counterpart)\n"
"\nFunction enforce_rotation_111 returned:\n")
for i in range(len(pos_est)):
f.write("Peak " + str(i+1) + " of " + str(len(pos_est)) + ": " + str(pos_est[i]) + " rotated to " + str(rot_pos_est[i]) + "\n")
f.close()
t1 = time.time()
tt = t1 - t0
time_elapsed = time.localtime()
t = open('time.pkfd', 'a')
t.write("\nmod.enforce_rotation_111 took \t\t\t" + str(tt) + " s to complete.")
return rot_pos_est
################################################################
# This function cuts up a LAMMPS file to atoms contained within a certain volume, as defined by the user. It takes as input: source (string), xlo, xhi,ylo, yhi, zlo, zhi (all floats).
def cut_atoms(source, xlo, xhi, ylo, yhi, zlo, zhi):
#Variables:
# source -> the original LAMMPS file to be cut up.
    # intermediate_file -> this file is an intermediary to the final cut .atom file. It only differs from the cut .atom file by the number of atoms (which is reported incorrectly in the intermediate file).
# cut_atom_filename -> the name of the file after it has been cut.
# while_initialiser -> variable for looping through a while loop.
# current_line_list -> list of all the words in a line.
# xs_ind, ys_ind, zs_ind, vx_ind, vy_ind, vz_ind, fx_ind, fy_ind, fz_ind -> stores the indices of the columns which contain the atomic coordinates (xs, ys, zs), the velocity components of each atom (vx, vy, vz), and the force components on each atom (fx, fy, fz).
# xlo, xhi, ylo, yhi, zlo, zhi -> floats that describe the boundary of the volume to be cut to. These values are between 0 and 1 (inclusive) and are fractions of the box dimensions.
# atom_counter -> counts the number of atoms in the new cut .atom file.
import time
t0 = time.time()
intermediate_file = "intermediate.file"
cut_atom_filename = "cut_" + source
with open(source, 'r') as f:
g = open(intermediate_file, 'w')
while_initialiser = True
while while_initialiser == True:
for line in f:
g.write(line)
current_line_list = line.split()
for i in range(len(current_line_list)):
if current_line_list[i] == "xs":
xs_ind = current_line_list.index("xs") - 2
ys_ind = current_line_list.index("ys") - 2
zs_ind = current_line_list.index("zs") - 2
vx_ind = current_line_list.index("vx") - 2
vy_ind = current_line_list.index("vy") - 2
vz_ind = current_line_list.index("vz") - 2
fx_ind = current_line_list.index("fx") - 2
fy_ind = current_line_list.index("fy") - 2
fz_ind = current_line_list.index("fz") - 2
while_initialiser = False
break
else:
continue
break
atom_counter = 0
for line in f:
current_line_list = line.split()
if float(current_line_list[fx_ind]) == 0 and float(current_line_list[fy_ind]) == 0 and float(current_line_list[fz_ind]) == 0: # This line gets rid of any of the piston and back wall atoms.
continue
# The following six conditions are triggered if the atoms are outside of the user-defined volume.
if float(current_line_list[xs_ind]) < xlo:
continue
if float(current_line_list[ys_ind]) < ylo:
continue
if float(current_line_list[zs_ind]) < zlo:
continue
if float(current_line_list[xs_ind]) > xhi:
continue
if float(current_line_list[ys_ind]) > yhi:
continue
if float(current_line_list[zs_ind]) > zhi:
continue
# If none of the above conditions are triggered, the current line in the lammps dump file is written to the intermediate file.
else:
g.write(line)
atom_counter += 1
g.close()
with open(intermediate_file, 'r') as g:
number_of_atoms = False
h = open(cut_atom_filename, 'w')
for line in g:
current_line_list = line.split()
if number_of_atoms == True:
h.write(str(atom_counter) + "\n")
number_of_atoms = False
continue
if len(current_line_list) == 4:
if current_line_list[1] == "NUMBER" and current_line_list[2] == "OF" and current_line_list[3] == "ATOMS":
h.write(line)
number_of_atoms = True
else:
h.write(line)
else:
h.write(line)
h.close()
g.close()
lammps_file_name = cut_atom_filename
print "\nThe lammps file has been cut to the user defined parameters. \nThe filename of this abridged list is: " + str(lammps_file_name)
print "\nThe region to which the positions have been cut are bounded by: \n xlo = " + str(xlo) + "\n xhi = " + str(xhi) + "\n ylo = " + str(ylo) + "\n yhi = " + str(yhi) + "\n zlo = " + str(zlo) + "\n zhi = " + str(zhi)
print "\nAll atoms outside of this volume have been removed."
print "There are now " + str(atom_counter) + " atoms remaining after the cut."
#This part makes the log entry for the function.
f = open("log.pkfd", "a")
f.write("\n\nFunction cut_atoms called with input:\n"
"source = " + str(source) + "\n"
"xlo = " + str(xlo) + "\n"
"xhi = " + str(xhi) + "\n"
"ylo = " + str(ylo) + "\n"
"yhi = " + str(yhi) + "\n"
"zlo = " + str(zlo) + "\n"
"zhi = " + str(zhi) + "\n"
"\nFunction cut_atoms returned:\n"
"lammps_file_name = " + str(lammps_file_name) + "\n"
"atom_counter = " + str(atom_counter) + "\n")
f.close()
t1 = time.time()
tt = t1 - t0
time_elapsed = time.localtime()
t = open('time.pkfd', 'a')
t.write("\nmod.cut_atoms took \t\t\t\t" + str(tt) + " s to complete.")
return lammps_file_name, atom_counter
#################################################################
# This function calculates the exact temperature of the MD based on the velocities of the atoms.
def get_md_temperature(source, mass, piston_velocity):
import numpy as np
import scipy.constants as codata
import time
t0 = time.time()
atoms_vx, atoms_vy, atoms_vz = np.loadtxt(source, skiprows=9, usecols=(5, 6, 7), unpack = True) #Atom velocities are loaded from the .atom or .atomcut file.
atoms_vz_minus_piston = atoms_vz - piston_velocity #The piston velocity is subtracted from the z-velocity. The piston velocity is given in angstroms per ps.
v_sqr_2d = ((atoms_vx ** 2) + (atoms_vy ** 2)) * (10e12 ** 2)/(10e10 ** 2) #Since the velocities are in units of angstroms/ps(assuming units = metals is used in lammps), the square of the velocities is adjusted here to be in m/s.
v_sqr_3d = ( (atoms_vx ** 2) + (atoms_vy ** 2) + (atoms_vz_minus_piston ** 2) ) * (10e12 ** 2)/(10e10 ** 2)
E_k_atoms_2d = v_sqr_2d * 0.5 * mass * (10 ** -3) / codata.value("Avogadro constant")
E_k_atoms_3d = v_sqr_3d * 0.5 * mass * (10 ** -3) / codata.value("Avogadro constant")
E_k_average_2d = np.mean(E_k_atoms_2d)
E_k_average_3d = np.mean(E_k_atoms_3d)
md_temperature_2d = (2.0/2.0) * (E_k_average_2d / codata.value("Boltzmann constant"))
md_temperature_3d = (2.0/3.0) * (E_k_average_3d / codata.value("Boltzmann constant"))
print "\nThe temperature has been recalculated for the atom data in " + str(source)
print "\nThe 2D temperature has been calculated to be: " + str(md_temperature_2d) + " K"
print "\nThe 3D temperature has been calculated to be: " + str(md_temperature_3d) + " K"
#This part makes the log entry for the function.
f = open("log.pkfd", "a")
f.write("\n\nFunction get_md_temperature called with input:\n"
"source = " + str(source) + "\n"
"mass = " + str(mass) + "\n"
"piston_velocity = " + str(piston_velocity) + "\n"
"\nThe temperature has been recalculated for the atom data in " + str(source) + ""
"\nThe 2D temperature has been calculated to be: " + str(md_temperature_2d) + " K"
"\nThe 3D temperature has been calculated to be: " + str(md_temperature_3d) + " K\n"
"\nFunction get_md_temperature returned:\n"
"md_temperature_2d = " + str(md_temperature_2d) + "\n"
"md_temperature_3d = " + str(md_temperature_3d) + "\n"
)
f.close()
t1 = time.time()
tt = t1 - t0
time_elapsed = time.localtime()
t = open('time.pkfd', 'a')
t.write("\nmod.get_md_temperature took \t\t\t" + str(tt) + " s to complete.")
return md_temperature_2d, md_temperature_3d
###############################################################
# This function compares the expected peak positions to the actual positions in each dimension (first peak along x, y, and z), and then adjusts the predicted positions of all the peaks based on this. As input it takes: source (string), rotated_to_111 (bool), run_soh (bool), k_steps (int), pos_est (list), a_lattice (float), mass (float), show_plot (bool), timestep (int).
def compensate_for_compression(source, initial_hkl_pos_est, rotated_to_111, run_soh, k_steps, pos_est, a_lattice, mass, show_plot, timestep):
import numpy as np
import subprocess
import os
from scipy.optimize import curve_fit
import matplotlib.pyplot as plt
import time
t0 = time.time()
# Variables:
# rotated_to_111 -> this determines the points where soh will expect to find the uncompressed peak positions.
    # k_lineout_stop -> the point where soh will stop looking for a peak in each direction.
# k_stop -> list of k_lineout_stop coordinates (one coordinate for each direction).
    # k_lineout_start -> the point where soh will start looking for a peak in each direction.
# k_start -> list of k_lineout_start coordinates (one coordinate for each direction).
# k_lineout_direction -> list of strings for x, y, z.
# k_lineout_file -> string containing the name of the soh input file to be written.
# command_lineout -> the bash command for running soh in each direction.
# current_working_directory -> contains string of the cwd.
# popt, pcov -> stores the results of the gaussian fit routine.
# lineout_out -> string containing the name of the soh output.
    # x_lineout_k, y_lineout_intensity -> k-values and intensities for each dimensional lineout, loaded from the soh output.
# A -> amplitude of peak (maximum value of intensity lineout values).
# A_index -> the index of the highest value of intensity.
# sigma -> initial guess for width of gaussian.
# mu -> initial guess for k position of peak centre.
    # p0 -> list of A, mu, sigma to be input to the gaussian approximation.
# compression_peak_pos -> contains the estimated positions of peaks once compression has been taken into consideration.
# compression_factor -> the number by which the pos_est will be multiplied in order to find the corrected peak position estimates.
# compressed_gsqr_est -> G^2 values of the compressed peak positions.
if rotated_to_111 == False:
#First we find the k limits of the 1D ft we want to get soh to perform.
k_lineout_stop = 1.5 * ((2*np.pi)/(a_lattice * 0.5))/(2*np.pi/a_lattice) # The result is k_lineout_stop = 3, which is equivalent to the lattice being compressed to a/1.5. This means we will only detect compressions up to this point.
k_stop = [0] * 3
for i in range(len(k_stop)):
k_stop[i] = [0.0] * 3
k_stop[i][i] = k_lineout_stop
k_lineout_start = 0.5 #Defines where we start looking. If peaks are down here, then we have expansion, not compression.
k_start = [0] * 3
for i in range(len(k_start)):
k_start[i] = [0.0] * 3
k_start[i][i] = k_lineout_start
subprocess.call("mkdir soh_compression_lineouts", shell=True)
k_lineout_direction = ["x", "y", "z"]
#Then we write the soh input file to run the lineouts in each direction.
for i in range(len(k_lineout_direction)):
k_lineout_file = "k" + k_lineout_direction[i] + "_compression_lineout.soh"
f = open(k_lineout_file, "w")
f.write("VERBOSE\t\t\t\t\t0\n\n"
"FILE_TYPE\t\t\t\tlammps-multi\n"
"DATA_FILE\t\t\t\t" + str(source) + "\n"
"APPEND_FILE_NAME\t\tk" + str(k_lineout_direction[i]) + "_compression_lineout\n\n"
"PLOT_OUTPUT\t\t\t\tpdf\n\n"
"COORDS_SCALED\n"
"SET_MASS\t\t" + str(mass) + "\n\n"
"SET_A_CELL\t\t\t\t" + str(a_lattice) + "\n\n"
"CALC_1D_FT\n\n"
"SET_K_START\t\t\t\t" + str(k_start[i][0]) + " " + str(k_start[i][1]) + " " + str(k_start[i][2]) + "\n"
"SET_K_STOP\t\t\t\t" + str(k_stop[i][0]) + " " + str(k_stop[i][1]) + " " + str(k_stop[i][2]) + "\n"
"SET_NK\t\t\t\t\t" + str(k_steps) + "\n")
f.close() # Remember to close the file before you try to run it!
command_lineout = 'mpiexec -np 24 sonOfHoward ' + k_lineout_file # Stores the bash command we will run. If we want to make it faster, we can increase the processor_number here, we just have to make sure we don't get in anyone else's way!
if run_soh == True:
subprocess.call(command_lineout, shell=True)
current_working_directory = os.getcwd()
subprocess.call("mv " + str(current_working_directory) + "/" + str(k_lineout_file) + " " + str(current_working_directory) + "/soh_compression_lineouts/" , shell=True)
subprocess.call("mv " + str(current_working_directory) + "/" + source + "." + str(timestep) + ".k" + k_lineout_direction[i] + "_compression_lineout.ft " + str(current_working_directory) + "/soh_compression_lineouts/" , shell=True)
# Next we fit a symmetric function to the peaks we just found in each direction in k (this should be one peak per direction).
#This method fits a Gaussian to the peak.
popt = [0] * len(k_lineout_direction)
pcov = [0] * len(k_lineout_direction)
def gauss(x, A, mu, sigma):
return A*np.exp(-(x-mu)**2/(2.*sigma**2))
for i in range(len(k_lineout_direction)):
lineout_out = str(current_working_directory) + "/soh_compression_lineouts/" + source + "." + str(timestep) + ".k" + k_lineout_direction[i] + "_compression_lineout.ft"
x_lineout_k, y_lineout_intensity = np.loadtxt(lineout_out, skiprows = 1, usecols = (i, 5), unpack=True)
A = max(y_lineout_intensity)
A_index = np.argmax(y_lineout_intensity)
mu = x_lineout_k[A_index]
sigma = 0.01
p0 = [A, mu, sigma]
popt[i], pcov[i] = curve_fit(gauss, x_lineout_k, y_lineout_intensity, p0)
#The following section will show a plot of each of the lineout peaks with the fitted Gaussian.
plt.plot(x_lineout_k, y_lineout_intensity)
plt.plot(x_lineout_k, gauss(x_lineout_k, popt[i][0], popt[i][1], popt[i][2]))# plt.plot(x_lineout_k, y_lineout_intensity)
plt.xlabel("k" + k_lineout_direction[i])
plt.ylabel("Intensity")
plot_name = "k" + str(k_lineout_direction[i]) + "_compression_lineout.png"
plt.savefig(plot_name, bbox_inches='tight')
if show_plot == True:
plt.show()
plt.close()
print "Plot of compression compensation lineout in k" + str(k_lineout_direction[i]) + " created."
        #Now we put the positions of the peak in each direction into a list, then normalise the actual position with respect to the predicted position (i.e. divide by 2). This method will only work for fcc since the predicted position is hardcoded in (divided by two).
compression_peak_pos = [0] * len(k_lineout_direction)
for i in range(len(k_lineout_direction)):
compression_peak_pos[i] = popt[i][1]
# This should work for both fcc and bcc.
compression_factor = [1,1,1]
for i in range(len(k_lineout_direction)):
compression_factor[i] = compression_peak_pos[i]/2.0
if rotated_to_111 == True:
k_start = [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]
k_start[0] = [1.41421356237*2.0*0.8, 0, 0]
k_start[1] = [0, 2.44948974278*2.0*0.8, 0]
k_start[2] = [0, 0, 1.73205080757*0.8]
k_stop = [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]
k_stop[0] = [1.41421356237*2.0*1.5, 0, 0]
k_stop[1] = [0, 2.44948974278*2.0*1.5, 0]
k_stop[2] = [0, 0, 1.73205080757*1.5]
subprocess.call("mkdir soh_compression_lineouts", shell=True)
k_lineout_direction = ["x", "y", "z"]
#Then we write the soh input file to run the lineouts in each direction.
for i in range(len(k_lineout_direction)):
k_lineout_file = "k" + k_lineout_direction[i] + "_compression_lineout.soh"
f = open(k_lineout_file, "w")
f.write("VERBOSE\t\t\t\t\t0\n\n"
"FILE_TYPE\t\t\t\tlammps-multi\n"
"DATA_FILE\t\t\t\t" + str(source) + "\n"
"APPEND_FILE_NAME\t\tk" + str(k_lineout_direction[i]) + "_compression_lineout\n\n"
"PLOT_OUTPUT\t\t\t\tpdf\n\n"
"COORDS_SCALED\n"
"SET_MASS\t\t" + str(mass) + "\n\n"
"SET_A_CELL\t\t\t\t" + str(a_lattice) + "\n\n"
"CALC_1D_FT\n\n"
"SET_K_START\t\t\t\t" + str(k_start[i][0]) + " " + str(k_start[i][1]) + " " + str(k_start[i][2]) + "\n"
"SET_K_STOP\t\t\t\t" + str(k_stop[i][0]) + " " + str(k_stop[i][1]) + " " + str(k_stop[i][2]) + "\n"
"SET_NK\t\t\t\t\t" + str(k_steps) + "\n")
f.close() # Remember to close the file before you try to run it!
command_lineout = 'mpiexec -np 24 sonOfHoward ' + k_lineout_file # Stores the bash command we will run. If we want to make it faster, we can increase the processor_number here, we just have to make sure we don't get in anyone else's way!
if run_soh == True:
subprocess.call(command_lineout, shell=True)
current_working_directory = os.getcwd()
subprocess.call("mv " + str(current_working_directory) + "/" + str(k_lineout_file) + " " + str(current_working_directory) + "/soh_compression_lineouts/" , shell=True)
subprocess.call("mv " + str(current_working_directory) + "/" + source + "." + str(timestep) + ".k" + k_lineout_direction[i] + "_compression_lineout.ft " + str(current_working_directory) + "/soh_compression_lineouts/" , shell=True)
# Next we fit a symmetric function to the peaks we just found in each direction in k (this should be one peak per direction).
#This method fits a Gaussian to the peak.
popt = [0] * len(k_lineout_direction)
pcov = [0] * len(k_lineout_direction)
def gauss(x, A, mu, sigma):
return A*np.exp(-(x-mu)**2/(2.*sigma**2))
for i in range(len(k_lineout_direction)):
lineout_out = str(current_working_directory) + "/soh_compression_lineouts/" + source + "." + str(timestep) + ".k" + k_lineout_direction[i] + "_compression_lineout.ft"
x_lineout_k, y_lineout_intensity = np.loadtxt(lineout_out, skiprows = 1, usecols = (i, 5), unpack=True)
A = max(y_lineout_intensity)
A_index = np.argmax(y_lineout_intensity)
mu = x_lineout_k[A_index]
sigma = 0.01
p0 = [A, mu, sigma]
popt[i], pcov[i] = curve_fit(gauss, x_lineout_k, y_lineout_intensity, p0)
#The following section will show a plot of each of the lineout peaks with the fitted Gaussian.
plt.plot(x_lineout_k, y_lineout_intensity)
plt.plot(x_lineout_k, gauss(x_lineout_k, popt[i][0], popt[i][1], popt[i][2]))# plt.plot(x_lineout_k, y_lineout_intensity)
plt.xlabel("k" + k_lineout_direction[i])
plt.ylabel("Intensity")
plot_name = "k" + str(k_lineout_direction[i]) + "_compression_lineout.png"
plt.savefig(plot_name, bbox_inches='tight')
if show_plot == True:
plt.show()
plt.close()
print "Plot of compression compensation lineout in k" + str(k_lineout_direction[i]) + " created."
        #Now we put the positions of the peak in each direction into a list, then normalise the actual position with respect to the predicted position (i.e. divide by 2). This method will only work for fcc since the predicted position is hardcoded in (divided by two).
compression_peak_pos = [0] * len(k_lineout_direction)
for i in range(len(k_lineout_direction)):
compression_peak_pos[i] = popt[i][1]
# This should work for both fcc and bcc.
compression_factor = [1,1,1]
compression_factor[0] = compression_peak_pos[0]/(1.41421356237*2.0)
compression_factor[1] = compression_peak_pos[1]/(2.44948974278*2.0)
compression_factor[2] = compression_peak_pos[2]/1.73205080757
compressed_pos_est = [0] * len(pos_est)
compressed_gsqr_est = [0] * len(pos_est)
for i in range(len(compressed_pos_est)):
compressed_pos_est[i] = [ compression_factor[0] * pos_est[i][0], compression_factor[1] * pos_est[i][1], compression_factor[2] * pos_est[i][2] ]
compressed_gsqr_est[i] = (compressed_pos_est[i][0] ** 2) + (compressed_pos_est[i][1] ** 2) + (compressed_pos_est[i][2] ** 2)
print "\nPeak " + str(i+1) + " of " + str(len(compressed_pos_est)) + " " + str(initial_hkl_pos_est[i]) + ":\n" + str(pos_est[i]) + " compensated for compression to \n" + str(compressed_pos_est[i]) + " with G^2 = " + str(compressed_gsqr_est[i]) + "."
#This part makes the log entry for the function.
f = open("log.pkfd", "a")
f.write("\n\nFunction compensate_for_compression called with input:\n"
"source = " + str(source) + "\n"
"initial_hkl_pos_est = " + str(initial_hkl_pos_est) + "\n"
"rotated_to_111 = " + str(rotated_to_111) + "\n"
"run_soh = " + str(run_soh) + "\n"
"a_lattice = " + str(a_lattice) + "\n"
"k_steps = " + str(k_steps) + "\n"
"mass = " + str(mass) + "\n"
"show_plot = " + str(show_plot) + "\n"
"timestep = " + str(timestep) + "\n"
"pos_est = (given below as the uncompensated coordinates)\n")
for i in range(len(pos_est)):
f.write("\nPeak " + str(i+1) + " of " + str(len(compressed_pos_est)) + " " + str(initial_hkl_pos_est[i]) + ":\n" + str(pos_est[i]) + " compensated for compression to \n" + str(compressed_pos_est[i]) + " with G^2 = " + str(compressed_gsqr_est[i]) + ".")
f.write("\n\nFunction compensate_for_compression returned:\n"
"compressed_pos_est = (shown above)\n"
"compressed_gsqr_est = (shown above)\n"
"compression_factor = " + str(compression_factor) + "\n"
)
f.close()
t1 = time.time()
tt = t1 - t0
time_elapsed = time.localtime()
t = open('time.pkfd', 'a')
t.write("\nmod.compensate_for_compression took \t\t" + str(tt) + " s to complete.")
return compressed_pos_est, compressed_gsqr_est, compression_factor
##################################################################
# This function creates a box around each point in reciprocal space and performs a fourier transform of the atoms in a lammps .atom file with the reciprocal lattice vectors inside the box. This function creates input files for SoH, then runs SoH for each input file. It takes as input: source (str), pos_est (list), compression_factor (list), initial_hkl_pos_est (list), a_lattice (float), del_kx(float), del_ky(float), del_kz(float), k_steps (int), run_soh (bool), mass (float). It does not produce output variables. Instead it creates files which contain the SoH outputs, including intensities at each point.
def get_peak_intensities(source, pos_est, compression_factor, initial_hkl_pos_est, a_lattice, mass, del_kx, del_ky, del_kz, k_steps, k_steps_accurate, run_soh, timestep):
# Variables:
# source -> the name of the lamps file to be analysed.
# pos_est -> the estimated positions of the peaks in reciprocal space.
# compression_factor -> the ratio of expected peak position to actual peak position in each direction.
# a_lattice -> the lattice constant, in Angstroms.
# mass -> the mass of the material in g/mol.
# del_kx -> determines the size of the reciprocal space box in x over which the FT will take place.
# del_ky -> determines the size of the reciprocal space box in y over which the FT will take place.
# del_kz -> determines the size of the reciprocal space box in z over which the FT will take place.
# k_steps -> determines the resolution of the FT; this number sets the number of points in each dimension of the box such that k_steps^3 is the total number of reciprocal space points per box.
# run_soh -> turns on/off SoH.
# timestep -> for file location reasons.
# current_working_directory -> holds a string of the current working directory.
# source_location -> contains location of the lammps source file.
# kx_start/ky_start/kz_start -> holds the start points of the FT in x/y/z.
# kx_end/ky_end/kz_end -> holds the end points of the FT in x/y/z.
# filenum -> the number to be appended to the soh in/out filenames.
# soh_input -> stores the filename we want to write our soh input to.
# t_start_peak/t_end_peak -> stores the start and time of each peak run.
# time_peak -> the difference between t_end and t_start.
# time_remaining -> approximates the time left for the FT based on the time it took for the last calculation.
import time
import subprocess
import os
import numpy
import copy
t0 = time.time()
print "get_peak_intensities started..."
current_working_directory = os.getcwd()
source_location = str(current_working_directory) + "/" + str(source)
subprocess.call("mkdir soh_input", shell=True)
subprocess.call("mkdir soh_output", shell=True)
def accurate_peak_centre_and_breadth(over_width, make_plots_accurate):
print "Finding accurate centre and breadths for each peak..."
acc_dir = "accurate_peak_lineouts"
subprocess.call("rm -r " + acc_dir, shell = True)
subprocess.call("mkdir " + acc_dir, shell=True)
print "accurate_peak_lineouts directory made"
for i in range(len(pos_est)):
lineout_direction = ["kx", "ky", "kz"]
del_k = [del_kx, del_ky, del_kz]
peak_dir = str(initial_hkl_pos_est[i][0]) + str(initial_hkl_pos_est[i][1]) + str(initial_hkl_pos_est[i][2])
subprocess.call("mkdir " + current_working_directory + "/" + peak_dir, shell=True)
subprocess.call("mv " + current_working_directory + "/" + peak_dir + "/ " + current_working_directory + "/" + acc_dir + "/", shell = True)
for j in range(len(lineout_direction)):
width = del_k[j] * (1.0 + over_width)
k_start = pos_est[i][j] - width
k_end = pos_est[i][j] + width
print "\nPeak " + peak_dir
filenum = lineout_direction[j]
soh_input = current_working_directory + "/" + acc_dir + "/" + peak_dir + "/in_" + filenum + ".soh"
if j == 0 :
f = open(str(soh_input), "w")
f.write("VERBOSE\t\t\t0\n\n"
"FILE_TYPE\t\tlammps-multi\n"
"DATA_FILE\t\t" + source_location + "\n"
"APPEND_FILE_NAME\t\t" + filenum + "\n\n"
"PLOT_OUTPUT\t\tpdf\n\n"
"COORDS_SCALED\n"
"SET_MASS\t\t" + str(mass) + "\n\n"
"SET_A_CELL\t\t" + str(a_lattice) + "\n\n"
"CALC_1D_FT\n\n"
"SET_K_START\t\t\t" + str(k_start) + " " + str(pos_est[i][1]) + " " + str(pos_est[i][2]) + "\n"
"SET_K_STOP\t\t\t" + str(k_end) + " " + str(pos_est[i][1]) + " " + str(pos_est[i][2]) + "\n"
"SET_NK\t\t\t" + str(k_steps_accurate) + "\n")
f.close()
if j == 1 :
f = open(str(soh_input), "w")
f.write("VERBOSE\t\t\t0\n\n"
"FILE_TYPE\t\tlammps-multi\n"
"DATA_FILE\t\t" + source_location + "\n"
"APPEND_FILE_NAME\t\t" + filenum + "\n\n"
"PLOT_OUTPUT\t\tpdf\n\n"
"COORDS_SCALED\n"
"SET_MASS\t\t" + str(mass) + "\n\n"
"SET_A_CELL\t\t" + str(a_lattice) + "\n\n"
"CALC_1D_FT\n\n"
"SET_K_START\t\t\t" + str(pos_est[i][0]) + " " + str(k_start) + " " + str(pos_est[i][2]) + "\n"
"SET_K_STOP\t\t\t" + str(pos_est[i][0]) + " " + str(k_end) + " " + str(pos_est[i][2]) + "\n"
"SET_NK\t\t\t" + str(k_steps_accurate) + "\n")
f.close()
if j == 2 :
f = open(str(soh_input), "w")
f.write("VERBOSE\t\t\t0\n\n"
"FILE_TYPE\t\tlammps-multi\n"
"DATA_FILE\t\t" + source_location + "\n"
"APPEND_FILE_NAME\t\t" + filenum + "\n\n"
"PLOT_OUTPUT\t\tpdf\n\n"
"COORDS_SCALED\n"
"SET_MASS\t\t" + str(mass) + "\n\n"
"SET_A_CELL\t\t" + str(a_lattice) + "\n\n"
"CALC_1D_FT\n\n"
"SET_K_START\t\t\t" + str(pos_est[i][0]) + " " + str(pos_est[i][1]) + " " + str(k_start) + "\n"
"SET_K_STOP\t\t\t" + str(pos_est[i][0]) + " " + str(pos_est[i][1]) + " " + str(k_end) + "\n"
"SET_NK\t\t\t" + str(k_steps_accurate) + "\n")
f.close()
if run_soh == True:
subprocess.call('cd soh_input ; mpiexec -np 24 sonOfHoward ' + soh_input, shell=True)
soh_output = str(source) + "." + str(timestep) + "." + str(filenum) + ".ft"
subprocess.call("mv " + soh_output + " " + current_working_directory + "/" + acc_dir + "/" + peak_dir + "/", shell=True)
if make_plots_accurate == True:
lineout_direction = ["kx", "ky", "kz"]
for i in range(len(pos_est)):
peak_dir = str(initial_hkl_pos_est[i][0]) + str(initial_hkl_pos_est[i][1]) + str(initial_hkl_pos_est[i][2])
for j in range(len(lineout_direction)):
soh_output = str(source) + "." + str(timestep) + "." + str(lineout_direction[j]) + ".ft"
plot_datafile = current_working_directory + "/" + acc_dir + "/" + peak_dir + "/" + soh_output
plot_name = lineout_direction[j] + ".png"
gnuplot_input = "in_gnuplot_" + lineout_direction[j]
g = open(gnuplot_input, "w")
g.write(
"set terminal png size 1600,1200 enhanced font 'Helvetica,20'"
"\nset output '" + str(plot_name) + "'"
"\nplot '" + plot_datafile + "' using " + str(j+1) + ":6")
g.close()
print "Plotted " + peak_dir + " along " + str(lineout_direction[j])
subprocess.call("gnuplot " + str(gnuplot_input), shell=True)
subprocess.call("mv " + gnuplot_input + " " + current_working_directory + "/" + acc_dir + "/" + peak_dir + "/", shell=True)
subprocess.call("mv " + plot_name + " " + current_working_directory + "/" + acc_dir + "/" + peak_dir + "/", shell=True)
accurate_pos_est = [0] * len(pos_est)
accurate_breadths = [0] * len(pos_est)
for i in range(len(pos_est)):
accurate_pos_est[i] = [0] * 3
accurate_breadths[i] = [0] * 3
print "\nFinding accurate peak centres and breadths for:"
for i in range(len(pos_est)):
peak_dir = str(initial_hkl_pos_est[i][0]) + str(initial_hkl_pos_est[i][1]) + str(initial_hkl_pos_est[i][2])
lineout_direction = ["kx", "ky", "kz"]
for j in range(len(lineout_direction)):
print "\n" + str(peak_dir) + " along " + lineout_direction[j]
datafile = current_working_directory + "/" + acc_dir + "/" + peak_dir + "/" + str(source) + "." + str(timestep) + "." + str(lineout_direction[j]) + ".ft"
k_temp, intensity_temp = numpy.loadtxt(datafile, skiprows=1, usecols=(j,5), unpack=True)
ind = numpy.argmax(intensity_temp)
accurate_pos_est[i][j] = k_temp[ind]
for k in range(len(k_temp)):
if ind - k - 1 < 0:
print "Lower bound for peak " + peak_dir + " could not be found."
exit()
intensity_diff_left = intensity_temp[ind - k] - intensity_temp[ind - k - 1]
if intensity_diff_left <= 0.0:
k_acc_start = k_temp[ind - k]
print "\nIntensity diff left for " + peak_dir + " " + lineout_direction[j] + " = " + str(intensity_diff_left)
break
else:
continue
for k in range(len(k_temp)):
if k + 1 + ind >= len(k_temp):
print "Upper bound for peak " + peak_dir + " could not be found."
exit()
intensity_diff_right = intensity_temp[ind + k] - intensity_temp[ind + k + 1]
if intensity_diff_right <= 0.0:
k_acc_end = k_temp[ind + k]
print "Intensity diff right for " + peak_dir + " = " + str(intensity_diff_right)
break
else:
continue
accurate_breadths[i][j] = [k_acc_start, k_acc_end]
return accurate_pos_est, accurate_breadths;
accurate_pos_est, accurate_breadths = accurate_peak_centre_and_breadth(0.5, True)
print "\nCreated accurate estimates of peak centres and breadths.\n"
for i in range(len(pos_est)):
peak_dir = str(initial_hkl_pos_est[i][0]) + str(initial_hkl_pos_est[i][1]) + str(initial_hkl_pos_est[i][2])
t_start_peak = time.time()
kx_start = accurate_breadths[i][0][0]
kx_end = accurate_breadths[i][0][1]
ky_start = accurate_breadths[i][1][0]
ky_end = accurate_breadths[i][1][1]
kz_start = accurate_breadths[i][2][0]
kz_end = accurate_breadths[i][2][1]
filenum = peak_dir
soh_input = "in_" + filenum + ".soh"
f = open(str(current_working_directory) + "/soh_input/" + str(soh_input), "w")
f.write("VERBOSE\t\t\t0\n\n"
"FILE_TYPE\t\tlammps-multi\n"
"DATA_FILE\t\t" + source_location + "\n"
"APPEND_FILE_NAME\t\t" + filenum + "\n\n"
"PLOT_OUTPUT\t\tpdf\n\n"
"COORDS_SCALED\n"
"SET_MASS\t\t" + str(mass) + "\n\n"
"SET_A_CELL\t\t" + str(a_lattice) + "\n\n"
"CALC_3D_FT\n\n"
"SET_KX\t\t\t" + str(kx_start) + " " + str(kx_end) + " " + str(k_steps) + "\n"
"SET_KY\t\t\t" + str(ky_start) + " " + str(ky_end) + " " + str(k_steps) + "\n"
"SET_KZ\t\t\t" + str(kz_start) + " " + str(kz_end) + " " + str(k_steps) + "\n")
f.close()
if run_soh == True:
subprocess.call('cd soh_input ; mpiexec -np 24 sonOfHoward ' + soh_input, shell=True)
soh_output = str(source) + "." + str(timestep) + "." + str(filenum) + ".ft"
subprocess.call("mv " + soh_output + " " + str(current_working_directory) + "/soh_output/", shell=True)
t_end_peak = time.time()
time_peak = t_end_peak - t_start_peak
print "\nTime for peak " + str(i + 1) + " of " + str(len(pos_est)) + " = " + str(time_peak) + " s"
time_remaining = time_peak * (len(pos_est) - (i + 1))
print "Approximate time remaining = " + str(time_remaining) + " s\n"
#This part makes the log entry for the function.
f = open("log.pkfd", "a")
f.write("\n\nFunction get_peak_intensities called with input:\n"
"source = " + str(source) + "\n"
"initial_hkl_pos_est and pos_est =\n")
for i in range(len(pos_est)):
f.write(str(initial_hkl_pos_est[i]) + " sought at " + str(pos_est[i]) + "\n")
f.write("a_lattice = " + str(a_lattice) + "\n"
# "del_kx, del_ky, del_kz = " + str(del_kx) + ", " + str(del_ky) + " ," + str(del_kz) + "\n"
"k_steps = " + str(k_steps) + "\n"
"run_soh = " + str(run_soh) + "\n"
"\nFunction get_peak_intensities returned:\n"
"This function does not return any values. It produces fourier transforms of lammps .atom files.\n"
"The SoH inputs are stored at " + str(current_working_directory) + "/soh_inputs/\n"
"The SoH outputs are stored at " + str(current_working_directory) + "/soh_outputs/\n")
f.close()
t1 = time.time()
tt = t1 - t0
t = open('time.pkfd', 'a')
t.write("\nmod.get_peak_intensities took \t\t\t" + str(tt) + " s to complete.")
return;
##################################################################
# This function integrates the intensity of each peak from the 3D FT output produced by get_peak_intensities,
# and returns the peak-centre positions, their G^2 values, and the natural log of the integrated intensities
# (both normalised and unnormalised, for the simple and volume-weighted sums).
def get_ln_intensity(pos_est, initial_hkl_pos_est, miller_pos_est, source, show_plot, timestep, a_lattice, del_kx, del_ky, del_kz, k_steps, compression_factor, make_plots):
import numpy as np
import os
import subprocess
import matplotlib.pyplot as plt
import time
t0 = time.time()
print "get_ln_intensity started..."
cwd = os.getcwd()
if make_plots == True:
subprocess.call("mkdir " + str(cwd) + "/plots_of_data/", shell = True)
simple_intensity_integrated = [0] * len(pos_est) # Stores a simple sum of intensities of each peak.
complex_intensity_integrated = [0] * len(pos_est) # Stores the sum of intensity*volumes for each peak.
gsqr_integrated = [0] * len(pos_est)
pos_integrated = [[0, 0, 0]] * len(pos_est)
kx = [0] * len(pos_est)
ky = [0] * len(pos_est)
kz = [0] * len(pos_est)
f = open("log.pkfd", "a")
f.write("\n\nFunction get_ln_intensity called with input:\n"
"pos_est = " + str(pos_est) + "\n"
"source = " + str(source) + "\n"
"timestep = " + str(timestep) + "\n"
"a_lattice = " + str(a_lattice) + "\n")
first_peak_dir = str(initial_hkl_pos_est[0][0]) + str(initial_hkl_pos_est[0][1]) + str(initial_hkl_pos_est[0][2])
first_soh_out = str(cwd) + "/soh_output/" + source + "." + str(timestep)+ "." + first_peak_dir+ ".ft" # Stores the name of the soh output file.
kx_coord, ky_coord, kz_coord, first_intensity = np.loadtxt(first_soh_out, skiprows=1, usecols=(0, 1, 2, 5), unpack=True)
#
points_in_bulk = 0
points_in_surface = 0
points_in_edge = 0
points_in_corner = 0
classification = ["bulk", "surface", "edge", "corner"]
classification_ind = 0
volume_fraction = list(first_intensity)
#h = open(intensity_datafile, 'w')
#h.write('#kx ky kz intensity_volume intensity classification')
print "Sorting k-space points into corners, edges, surfaces, and bulk..."
for j in range(len(first_intensity)):
# This finds all of the corner, edge, and surface intensity points.
if kx_coord[j] == min(kx_coord) or kx_coord[j] == max(kx_coord) or ky_coord[j] == min(ky_coord) or ky_coord[j] == max(ky_coord) or kz_coord[j] == min(kz_coord) or kz_coord[j] == max(kz_coord):
# This finds all corner and edge intensity points.
if kx_coord[j] == min(kx_coord) and ky_coord[j] == min(ky_coord) or kx_coord[j] == min(kx_coord) and kz_coord[j] == min(kz_coord) or ky_coord[j] == min(ky_coord) and kz_coord[j] == min(kz_coord) or kx_coord[j] == max(kx_coord) and kz_coord[j] == min(kz_coord) or kx_coord[j] == min(kx_coord) and kz_coord[j] == max(kz_coord) or kx_coord[j] == max(kx_coord) and kz_coord[j] == max(kz_coord) or ky_coord[j] == max(ky_coord) and kz_coord[j] == min(kz_coord) or ky_coord[j] == max(ky_coord) and kx_coord[j] == max(kx_coord) or kx_coord[j] == max(kx_coord) and ky_coord[j] == min(ky_coord) or ky_coord[j] == min(ky_coord) and kz_coord[j] == max(kz_coord) or ky_coord[j] == max(ky_coord) and kz_coord[j] == max(kz_coord) or kx_coord[j] == min(kx_coord) and ky_coord[j] == max(ky_coord):
# This finds all the corner intensity points.
if kx_coord[j] == min(kx_coord) and ky_coord[j] == min(ky_coord) and kz_coord[j] == min(kz_coord) or kx_coord[j] == min(kx_coord) and ky_coord[j] == min(ky_coord) and kz_coord[j] == max(kz_coord) or kx_coord[j] == max(kx_coord) and ky_coord[j] == min(ky_coord) and kz_coord[j] == min(kz_coord) or kx_coord[j] == min(kx_coord) and ky_coord[j] == max(ky_coord) and kz_coord[j] == min(kz_coord) or kx_coord[j] == max(kx_coord) and ky_coord[j] == max(ky_coord) and kz_coord[j] == min(kz_coord) or kx_coord[j] == max(kx_coord) and ky_coord[j] == min(ky_coord) and kz_coord[j] == max(kz_coord) or kx_coord[j] == min(kx_coord) and ky_coord[j] == max(ky_coord) and kz_coord[j] == max(kz_coord) or kx_coord[j] == max(kx_coord) and ky_coord[j] == max(ky_coord) and kz_coord[j] == max(kz_coord):
points_in_corner += 1
classification_ind = 3
volume_fraction[j] = 0.125
# All the edge points must go here.
else:
points_in_edge += 1
classification_ind = 2
volume_fraction[j] = 0.25
# All the surface points must go here.
else:
points_in_surface += 1
classification_ind = 1
volume_fraction[j] = 0.5
# All the bulk points must go here.
else:
points_in_bulk += 1
classification_ind = 0
volume_fraction[j] = 1.0
print "Finished sorting k-space points."
total_points = points_in_bulk + points_in_surface + points_in_edge + points_in_corner
expected_bulk = (k_steps - 2) ** 3
expected_surface = 6 * ((k_steps - 2) ** 2)
expected_edge = 12 * (k_steps - 2)
expected_corner = 8
expected_total = expected_corner + expected_edge + expected_surface + expected_bulk
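# As a quick sanity check of these formulae, a hypothetical k_steps = 21 box gives
# bulk = 19^3 = 6859, surface = 6*19^2 = 2166, edge = 12*19 = 228 and corner = 8 points,
# which sum to 9261 = 21^3, i.e. every point in the box is counted exactly once.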
print "\nPoints in bulk = " + str(points_in_bulk) + " Expected " + str(expected_bulk)
print "Points on a surface = " + str(points_in_surface) + " Expected " + str(expected_surface)
print "Points on an edge = " + str(points_in_edge) + " Expected " + str(expected_edge)
print "Point on a corner = " + str(points_in_corner) + " Expected " + str(expected_corner)
print "\nTotal points = " + str(total_points) + " Expected " + str(expected_total) + "\n"
dk_vol_var = 1.0/(k_steps - 1.0) # Division is computationally expensive so best to do this outside the loop, then multiply it in.
print "Integrating intensities..."
for i in range(len(pos_est)):
peak_dir = str(initial_hkl_pos_est[i][0]) + str(initial_hkl_pos_est[i][1]) + str(initial_hkl_pos_est[i][2])
print peak_dir
soh_out = str(cwd) + "/soh_output/" + source + "." + str(timestep)+ "." + peak_dir+ ".ft" # Stores the name of the soh output file.
kx_coord, ky_coord, kz_coord, tmp_intensity = np.loadtxt(soh_out, skiprows = 1, usecols = (0,1,2,5), unpack=True)
dk_vol = ( (max(kx_coord) - min(kx_coord)) * dk_vol_var) * ( (max(ky_coord) - min(ky_coord)) * dk_vol_var ) * ( (max(kz_coord) - min(kz_coord)) * dk_vol_var )
peak_position_ind = np.argmax(tmp_intensity)
tmp_gsqr_integrated = (kx_coord[peak_position_ind] * kx_coord[peak_position_ind]) + (ky_coord[peak_position_ind] * ky_coord[peak_position_ind]) + (kz_coord[peak_position_ind] * kz_coord[peak_position_ind])
gsqr_integrated[i] = tmp_gsqr_integrated * (2 * np.pi / a_lattice ) * (2 * np.pi / a_lattice) # This is because of how soh handles the data. It is also the reason I was initially getting more peaks than Will.
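# Equivalently, for this cubic lattice: G^2 [A^-2] = (2*pi/a_lattice)^2 * (kx^2 + ky^2 + kz^2),
# where (kx, ky, kz) is the peak-centre position in the units returned by SoH.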
pos_integrated[i] = [ kx_coord[peak_position_ind], ky_coord[peak_position_ind], kz_coord[peak_position_ind] ]
subprocess.call("mkdir " + str(cwd) + "/plots_of_data/" + peak_dir, shell=True)
intensity_datafile = str(cwd) + "/plots_of_data/" + peak_dir + "/intensity_vs_position.dat"
simple_intensity_integrated[i] = sum(tmp_intensity)
intensity_volume = list(tmp_intensity)
for j in range(len(tmp_intensity)):
intensity_volume[j] = tmp_intensity[j] * dk_vol * volume_fraction[j]
complex_intensity_integrated[i] = sum(intensity_volume)
f.write("\nIntegrated intensity of " + str(miller_pos_est[i]) + " sought at " + str(pos_est[i]) + " = " + str(complex_intensity_integrated[i]) + "\n"
"Simple sum of intensities = " + str(simple_intensity_integrated[i]) )
if make_plots == True:
plot_directory_name = str(miller_pos_est[i][0]) + str(miller_pos_est[i][1]) + str(miller_pos_est[i][2])
subprocess.call("mkdir " + str(cwd) + "/plots_of_data/" + str(plot_directory_name), shell = True)
kx_for_lineout_plot = []
intensity_for_kx_lineout_plot = []
for j in range(len(kx_coord)):
if ky_coord[j] == ky_coord[peak_position_ind] and kz_coord[j] == kz_coord[peak_position_ind]:
kx_for_lineout_plot.append(kx_coord[j])
intensity_for_kx_lineout_plot.append(tmp_intensity[j])
ky_for_lineout_plot = []
intensity_for_ky_lineout_plot = []
for j in range(len(kx_coord)):
if kx_coord[j] == kx_coord[peak_position_ind] and kz_coord[j] == kz_coord[peak_position_ind]:
ky_for_lineout_plot.append(ky_coord[j])
intensity_for_ky_lineout_plot.append(tmp_intensity[j])
kz_for_lineout_plot = []
intensity_for_kz_lineout_plot = []
for j in range(len(kx_coord)):
if kx_coord[j] == kx_coord[peak_position_ind] and ky_coord[j] == ky_coord[peak_position_ind]:
kz_for_lineout_plot.append(kz_coord[j])
intensity_for_kz_lineout_plot.append(tmp_intensity[j])
k_value = [kx_for_lineout_plot, ky_for_lineout_plot, kz_for_lineout_plot]
intensity_value = [intensity_for_kx_lineout_plot, intensity_for_ky_lineout_plot, intensity_for_kz_lineout_plot]
lineout_direction = ["kx", "ky", "kz"]
for j in range(len(lineout_direction)):
d = open(str(cwd) + "/plots_of_data/" + str(plot_directory_name) + "/I_vs_" + lineout_direction[j] + ".dat", "w")
d.write("#" + lineout_direction[j] + " intensity\n")
for k in range(len(k_value[j])):
d.write(str(k_value[j][k]) + " " + str(intensity_value[j][k]) + "\n")
d.close()
if make_plots == True:
lineout_direction = ["kx", "ky", "kz"]
for i in range(len(pos_est)):
plot_directory_name = str(miller_pos_est[i][0]) + str(miller_pos_est[i][1]) + str(miller_pos_est[i][2])
for j in range(len(lineout_direction)):
datafile = cwd + "/plots_of_data/" + plot_directory_name + "/I_vs_" + lineout_direction[j] + ".dat"
plot_name = cwd + "/plots_of_data/" + plot_directory_name + "/" + lineout_direction[j] + "_lineout.png"
g = open(cwd + "/plots_of_data/" + plot_directory_name + "/" + lineout_direction[j] + "_gnuplot.in", "w")
g.write(
"set terminal png size 1600,1200 enhanced font 'Helvetica,20'"
"\nset output '" + str(plot_name) + "'"
"\nplot '" + datafile + "' using 1:2"
)
g.close()
if make_plots == True:
lineout_direction = ["kx", "ky", "kz"]
for i in range(len(pos_est)):
plot_directory_name = str(miller_pos_est[i][0]) + str(miller_pos_est[i][1]) + str(miller_pos_est[i][2])
for j in range(len(lineout_direction)):
gnuplot_input = cwd + "/plots_of_data/" + plot_directory_name + "/" + lineout_direction[j] + "_gnuplot.in"
subprocess.call("gnuplot " + gnuplot_input, shell=True)
# This section works on the complex_integrated_intensity.
complex_intensity_integrated_max_ind = np.argmax(complex_intensity_integrated)
ln_complex_intensity_integrated = np.log(complex_intensity_integrated)
ln_norm_complex_intensity_integrated = np.log(np.asarray(complex_intensity_integrated) / max(complex_intensity_integrated))
ln_norm_complex_intensity_integrated = ln_norm_complex_intensity_integrated.tolist()
g = open("ln_complex_intensity_vs_g_squared.dat", "w")
g.write("ln_complex_intensity g_squared h k l\n")
for i in range(len(pos_est)):
g.write(str(ln_complex_intensity_integrated[i]) + " " + str(gsqr_integrated[i]) + " " + str(miller_pos_est[i][0]) + " " + str(miller_pos_est[i][1]) + " " + str(miller_pos_est[i][2]) + "\n")
g.close()
# This section works on the simple_integrated_intensity.
simple_intensity_integrated_max_ind = np.argmax(simple_intensity_integrated)
ln_simple_intensity_integrated = np.log(simple_intensity_integrated)
ln_norm_simple_intensity_integrated = np.log(np.asarray(simple_intensity_integrated) / max(simple_intensity_integrated))
ln_norm_simple_intensity_integrated = ln_norm_simple_intensity_integrated.tolist()
g = open("ln_simple_intensity_vs_g_squared.dat", "w")
g.write("ln_simple_intensity g_squared h k l\n")
for i in range(len(pos_est)):
g.write(str(ln_simple_intensity_integrated[i]) + " " + str(gsqr_integrated[i]) + " " + str(miller_pos_est[i][0]) + " " + str(miller_pos_est[i][1]) + " " + str(miller_pos_est[i][2]) + "\n")
g.close()
#This part makes the final log entry for the function.
f.write("\nFunction get_ln_intensity returned:\n"
"This function obtains the integrated intensity and then returns ln of the intensity."
"\nIt also returns the gsqr values of the estimated peak centres.")
f.close()
t1 = time.time()
tt = t1 - t0
t = open('time.pkfd', 'a')
t.write("\nmod.get_ln_intensity took \t\t\t" + str(tt) + " s to complete.")
return pos_integrated, gsqr_integrated, ln_complex_intensity_integrated, ln_norm_complex_intensity_integrated, ln_simple_intensity_integrated, ln_norm_simple_intensity_integrated
################################################################
def get_slope_ln_intensity_vs_gsqr(gsqr, ln_intensity):
import numpy as np
import time
t0 = time.time()
print "\nget_slope_ln_intensity_vs_gsqr started..."
slope_ln_intensity_vs_gsqr, constant_ln_intensity_vs_gsqr = np.polyfit(gsqr, ln_intensity, 1)
print "\n\nThe slope of ln(I) vs. G^2 = " + str(slope_ln_intensity_vs_gsqr)
print "The line constant of ln(I) vs. G^2 = " + str(constant_ln_intensity_vs_gsqr)
# The log entry for the function.
f= open("log.pkfd", "a")
f.write("\n\nFunction get_slope_ln_intensity called with input:\n"
"ln_intensity, gsqr = the values written above for each peak.\n"
"\nFunction get_slope_ln_intensity_vs_gsqr returned:\n"
"slope_ln_intensity_vs_gsqr = " + str(slope_ln_intensity_vs_gsqr) + "\n"
"constant_ln_intensity_vs_gsqr = " + str(constant_ln_intensity_vs_gsqr) + "\n"
)
f.close()
t1 = time.time()
tt = t1 - t0
time_elapsed = time.localtime()
t = open('time.pkfd', 'a')
t.write("\nmod.get_slope_ln_intensity took \t\t" + str(tt) + " s to complete.")
return slope_ln_intensity_vs_gsqr, constant_ln_intensity_vs_gsqr
################################################################
def calc_temperature_xrd(slope_ln_intensity_vs_gsqr, constant_ln_intensity_vs_gsqr, gruneisen_uncompressed, debye_temperature_uncompressed, a_lattice, compression_factor, mass, pos, gsqr, uncompressed_pos_est, uncompressed_gsqr_est, plot_name, show_plot, ln_intensity, md_temperature_3d, md_temperature_2d):
import numpy as np
import matplotlib.pyplot as plt
import scipy.constants as codata
from scipy.integrate import quad
import time
t0 = time.time()
print "\ncalc_temperature_xrd started..."
compressed_volume = (a_lattice ** 3)/( compression_factor[0] * compression_factor[1] * compression_factor[2])
gruneisen_over_volume = gruneisen_uncompressed/((a_lattice ** 3) * (10 ** -30)) #This is the model that says gruneisen/V = constant.
#Checked with google calculator.
# This function is used in the Pandya and Ramakrishnan models.
def gruneisen_over_volume_func(integrable_v, initial_gruneisen, q_power):
return (1.0/(integrable_v)) * initial_gruneisen * ( ( (integrable_v)/((a_lattice * 1e-10) ** 3) ) ** q_power )
# This function is used with Walsh's model.
def walsh_gruneisen_over_volume_func(integrable_v, initial_gruneisen):
return (1.0/(integrable_v)) * ( initial_gruneisen + ( -3.296 * ( (((a_lattice * 1e-10)** 3)/integrable_v) - 1.0) ) + ( 10.493 * ( ( (((a_lattice * 1e-10)** 3)/integrable_v) - 1.0) ** 2 ) ) + ( -19.264 * ( ( (((a_lattice * 1e-10)** 3)/integrable_v) - 1.0) ) ** 3) )
# The function is integrated between the uncompressed volume and the compressed volume.
integrated_gruneisen_1, err_integrated_gruneisen_1 = quad(gruneisen_over_volume_func, ((a_lattice * 1e-10)** 3), compressed_volume * 1e-30, args=(1.93, 1.085)) #Pandya
#Checked with Wolfram Alpha
integrated_gruneisen_2, err_integrated_gruneisen_2 = quad(gruneisen_over_volume_func, ((a_lattice * 1e-10)** 3), compressed_volume * 1e-30, args=(2.008, 1.33)) #Ramakrishnan
#Checked with Wolfram Alpha
#Again the function is integrated between the uncompressed volume and the compressed volume.
integrated_gruneisen_3, err_integrated_gruneisen_3 = quad(walsh_gruneisen_over_volume_func, ((a_lattice * 1e-10)** 3), compressed_volume * 1e-30, args=(2.04)) #Walsh
#Checked with integral-calculator.com
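# All three numerical integrals above evaluate the same assumed quasi-harmonic relation,
# Theta_D(V) = Theta_D(V0) * exp( - integral from V0 to V of gamma(V')/V' dV' ),
# with the three models differing only in the functional form chosen for gamma(V).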
# Here the Debye temperature at the compressed volume is calculated for each model.
#This list will contain the Debye temperature at the given compression.
estimated_debye_temperature = [0] * 4
estimated_debye_temperature[0] = debye_temperature_uncompressed * np.exp(-gruneisen_over_volume * (10 ** -30) * (compressed_volume - (a_lattice ** 3))) #Uses the equation from Murphy et al. 2008. Note that this is dependent on debye_temperature_uncompressed, i.e. the Debye temperature of the uncompressed crystal.
# Checked with google calculator.
estimated_debye_temperature[1] = debye_temperature_uncompressed * np.exp(-integrated_gruneisen_1)
estimated_debye_temperature[2] = debye_temperature_uncompressed * np.exp(-integrated_gruneisen_2)
estimated_debye_temperature[3] = debye_temperature_uncompressed * np.exp(-integrated_gruneisen_3)
# Now we use the estimated Debye temperatures to calculate the temperature of the sample predicted by each model, using Debye-Waller.
temperature_est = [0] * 4
temperature_normalisation_factor = ( mass * (10 ** -3) * codata.value("Boltzmann constant") * 4 * np.pi * np.pi) / ((10 ** 20) * 3 * codata.value("Planck constant") * codata.value("Planck constant") * codata.value("Avogadro constant")) # Note that the factor of 10^20 is because our G^2 is in 1/Angstroms^2, so we convert it to meters here. This formulation is from <NAME>'s PhD thesis.
#Checked with Google calculator
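# The expression implemented here appears to be the high-temperature Debye-Waller relation
# ln(I/I0) = -(3 * h^2 * T / (4 * pi^2 * m_atom * k_B * Theta_D^2)) * G^2 (with G^2 in m^-2),
# so that T = -slope * Theta_D^2 * (4 * pi^2 * m_atom * k_B) / (3 * h^2); the factor above also folds in
# the g/mol -> kg-per-atom conversion (via N_A) and the A^-2 -> m^-2 conversion of G^2.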
for i in range(len(estimated_debye_temperature)):
temperature_est[i] = - (estimated_debye_temperature[i] ** 2) * slope_ln_intensity_vs_gsqr * temperature_normalisation_factor
# The following is all about plotting the ln(I) vs G^2 with a line fit, and an ideal line fit (as in, the line required to obtain the correct temperature).
def line(x, m, c):
return m*x + c
line_point_x1 = 0
line_point_x2 = max(gsqr)
line_point_y1 = line(line_point_x1, slope_ln_intensity_vs_gsqr, constant_ln_intensity_vs_gsqr)
line_point_y2 = line(line_point_x2, slope_ln_intensity_vs_gsqr, constant_ln_intensity_vs_gsqr)
line_points_x = [line_point_x1, line_point_x2]
line_points_y = [line_point_y1, line_point_y2]
# This part calculates the slope required to perfectly calculate the temperature. It then creates the points necessary to plot this slope on the plot.
ideal_slope_constant_model = - md_temperature_2d/( (estimated_debye_temperature[0] ** 2) * temperature_normalisation_factor)
ideal_line_point_y1 = line(line_point_x1, ideal_slope_constant_model, constant_ln_intensity_vs_gsqr)
ideal_line_point_y2 = line(line_point_x2, ideal_slope_constant_model, constant_ln_intensity_vs_gsqr)
ideal_line_points_x = [line_point_x1, line_point_x2]
ideal_line_points_y = [ideal_line_point_y1, ideal_line_point_y2]
# This part calculates an approximation of the upper and lower bounds of the measured temperature from the peaks.
gsqr_types = set(uncompressed_gsqr_est)
minimum_ln_intensity = []
minimum_gsqr = []
minimum_actual_gsqr = [] # These values of actual_gsqr are in units of A^-2. The above are not.
maximum_ln_intensity = []
maximum_gsqr = []
maximum_actual_gsqr = []
for e in gsqr_types:
intensities_for_each_gsqr_est = []
actual_gsqrs_for_each_gsqr_est = []
for i in range(len(uncompressed_gsqr_est)):
if uncompressed_gsqr_est[i] == e:
intensities_for_each_gsqr_est.append(ln_intensity[i])
actual_gsqrs_for_each_gsqr_est.append(gsqr[i])
minimum_ln_intensity.append(min(intensities_for_each_gsqr_est))
maximum_ln_intensity.append(max(intensities_for_each_gsqr_est))
index_of_minimum_intensity = intensities_for_each_gsqr_est.index(min(intensities_for_each_gsqr_est))
index_of_maximum_intensity = intensities_for_each_gsqr_est.index(max(intensities_for_each_gsqr_est))
minimum_actual_gsqr.append(actual_gsqrs_for_each_gsqr_est[index_of_minimum_intensity])
maximum_actual_gsqr.append(actual_gsqrs_for_each_gsqr_est[index_of_maximum_intensity])
slope_min_boundary, constant_min_boundary = np.polyfit(minimum_actual_gsqr, minimum_ln_intensity, 1)
slope_max_boundary, constant_max_boundary = np.polyfit(maximum_actual_gsqr, maximum_ln_intensity, 1)
# At this point we swap notation of the maximum to minimum and vice versa. This is because the upper boundary of the slope corresponds to the lower boundary of the temperature estimate and vice versa.
maximum_temperature_est = [0] * len(estimated_debye_temperature)
for i in range(len(estimated_debye_temperature)):
maximum_temperature_est[i] = - (estimated_debye_temperature[i] ** 2) * slope_min_boundary * temperature_normalisation_factor
minimum_temperature_est = [0] * len(estimated_debye_temperature)
for i in range(len(estimated_debye_temperature)):
minimum_temperature_est[i] = - (estimated_debye_temperature[i] ** 2) * slope_max_boundary * temperature_normalisation_factor
# This part finds an average of the temperatures and standard deviation.
central_temperature_mean = np.mean(temperature_est)
central_temperature_stdev = np.std(temperature_est)
minimum_temperature_mean = np.mean(minimum_temperature_est)
maximum_temperature_mean = np.mean(maximum_temperature_est)
temperature_error = np.mean([abs(central_temperature_mean - minimum_temperature_mean), abs(central_temperature_mean - maximum_temperature_mean)])
# This part creates the plot.
plt.plot(gsqr, ln_intensity, 'ko')
plt.plot(line_points_x, line_points_y, 'k')
plt.plot(ideal_line_points_x, ideal_line_points_y, 'r')
for i in range(len(gsqr)):
label = "(" + str(uncompressed_pos_est[i][0]) + str(uncompressed_pos_est[i][1]) + str(uncompressed_pos_est[i][2]) + ")"
plt.annotate(label, xy = (gsqr[i], ln_intensity[i]) )
plt.xlabel('|$G^{2}$| / A$^{-2}$')
plt.ylabel('$Ln(I/I_{0})$ / arb.')
plt.title('$Ln(I/I_{0})$ vs. $G^{2}$')
plt.xticks()
plt.yticks()
plt.savefig(plot_name, bbox_inches='tight')
if show_plot == True:
plt.show()
plt.close()
compression_ratio_x = 1/compression_factor[0]
compression_ratio_y = 1/compression_factor[1]
compression_ratio_z = 1/compression_factor[2]
compression_ratio = compressed_volume/(a_lattice ** 3)
# This part prints out our temperature calculations.
print ("\nVolume compression ratio = " + str(compression_ratio) + "\n"
"Compression ratio in x = " + str(compression_ratio_x) + "\n"
"Compression ratio in y = " + str(compression_ratio_y) + "\n"
"Compression ratio in z = " + str(compression_ratio_z) + "\n"
"\nThe best fit slope gives the following measurements of temperarature for each Gruneisen model:\n"
"Gruneisen/vol constant -> T = " + str(temperature_est[0]) + "\n"
"Pandya -> T = " + str(temperature_est[1]) + "\n"
"Ramakrishnan -> T = " + str(temperature_est[2]) + "\n"
"Walsh -> T = " + str(temperature_est[3]) + "\n"
"Ideal slope for Gruneisen/vol constant model (in order to predict correct temperature) = " + str(ideal_slope_constant_model) + "\n"
"\nA lower temperature boundary is given by the slope of the upper-most peaks :\n"
"Gruneisen/vol constant -> T = " + str(minimum_temperature_est[0]) + "\n"
"Pandya -> T = " + str(minimum_temperature_est[1]) + "\n"
"Ramakrishnan -> T = " + str(minimum_temperature_est[2]) + "\n"
"Walsh -> T = " + str(minimum_temperature_est[3]) + "\n"
"\nAn upper temperature boundary is given by the slope of the lower-most peaks:\n"
"Gruneisen/vol constant -> T = " + str(maximum_temperature_est[0]) + "\n"
"Pandya -> T = " + str(maximum_temperature_est[1]) + "\n"
"Ramakrishnan -> T = " + str(maximum_temperature_est[2]) + "\n"
"Walsh -> T = " + str(maximum_temperature_est[3]) + "\n"
"\nThe 3D MD temperature = " + str(md_temperature_3d) + "\n"
"The 2D MD temperature = " + str(md_temperature_2d) + "\n"
"From the best fit slope, the mean temperature is T = " + str(central_temperature_mean) + " +/- " + str(central_temperature_stdev) + " K."
"\nIf we consider the boundary temperatures as well (something that might look like an experiment), we can get T = " + str(central_temperature_mean) + " +/- " + str(temperature_error) + " K.")
#This part makes the log entry for the function.
f = open("log.pkfd", "a")
f.write("\n\nFunction calc_temperature_xrd called with input:\n"
"slope_ln_intensity_vs_gsqr = " + str(slope_ln_intensity_vs_gsqr) + "\n"
"constant_ln_intensity_vs_gsqr = " + str(constant_ln_intensity_vs_gsqr) + "\n"
"gruneisen_uncompressed = " + str(gruneisen_uncompressed) + "\n"
"debye_temperature_uncompressed = " + str(debye_temperature_uncompressed) + "\n"
"a_lattice = " + str(a_lattice) + "\n"
"compressed_volume = " + str(compressed_volume) + "\n"
"mass = " + str(mass) + "\n"
"pos = " + str(pos) + "\n"
"gsqr = " + str(gsqr) + "\n"
"plot_name = " + str(plot_name) + "\n"
"show_plot = " + str(show_plot) + "\n"
"ln_intensity = " + str(ln_intensity) + "\n"
"md_temperature_3d = " + str(md_temperature_3d) + "\n"
"md_temperature_2d = " + str(md_temperature_2d) + "\n"
"\nFunction calc_temperature_xrd returned:\n"
"Compression ratio = " + str(compression_ratio) + "\n"
"Ideal slope (required to exactly calculate T) = " + str(ideal_slope_constant_model) + "\n"
"\nThe best fit slope gives the following measurements of temperarature for each Gruneisen model:\n"
"Gruneisen/vol constant -> T = " + str(temperature_est[0]) + "\n"
"Pandya -> T = " + str(temperature_est[1]) + "\n"
"Ramakrishnan -> T = " + str(temperature_est[2]) + "\n"
"Walsh -> T = " + str(temperature_est[3]) + "\n"
"Ideal slope for Gruneisen/vol constant model (in order to predict correct temperature) = " + str(ideal_slope_constant_model) + "\n"
"\nA lower temperature boundary is given by the slope of the upper-most peaks :\n"
"Gruneisen/vol constant -> T = " + str(minimum_temperature_est[0]) + "\n"
"Pandya -> T = " + str(minimum_temperature_est[1]) + "\n"
"Ramakrishnan -> T = " + str(minimum_temperature_est[2]) + "\n"
"Walsh -> T = " + str(minimum_temperature_est[3]) + "\n"
"\nAn upper temperature boundary is given by the slope of the lower-most peaks:\n"
"Gruneisen/vol constant -> T = " + str(maximum_temperature_est[0]) + "\n"
"Pandya -> T = " + str(maximum_temperature_est[1]) + "\n"
"Ramakrishnan -> T = " + str(maximum_temperature_est[2]) + "\n"
"Walsh -> T = " + str(maximum_temperature_est[3]) + "\n"
"\nFrom the best fit slope, the mean temperature is T = " + str(central_temperature_mean) + " +/- " + str(central_temperature_stdev) + " K."
"\nIf we consider the boundary temperatures as well (something that might look like an experiment), we can get T = " + str(central_temperature_mean) + " +/- " + str(temperature_error) + " K.")
f.close()
t1 = time.time()
tt = t1 - t0
time_elapsed = time.localtime()
t = open('time.pkfd', 'a')
t.write("\nmod.calc_temperature_xrd took \t\t\t" + str(tt) + " s to complete.")
return temperature_est, central_temperature_mean
#########################################################################
def calc_debye_temperature(slope_ln_intensity_vs_gsqr, mass, md_temperature):
import scipy.constants as codata
import numpy as np
import time
t0 = time.clock()
debye_normalisation_factor = (10 ** 20) * md_temperature * 3 * codata.value("Planck constant") * codata.value("Planck constant") * codata.value("Avogadro constant") / (mass * (10 ** -3) * codata.value("Boltzmann constant") * 4 * np.pi * np.pi)
debye_temperature = np.sqrt(abs(debye_normalisation_factor/slope_ln_intensity_vs_gsqr))
print "\n\nCalculated Debye temperature = " + str(debye_temperature) + " K\n"
f = open("log.pkfd", "a")
f.write("\n\nFunction calc_debye_temperature called with input:\n"
"slope_ln_intensity_vs_gsqr = " + str(slope_ln_intensity_vs_gsqr) + "\n"
"mass = " + str( mass) + "\n"
"md_temperature = " + str(md_temperature) + "\n"
"\nFunction calc_debye_temp returned:\n"
"debye_temperature = " + str(debye_temperature) + "\n"
)
f.close()
t1 = time.clock()
tt = t1 - t0
t = open('time.pkfd', 'a')
t.write("\nmod.calc_debye_temperature took \t\t" + str(tt) + " s to complete.")
return debye_temperature
###########################################################################
def profile_peaks(source, timestep, initial_hkl_pos_est, make_plots):
import time
import numpy as np
import os
import subprocess
t0 = time.time()
# This function profiles each peak by plotting every intensity point in the 3D FT box against
# sqrt(|G^2 - G^2_centre|), where G^2_centre is evaluated at the point of maximum intensity.
cwd = os.getcwd()
intensity = [0] * len(initial_hkl_pos_est)
k_diff_abs = [0] * len(initial_hkl_pos_est)
for i in range(len(initial_hkl_pos_est)):
peak_dir = str(initial_hkl_pos_est[i][0]) + str(initial_hkl_pos_est[i][1]) + str(initial_hkl_pos_est[i][2])
datafile = str(cwd) + "/soh_output/" + source + "." + str(timestep) + "." + peak_dir + ".ft"
kx, ky, kz, intensity[i] = np.loadtxt(datafile, skiprows=1, usecols=(0,1,2,5), unpack=True)
k_diff_abs[i] = [0] * len(intensity[i])
peak_position = np.argmax(intensity[i])
gsqr_centre = (kx[peak_position] ** 2) + (ky[peak_position] ** 2) + (kz[peak_position] ** 2)
for j in range(len(intensity[i])):
gsqr = (kx[j] ** 2) + (ky[j] ** 2) + (kz[j] ** 2)
k_diff_abs[i][j] = np.sqrt(abs(gsqr - gsqr_centre))
if make_plots == True:
rm_command = "rm -r " + cwd + "/peak_histograms"
subprocess.call(rm_command, shell=True)
mkdir_command = "mkdir " + cwd + "/peak_histograms"
subprocess.call(mkdir_command, shell=True)
for i in range(len(initial_hkl_pos_est)):
peak_dir = str(initial_hkl_pos_est[i][0]) + str(initial_hkl_pos_est[i][1]) + str(initial_hkl_pos_est[i][2])
location = str(cwd) + "/peak_histograms/" + peak_dir
mkdir_command_2 = "mkdir " + location
subprocess.call(mkdir_command_2, shell=True)
dat_filename = location + "/histogram.dat"
h = open(dat_filename, "w")
h.write("#k_differential intensity")
for j in range(len(intensity[i])):
h.write("\n" + str(k_diff_abs[i][j]) + " " + str(intensity[i][j]) + "")
h.close()
in_filename = location + "/histogram_gnuplot.in"
plot_name = "histogram_" + peak_dir + ".png"
g = open(in_filename, "w")
g.write(
"set terminal png size 1600,1200 enhanced font 'Helvetica,20'"
"\nset output '" + str(plot_name) + "'"
"\nplot '" + dat_filename + "' using 1:2"
)
g.close()
if make_plots == True:
for i in range(len(initial_hkl_pos_est)):
peak_dir = str(initial_hkl_pos_est[i][0]) + str(initial_hkl_pos_est[i][1]) + str(initial_hkl_pos_est[i][2])
location = str(cwd) + "/peak_histograms/" + peak_dir
in_filename = location + "/histogram_gnuplot.in"
plot_name = "histogram_" + peak_dir + ".png"
subprocess.call("gnuplot < " + str(in_filename), shell=True)
mv_command = "mv " + plot_name + " " + location
subprocess.call(mv_command, shell=True)
tf = time.time()
tt = tf - t0
t = open('time.pkfd', 'a')
t.write("\nmod.profile_peaks took \t\t" + str(tt) + " s to complete.")
return;
############################################################################
# This function gives the final message at the end of a run.
def checkout(xrd_temperatures, xrd_temperature_labels, md_temperatures, md_temperature_labels):
f = open("log.pkfd", "a")
print "\n\n#########################\n\npeakfinder.py finished\n\n#########################"
f.write("\n\n#########################\n\npeakfinder.py finished\n\n#########################")
print "\n\nThe MD temperatures are:\n"
f.write("\n\nThe MD temperatures are:\n")
for i in range(len(md_temperatures)):
print md_temperature_labels[i] + "\t\t= " + str(md_temperatures[i]) + " K"
f.write(md_temperature_labels[i] + "\t\t= " + str(md_temperatures[i]) + " K\n")
print "\nThe estimated x-ray diffraction temperatures are:\n"
f.write("\nThe estimated x-ray diffraction temperatures are:\n")
for i in range(len(xrd_temperatures)):
percent_off = 100.0 * (abs(xrd_temperatures[i] - md_temperatures[0]))/md_temperatures[0]
print xrd_temperature_labels[i] + "\t= " + str(xrd_temperatures[i]) + " K \t\t" + str(percent_off) + " % from the 2D MD temperature."
f.write(xrd_temperature_labels[i] + "\t= " + str(xrd_temperatures[i]) + " K \t\t" + str(percent_off) + " % from the 2D MD temperature.\n")
return;
<file_sep>/development/fit_to_peak_edges.py
def run(run_soh, current_pos_est, raw_pos_est, source_name, timestep, undershoot, overshoot, source, mass, a_lattice, k_steps, num_cores):
import units as un
import copy
import os
print "Fitting to peak edges..."
direction_str = ["kx", "ky", "kz"]
k_start_accurate = copy.deepcopy(current_pos_est)
k_stop_accurate = copy.deepcopy(current_pos_est)
for i, pos in enumerate(raw_pos_est):
peak_str = un.make_peak_str(pos)
for j, direction in enumerate(direction_str):
k_start, k_stop = un.calc_peak_edge_k_start_stop(current_pos_est[i], undershoot[j], overshoot[j])
soh_location = un.determine_soh_edge_finding_input_file_location(direction, peak_str)
un.write_soh_input_1DFT(source_name, soh_location, peak_str + "_find_edges_" + direction, mass, a_lattice, k_start, k_stop, k_steps)
if run_soh is True:
for i, pos in enumerate(raw_pos_est):
peak_str = un.make_peak_str(pos)
for j, direction in enumerate(direction_str):
soh_location = un.determine_soh_edge_finding_input_file_location(direction, peak_str)
un.run_soh(soh_location, num_cores)
un.move_soh_rough_output_to_peak_folder(peak_str, peak_str + "_find_edges_" + direction, source_name, timestep)
for i, pos in enumerate(raw_pos_est):
peak_str = un.make_peak_str(pos)
for j, direction in enumerate(direction_str):
soh_location = un.determine_soh_edge_finding_output_file_location(peak_str, direction, source, timestep)
soh_output = un.read_from_soh_output(soh_location)
un.plot_pygnuplot(soh_output[j], soh_output[3], os.getcwd() + "/data/" + peak_str + "/" + direction + "_lineout.png", os.getcwd() + "/data/" + peak_str + "/" + direction + "_lineout.dat")
k_start_accurate[i][j], k_stop_accurate[i][j] = un.find_k_start_stop_for_peak_from_first_minima(soh_output[j], soh_output[3])
return k_start_accurate, k_stop_accurate<file_sep>/oldddd/newmodule.py
#####################################################
#
# Header last edited: 22/08/17
# This file contains the nuts and bolts of the new
# code. Each large process undertaken by peakfinder
# is described by a "brick" function. These
# brick functions are made up of smaller functions, each called a
# "unit"; units are accompanied by unit
# tests, found in the testmodule.py file.
#
# Unit functions are in the upper section of this
# file. Brick functions are in the lower section.
#
#####################################################
# Units.
def LoadData(filename, columnnumbers):
from numpy import loadtxt
columnnumbers = tuple(columnnumbers)
data = loadtxt(filename, usecols = columnnumbers, skiprows = 1, unpack = True)
return data
def FindPeakCentre(data, intensityindex, kxkykzindex):
from numpy import argmax
intensity = data[intensityindex]
peakintensityindex = argmax(intensity)
kx = data[kxkykzindex[0]]
ky = data[kxkykzindex[1]]
kz = data[kxkykzindex[2]]
peakcentre = [kx[peakintensityindex], ky[peakintensityindex], kz[peakintensityindex]]
return peakcentre;
def FindOrthogonalLineout(data, intensityindex, kxkykzindex, directionindex, point):
# directionindex = 0 for kx, = 1 for ky, and = 2 for kz.
kindex = kxkykzindex[directionindex]
kdirectiondata = data[kindex]
otherdirectionsindex = list(kxkykzindex)
otherdirectionsindex.remove(kindex)
orthogonallineout = []
for i in range(len(kdirectiondata)):
if data[otherdirectionsindex[0]][i] == point[otherdirectionsindex[0]] and data[otherdirectionsindex[1]][i] == point[otherdirectionsindex[1]]:
lineoutpoint = [0,0,0,0]
lineoutpoint[otherdirectionsindex[0]] = data[otherdirectionsindex[0]][i]
lineoutpoint[otherdirectionsindex[1]] = data[otherdirectionsindex[1]][i]
lineoutpoint[kindex] = kdirectiondata[i]
lineoutpoint[intensityindex] = data[intensityindex][i]
orthogonallineout.append(lineoutpoint)
return orthogonallineout
def FindIntensityMinima1D(data, kxkykzindex, intensityindex):
import numpy as np
intensity = [0] * len(data)
for i in range(len(data)):
intensity[i] = data[i][intensityindex]
maxintensityindex = np.argmax(intensity)
maxpoint = data[maxintensityindex]
minima = [0,0]
imax = 1 + len(data)/2
tempintensity = [0] * imax
for i in range(imax):
tempintensity[i] = intensity[i]
minimumindex = np.argmin(tempintensity)
minima[0] = data[minimumindex]
tempintensity = [0] * imax
for i in range(imax):
tempintensity[i] = intensity[imax - 1 + i]
minimumindex = np.argmin(tempintensity)
minima[1] = data[imax - 1 + minimumindex]
return minima
def BuildIntensityVolume(points, kxkykzindex, intensityindex):
############ Work in progress. #############
for i in range(len(kxkykzindex)):
coordinate1 = points[kxkykzindex[i]][0]
coordinate2 = points[kxkykzindex[i]][1]
intensity1 = points
return
def GetCompressedGruneisenParameterModel1(uncompressedgruneisenparameter, volumetriccompressionratio):
# Documentation edited: 22/08/17
# Models the Gruneisen parameter under the
# asumption that:
#
# Gruneisen_parameter/Volume = constant
#
# Since Gruneisen0/V0 = Gruneisencompressed /
# (V0 * volumetric_compression_ratio), the
# calculation is simplified to:
# Gruneisencompressed = Gruneisen0 *
# volumetric_compression_ratio.
#
# This function is only configured for cubic
# lattices.
#
# Inputs:
#
# uncompressedgruneisenparameter - The Gruneisen
# parameter of the material at ambient
# conditions.
# volumetriccompressionratio = V/V0. This value
# should be less than 1.
#
#
# Outputs:
# compressedgruneisenparameter - the Gruneisen
# parameter at this compression, according to
# this model.
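# Example (hypothetical numbers): with
# uncompressedgruneisenparameter = 2.0 and
# volumetriccompressionratio = 0.9, this
# model gives
# compressedgruneisenparameter = 1.8.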
compressedgruneisenparameter = uncompressedgruneisenparameter * volumetriccompressionratio
return compressedgruneisenparameter
def GetDebyeTemperatureFromGruneisenParameter():
############ Work in progress. #############
return
#######################################
# Bricks.
def RemoveTDS():
#LoadData()
#FindPeakCentre()
#FindOrthogonalVector()
#FindOrthogonalVector()
#FindOrthogonalVector()
#FindMinimumIntensity1D()
#FindMinimumIntensity1D()
#FindMinimumIntensity1D()
#Build3DIntensityVolume()
#SubtractIntensityVolumeFromIntensity()
return;
<file_sep>/peakrunner/create_copy_location_list.py
def run(root_directory, subdirectory_prefix, subdirectory_suffix, subdirectory_variable):
copy_location_list = []
for current_subdirectory_variable in subdirectory_variable:
location = root_directory + "/" + subdirectory_prefix + current_subdirectory_variable + subdirectory_suffix + "/peakfinder"
copy_location_list.append(location)
return copy_location_list
<file_sep>/src/units.py
import logging as log
def get_time():
import time
localtime = time.localtime()
t0 = time.time()
return t0, localtime
def build_all_k_values(gsqr_max, negative_k):
# Builds all possible combinations of integers, up to the k-value given by sqrt(gsqr_max), rounded up. It only includes combinations that give gsqr < gsqr_max. Bool negative_k can be used to include/exclude negative k-values.
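# For example (hypothetical input): gsqr_max = 2 with negative_k = False gives k_max = 2 and returns
# [[0, 0, 0], [0, 0, 1], [0, 1, 0], [0, 1, 1], [1, 0, 0], [1, 0, 1], [1, 1, 0]].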
import numpy as np
import math
k_max = int(math.ceil(np.sqrt(gsqr_max)))
if negative_k == True:
k_values = range(-k_max, k_max + 1)
elif negative_k == False:
k_values = range(k_max + 1)
else:
print "Incorrect entry:\nnegative_k must be of bool type.\n"
exit()
pos = []
for i in k_values:
for j in k_values:
for k in k_values:
if i**2 + j**2 + k**2 <= gsqr_max:
pos.append([i, j, k])
else:
pass
log.debug(pos)
return pos
def remove_fcc_forbidden_reflections(old_pos):
# Removes pos_est values that are forbidden in fcc crystals. Diffraction is allowed at positions where all the k-values are all-even or all-odd.
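# For example, [1, 1, 1] and [2, 0, 0] are kept (all-odd / all-even), while [1, 0, 0] and [2, 1, 0]
# are removed (mixed parity).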
new_pos = []
for i in range(len(old_pos)):
if old_pos[i][0] % 2 == 1 and old_pos[i][1] % 2 == 1 and old_pos[i][2] % 2 == 1:
new_pos.append(old_pos[i])
elif old_pos[i][0] % 2 == 0 and old_pos[i][1] % 2 == 0 and old_pos[i][2] % 2 == 0:
new_pos.append(old_pos[i])
else:
pass
log.debug(new_pos)
return new_pos
def remove_000(old_pos):
# Removes [0, 0, 0] from pos.
new_pos = []
for i in old_pos:
if i != [0, 0, 0]:
new_pos.append(i)
log.debug(new_pos)
return new_pos
def get_gsqr_values(pos):
# Calculates the value of G^2 for each position in pos.
gsqr = []
for i in pos:
current_gsqr = (i[0] ** 2) + (i[1] ** 2) + (i[2] ** 2)
gsqr.append(current_gsqr)
log.debug(gsqr)
return gsqr
def build_datafile_structure(pos):
import os
peak_str = []
for i in pos:
current_peak_str = str(i[0]) + str(i[1]) + str(i[2])
peak_str.append(current_peak_str)
if not os.path.exists("data/" + current_peak_str):
os.makedirs("data/" + current_peak_str)
log.debug(peak_str)
return peak_str
def calc_k_offset_with_N_atoms(N_atoms):
offset = [1.0/N_atoms[0], 1.0/N_atoms[1], 1.0/N_atoms[2]]
log.debug(offset)
return offset
def convert_to_per_angstrom(element, a_lattice):
import numpy as np
element = np.asarray(element)
converted_element = element * ( (2 * np.pi) / a_lattice )
converted_element = list(converted_element)
log.debug(converted_element)
return converted_element
def make_peak_str(i):
peak_str = str(i[0]) + str(i[1]) + str(i[2])
log.debug(peak_str)
return peak_str
def find_k_start(pos_element, offset):
k_start = [pos_element[0] - offset[0], pos_element[1] - offset[1], pos_element[2] - offset[2]]
log.debug(k_start)
return k_start
def find_k_stop(pos_element, offset):
k_stop = [pos_element[0] + offset[0], pos_element[1] + offset[1], pos_element[2] + offset[2]]
log.debug(k_stop)
return k_stop
def determine_soh_input_file_location(peak_str):
import os
cwd = os.getcwd()
input_file_location = cwd + "/data/" + peak_str + "/" + peak_str + ".in"
log.debug(input_file_location)
return input_file_location
def write_soh_input_3DFT(source_name, file_destination, peak_str, mass, a_lattice, k_steps, k_start, k_stop):
import os
cwd = os.getcwd()
source_location = cwd + "/lammps/" + source_name
string_to_write = ("VERBOSE 0"
+ "\nFILE_TYPE lammps-multi"
+ "\nDATA_FILE " + str(source_location)
+ "\nAPPEND_FILE_NAME " + str(peak_str)
+ "\nPLOT_OUTPUT pdf"
+ "\nCOORDS_SCALED"
+ "\nSET_MASS " + str(mass)
+ "\nSET_A_CELL " + str(a_lattice)
+ "\nCALC_3D_FT"
+ "\nSET_KX " + str(k_start[0]) + " " + str(k_stop[0]) + " " + str(k_steps)
+ "\nSET_KY " + str(k_start[1]) + " " + str(k_stop[1]) + " " + str(k_steps)
+ "\nSET_KZ " + str(k_start[2]) + " " + str(k_stop[2]) + " " + str(k_steps)
+ "\n"
)
f = open(file_destination, "w")
f.write(string_to_write)
f.close()
log.debug(string_to_write)
return string_to_write
def run_soh(input_file_location, num_cores):
import subprocess
shell_command = "mpiexec -np " + str(num_cores) + " sonOfHoward " + input_file_location + " >/dev/null"
subprocess.call(shell_command, shell=True)
log.debug("sonOfHoward called using input file at " + input_file_location)
return
def move_soh_output_to_peak_folder(peak_str, source_name, timestep):
import shutil
origin = "./lammps/" + source_name + "." + timestep + "." + peak_str + ".ft"
destination = "./data/" + peak_str + "/"
shutil.move(origin, destination)
log.debug(origin + " moved to " + destination)
return
def move_plot_output_to_peak_folder(direction, peak_str):
import shutil
origin = direction + ".png"
destination = "./data/" + peak_str + "/"
shutil.move(origin, destination)
log.debug(origin + " moved to " + destination)
return
def determine_soh_output_file_location(peak_str, source_name, timestep):
import os
cwd = os.getcwd()
output_file_location = cwd + "/data/" + peak_str + "/" + source_name + "." + timestep + "." + peak_str + ".ft"
log.debug(output_file_location)
return output_file_location
def read_from_soh_output(filename):
import numpy as np
kx, ky, kz, intensity = np.loadtxt(filename, skiprows=1, usecols=(0,1,2,5), unpack=True)
soh_output = [kx, ky, kz, intensity]
log.debug(soh_output)
return soh_output
def find_point_of_max_height(soh_output):
import numpy as np
max_height_index = np.argmax(soh_output[3])
point_of_max_height = [soh_output[0][max_height_index], soh_output[1][max_height_index], soh_output[2][max_height_index]]
log.debug(point_of_max_height)
return point_of_max_height
def calc_dvol(soh_output):
k_step = round(len(soh_output[0]) ** (1.0 / 3.0)) # Round to guard against floating-point error in the cube root.
dkx = ( max(soh_output[0]) - min(soh_output[0]) ) / (k_step - 1)
dky = ( max(soh_output[1]) - min(soh_output[1]) ) / (k_step - 1)
dkz = ( max(soh_output[2]) - min(soh_output[2]) ) / (k_step - 1)
dvol = dkx * dky * dkz
log.debug(dvol)
return dvol
def calc_integrated_intensity(soh_output, dvol):
intensity_sum = sum(soh_output[3])
integrated_intensity = dvol * intensity_sum
log.debug(integrated_intensity)
return integrated_intensity
def get_ln_intensity(intensity):
import numpy as np
ln_intensity = np.log(intensity)
log.debug(ln_intensity)
return ln_intensity
def calc_line_slope_and_constant(x, y):
import numpy as np
slope, constant = np.polyfit(x, y, 1, cov=False)
log.debug("slope = " + str(slope) + "\nconstant = " + str(constant))
return slope, constant
def calc_debye_waller_constant(m):
from scipy.constants import h, N_A, k, pi
debye_waller_constant = (10 ** 20) * 3 * (h ** 2) * N_A / (4 * (pi ** 2) * m * (10 ** -3) * k)
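# As a rough cross-check (assuming Cu, m = 63.546 g/mol), this evaluates to approximately 2.3 K Angstrom^2.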
log.debug(debye_waller_constant)
return debye_waller_constant
def calc_debye_temperature_xrd(temperature, slope, debye_waller_constant):
import numpy as np
debye_temperature = np.sqrt(debye_waller_constant * temperature * abs( 1.0 / slope ))
log.debug(debye_temperature)
return debye_temperature
def calc_debye_temperature_from_single_term_gruneisen_model(debye_temperautre_300K_uncompressed, initial_volume, final_volume, gamma_uncompressed, exponent):
import numpy as np
# See <NAME> PHYSICAL REVIEW B 78, 014109 (2008) for the source of this equation.
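# The closed form assumed below (reconstructed from the gamma(V) = gamma_0 * (V/V0)^exponent model) is
# Theta_D(V) = Theta_D(V0) * exp( -(gamma_0 / exponent) * ((V/V0)^exponent - 1) ).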
exponent_term = - (gamma_uncompressed / exponent) * (((final_volume / initial_volume) ** exponent) - 1.0) # Dimensionless for any exponent; equals -(gamma_0/q)*((V/V0)^q - 1).
correction_factor = np.exp(exponent_term)
model_debye_temperature = debye_temperautre_300K_uncompressed * correction_factor
log.debug(model_debye_temperature)
return model_debye_temperature
def calc_debye_temperature_from_triple_term_gruneisen_model(debye_temperature_300K_uncompressed, initial_volume, final_volume, gamma_uncompressed, constants):
import numpy as np
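# The terms below are intended as the closed form of
# Theta_D(V) = Theta_D(V0) * exp( - integral from V0 to V of gamma(V')/V' dV' )
# for an assumed Grueneisen parameter gamma(V) = gamma_0 + c1*(V0/V - 1) + c2*(V0/V - 1)^2 + c3*(V0/V - 1)^3,
# with constants = [c1, c2, c3]; each constant_term_n / volume_term_n pair is one term of that integral.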
constant_term_1 = gamma_uncompressed - constants[0] + constants[1] - constants[2]
volume_term_1 = np.log(final_volume) - np.log(initial_volume)
constant_term_2 = - constants[0] + 2 * constants[1] - 3 * constants[2]
volume_term_2 = initial_volume * ((1 / final_volume) - (1 / initial_volume))
constant_term_3 = - (constants[1] / 2.0) + (3 * constants[2] / 2.0)
volume_term_3 = (initial_volume ** 2) * ((1 / (final_volume ** 2)) - (1 / (initial_volume ** 2)))
constant_term_4 = - constants[2] / 3.0
volume_term_4 = (initial_volume ** 3) * ((1 / (final_volume ** 3)) - (1 / (initial_volume ** 3)))
exponent_term = (constant_term_1 * volume_term_1) + (constant_term_2 * volume_term_2) + (constant_term_3 * volume_term_3) + (constant_term_4 * volume_term_4)
correction_factor = np.exp(- exponent_term)
model_debye_temperature = debye_temperature_300K_uncompressed * correction_factor
log.debug(model_debye_temperature)
return model_debye_temperature
def calc_volume_lattice_units(a_lattice, compression_factors):
volume = a_lattice ** 3 * (compression_factors[0] * compression_factors[1] * compression_factors[2])
log.debug(volume)
return volume
def calc_temperature_xrd(debye_temperature, slope, debye_waller_constant):
temperature = (debye_temperature ** 2) * abs(slope) * (1.0 / debye_waller_constant)
log.debug(temperature)
return temperature
def plot_matplotlib(x, y, filename, x_label, y_label, plot_title):
import matplotlib.pyplot as plt
plt.scatter(x, y)
plt.xlabel(x_label)
plt.ylabel(y_label)
plt.title(plot_title)
plt.savefig(filename)
plt.close()
return
def plot_pyqtgraph(x, y, filename):
import pyqtgraph as pg
import pyqtgraph.exporters
class MyPlotClass():
def __init__(self):
self.windowplt = pg.plot()
self.windowplt.win.hide()
def savePlot(self, x, y, filename):
self.windowplt.plot(x, y)
exporter = pg.exporters.ImageExporter(self.windowplt.plotItem)
exporter.params.param('width').setValue(256, blockSignal=exporter.widthChanged)
exporter.params.param('height').setValue(256, blockSignal=exporter.heightChanged)
exporter.export(filename)
save_plot = MyPlotClass()
save_plot.savePlot(x, y, filename)
return
def plot_pygnuplot(x, y, filename, data_filename):
import PyGnuplot as gnu
gnu.s([x,y], data_filename)
gnu.c('set terminal pngcairo size 350,262 enhanced font "Verdana,10"')
gnu.c('set output "' + filename + '"')
gnu.c('plot "' + data_filename + '" w lp pi -1')
return
def find_line_data_from_3DFT(constant_axes, variable_axis, centre_point, soh_output):
constant_value_0 = centre_point[constant_axes[0]]
constant_value_1 = centre_point[constant_axes[1]]
line_points = []
line_intensity = []
for i, intensity in enumerate(soh_output[3]):
if soh_output[constant_axes[0]][i] == constant_value_0 and soh_output[constant_axes[1]][i] == constant_value_1:
line_k = soh_output[variable_axis][i]
line_points.append(line_k)
line_intensity.append(intensity)
return line_points, line_intensity
def write_temperatures_to_file(debye_temperature, temperature, filename_temperatures):
f = open(filename_temperatures, "w")
f.write(
"Debye temperature\t\t\t\t" + str(debye_temperature) + "\n"
"Temperature\t\t\t\t\t\t" + str(temperature)
)
f.close()
return
def write_peak_intensities_to_file(pos_est, peak_centre, gsqr, integrated_intensity, ln_intensity, filename):
header_string = "peak_name peak_centre gsqr integrated_intensity ln_intensity\n"
f = open(filename, "w")
f.write(header_string)
for i, pos in enumerate(pos_est):
f.write("%s %s %s %s %s\n" % (pos, peak_centre[i], gsqr[i], integrated_intensity[i], ln_intensity[i]))
f.close()
return
def find_if_vectors_parallel(v_1, v_2):
import numpy as np
import math
length_1 = np.linalg.norm(v_1)
length_2 = np.linalg.norm(v_2)
if length_1 < 0.000001:
result = False
elif length_2 < 0.000001:
result = False
else:
normalised_1 = v_1 / length_1
normalised_2 = v_2 / length_2
dot_prod = np.dot(normalised_1, normalised_2)
if math.isnan(dot_prod):
result = False
elif abs(dot_prod - 1.0) < 1e-6: # Tolerance comparison; truncating with int() could miss nearly-parallel vectors due to floating-point error.
result = True
else:
result = False
return result
<file_sep>/src/inpkfd.py
path = "path_1_static_peakfinding" # Choose from: "path_1_static_peakfinding", "path_2_dynamic_peakfinding"
run_soh = True
make_peak_plots = True
num_cores = 2
# Input for "select_peak_positions"
gsqr_max = 81
negative_k = False
remove_000 = False
# Input for "use_soh_for_3DFT"
source_name = "uncompressed_cu_300K_5x5x60_10000.atom"
timestep = "10000" # Only used in moving soh output files.
mass = 63.546 # In amu
a_lattice = 3.628 # In Angstroms
k_steps = 21
N_atoms = [5, 5, 60]
# Input for use by "calc_debye_waller"
uncompressed_debye_temperature = 320.0
temperature = 300.0
# These Cu model values are from <NAME> et. al. PHYSICAL REVIEW B 78, 014109 (2008)
single_term_model_gamma_0_values = [1.98, 1.93, 2.008]
single_term_model_exponent_values = [1.0, 1.085, 1.33]
triple_term_model_gamma_0_values = [2.04]
triple_term_model_constants = [[-3.296, 10.493, -19.264]]
<file_sep>/test/initialise.py
def run():
import units as un
import inpkfd as ip
import logging as log
import shutil
import os
# Sets up the log system.
log.basicConfig(filename = "log.pkfd", filemode = "w", level = log.DEBUG, format = "%(asctime)s\t\t%(filename)s\t\t%(funcName)s\n\t %(message)s")
log.info("Peakfinder intialised.\n")
# Removes previously created folders.
if os.path.exists("data"):
shutil.rmtree("data")
return
<file_sep>/src/test_units.py
def run(unit_name, test_input, expected_result):
import numpy as np
exec("from units import " + unit_name)
string_test_input = str(test_input)
string_test_input = string_test_input[1:-1]
exec("actual_result = " + str(unit_name) + "(" + string_test_input + ")")
if actual_result == expected_result:
print "Pass\t\t" + "UNIT\t\t" + unit_name
elif actual_result != expected_result:
print "\n####FAIL####\t\t" + "UNIT\t\t" + unit_name
print "Expected result =\t" + str(expected_result)
print "Actual result \t=\t" + str(actual_result) + "\n"
else:
print "TEST FRAMEWORK BROKEN"
return
<file_sep>/test/test_bricks.py
def run(brick_name, test_input, expected_result):
import importlib
exec("import " + brick_name)
string_test_input = str(test_input)
string_test_input = string_test_input[1:-1]
exec("actual_result = " + str(brick_name) + ".run(" + string_test_input + ")")
if actual_result == expected_result:
print "Pass\t\t" + "BRICK\t\t" + brick_name
else:
print "####FAIL####\t\t" + "BRICK\t\t" + brick_name
return
<file_sep>/src/write_output_files.py
def run(debye_temperature, temperature, pos_est, peak_centre, gsqr, integrated_intensity, ln_intensity):
import units as un
filename_temperatures = "results.pkfd"
un.write_temperatures_to_file(debye_temperature, temperature, filename_temperatures)
filename_peaks = "integrated_intensity.dat"
un.write_peak_intensities_to_file(pos_est, peak_centre, gsqr, integrated_intensity, ln_intensity, filename_peaks)
return<file_sep>/peakrunner/move_lammps_files.py
def run(current_lammps_directory, lammps_prefix, lammps_suffix, lammps_variable, copy_locations):
from subprocess import call
for i, variable in enumerate(lammps_variable):
lammps_filename = lammps_prefix + variable + lammps_suffix
start_point = current_lammps_directory + "/" + lammps_filename
print start_point
end_point = copy_locations[i] + "/lammps/" + lammps_filename
print end_point
call("mv " + start_point + " " + end_point, shell=True)
return
<file_sep>/development/rotate_peak_positions_to_z_along_111.py
def run(pos_est, gsqr_est):
import units as un
print "Rotating peak positions so that z lies along the 111 direction..."
rot_x, rot_z = un.create_rotation_matrix_for_111_rotation()
rotated_pos_est = un.rotate_pos_est_using_rotation_matrices(pos_est, rot_x, rot_z)
rotated_gsqr_est = un.get_gsqr_values(rotated_pos_est)
return rotated_pos_est, rotated_gsqr_est
<file_sep>/development/find_compression_ratio.py
def run(run_soh, lineout_directions, undershoot, overshoot, source, mass, a_lattice, lineout_k_steps,
timestep, soh_command):
import units as un
import os
print "Finding compression ratios..."
compression_ratio = [1.0, 1.0, 1.0]
direction_str = ["kx", "ky", "kz"]
if os.path.exists("./data/lineouts/"):
pass
else:
un.make_lineout_directory()
for i, direction in enumerate(lineout_directions):
k_start, k_stop = un.calc_lineout_k_start_stop(direction, undershoot, overshoot)
soh_location = un.determine_soh_compression_finding_input_file_location(direction_str[i])
un.write_soh_input_1DFT(source, soh_location, "lineout_" + direction_str[i], mass, a_lattice, k_start, k_stop, lineout_k_steps)
if run_soh is True:
for i, direction in enumerate(lineout_directions):
soh_location = un.determine_soh_compression_finding_input_file_location(direction_str[i])
un.run_soh(soh_location, soh_command)
un.move_soh_output_to_lineout_folder(direction_str[i], source, timestep)
for i, direction in enumerate(lineout_directions):
soh_location = un.determine_soh_1DFT_output_file_location(direction_str[i], source, timestep)
soh_output = un.read_from_soh_output(soh_location)
un.plot_pygnuplot(soh_output[i], soh_output[3], os.getcwd() + "/data/lineouts/" + direction_str[i] + "_lineout.png", os.getcwd() + "/data/lineouts/" + direction_str[i] + "_lineout.dat")
k_of_max_height = un.find_point_of_max_height(soh_output)
compression_ratio[i] = un.calc_compression_ratio(k_of_max_height[i], lineout_directions[i][i])
return compression_ratio
<file_sep>/peakrunner/copy_peakfinder_to_locations.py
def run(src_location, copy_location_list):
import shutil
import os
for copy_location in copy_location_list:
if os.path.isdir(copy_location) is True:
print "WARNING: Copy location already exists. The command to copy to this location has been aborted."
pass
else:
shutil.copytree(src_location, copy_location)
return
<file_sep>/development/calc_peak_intensities.py
def run(raw_pos_est, source_name, timestep):
import units as un
import logging as log
log.debug("Brick %s started.\n", __name__)
print "Calculating peak intensities..."
peak_centre = []
integrated_intensity = []
for i in raw_pos_est:
peak_str = un.make_peak_str(i)
soh_output_file_location = un.determine_accurate_soh_output_file_location(peak_str, source_name, timestep)
soh_output = un.read_from_soh_output(soh_output_file_location)
point_of_max_height = un.find_point_of_max_height(soh_output)
peak_centre.append(point_of_max_height)
dvol = un.calc_dvol(soh_output)
current_integrated_intensity = un.calc_integrated_intensity(soh_output, dvol)
integrated_intensity.append(current_integrated_intensity)
log.debug("Brick %s finished.\n", __name__)
return peak_centre, integrated_intensity
<file_sep>/development/peakfinder.py
import inpkfd
import importlib
import initialise
import finalise
path = importlib.import_module(inpkfd.path) # Imports the path specified in inpkfd.
initialise.run()
path.run()
finalise.run()
<file_sep>/peakrunner/peakrunner.py
import in_peakrunner as ip
def run():
import create_copy_location_list
import copy_peakfinder_to_locations
import write_bash_script
import move_lammps_files
copy_location_list = create_copy_location_list.run(
ip.root_directory, ip.subdirectory_prefix, ip.subdirectory_suffix, ip.subdirectory_variable_list)
copy_peakfinder_to_locations.run(ip.peakfinder_src_location, copy_location_list)
write_bash_script.run(ip.bash_script_filename, copy_location_list)
return
run()
<file_sep>/oldddd/testmodule.py
########################################################
# These functions are multi-use functions used by the
# tests.
def checktestresult(testresult, functionname):
if testresult == 0:
print "***FAIL***\tFunction '" + functionname + "' test complete."
elif testresult == 1:
print "PASS \t\tFunction '" + functionname + "' test complete."
else:
print "ERROR HANDLED:"
print "Function '" + functionname + "' test returned non-binary value."
print "Aborting test."
exit()
return;
def testdatavariables():
filename = "test.dat"
datacolumnnumbers = [0, 1, 2, 3]
intensityindex = 3
kxkykzindex = [0,1,2]
latticeparameter = 3.002
#volumetriccompressionratio =
return filename, datacolumnnumbers, intensityindex, kxkykzindex
##########################################################
# These test the smallest units of code.
def TestLoadData():
from newmodule import LoadData
def TEST():
functionname = LoadData.__name__
filename, datacolumnnumbers, intensityindex, kxkykzindex = testdatavariables()
data = LoadData(filename, datacolumnnumbers)
sumcolumn = [sum(data[0]), sum(data[1]), sum(data[2]), sum(data[3])]
sumlastrow = data[0,-1] + data[1,-1] + data[2,-1] + data[3,-1]
expectedsumcolumn = [54.0, 54.0, 54.0, 47.0]
expectedsumlastrow = 10.0
if sumcolumn == expectedsumcolumn and sumlastrow == expectedsumlastrow:
testresult = 1
else:
testresult = 0
return testresult, functionname;
testresult, functionname = TEST()
checktestresult(testresult, functionname)
return;
def TestFindPeakCentre():
from newmodule import FindPeakCentre, LoadData
def TEST():
functionname = FindPeakCentre.__name__
filename, datacolumnnumbers, intensityindex, kxkykzindex = testdatavariables()
data = LoadData(filename, datacolumnnumbers)
peakcentre = FindPeakCentre(data, intensityindex, kxkykzindex)
expectedpeakcentre = [2.0,2.0,2.0]
if peakcentre == expectedpeakcentre:
testresult = 1
else:
testresult = 0
return testresult, functionname;
testresult, functionname = TEST()
checktestresult(testresult, functionname)
return;
def TestFindOrthogonalLineout():
from newmodule import FindOrthogonalLineout, LoadData
def TEST():
functionname = FindOrthogonalLineout.__name__
filename, datacolumnnumbers, intensityindex, kxkykzindex = testdatavariables()
data = LoadData(filename, datacolumnnumbers)
point = [2,2,2]
directionindex = 0
orthogonallineout1 = FindOrthogonalLineout(data, intensityindex, kxkykzindex, directionindex, point)
directionindex = 1
orthogonallineout2 = FindOrthogonalLineout(data, intensityindex, kxkykzindex, directionindex, point)
directionindex = 2
orthogonallineout3 = FindOrthogonalLineout(data, intensityindex, kxkykzindex, directionindex, point)
expectedorthogonallineout1 = [[1, 2, 2, 2],[2, 2, 2, 3],[3, 2, 2, 2]]
expectedorthogonallineout2 = [[2, 1, 2, 2],[2, 2, 2, 3],[2, 3, 2, 2]]
expectedorthogonallineout3 = [[2, 2, 1, 2],[2, 2, 2, 3],[2, 2, 3, 2]]
if orthogonallineout1 == expectedorthogonallineout1 and orthogonallineout2 == expectedorthogonallineout2 and orthogonallineout3 == expectedorthogonallineout3:
testresult = 1
else:
testresult = 0
return testresult, functionname;
testresult, functionname = TEST()
checktestresult(testresult, functionname)
return;
def TestFindIntensityMinima1D():
from newmodule import FindIntensityMinima1D
def TEST():
functionname = FindIntensityMinima1D.__name__
lineout = [[2, 1, 2, 2],[2, 1.5, 2, 2.5],[2, 2, 2, 3],[2, 2.5, 2, 2.5],[2, 3, 2, 2]]
kxkykzindex = [0,1,2]
intensityindex = 3
minima = FindIntensityMinima1D(lineout, kxkykzindex, intensityindex)
expectedminima = [[2, 1, 2, 2],[2, 3, 2, 2]]
if minima == expectedminima:
testresult = 1
else:
testresult = 0
return testresult, functionname;
testresult, functionname = TEST()
checktestresult(testresult, functionname)
return
def TestBuildIntensityVolume():
############ Work in progress. #############
from newmodule import BuildIntensityVolume
def TEST():
functionname = BuildIntensityVolume.__name__
filename, datacolumnnumbers, intensityindex, kxkykzindex = testdatavariables()
intensityvolumecoefficients = 0
points = [ [[1, 2, 2, 2],[3, 2, 2, 2]], [[2, 1, 2, 2],[2, 3, 2, 2]], [[2, 2, 1, 2],[2, 2, 3, 2]] ]
BuildIntensityVolume(points, kxkykzindex, intensityindex)
expectedintensityvolumecoefficients = [[0,2],[0,2],[0,2]]
if intensityvolumecoefficients == expectedintensityvolumecoefficients:
testresult = 1
else:
testresult = 0
return testresult, functionname;
testresult, functionname = TEST()
checktestresult(testresult, functionname)
return
def TestGetCompressedGruneisenParameterModel1():
from newmodule import GetCompressedGruneisenParameterModel1
def TEST():
functionname = GetCompressedGruneisenParameterModel1.__name__
filename, datacolumnnumbers, intensityindex, kxkykzindex = testdatavariables()
uncompresedgruneisenparameter = 1.75
volumetriccompressionratio = 0.89
compressedgruneisenparameter = GetCompressedGruneisenParameterModel1(uncompresedgruneisenparameter, volumetriccompressionratio)
expectedcompressedgruneisenparameter = 1.5575
if compressedgruneisenparameter == expectedcompressedgruneisenparameter:
testresult = 1
else:
testresult = 0
return testresult, functionname;
testresult, functionname = TEST()
checktestresult(testresult, functionname)
return
def TestGetDebyeTemperatureFromGruneisenParameter():
############ Work in progress. #############
from newmodule import GetDebyeTemperatureFromGruneisenParameter
def TEST():
functionname = GetDebyeTemperatureFromGruneisenParameter.__name__
filename, datacolumnnumbers, intensityindex, kxkykzindex = testdatavariables()
debyetemperature = GetDebyeTemperatureFromGruneisenParameter()
expecteddebyetemperature = 300
if debyetemperature == expecteddebyetemperature:
testresult = 1
else:
testresult = 0
return testresult, functionname;
testresult, functionname = TEST()
checktestresult(testresult, functionname)
return
<file_sep>/test/build_datafile_structure.py
def run(pos):
import units as un
import logging as log
log.info("Brick %s started.\n", __name__)
peak_str = un.build_datafile_structure(pos)
log.info("Brick %s finished.\n", __name__)
return peak_str
<file_sep>/development/inpkfd.py
path = "path_2_dynamic_peakfinding" # Choose from: "path_1_static_peakfinding", "path_2_dynamic_peakfinding", "path_3_rotated_dynamic_peakfinding"
run_soh = True
make_final_peak_plots = True
make_plots_peak_centre_fit = True
num_cores = 8
soh_command = "mpiexec -np " + str(num_cores) + " sonOfHoward "
N_atoms = [5, 5, 5]
# Input for "select_peak_positions"
crystal_type = 'FCC' # Choose between 'FCC' and 'BCC'
gsqr_max = 11
negative_k = False
remove_000 = False
# Input for "find_compression_ratio"
uncompressed_peak_positions = [[2.0, 0.0, 0.0], [0.0, 2.0, 0.0], [0.0, 0.0, 2.0]] #[[1.4142135623730951, 0.81649658092772592, 1.1547005383792515], [-1.4142135623730949, 0.81649658092772603, 1.1547005383792517], [0.0, -1.6329931618554521, 1.1547005383792515]]
compression_ratio_undershoot = 0.9
compression_ratio_overshoot = 1.1
lineout_k_steps = 1e3 + 1
# Input for "fit_to_peak_centres"
k_steps_find_centre_1DFT = 1001
k_steps_find_centre_3DFT = 3
# Input for "fit_to_peak_edges"
peak_edge_undershoot = [1.0/N_atoms[0], 1.0/N_atoms[1], 1.0/N_atoms[2]]
peak_edge_overshoot = [1.0/N_atoms[0], 1.0/N_atoms[1], 1.0/N_atoms[2]]
peak_edge_k_steps = 1e3 + 1
# Input for "use_soh_for_3DFT"
source_name = "uncompressed_450K_1000.atom"
timestep = "1000" # Only used in moving soh output files.
mass = 63.546 # In amu
a_lattice = 3.6288 # In Angstroms
k_steps = 11
# Input for use by "calc_debye_waller"
uncompressed_debye_temperature = 311.0
temperature = 450.0
# Input for calc_md_temperature. Simulations must have "units metal" for this calculation to work properly.
calc_md_temperature_from_dump_file = True
calculated_temperature_dimensionality = 3 # Enter "3" for 3D temperature, and "2" for temperature calculated from vx
# and vy only.
velocity_columns = [5, 6, 7] # The columns in the lammps dump file that contain vx, vy, vz, respectively. The first
# column is column number 0.
number_velocity_bins = 10 # The number of bins used to make the histogram of atom velocity.
# These Cu model values are from <NAME> et. al. PHYSICAL REVIEW B 78, 014109 (2008)
single_term_model_gamma_0_values = [1.98, 1.93, 2.008]
single_term_model_exponent_values = [1.0, 1.085, 1.33]
triple_term_model_gamma_0_values = [2.04]
triple_term_model_constants = [[-3.296, 10.493, -19.264]]
# These terms are for a polynomial fit of the Debye temperature under compression, derived from MD. Note that these
# models are absolute values of theta_D; they do not calculate theta_D / theta_0 and so the input parameter
# 'uncompressed_debye_temperature' is not used in these models.
polynomial_coeff = [-940.77071405, 4409.41804026, -6459.1848251, 3300.71717034] # The list starts at the highest degree term in the polynomial.
# Cu MD model -> [-940.77071405, 4409.41804026, -6459.1848251, 3300.71717034]
# Nb MD model -> [-769547.13166358, 3387086.89639316, -5920835.76544171, 5134767.61272078, -2208100.44517431, 376887.88877453]<file_sep>/development/overstep_peak_edges.py
def run(current_pos_est, undershoot, overshoot):
import units as un
import copy
print "Calculating over-stepped k_start and k_stop for each peak..."
k_start_overstepped = copy.deepcopy(current_pos_est)
k_stop_overstepped = copy.deepcopy(current_pos_est)
for i, pos in enumerate(current_pos_est):
k_start_overstepped[i], k_stop_overstepped[i] = un.calc_overstepped_k_start_k_stop(pos, undershoot, overshoot)
return k_start_overstepped, k_stop_overstepped
<file_sep>/test/units.py
import logging as log
def get_time():
import time
localtime = time.localtime()
t0 = time.time()
return t0, localtime
def build_all_k_values(gsqr_max, negative_k):
    # Builds all possible combinations of integers, up to the k-value given by sqrt(gsqr_max), rounded up. It only includes combinations that give gsqr <= gsqr_max. Bool negative_k can be used to include/exclude negative k-values.
import numpy as np
import math
k_max = int(math.ceil(np.sqrt(gsqr_max)))
if negative_k == True:
k_values = range(-k_max, k_max + 1)
elif negative_k == False:
k_values = range(k_max + 1)
else:
print "Incorrect entry:\nnegative_k must be of bool type.\n"
exit()
pos = []
for i in k_values:
for j in k_values:
for k in k_values:
if i**2 + j**2 + k**2 <= gsqr_max:
pos.append([i, j, k])
else:
pass
log.debug(pos)
return pos
def remove_fcc_forbidden_reflections(old_pos):
    # Removes pos_est values that are forbidden in fcc crystals. Diffraction is allowed at positions where the k-values are all even or all odd, e.g. [1, 1, 1] and [2, 0, 0] are allowed while [1, 0, 0] is forbidden.
new_pos = []
for i in range(len(old_pos)):
if old_pos[i][0] % 2 == 1 and old_pos[i][1] % 2 == 1 and old_pos[i][2] % 2 == 1:
new_pos.append(old_pos[i])
elif old_pos[i][0] % 2 == 0 and old_pos[i][1] % 2 == 0 and old_pos[i][2] % 2 == 0:
new_pos.append(old_pos[i])
else:
pass
log.debug(new_pos)
return new_pos
def remove_000(old_pos):
# Removes [0, 0, 0] from pos.
new_pos = []
for i in old_pos:
if i != [0, 0, 0]:
new_pos.append(i)
log.debug(new_pos)
return new_pos
def get_gsqr_values(pos):
# Calculates the value of G^2 for each position in pos.
gsqr = []
for i in pos:
current_gsqr = (i[0] ** 2) + (i[1] ** 2) + (i[2] ** 2)
gsqr.append(current_gsqr)
log.debug(gsqr)
return gsqr
def build_datafile_structure(pos):
import os
peak_str = []
for i in pos:
current_peak_str = str(i[0]) + str(i[1]) + str(i[2])
peak_str.append(current_peak_str)
if not os.path.exists("data/" + current_peak_str):
os.makedirs("data/" + current_peak_str)
log.debug(peak_str)
return peak_str
def calc_k_offset_with_N_atoms(N_atoms):
offset = [1.0/N_atoms[0], 1.0/N_atoms[1], 1.0/N_atoms[2]]
return offset
def convert_to_per_angstrom(element, a_lattice):
import numpy as np
element = np.asarray(element)
converted_element = element * ( a_lattice / (2 *np.pi) )
converted_element = list(converted_element)
return converted_element
def make_peak_str(i):
peak_str = str(i[0]) + str(i[1]) + str(i[2])
return peak_str
def find_k_start(pos_element, offset):
k_start = [pos_element[0] - offset[0], pos_element[1] - offset[1], pos_element[2] - offset[2]]
return k_start
def find_k_stop(pos_element, offset):
k_stop = [pos_element[0] + offset[0], pos_element[1] + offset[1], pos_element[2] + offset[2]]
return k_stop
def determine_soh_input_file_location(peak_str):
import os
cwd = os.getcwd()
input_file_location = cwd + "/data/" + peak_str + "/" + peak_str + ".in"
return input_file_location
def write_soh_input_3DFT(source_location, file_destination, peak_str, mass, a_lattice, k_steps, k_start, k_stop):
string_to_write = "VERBOSE 0\nFILE_TYPE lammps-multi\nDATA_FILE " + str(source_location) + "\nAPPEND_FILE_NAME " + str(peak_str) + "\nPLOT_OUTPUT pdf\nCOORDS_SCALED\nSET_MASS " + str(mass) + "\nSET_A_CELL " + str(a_lattice) + "\nCALC_3D_FT\nSET_KX " + str(k_start[0]) + " " + str(k_stop[0]) + " " + str(k_steps) + "\nSET_KY " + str(k_start[1]) + " " + str(k_stop[1]) + " " + str(k_steps) + "\nSET_KZ " + str(k_start[2]) + " " + str(k_stop[2]) + " " + str(k_steps)
f= open(file_destination, "w")
f.write(string_to_write)
f.close()
log.debug("Unit finished, no internal output.")
return string_to_write
def run_soh(input_file_location):
import subprocess
shell_command = "sonOfHoward " + input_file_location
subprocess.call(shell_command, shell=True)
return
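
if __name__ == "__main__":
    # Illustrative usage sketch, not part of the original module: it chains the
    # peak-selection helpers defined above for a small, arbitrarily chosen
    # gsqr_max so their combined behaviour can be inspected by eye.  Running
    # this file directly only prints; it does not create any directories.
    demo_pos = build_all_k_values(9, False)
    demo_pos = remove_fcc_forbidden_reflections(demo_pos)
    demo_pos = remove_000(demo_pos)
    print "Allowed fcc reflections with gsqr <= 9:"
    print demo_pos
    print "Corresponding gsqr values:"
    print get_gsqr_values(demo_pos)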
<file_sep>/src/use_soh_for_3DFT.py
def run(pos_est, source_name, timestep, mass, a_lattice, N_atoms, k_steps, run_soh, num_cores):
import units as un
import logging as log
log.debug("Brick %s started.\n", __name__)
print "Performing 3DFT of each peak..."
offset = un.calc_k_offset_with_N_atoms(N_atoms)
for i in pos_est:
k_start = un.find_simple_k_start(i, offset)
k_stop = un.find_simple_k_stop(i, offset)
peak_str = un.make_peak_str(i)
input_file_location = un.determine_accurate_soh_input_file_location(peak_str)
un.write_soh_input_3DFT(source_name, input_file_location, peak_str, mass, a_lattice, k_steps, k_start, k_stop)
if run_soh is True:
for i in pos_est:
peak_str = un.make_peak_str(i)
input_file_location = un.determine_accurate_soh_input_file_location(peak_str)
un.run_soh(input_file_location, num_cores)
un.move_soh_accurate_output_to_peak_folder(peak_str, source_name, timestep)
log.debug("Brick %s finished.\n", __name__)
return
<file_sep>/test/inpkfd.py
path = "path_1_stat" # Choose: "path_1_stat"
run_soh = True
# Input for "select_peak_positions"
gsqr_max = 4
negative_k = True
remove_000 = True
# Input for "use_soh_for_3DFT"
source_location = "./lammps/uncompressed_300K.atom"
mass = 63.546 # In amu
a_lattice = 3.621 # In Angstroms
k_steps = 3
N_atoms = [60,60,60]
|
2869b115d4d1c6d87d5a40f7657a4a29904b3e0e
|
[
"Python"
] | 51
|
Python
|
Pooleyo/peakfinder
|
5ec54a8aceda4babca65e56438d5417390d01669
|
6c3b683bcc75ef68a1f2db765b7da7858f5ebae7
|
refs/heads/main
|
<file_sep>using System;
namespace tiendita
{
public class Pantalon : Prenda
{
//atributos
private String tipo;
//geter y seter
public string Tipo { get => tipo; set => tipo = value; }
public Pantalon(string Calidad, int CantStock, double Precio, string tipo)
{
this.Calidad = Calidad;
this.CantStock = CantStock;
this.Precio = Precio;
this.tipo = tipo;
}
public override double PrecioFinal(int cant)
{
double precioF = base.PrecioFinal(cant);
if(Tipo == "chupin")
{
precioF = precioF + (precioF / 100 * 12);
}
return precioF;
}
public override string ImprimirPr()
{
return base.ImprimirPr()+
"Pantalon Tipo: "+ Tipo+
"------------------------------------";
}
}
}
<file_sep>using System;
namespace tiendita
{
public class Prenda
{
//atributos
private String calidad;
private int cantStock;
private double precio;
protected string Calidad { get => calidad; set => calidad = value; }
protected int CantStock { get => cantStock; set => cantStock = value; }
protected double Precio { get => precio; set => precio = value; }
public Prenda()
{
}
public virtual String ImprimirPr()
{
return "-----------------------"+
"Calidad: "+ Calidad +
"Precio: " + Precio +
"Unidad Disponibles: " + CantStock;
}
public virtual double PrecioFinal(int cant)
{
double precioF = Precio * cant;
if(Calidad == "premium")
{
precioF = precioF + (precioF / 100 * 30);
}
return precioF;
}
}
}
<file_sep>using System;
using System.Collections.Generic;
namespace tiendita
{
public class TiendadeRopa
{
//atributos
private String nombre, direccion;
private List<Vendedor> vendedores;
private List<Prenda> prendas;
public string Nombre { get => nombre; set => nombre = value; }
public string Direccion { get => direccion; set => direccion = value; }
public List<Vendedor> Vendedores { get => vendedores; set => vendedores = value; }
public List<Prenda> Prendas { get => prendas; set => prendas = value; }
//Constructores
public TiendadeRopa(string nombre, string direccion, List<Vendedor> vendedores, List<Prenda> prendas)
{
this.nombre = nombre;
this.direccion = direccion;
this.vendedores = vendedores;
this.prendas = prendas;
}
public String nmb()
{
return nombre;
}
}
}
<file_sep>using System;
namespace tiendita
{
public class Camisa : Prenda
{
//atributos
private String manga;
private String cuello;
//constructores
public Camisa(string Calidad, int CantStock, double Precio, string manga, string cuello)
{
this.Calidad = Calidad;
this.CantStock = CantStock;
this.Precio = Precio;
this.Manga = manga;
this.Cuello = cuello;
}
public string Manga { get => manga; set => manga = value; }
public string Cuello { get => cuello; set => cuello = value; }
public override double PrecioFinal(int cant)
{
double precioF = base.PrecioFinal(cant);
if(Manga == "corta")
{
precioF = precioF - (precioF / 100 * 10);
}
if(Cuello == "mao")
{
precioF = precioF + (precioF / 100 * 30);
}
return precioF;
}
public override string ImprimirPr()
{
return base.ImprimirPr() +
"Camisa Manga: " + Manga +
"Cuello: " + Cuello +
"--------------------------------------";
}
}
}
<file_sep>using System;
using Gtk;
namespace tiendita
{
public class Controlador
{
//atributos
public RadioButton rb1, rb2;
public CheckButton cb1, cb2, cb3, cb4;
public Entry e1, e2;
public TiendadeRopa t1;
//constructor
public Controlador()
{
}
public Controlador(RadioButton rb1, RadioButton rb2, CheckButton cb1, CheckButton cb2, CheckButton cb3, CheckButton cb4, Entry e1, Entry e2)
{
this.rb1 = rb1;
this.rb2 = rb2;
this.cb1 = cb1;
this.cb2 = cb2;
this.cb3 = cb3;
this.cb4 = cb4;
this.e1 = e1;
this.e2 = e2;
}
Vendedor v1 = new Vendedor("Pedro", "Picapiedra", 101);
//metodos
public void Total()
{
Camisa c1 = new Camisa("normal", 200, 500, "corta", "mao");
Camisa c2 = new Camisa("premium", 200, 500, "corta", "mao");
Camisa c3 = new Camisa("normal", 300, 500, "corta", "normal");
Camisa c4 = new Camisa("premium", 300, 500, "corta", "normal");
Camisa c5 = new Camisa("normal", 150, 500, "larga", "mao");
Camisa c6 = new Camisa("premium", 150, 500, "larga", "mao");
Camisa c7 = new Camisa("normal", 350, 500, "larga", "normal");
Camisa c8 = new Camisa("premium", 350, 500, "larga", "normal");
Pantalon p1 = new Pantalon("normal", 1500, 750, "chupin");
Pantalon p2 = new Pantalon("premium", 1500, 750, "chupin");
Pantalon p3 = new Pantalon("normal", 500, 750, "normal");
Pantalon p4 = new Pantalon("premium", 500, 750, "normal");
int dato1;
String dato2;
double dato3;
            if ((rb1.Active == true) && (cb1.Active == true) && (cb2.Active == true) && (cb4.Active == false))
{
dato1 = Int32.Parse(e1.Text);
dato3 = c1.PrecioFinal(dato1);
dato2 = dato3.ToString();
e2.Text = "$"+dato2;
v1.GenerarCotizacion(c1, dato1, dato3);
}
if ((rb1.Active == true) && (cb1.Active == true) && (cb2.Active == true)&&(cb4.Active == true))
{
dato1 = Int32.Parse(e1.Text);
dato3 = c2.PrecioFinal(dato1);
dato2 = dato3.ToString();
e2.Text = "$" + dato2;
                v1.GenerarCotizacion(c2, dato1, dato3);
}
if ((rb1.Active == true) && (cb1.Active == true) && (cb2.Active == false)&& (cb4.Active == false))
{
dato1 = Int32.Parse(e1.Text);
dato3 = c3.PrecioFinal(dato1);
dato2 = dato3.ToString();
e2.Text = "$" + dato2;
                v1.GenerarCotizacion(c3, dato1, dato3);
}
if ((rb1.Active == true) && (cb1.Active == true) && (cb2.Active == false) && (cb4.Active == true))
{
dato1 = Int32.Parse(e1.Text);
dato3 = c4.PrecioFinal(dato1);
dato2 = dato3.ToString();
e2.Text = "$" + dato2;
                v1.GenerarCotizacion(c4, dato1, dato3);
}
if ((rb1.Active == true) && (cb1.Active == false) && (cb2.Active == true) && (cb4.Active == false))
{
dato1 = Int32.Parse(e1.Text);
dato3 = c5.PrecioFinal(dato1);
dato2 = dato3.ToString();
e2.Text = "$" + dato2;
                v1.GenerarCotizacion(c5, dato1, dato3);
}
if ((rb1.Active == true) && (cb1.Active == false) && (cb2.Active == true) && (cb4.Active == true))
{
dato1 = Int32.Parse(e1.Text);
dato3 = c6.PrecioFinal(dato1);
dato2 = dato3.ToString();
e2.Text = "$" + dato2;
                v1.GenerarCotizacion(c6, dato1, dato3);
}
if ((rb1.Active == true) && (cb1.Active == false) && (cb2.Active == false) && (cb4.Active == false))
{
dato1 = Int32.Parse(e1.Text);
dato3 = c7.PrecioFinal(dato1);
dato2 = dato3.ToString();
e2.Text = "$" + dato2;
                v1.GenerarCotizacion(c7, dato1, dato3);
}
if ((rb1.Active == true) && (cb1.Active == false) && (cb2.Active == false) && (cb4.Active == true))
{
dato1 = Int32.Parse(e1.Text);
dato3 = c8.PrecioFinal(dato1);
dato2 = dato3.ToString();
e2.Text = "$" + dato2;
                v1.GenerarCotizacion(c8, dato1, dato3);
}
if ((rb2.Active == true) && (cb3.Active == true) && (cb4.Active == false))
{
dato1 = Int32.Parse(e1.Text);
dato3 = p1.PrecioFinal(dato1);
dato2 = dato3.ToString();
e2.Text = "$" + dato2;
                v1.GenerarCotizacion(p1, dato1, dato3);
}
if ((rb2.Active == true) && (cb3.Active == true) && (cb4.Active == true))
{
dato1 = Int32.Parse(e1.Text);
dato3 = p2.PrecioFinal(dato1);
dato2 = dato3.ToString();
e2.Text = "$" + dato2;
                v1.GenerarCotizacion(p2, dato1, dato3);
}
if ((rb2.Active == true) && (cb3.Active == false) && (cb4.Active == false))
{
dato1 = Int32.Parse(e1.Text);
dato3 = p3.PrecioFinal(dato1);
dato2 = dato3.ToString();
e2.Text = "$" + dato2;
                v1.GenerarCotizacion(p3, dato1, dato3);
}
if ((rb2.Active == true) && (cb3.Active == false) && (cb4.Active == true))
{
dato1 = Int32.Parse(e1.Text);
                dato3 = p4.PrecioFinal(dato1);
dato2 = dato3.ToString();
e2.Text = "$" + dato2;
                v1.GenerarCotizacion(p4, dato1, dato3);
}
}
public String CC()
{
return v1.ImprimirHistorial();
}
}
}
<file_sep>using System;
namespace tiendita
{
public class Cotizacion
{
//atributos
private int codigoCotizacion;
private DateTime fechaHora;
private int codigoVendedor;
private Prenda prendaCotizada;
private int unidadesCotizadas;
private double resultadoCotizacion;
//geters y seters
public int CodigoCotizacion { get => codigoCotizacion; set => codigoCotizacion = value; }
public DateTime FechaHora { get => fechaHora; set => fechaHora = value; }
public int CodigoVendedor { get => codigoVendedor; set => codigoVendedor = value; }
public Prenda PrendaCotizada { get => prendaCotizada; set => prendaCotizada = value; }
public int UnidadesCotizadas { get => unidadesCotizadas; set => unidadesCotizadas = value; }
public double ResultadoCotizacion { get => resultadoCotizacion; set => resultadoCotizacion = value; }
//constructor
public Cotizacion(int codigoCotizacion, DateTime fechaHora, int codigoVendedor, Prenda prendaCotizada, int unidadesCotizadas, double resultadoCotizacion)
{
this.codigoCotizacion = codigoCotizacion;
this.fechaHora = fechaHora;
this.codigoVendedor = codigoVendedor;
this.prendaCotizada = prendaCotizada;
this.unidadesCotizadas = unidadesCotizadas;
this.resultadoCotizacion = resultadoCotizacion;
}
public String ImprimirC()
{
return "----------------------" +
"Cod. Cotizacion :" + codigoCotizacion +
"Fecha y Hora: " + FechaHora +
"Cod. Vendedor: " + codigoVendedor +
PrendaCotizada.ImprimirPr() +
"Unidades Pedidas: " + UnidadesCotizadas +
"Precio Final: " + ResultadoCotizacion +
"-------------------------";
}
}
}
<file_sep># Exmaen
Does C# allow multiple inheritance? - No, it does not; for cases where we need something similar to multiple inheritance we have to use interfaces.
When would you use an Abstract Class instead of an Interface? Give an example. - When we want to set up a structure for the rest of the classes while still being able to override (or not) the methods we are going to use; an interface, by contrast, does not let us provide the body of a method.
What does a Generalization relationship between two classes imply? - That one class inherits the attributes and methods of the other.
What does an Implementation (realization) relationship between a class and an interface imply? - That the class implements the interface.
What is the difference between Composition and Aggregation? - In Aggregation the parts can belong to different aggregates, while in Composition the parts only exist as part of the composite.
Mark T or F as appropriate. Difference between Association and Aggregation:
a. One difference is that the latter describes the relationship between a "whole" and its "parts", whereas in the former the objects are at the same contextual level. (FALSE)
b. One difference is that Aggregation has a 1-to-many cardinality while Association is 1-to-1. (TRUE)
c. One difference is that, in Aggregation, the life or existence of the related objects is strongly tied, meaning that if the container object "dies" the "parts" die as well, whereas in Association the objects live and exist independently of the relationship. (TRUE)
<file_sep>using System;
using System.Collections.Generic;
using Gtk;
using tiendita;
public partial class MainWindow : Gtk.Window
{
public MainWindow() : base(Gtk.WindowType.Toplevel)
{
Build();
Vendedor v1 = new Vendedor("Pedro", "Picapiedra", 101);
List<Vendedor> vendedores = new List<Vendedor>();
vendedores.Add(v1);
List<Prenda> prendas = new List<Prenda>();
Camisa c1 = new Camisa("normal", 200, 500, "corta", "mao");
Camisa c2 = new Camisa("premium", 200, 500, "corta", "mao");
Camisa c3 = new Camisa("normal", 300, 500, "corta", "normal");
Camisa c4 = new Camisa("premium", 300, 500, "corta", "normal");
Camisa c5 = new Camisa("normal", 150, 500, "larga", "mao");
Camisa c6 = new Camisa("premium", 150, 500, "larga", "mao");
Camisa c7 = new Camisa("normal", 350, 500, "larga", "normal");
Camisa c8 = new Camisa("premium", 350, 500, "larga", "normal");
Pantalon p1 = new Pantalon("normal", 1500, 750, "chupin");
Pantalon p2 = new Pantalon("premium", 1500, 750, "chupin");
Pantalon p3 = new Pantalon("normal", 500, 750, "normal");
Pantalon p4 = new Pantalon("premium", 500, 750, "normal");
prendas.Add(c1);
prendas.Add(c2);
prendas.Add(c3);
prendas.Add(c4);
prendas.Add(c5);
prendas.Add(c6);
prendas.Add(c7);
prendas.Add(c8);
prendas.Add(p1);
prendas.Add(p2);
prendas.Add(p3);
prendas.Add(p4);
TiendadeRopa t1 = new TiendadeRopa("Topper", "San Martin 503", vendedores, prendas);
label2.Text = t1.nmb();
label3.Text = t1.Direccion;
label4.Text = v1.Nombre + " " + v1.Apellido + " Cod:";
}
protected void OnDeleteEvent(object sender, DeleteEventArgs a)
{
Application.Quit();
a.RetVal = true;
}
Controlador con1 = new Controlador();
protected void Click(object sender, EventArgs e)
{
con1 = new Controlador(radiobutton1, radiobutton2, checkbutton1, checkbutton2, checkbutton3, checkbutton4, entry1, entry2);
con1.Total();
}
protected void click2(object sender, EventArgs e)
{
MessageDialog md = new MessageDialog(null, DialogFlags.Modal, MessageType.Info, ButtonsType.Ok, con1.CC());
md.Run();
md.Destroy();
}
}
<file_sep>
// This file has been generated by the GUI designer. Do not modify.
public partial class MainWindow
{
private global::Gtk.VBox vbox1;
private global::Gtk.Fixed fixed1;
private global::Gtk.Label label1;
private global::Gtk.Label label2;
private global::Gtk.Label label3;
private global::Gtk.Label label4;
private global::Gtk.Button button1;
private global::Gtk.HSeparator hseparator1;
private global::Gtk.HSeparator hseparator2;
private global::Gtk.Fixed fixed2;
private global::Gtk.Label label5;
private global::Gtk.RadioButton radiobutton1;
private global::Gtk.CheckButton checkbutton1;
private global::Gtk.CheckButton checkbutton2;
private global::Gtk.CheckButton checkbutton3;
private global::Gtk.RadioButton radiobutton2;
private global::Gtk.CheckButton checkbutton4;
private global::Gtk.Entry entry1;
private global::Gtk.Label label6;
private global::Gtk.HSeparator hseparator3;
private global::Gtk.Fixed fixed3;
private global::Gtk.Button button2;
private global::Gtk.Entry entry2;
protected virtual void Build()
{
global::Stetic.Gui.Initialize(this);
// Widget MainWindow
this.Name = "MainWindow";
this.Title = global::Mono.Unix.Catalog.GetString("MainWindow");
this.WindowPosition = ((global::Gtk.WindowPosition)(4));
// Container child MainWindow.Gtk.Container+ContainerChild
this.vbox1 = new global::Gtk.VBox();
this.vbox1.Name = "vbox1";
this.vbox1.Spacing = 6;
// Container child vbox1.Gtk.Box+BoxChild
this.fixed1 = new global::Gtk.Fixed();
this.fixed1.WidthRequest = 22;
this.fixed1.HeightRequest = 88;
this.fixed1.Name = "fixed1";
this.fixed1.HasWindow = false;
// Container child fixed1.Gtk.Fixed+FixedChild
this.label1 = new global::Gtk.Label();
this.label1.Name = "label1";
this.label1.LabelProp = global::Mono.Unix.Catalog.GetString("Cotizador Express");
this.fixed1.Add(this.label1);
global::Gtk.Fixed.FixedChild w1 = ((global::Gtk.Fixed.FixedChild)(this.fixed1[this.label1]));
w1.X = 177;
w1.Y = 3;
// Container child fixed1.Gtk.Fixed+FixedChild
this.label2 = new global::Gtk.Label();
this.label2.Name = "label2";
this.label2.LabelProp = global::Mono.Unix.Catalog.GetString("Tienda:");
this.fixed1.Add(this.label2);
global::Gtk.Fixed.FixedChild w2 = ((global::Gtk.Fixed.FixedChild)(this.fixed1[this.label2]));
w2.X = 3;
w2.Y = 20;
// Container child fixed1.Gtk.Fixed+FixedChild
this.label3 = new global::Gtk.Label();
this.label3.Name = "label3";
this.label3.LabelProp = global::Mono.Unix.Catalog.GetString("Direccion:");
this.fixed1.Add(this.label3);
global::Gtk.Fixed.FixedChild w3 = ((global::Gtk.Fixed.FixedChild)(this.fixed1[this.label3]));
w3.X = 313;
w3.Y = 19;
// Container child fixed1.Gtk.Fixed+FixedChild
this.label4 = new global::Gtk.Label();
this.label4.Name = "label4";
this.label4.LabelProp = global::Mono.Unix.Catalog.GetString("Vendedor - Codigo");
this.fixed1.Add(this.label4);
global::Gtk.Fixed.FixedChild w4 = ((global::Gtk.Fixed.FixedChild)(this.fixed1[this.label4]));
w4.X = 4;
w4.Y = 48;
// Container child fixed1.Gtk.Fixed+FixedChild
this.button1 = new global::Gtk.Button();
this.button1.CanFocus = true;
this.button1.Name = "button1";
this.button1.UseUnderline = true;
this.button1.Label = global::Mono.Unix.Catalog.GetString("Historial");
this.fixed1.Add(this.button1);
global::Gtk.Fixed.FixedChild w5 = ((global::Gtk.Fixed.FixedChild)(this.fixed1[this.button1]));
w5.X = 313;
w5.Y = 42;
// Container child fixed1.Gtk.Fixed+FixedChild
this.hseparator1 = new global::Gtk.HSeparator();
this.hseparator1.Name = "hseparator1";
this.fixed1.Add(this.hseparator1);
global::Gtk.Fixed.FixedChild w6 = ((global::Gtk.Fixed.FixedChild)(this.fixed1[this.hseparator1]));
w6.X = -23;
w6.Y = 79;
// Container child fixed1.Gtk.Fixed+FixedChild
this.hseparator2 = new global::Gtk.HSeparator();
this.hseparator2.WidthRequest = 450;
this.hseparator2.HeightRequest = 6;
this.hseparator2.Name = "hseparator2";
this.fixed1.Add(this.hseparator2);
global::Gtk.Fixed.FixedChild w7 = ((global::Gtk.Fixed.FixedChild)(this.fixed1[this.hseparator2]));
w7.X = 8;
w7.Y = 81;
this.vbox1.Add(this.fixed1);
global::Gtk.Box.BoxChild w8 = ((global::Gtk.Box.BoxChild)(this.vbox1[this.fixed1]));
w8.Position = 0;
w8.Expand = false;
w8.Fill = false;
// Container child vbox1.Gtk.Box+BoxChild
this.fixed2 = new global::Gtk.Fixed();
this.fixed2.WidthRequest = 4;
this.fixed2.HeightRequest = 125;
this.fixed2.Name = "fixed2";
this.fixed2.HasWindow = false;
// Container child fixed2.Gtk.Fixed+FixedChild
this.label5 = new global::Gtk.Label();
this.label5.Name = "label5";
this.label5.LabelProp = global::Mono.Unix.Catalog.GetString("Prenda");
this.fixed2.Add(this.label5);
global::Gtk.Fixed.FixedChild w9 = ((global::Gtk.Fixed.FixedChild)(this.fixed2[this.label5]));
w9.X = 195;
w9.Y = 5;
// Container child fixed2.Gtk.Fixed+FixedChild
this.radiobutton1 = new global::Gtk.RadioButton(global::Mono.Unix.Catalog.GetString("Camisa"));
this.radiobutton1.CanFocus = true;
this.radiobutton1.Name = "radiobutton1";
this.radiobutton1.DrawIndicator = true;
this.radiobutton1.UseUnderline = true;
this.radiobutton1.Group = new global::GLib.SList(global::System.IntPtr.Zero);
this.fixed2.Add(this.radiobutton1);
global::Gtk.Fixed.FixedChild w10 = ((global::Gtk.Fixed.FixedChild)(this.fixed2[this.radiobutton1]));
w10.X = 13;
w10.Y = 23;
// Container child fixed2.Gtk.Fixed+FixedChild
this.checkbutton1 = new global::Gtk.CheckButton();
this.checkbutton1.CanFocus = true;
this.checkbutton1.Name = "checkbutton1";
this.checkbutton1.Label = global::Mono.Unix.Catalog.GetString("Manga Corta");
this.checkbutton1.DrawIndicator = true;
this.checkbutton1.UseUnderline = true;
this.fixed2.Add(this.checkbutton1);
global::Gtk.Fixed.FixedChild w11 = ((global::Gtk.Fixed.FixedChild)(this.fixed2[this.checkbutton1]));
w11.X = 139;
w11.Y = 25;
// Container child fixed2.Gtk.Fixed+FixedChild
this.checkbutton2 = new global::Gtk.CheckButton();
this.checkbutton2.CanFocus = true;
this.checkbutton2.Name = "checkbutton2";
this.checkbutton2.Label = global::Mono.Unix.Catalog.GetString("<NAME>");
this.checkbutton2.DrawIndicator = true;
this.checkbutton2.UseUnderline = true;
this.fixed2.Add(this.checkbutton2);
global::Gtk.Fixed.FixedChild w12 = ((global::Gtk.Fixed.FixedChild)(this.fixed2[this.checkbutton2]));
w12.X = 275;
w12.Y = 26;
// Container child fixed2.Gtk.Fixed+FixedChild
this.checkbutton3 = new global::Gtk.CheckButton();
this.checkbutton3.CanFocus = true;
this.checkbutton3.Name = "checkbutton3";
this.checkbutton3.Label = global::Mono.Unix.Catalog.GetString("Chupin");
this.checkbutton3.DrawIndicator = true;
this.checkbutton3.UseUnderline = true;
this.fixed2.Add(this.checkbutton3);
global::Gtk.Fixed.FixedChild w13 = ((global::Gtk.Fixed.FixedChild)(this.fixed2[this.checkbutton3]));
w13.X = 138;
w13.Y = 55;
// Container child fixed2.Gtk.Fixed+FixedChild
this.radiobutton2 = new global::Gtk.RadioButton(global::Mono.Unix.Catalog.GetString("Pantalon"));
this.radiobutton2.CanFocus = true;
this.radiobutton2.Name = "radiobutton2";
this.radiobutton2.DrawIndicator = true;
this.radiobutton2.UseUnderline = true;
this.radiobutton2.Group = this.radiobutton1.Group;
this.fixed2.Add(this.radiobutton2);
global::Gtk.Fixed.FixedChild w14 = ((global::Gtk.Fixed.FixedChild)(this.fixed2[this.radiobutton2]));
w14.X = 10;
w14.Y = 54;
// Container child fixed2.Gtk.Fixed+FixedChild
this.checkbutton4 = new global::Gtk.CheckButton();
this.checkbutton4.CanFocus = true;
this.checkbutton4.Name = "checkbutton4";
this.checkbutton4.Label = global::Mono.Unix.Catalog.GetString("Premium");
this.checkbutton4.DrawIndicator = true;
this.checkbutton4.UseUnderline = true;
this.fixed2.Add(this.checkbutton4);
global::Gtk.Fixed.FixedChild w15 = ((global::Gtk.Fixed.FixedChild)(this.fixed2[this.checkbutton4]));
w15.X = 68;
w15.Y = 86;
// Container child fixed2.Gtk.Fixed+FixedChild
this.entry1 = new global::Gtk.Entry();
this.entry1.WidthRequest = 100;
this.entry1.CanFocus = true;
this.entry1.Name = "entry1";
this.entry1.IsEditable = true;
this.entry1.InvisibleChar = '•';
this.fixed2.Add(this.entry1);
global::Gtk.Fixed.FixedChild w16 = ((global::Gtk.Fixed.FixedChild)(this.fixed2[this.entry1]));
w16.X = 323;
w16.Y = 82;
// Container child fixed2.Gtk.Fixed+FixedChild
this.label6 = new global::Gtk.Label();
this.label6.Name = "label6";
this.label6.LabelProp = global::Mono.Unix.Catalog.GetString("Cant. :");
this.fixed2.Add(this.label6);
global::Gtk.Fixed.FixedChild w17 = ((global::Gtk.Fixed.FixedChild)(this.fixed2[this.label6]));
w17.X = 280;
w17.Y = 85;
// Container child fixed2.Gtk.Fixed+FixedChild
this.hseparator3 = new global::Gtk.HSeparator();
this.hseparator3.WidthRequest = 461;
this.hseparator3.HeightRequest = 6;
this.hseparator3.Name = "hseparator3";
this.fixed2.Add(this.hseparator3);
global::Gtk.Fixed.FixedChild w18 = ((global::Gtk.Fixed.FixedChild)(this.fixed2[this.hseparator3]));
w18.X = 12;
w18.Y = 113;
this.vbox1.Add(this.fixed2);
global::Gtk.Box.BoxChild w19 = ((global::Gtk.Box.BoxChild)(this.vbox1[this.fixed2]));
w19.Position = 1;
w19.Expand = false;
w19.Fill = false;
// Container child vbox1.Gtk.Box+BoxChild
this.fixed3 = new global::Gtk.Fixed();
this.fixed3.WidthRequest = 19;
this.fixed3.HeightRequest = 78;
this.fixed3.Name = "fixed3";
this.fixed3.HasWindow = false;
// Container child fixed3.Gtk.Fixed+FixedChild
this.button2 = new global::Gtk.Button();
this.button2.WidthRequest = 140;
this.button2.HeightRequest = 50;
this.button2.CanFocus = true;
this.button2.Name = "button2";
this.button2.UseUnderline = true;
this.button2.Label = global::Mono.Unix.Catalog.GetString("Cotizar");
this.fixed3.Add(this.button2);
global::Gtk.Fixed.FixedChild w20 = ((global::Gtk.Fixed.FixedChild)(this.fixed3[this.button2]));
w20.X = 29;
w20.Y = 8;
// Container child fixed3.Gtk.Fixed+FixedChild
this.entry2 = new global::Gtk.Entry();
this.entry2.CanFocus = true;
this.entry2.Name = "entry2";
this.entry2.Text = global::Mono.Unix.Catalog.GetString("Resultado.");
this.entry2.IsEditable = true;
this.entry2.InvisibleChar = '•';
this.fixed3.Add(this.entry2);
global::Gtk.Fixed.FixedChild w21 = ((global::Gtk.Fixed.FixedChild)(this.fixed3[this.entry2]));
w21.X = 192;
w21.Y = 18;
this.vbox1.Add(this.fixed3);
global::Gtk.Box.BoxChild w22 = ((global::Gtk.Box.BoxChild)(this.vbox1[this.fixed3]));
w22.Position = 2;
w22.Expand = false;
w22.Fill = false;
this.Add(this.vbox1);
if ((this.Child != null))
{
this.Child.ShowAll();
}
this.DefaultWidth = 437;
this.DefaultHeight = 303;
this.Show();
this.DeleteEvent += new global::Gtk.DeleteEventHandler(this.OnDeleteEvent);
this.button1.Clicked += new global::System.EventHandler(this.click2);
this.button2.Clicked += new global::System.EventHandler(this.Click);
}
}
<file_sep>using System;
using System.Collections.Generic;
using System.Linq;
namespace tiendita
{
public class Vendedor
{
//atributos
private String nombre, apellido;
private int codigoVendedor;
private List<Cotizacion> cotizaciones;
//geters y seters
public string Nombre { get => nombre; set => nombre = value; }
public string Apellido { get => apellido; set => apellido = value; }
public int CodigoVendedor { get => codigoVendedor; set => codigoVendedor = value; }
public List<Cotizacion> Cotizaciones { get => cotizaciones; set => cotizaciones = value; }
//Constructor
public Vendedor(string nombre, string apellido, int codigoVendedor)
{
this.nombre = nombre;
this.apellido = apellido;
this.codigoVendedor = codigoVendedor;
}
//metodos
List<Cotizacion> coti = new List<Cotizacion>();
public void GenerarCotizacion(Prenda prendacotizada, int cant, double resultado)
{
//codigo
Random r1 = new Random();
int codigo = r1.Next(999, 9999);
DateTime fyh = DateTime.Now;
Cotizacion coti1 = new Cotizacion(codigo, fyh, codigoVendedor, prendacotizada, cant, resultado);
coti.Add(coti1);
}
public String ImprimirHistorial()
{
String cadena, cad1 = " ";
if (coti.Count()!= 0)
{
for (int i = 0; i < coti.Count(); i ++ )
{
cadena = coti[i].ImprimirC();
cad1 = cad1 + cadena;
                }
                return cad1;
            }
return "Todavia no se hacen cotizaciones.";
}
}
}
|
df5a9d38dcd833d53981e5053a8104db61e47ddc
|
[
"Markdown",
"C#"
] | 10
|
C#
|
alemago14/Exmaen
|
23e4a3ad4ab735b20969907287ccd76ad9b0f572
|
628c1546a1636739a7f2411c48349b1815398854
|
refs/heads/master
|
<file_sep>#include <iostream>
template <typename T>
struct Node {
Node<T> * next;
T val;
Node() : next(nullptr), val() {}
~Node() {
delete next;
}
};
template <typename T>
int size(Node<T> * cur) {
int sz = 0;
while (cur != nullptr) {
++sz;
cur = cur->next;
}
return sz;
}
template <typename T>
Node<T> * advance(Node<T> * x, int cnt) {
for (int i = 0; i < cnt; ++i) {
x = x->next;
}
return x;
}
template <typename T>
Node<T> * merge(Node<T> * l, Node<T> * r) {
Node<T> * cur, * res;
if (l->val < r->val) {
cur = l;
l = l->next;
} else {
cur = r;
r = r->next;
}
res = cur;
while (l != nullptr && r != nullptr) {
if (l->val < r->val) {
cur->next = l;
l = l->next;
} else {
cur->next = r;
r = r->next;
}
cur = cur->next;
}
if (l != nullptr) {
cur->next = l;
} else if (r != nullptr) {
cur->next = r;
}
return res;
}
template <typename T>
void mysort(Node<T> *& x) {
int sz = size(x);
if (sz < 2) {
return;
}
Node<T> * l, * r, * cur;
l = x;
cur = advance(x, sz / 2 - 1);
r = cur->next;
cur->next = nullptr;
mysort(l);
mysort(r);
x = merge(l, r);
}
int main() {
std::ios_base::sync_with_stdio(0);
std::cin.tie(0);
std::cout.tie(0);
int n;
std::cin >> n;
Node<int> * beg, * cur;
beg = cur = new Node<int>();
std::cin >> cur->val;
--n;
for (int i = 0; i < n; ++i) {
cur->next = new Node<int>();
std::cin >> cur->next->val;
cur = cur->next;
}
mysort(beg);
cur = beg;
while (cur != nullptr) {
std::cout << cur->val << ' ';
cur = cur->next;
}
std::cout << std::endl;
delete beg;
return 0;
}
<file_sep>#pragma once
#include <list>
#include <vector>
#include <cstddef>
#include <functional>
#include <stdexcept>
template<class KeyType, class ValueType, class Hash = std::hash<KeyType>>
class HashMap {
public:
using iterator = typename std::list<std::pair<const KeyType, ValueType>>::iterator;
using const_iterator = typename std::list<std::pair<const KeyType, ValueType>>::const_iterator;
HashMap(Hash h = Hash()) : elements_size(0), hasher(h), elements(), table(1, std::make_pair(elements.end(), 0)) {};
template<class Iterator>
HashMap(Iterator beginIt, Iterator endIt, Hash h = Hash()) : HashMap(h) {
size_t sz = std::distance(beginIt, endIt);
if (sz > 0) {
rebuild(2 * sz);
while (beginIt != endIt) {
insert(*beginIt);
++beginIt;
}
}
}
HashMap(const HashMap &b) : HashMap(b.begin(), b.end(), b.hash_function()) {}
    HashMap(HashMap &&b) : HashMap() {  // delegate to the default constructor so elements_size and table are initialised before the swaps
std::swap(elements_size, b.elements_size);
std::swap(hasher, b.hasher);
std::swap(elements, b.elements);
std::swap(table, b.table);
}
HashMap(typename std::initializer_list<std::pair<KeyType, ValueType>> l, Hash h = Hash()) : HashMap(l.begin(), l.end(), h) {};
HashMap &operator=(HashMap b) {
std::swap(elements_size, b.elements_size);
std::swap(hasher, b.hasher);
std::swap(elements, b.elements);
std::swap(table, b.table);
return *this;
}
size_t size() const {
return elements_size;
}
bool empty() const {
return elements.empty();
}
Hash hash_function() const {
return hasher;
}
std::pair<iterator, bool> insert(const std::pair<KeyType, ValueType> &p) {
const KeyType &key = p.first;
auto it = find(key);
if (it == elements.end()) {
if (2 * size() + 1 >= table.size()) {
rebuild(table.size() * 2);
}
auto hs = hasher(key) % table.size();
++table[hs].second;
++elements_size;
table[hs].first = elements.insert(table[hs].first, p);
return {table[hs].first, 1};
}
return {it, 0};
}
void erase(const KeyType &key) {
auto it = find(key);
if (it != elements.end()) {
auto hs = hasher(key) % table.size();
--table[hs].second;
--elements_size;
if (table[hs].first == it) {
if (table[hs].second == 0) {
table[hs].first = elements.end();
} else {
++table[hs].first;
}
}
elements.erase(it);
}
}
iterator begin() {
return elements.begin();
}
iterator end() {
return elements.end();
}
const_iterator begin() const {
return elements.cbegin();
}
const_iterator end() const {
return elements.cend();
}
iterator find(const KeyType &key) {
auto res = elements.end();
auto hs = hasher(key) % table.size();
auto it = table[hs].first;
for (size_t i = 0; i < table[hs].second; ++i, ++it) {
if (key == it->first) {
res = it;
break;
}
}
return res;
}
const_iterator find(const KeyType &key) const {
auto res = elements.end();
auto hs = hasher(key) % table.size();
auto it = table[hs].first;
for (size_t i = 0; i < table[hs].second; ++i, ++it) {
if (key == it->first) {
res = it;
break;
}
}
return res;
}
ValueType &operator[](const KeyType &key) {
auto it = find(key);
if (it == elements.end()) {
it = insert({key, ValueType()}).first;
}
return it->second;
}
const ValueType &at(const KeyType &key) const {
auto it = find(key);
if (it == elements.end()) {
throw std::out_of_range("Key not found");
}
return it->second;
}
void clear() {
elements.clear();
elements_size = 0;
table.assign(1, {elements.end(), 0});
}
private:
void rebuild(size_t sz) {
decltype(elements) old;
swap(old, elements);
elements_size = 0;
table.assign(sz, {elements.end(), 0});
while (!old.empty()) {
insert(old.front());
old.pop_front();
}
}
size_t elements_size;
Hash hasher;
std::list<std::pair<const KeyType, ValueType>> elements;
std::vector<std::pair<iterator, size_t>> table;
};
<file_sep># Code-Review-Tasks
Code review tasks from the long contests.
Manual sorting. Task 1 from the 1st long contest.
HashMap. Task 1 from the 2nd long contest.
Min Cost Max Flow. Task 1 from the 4th long contest.
<file_sep>#include <vector>
#include <queue>
#include <cassert>
#include <cstddef>
#include <iostream>
#include <iomanip>
#include <limits>
const int N = 300;
const int INF = std::numeric_limits<int>::max();
struct Edge {
int from, to, capacity, weight, flow;
int resCapacity() const {
return capacity - flow;
}
};
class Graph {
public:
Graph() : edges(), nx(), phi(), source(0), target(0) {}
Graph(int size) : edges(), nx(size), phi(size), source(0), target(0) {}
void clear() {
edges.clear();
nx.clear();
phi.clear();
source = 0;
target = 0;
}
void setSource(int val) {
source = val;
}
void setTarget(int val) {
target = val;
}
void resize(size_t size) {
clear();
nx.resize(size);
phi.resize(size);
}
void addEdge(int from, int to , int capacity, int weight, bool directed = true) {
nx[from].push_back(edges.size());
edges.push_back({from, to, capacity, weight, 0});
nx[to].push_back(edges.size());
edges.push_back({to, from, (directed? 0 : capacity), -weight, 0});
}
void calcPhi() {
phi.assign(phi.size(), INF);
phi[source] = 0;
bool changed;
int step = 0;
do {
changed = false;
for (const auto &i : edges) {
if (i.resCapacity() > 0 && phi[i.from] != INF && phi[i.from] + i.weight < phi[i.to]) {
changed = true;
phi[i.to] = phi[i.from] + i.weight;
}
}
++step;
} while (changed && step < 2 * phi.size());
assert(!changed);
}
int getPhiWeight(const Edge &e) const {
return e.weight + phi[e.from] - phi[e.to];
}
void recalcPhi() {
std::vector<int> newphi(phi.size(), INF);
newphi[source] = 0;
        std::priority_queue<std::pair<int, int>, std::vector<std::pair<int, int>>, std::greater<std::pair<int, int>>> qu;
qu.push({0, source});
while (!qu.empty()) {
auto cur = qu.top();
qu.pop();
if (cur.first <= newphi[cur.second]) {
newphi[cur.second] = cur.first;
for (int e : nx[cur.second]) {
int dist = cur.first + getPhiWeight(edges[e]);
if (edges[e].resCapacity() > 0 && dist < newphi[edges[e].to]) {
newphi[edges[e].to] = dist;
qu.push({dist, edges[e].to});
}
}
}
}
for (int i = 0; i < phi.size(); ++i) {
if (newphi[i] != INF) {
newphi[i] += phi[i];
}
}
swap(phi, newphi);
}
int currentFlow() const {
int flow = 0;
for (int i : nx[source]) {
flow += edges[i].flow;
}
return flow;
}
int currentCost() const {
int cost = 0;
for (const auto &i : edges) {
cost += i.flow * i.weight;
}
cost /= 2;
return cost;
}
void findMinCostMaxFlow(int k = INF) {
calcPhi();
std::vector<bool> usd(phi.size());
while (phi[target] != INF && k > 0) {
usd.assign(usd.size(), 0);
pushFlow(source, usd);
recalcPhi();
--k;
}
}
std::vector<std::vector<int>> getFlowDecomposition() {
int flow = currentFlow();
std::vector<std::vector<int>> res(flow);
for (int i = 0; i < flow; ++i) {
int v = source;
while (v != target) {
for (int j : nx[v]) {
if (edges[j].flow > 0) {
addFlow(j, -1);
v = edges[j].to;
res[i].push_back(j / 2);
break;
}
}
}
}
return res;
}
private:
void addFlow(int e, int flow) {
edges[e].flow += flow;
edges[e ^ 1].flow -= flow;
}
bool pushFlow(int v, std::vector<bool> &usd) {
if (v == target) return true;
usd[v] = true;
for (int i : nx[v]) {
if (!usd[edges[i].to] && edges[i].resCapacity() > 0 && phi[edges[i].to] == phi[v] + edges[i].weight) {
if (pushFlow(edges[i].to, usd)) {
addFlow(i, 1);
return true;
}
}
}
return false;
}
std::vector<Edge> edges;
std::vector<std::vector<size_t>> nx;
std::vector<int> phi;
int source, target;
};
int main(){
std::ios_base::sync_with_stdio(0);
std::cin.tie(0);
std::cout.tie(0);
std::cout << std::fixed << std::setprecision(10);
int n, m, k;
std::cin >> n >> m >> k;
Graph g(n);
g.setTarget(n - 1);
for (int i = 0; i < m; ++i) {
int a, b, c;
std::cin >> a >> b >> c;
--a; --b;
g.addEdge(a, b, 1, c);
g.addEdge(b, a, 1, c);
}
g.findMinCostMaxFlow(k);
if (g.currentFlow() != k) {
std::cout << "-1" << std::endl;
} else {
std::cout << 1.0 * g.currentCost() / k << std::endl;
auto d = g.getFlowDecomposition();
for (int i = 0; i < d.size(); ++i) {
std::cout << d[i].size() << ' ';
for (int j : d[i]) std::cout << j / 2 + 1 << ' ';
std::cout << '\n';
}
}
return 0;
}
|
5d7ffdd3545236ff201f38ddd199e9c6b6d404e2
|
[
"Markdown",
"C++"
] | 4
|
C++
|
DimasKovas/Code-Review-Tasks
|
50d9e410b9575d00e417897ef5b80567aafdb675
|
5c8daff687f619e525e84ed7464ce498427d7354
|
refs/heads/main
|
<file_sep>import Vue from 'vue'
import Router from 'vue-router'
import HelloWorld from '@/components/HelloWorld'
import message from '@/pages/message/message'
import my from '@/pages/my/my'
import personinfo from '@/pages/my/personinfo'
import edit_personInfo from '@/pages/my/edit_personInfo'
import schedule from '@/pages/schedule/schedule'
import studentsignrecord from '@/pages/sign/studentsignrecord'
import sign from '@/pages/sign/sign'
import messagemanagement from '@/pages/sign/messagemanagement'
import sutdengsignresult from '@/pages/sign/sutdengsignresult'
import studentsign from '@/pages/sign/studentsign'
import studentsigndetail from '@/pages/sign/studentsigndetail'
import signmanagement from '@/pages/sign/signmanagement'
import creatnewsign from '@/pages/sign/creatnewsign'
import selectLocation from '@/pages/sign/selectLocation'
import login from '@/pages/login/login'
import loginbak from '@/pages/login/loginbak'
import register from '@/pages/register/register'
import messageadd from '@/pages/message/messageadd'
import hallSeat from '@/pages/sign/hallSeat'
import addsign from '@/pages/sign/addsign'
import wifiget from '@/pages/wifi/wifiget'
import wifiTest from '@/pages/wifi/wifiTest'
import seat from '@/pages/seat/seat'
import question from '@/pages/question/question'
Vue.use(Router)
export default new Router({
mode: 'history',
base: process.env.BASE_URL,
routes: [
{
path: '/',
      name: 'root', // the 'login' route name belongs to the '/login' entry registered below
component: login
},
{
path: '/schedule',
name: 'schedule',
component: schedule
},
{
path: '/sign',
name: 'sign',
component: sign
},
{
path: '/message',
name: 'message',
component: message
},
{
path: '/my',
name: 'my',
component: my
},
{
path: '/personinfo',
name: 'personinfo',
component: personinfo
},
{
path: '/edit_personInfo',
name: 'edit_personInfo',
component: edit_personInfo
},
{
path: '/selectLocation',
name: 'selectLocation',
component: selectLocation
},
{
path: '/login',
name: 'login',
component: login
},
{
path: '/register',
name: 'register',
component: register
},
{
path: '/loginbak',
name: 'loginbak',
component: loginbak
},
{
path: '/messageadd',
name: 'messageadd',
component: messageadd
},
{
path: '/hallSeat',
name: 'hallSeat',
component: hallSeat
},
{
path: '/addsign',
name: 'addsign',
component: addsign
},
{
path: '/wifiget',
name: 'wifiget',
component: wifiget
},
{
path: '/wifiTest',
name: 'wifiTest',
component: wifiTest
},
{
path: '/seat',
name: 'seat',
component: seat
},
{
path: '/question',
name: 'question',
component: question
},
{
path: '/creatnewsign',
name: 'creatnewsign',
component: creatnewsign
},
{
path: '/signmanagement',
name: 'signmanagement',
component: signmanagement
},
{
path: '/studentsign',
name: 'studentsign',
component: studentsign
},
{
path: '/studentsigndetail',
name: 'studentsigndetail',
component: studentsigndetail
},
{
path: '/sutdengsignresult',
name: 'sutdengsignresult',
component: sutdengsignresult
},
{
path: '/messagemanagement',
name: 'messagemanagement',
component: messagemanagement
},
{
path: '/studentsignrecord',
name: 'studentsignrecord',
component: studentsignrecord
},
]
})
<file_sep># cordova
Android client for the attendance sign-in system
<file_sep>// The Vue build version to load with the `import` command
// (runtime-only or standalone) has been set in webpack.base.conf with an alias.
import Vue from 'vue'
import App from './App'
import router from './router'
import ElementUI from 'element-ui'
import 'element-ui/lib/theme-chalk/index.css'
import Vant from 'vant';
import 'vant/lib/index.css';
import 'amfe-flexible'
import Api from './api/api'
import store from './vuex'
import { TreeSelect } from 'vant';
import { post, get, patch, put } from './http'
var VueTouch = require('vue-touch')
import Vconsole from 'vconsole'
import './assets/stylus/reset.styl'
import './assets/styles/border.css'
import './assets/styles/reset.css'
let vConsole = new Vconsole()
Vue.config.productionTip = false;
// Define global properties on the Vue prototype
Vue.prototype.$post = post
Vue.prototype.$get = get
Vue.prototype.$patch = patch
Vue.prototype.$put = put
Vue.prototype.$req = Api;
Vue.use(ElementUI);
Vue.use(Vant);
Vue.use(VueTouch, { name: 'v-touch' })
Vue.use(TreeSelect);
// eslint-disable-next-line
/* eslint-disable */
/* eslint-disable no-new */
new Vue({
el: '#app',
router,
store,
components: { App },
template: '<App/>'
});
|
e77f814e763b61eadfe61d2e255bc1b1869104f6
|
[
"JavaScript",
"Markdown"
] | 3
|
JavaScript
|
Gravelwpy/cordova
|
d7d1cda14b761c2aaf9a7bcef33b0791e1740447
|
fb9c1f45dfa922e3f983250e6450b9ef0743136e
|
refs/heads/master
|
<repo_name>michaelephilau/MusicAudioVisualizer<file_sep>/Assets/Scripts/Graphing.cs
using UnityEngine;
using System.Collections;
using System.Collections.Generic;
using System;
using UnityEngine.UI;
using System.Runtime.InteropServices;
using System.IO;
public class Graphing : MonoBehaviour {
//[DllImport("System.Windows.Forms.dll")]
//private static extern void OpenFileDialog ();
public Camera mainCam;
private AudioSource aud;
public GameObject circle;
public GameObject[] circles;
public GameObject circle2;
public GameObject[] circles2;
public GameObject light;
public GameObject[] lights;
public GameObject openButton;
public float freq;
public float amp = 1f;
private float pausedTime;
public int samples;
public int numberofCircles;
public int numberofCircles2;
public int numberofLights;
public int buttonAmt;
public int div;
public bool op;
public bool spec;
public bool lightarray;
public bool grid;
public string url;
public string prevUrl;
public string songName;
public string[] dir;
public string[] fileExt;
public string currentDir;
public string currentDirParent;
public string currentDirSub;
public List<string> songList;
public List<string> fileExtList;
public List<string> dirList;
public Text txt;
public Text playbackTimeText;
public Text captionText;
public RenderTexture renderTexture;
public Sprite play;
public Sprite pause;
public Slider s1;
public Slider s2;
public Slider s3;
public Slider playbackTime;
void Start () {
renderTexture.width = Screen.width;
renderTexture.height = Screen.height;
mainCam = Camera.main;
aud = GetComponent<AudioSource> ();
if(!Application.isEditor)
Application.runInBackground = true;
spec = true;
Vector3 worldPos = mainCam.ScreenToWorldPoint(new Vector3(Screen.width, Screen.height,0));
float xWidth = worldPos.x;
float yHeight = worldPos.y;
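		// Spawn two mirrored rows of circles across the visible screen width;
		// Update() repositions them from the audio data every frame.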
for (float i = 0; i < worldPos.x * 2; i+=0.01f)
{
GameObject spawnedCircle = Instantiate (circle, circle.transform.position, circle.transform.rotation) as GameObject;
GameObject spawnedCircle2 = Instantiate (circle2, circle2.transform.position, circle2.transform.rotation) as GameObject;
spawnedCircle.transform.position = new Vector3 (i - xWidth, 0);
spawnedCircle2.transform.position = new Vector3 (i - xWidth, 0);
circles = GameObject.FindGameObjectsWithTag ("sphere");
circles2 = GameObject.FindGameObjectsWithTag ("sphere2");
numberofCircles = circles.Length;
numberofCircles2 = circles2.Length;
}
for (float x = 0; x < xWidth * 2; x+=0.3f)
{
for (float y = 0; y < yHeight * 2; y+=0.3f)
{
GameObject spawnedLight = Instantiate (light, light.transform.position, light.transform.rotation) as GameObject;
spawnedLight.transform.position = new Vector3 (x - xWidth, y - yHeight, 10);
lights = GameObject.FindGameObjectsWithTag ("star");
numberofLights = lights.Length;
}
}
foreach (GameObject light in lights)
light.SetActive (false);
GameObject.Find ("Grid").GetComponent<MeshRenderer> ().enabled = false;
buttonAmt = 0;
fileExt = new string[6]{"*.wav", "*.ogg", "*.s3m", "*.mod", "*.it", "*.xm"};
for (int i = 0; i < fileExt.Length; i++) {
fileExtList.Add (fileExt [i]);
}
GameObject.Find ("FileExt Dropdown").GetComponent<Dropdown> ().AddOptions(fileExtList);
string chosenFileExt = GameObject.Find("FileExt Dropdown").GetComponent<Dropdown>().captionText.text;
currentDir = Environment.GetFolderPath(Environment.SpecialFolder.Desktop) + @"\";
currentDirSub = currentDir.Substring(0,currentDir.Length-1);
string[] currentSubDirs = Directory.GetDirectories (currentDir, "*", SearchOption.TopDirectoryOnly);
dir = Directory.GetFiles (currentDir, chosenFileExt, SearchOption.TopDirectoryOnly);
dirList = new List<string> ();
currentDirParent = Directory.GetParent (currentDirSub).ToString();
dirList.Add (currentDir);
dirList.Add (currentDirParent);
for (int i = 0; i < currentSubDirs.Length; i++) {
dirList.Add (currentSubDirs[i]);
}
GameObject.Find("Directory Dropdown").GetComponent<Dropdown>().AddOptions(dirList);
dir = Directory.GetFiles (currentDir, chosenFileExt, SearchOption.TopDirectoryOnly);
songList = new List<string>();
for (int i = 0; i < dir.Length; i++) {
songList.Add (dir[i].Substring(dir[i].LastIndexOf(@"\")+1));
}
GameObject.Find("Song Dropdown").GetComponent<Dropdown>().AddOptions(songList);
GameObject.Find("Song Dropdown").GetComponent<Dropdown>().value = 0;
}
void Update () {
Vector3 worldPos = mainCam.ScreenToWorldPoint(new Vector3(Screen.width, Screen.height,0));
float[] output = aud.GetOutputData (8192, 1);
float[] spectrum = aud.GetSpectrumData (samples, 0, FFTWindow.BlackmanHarris);
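		// output holds raw waveform samples and spectrum holds FFT magnitudes;
		// both are sampled each frame to drive whichever visualization is active.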
if (aud.isPlaying) {
if (spec || op) {
foreach (GameObject spawnedCircle in circles)
spawnedCircle.SetActive (true);
foreach (GameObject spawnedLight in lights)
spawnedLight.SetActive (false);
GameObject.Find ("Grid").GetComponent<MeshRenderer> ().enabled = false;
for (int j = 0; j < circles.Length; j++) {
if (spec) {
circles [j].transform.position = new Vector3 (circles [j].transform.position.x, spectrum[j] * amp);
circles2 [j].transform.position = new Vector3 (circles2 [j].transform.position.x, -spectrum[j] * amp);
}
if (op) {
circles [j].transform.position = new Vector3 (circles2 [j].transform.position.x, output[j]);
}
Color color = circles [j].GetComponent<SpriteRenderer> ().color;
color.r = s1.normalizedValue + spectrum [j] * 200;
color.g = s2.normalizedValue;
color.b = s3.normalizedValue;
circles [j].GetComponent<SpriteRenderer> ().color = color;
Color color2 = circles2 [j].GetComponent<SpriteRenderer> ().color;
color2.r = s1.normalizedValue + spectrum [j] * 200;
color2.g = s2.normalizedValue;
color2.b = s3.normalizedValue;
circles2 [j].GetComponent<SpriteRenderer> ().color = color2;
}
}
if (spec) {
foreach (GameObject spawnedCircle2 in circles2)
spawnedCircle2.SetActive (true);
}
if (op) {
foreach (GameObject spawnedCircle2 in circles2)
spawnedCircle2.SetActive (false);
}
if (lightarray) {
foreach (GameObject spawnedLight in lights)
spawnedLight.SetActive (true);
foreach (GameObject spawnedCircle in circles)
spawnedCircle.SetActive (false);
foreach (GameObject spawnedCircle2 in circles2)
spawnedCircle2.SetActive (false);
if (aud.isPlaying) {
for (int i = 0; i < lights.Length; i++) {
Color color = lights [i].GetComponent<Light> ().color;
color.r = s1.normalizedValue;
color.g = s2.normalizedValue;
color.b = s3.normalizedValue;
lights [i].GetComponent<Light> ().color = color;
lights [i].GetComponent<Light> ().range = spectrum [i] * amp;
}
}
}
if (grid) {
foreach (GameObject spawnedLight in lights)
spawnedLight.SetActive (false);
foreach (GameObject spawnedCircle in circles)
spawnedCircle.SetActive (false);
foreach (GameObject spawnedCircle2 in circles2)
spawnedCircle2.SetActive (false);
GameObject gridObj = GameObject.Find ("Grid");
gridObj.GetComponent<MeshRenderer> ().enabled = true;
Mesh mesh = gridObj.GetComponent<MeshFilter> ().mesh;
Vector3[] verts = mesh.vertices;
Color color = gridObj.GetComponent<MeshRenderer>().material.color;
color.r = s1.normalizedValue;
color.g = s2.normalizedValue;
color.b = s3.normalizedValue;
for (int i = 0; i < verts.Length; i++) {
verts[i].y = spectrum[i];
}
gridObj.GetComponent<MeshRenderer> ().material.color = color;
mesh.vertices = verts;
mesh.RecalculateNormals ();
mesh.RecalculateBounds ();
}
playbackTime.value = aud.time;
playbackTime.maxValue = aud.clip.length;
			// AudioSource.time and AudioClip.length are in seconds.
			var playingAudioTime = TimeSpan.FromSeconds (aud.time);
			TimeSpan playingAudioTime2 = new TimeSpan (playingAudioTime.Ticks - (playingAudioTime.Ticks % TimeSpan.TicksPerSecond));
			var totalAudioTime = TimeSpan.FromSeconds (aud.clip.length);
			TimeSpan totalAudioTime2 = new TimeSpan (totalAudioTime.Ticks - (totalAudioTime.Ticks % TimeSpan.TicksPerSecond));
playbackTimeText.text = "" + playingAudioTime2 + " / " + totalAudioTime2;
if (buttonAmt == 0) {
grid = false;
spec = true;
}
if (buttonAmt == 1) {
lightarray = false;
spec = false;
op = true;
}
if (buttonAmt == 2) {
op = false;
lightarray = true;
}
if (buttonAmt == 3) {
lightarray = false;
grid = true;
}
}
if (songList.Count <= 0) {
openButton.SetActive(false);
} else if (songList.Count > 0) {
openButton.SetActive (true);
}
}
public void ExtensionChange(){
string chosenFileExt = GameObject.Find("FileExt Dropdown").GetComponent<Dropdown>().captionText.text;
dir = Directory.GetFiles (currentDir, chosenFileExt, SearchOption.TopDirectoryOnly);
songList = new List<string>();
for (int i = 0; i < dir.Length; i++) {
songList.Add (dir[i].Substring(dir[i].LastIndexOf(@"\")+1));
}
GameObject.Find("Song Dropdown").GetComponent<Dropdown>().ClearOptions();
GameObject.Find("Song Dropdown").GetComponent<Dropdown>().AddOptions(songList);
GameObject.Find("Song Dropdown").GetComponent<Dropdown>().value = 0;
}
public void SongChange(){
captionText.text = captionText.text.Substring (captionText.text.LastIndexOf (@"\")+1);
}
public void DirChange(){
currentDir = GameObject.Find("Directory Dropdown").GetComponent<Dropdown>().captionText.text + @"\";
currentDirSub = currentDir.Substring(0,currentDir.Length-1);
currentDirParent = Directory.GetParent(currentDirSub).ToString();
string[] currentSubDirs = Directory.GetDirectories (currentDirSub, "*", SearchOption.TopDirectoryOnly);
List<string> dirList = new List<string> ();
dirList.Add (currentDirSub);
dirList.Add (currentDirParent);
for (int i = 0; i < currentSubDirs.Length; i++) {
dirList.Add (currentSubDirs[i]);
}
GameObject.Find ("Directory Dropdown").GetComponent<Dropdown> ().ClearOptions ();
GameObject.Find("Directory Dropdown").GetComponent<Dropdown>().AddOptions(dirList);
GameObject.Find("Directory Dropdown").GetComponent<Dropdown>().value = 0;
string chosenFileExt = GameObject.Find("FileExt Dropdown").GetComponent<Dropdown>().captionText.text;
dir = Directory.GetFiles (currentDir, chosenFileExt, SearchOption.TopDirectoryOnly);
songList.Clear ();
songList = new List<string>();
for (int i = 0; i < dir.Length; i++) {
songList.Add (dir[i].Substring(dir[i].LastIndexOf(@"\")+1));
}
GameObject.Find("Song Dropdown").GetComponent<Dropdown>().ClearOptions();
GameObject.Find("Song Dropdown").GetComponent<Dropdown>().AddOptions(songList);
GameObject.Find("Song Dropdown").GetComponent<Dropdown>().value = 0;
}
public void ButtonPress(){
buttonAmt++;
if (buttonAmt == 4) {
buttonAmt = 0;
}
}
public void PlayAndPause(){
if (aud.isPlaying) {
aud.Pause ();
GameObject.Find ("PlayPause").GetComponent<Button> ().image.sprite = play;
} else if (!aud.isPlaying) {
aud.Play ();
GameObject.Find ("PlayPause").GetComponent<Button> ().image.sprite = pause;
}
}
public void OpenFile(){
pausedTime = aud.time;
prevUrl = url;
url = currentDir + captionText.text;
StartCoroutine ("OpenMusic");
}
IEnumerator OpenMusic(){
WWW www = new WWW("file://" + url);
yield return www;
aud.clip = www.GetAudioClip(false, false);
if (pausedTime > 0.0f && url == prevUrl) {
aud.time = pausedTime;
}
else if (pausedTime <= 0.0f || url != prevUrl) {
aud.time = 0.0f;
}
aud.Play ();
GameObject.Find ("PlayPause").GetComponent<Button> ().image.sprite = pause;
StopCoroutine ("OpenMusic");
}
public void Dragging()
{
aud.volume = 0.5f;
aud.time = playbackTime.value;
}
public void StopDragging(){
aud.volume = 1.0f;
}
public void Quit(){
Application.Quit ();
}
}
|
d7ac4e1945887ce165b17f957927e50e2880686c
|
[
"C#"
] | 1
|
C#
|
michaelephilau/MusicAudioVisualizer
|
ca46b287b1d016d3f39aa4af05a2675d01f76bbe
|
354ae76394471826887714fa0b2612235309af7e
|
refs/heads/master
|
<file_sep>package com.yixuninfo.m2.base;
import android.os.Bundle;
import android.support.annotation.Nullable;
import android.support.v4.app.Fragment;
import android.view.LayoutInflater;
import android.view.View;
import android.view.ViewGroup;
import com.yixuninfo.m2.ui.dialog.DialogFragmentHelper;
import com.yixuninfo.m2.utils.CommonUtils;
/**
* Created by tangxin on 2017/9/25.
*/
public abstract class BaseFragment extends Fragment {
private View mFragmentView;
private DialogFragmentHelper mDialogHelper;
public BaseFragment() {
}
@Override
public void onCreate(@Nullable Bundle savedInstanceState) {
super.onCreate(savedInstanceState);
setRetainInstance(true);
mDialogHelper = new DialogFragmentHelper(getChildFragmentManager());
setHasOptionsMenu(true);
}
@Override
public View onCreateView(LayoutInflater inflater, ViewGroup container,
Bundle savedInstanceState) {
if (mFragmentView == null) {
mFragmentView = inflater.inflate(getLayoutId(), container, false);
initViews(mFragmentView, savedInstanceState);
}
return mFragmentView;
}
protected abstract void initViews(View mFragmentView, Bundle savedInstanceState);
public abstract int getLayoutId();
@Override
public void onDetach() {
super.onDetach();
}
@Override
public void onDestroy() {
super.onDestroy();
}
public void hideDialog() {
mDialogHelper.dismiss();
}
public void showDialog() {
mDialogHelper.showDialog();
}
public void showToast(String message) {
CommonUtils.Companion.showToast(message);
}
public void reload() {
}
}
<file_sep>package com.yixuninfo.m2.utils
import android.annotation.SuppressLint
import android.content.Context
import android.content.pm.PackageManager
import android.telephony.TelephonyManager
import android.text.TextUtils
import com.yixuninfo.m2.base.MMApplication
import com.yixuninfo.m2.ui.view.CustomToast
import com.yixuninfo.m2.utils.cache.ACache
import java.io.File
import java.text.SimpleDateFormat
import java.util.*
/**
* Created by tangxin on 2017/8/29.
*/
class CommonUtils {
companion object {
private lateinit var aCache: ACache
private var cacheTime = 3650
fun showToast(message: String) {
CustomToast.showToast(message)
}
fun isNotEmpty(str: String?): Boolean {
return !(null == str || "" == str || TextUtils.isEmpty(str) || "null" == str)
}
@SuppressLint("MissingPermission")
fun getPackageManager(number: Int): String? {
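            // number selects the return value: 1 or 5 = versionCode (as a string),
            // 2 = terminal code, 3 = Android OS version, 4 = deviceId, 9 = versionName.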
var versionCode = 0
var deviceId: String
val currentApiVersion: String = android.os.Build.VERSION.RELEASE
var installTime = "0"
val EquipmentNumber: String
var versionName = "0"
try {
val info = MMApplication.sContext.packageManager.getPackageInfo(MMApplication.sContext.packageName, 0)
                // Version name of the current application
versionName = info.versionName
                // Version code of the current build
versionCode = info.versionCode
                // Get the application install time
val file = File(info.applicationInfo.sourceDir)
@SuppressLint("SimpleDateFormat")
val formatter = SimpleDateFormat("yyyyMMddHHmmss")
installTime = formatter.format(Date(file.lastModified()))
} catch (e: PackageManager.NameNotFoundException) {
e.printStackTrace()
}
val tm = MMApplication.sContext.getSystemService(Context.TELEPHONY_SERVICE) as TelephonyManager
deviceId = tm.deviceId
            if (TextUtils.isEmpty(deviceId)) {
                deviceId = Installation.UUID_id(MMApplication.sContext)
            }
            // Android OS version number
EquipmentNumber = "$deviceId|1|$currentApiVersion|$installTime"
if (number == 1) {
return versionCode.toString()
} else if (number == 2) {
return EquipmentNumber
} else if (number == 3) {
return currentApiVersion
} else if (number == 4) {
return deviceId
} else if (number == 9) {
return versionName
} else if (number == 5) {
return "" + versionCode
}
return null
}
fun initCache(context: Context) {
aCache = ACache.get(context)
}
fun setJsonArray(JsonName: String, jo: String) {
try {
aCache.remove(JsonName)
} catch (e: NullPointerException) {
                print("ACache NullPointerException ${e.message}")
}
            print("Storing data => $jo")
aCache.put(JsonName, jo, cacheTime * ACache.TIME_DAY)
}
fun setData(name: String, PwdStr: String) {
setJsonArray(name, PwdStr)
}
fun getData(name: String): String {
return aCache.getAsString(name) ?: ""
}
}
}<file_sep>package com.yixuninfo.m2.ui.main
import com.yixuninfo.m2.base.BaseData
import com.yixuninfo.m2.base.BaseView
import com.yixuninfo.m2.data.bean.GetProjectBusinessListBean
import com.yixuninfo.m2.data.dp.ServerInfoDP
import com.yixuninfo.m2.data.provider.DialogProgressDataProvider
import com.yixuninfo.m2.utils.CommonUtils
import com.yixuninfo.m2.utils.cache.CacheKey
/**
* Created by tangxin on 2017/9/28.
*/
class MainFragmentViewModel(var mainFragmentView: MainFragmentView) {
private var serverInfoDP = ServerInfoDP()
fun getProjectBusinessList() {
val map = HashMap<String, Any>()
map.put("version", CommonUtils.getPackageManager(5).toString())
map.put("merchantId", CommonUtils.getData(CacheKey.merchantId))
map.put("token", CommonUtils.getData(CacheKey.loginToken))
map.put("userId", CommonUtils.getData(CacheKey.loginUserId))
map.put("terminalCode", CommonUtils.getPackageManager(2).toString())
map.put("projectCode", CommonUtils.getData(CacheKey.projectCode))
serverInfoDP.getProjectBusinessList(map, object : DialogProgressDataProvider<BaseView, BaseData<GetProjectBusinessListBean>>(mainFragmentView) {
override fun dataSuccess(result: BaseData<GetProjectBusinessListBean>) {
mainFragmentView.resetData(result.resData)
}
override fun dataEmpty(errorMessage: String?) {
super.dataEmpty(errorMessage)
mainFragmentView.showToast(errorMessage)
}
})
}
}<file_sep>package com.yixuninfo.m2.ui.main
import android.annotation.SuppressLint
import android.databinding.DataBindingUtil
import android.os.Bundle
import android.view.Gravity
import android.view.LayoutInflater
import android.view.View
import android.view.ViewGroup
import com.yixuninfo.m2.R
import com.yixuninfo.m2.base.BaseFragment
import com.yixuninfo.m2.data.bean.ListBean
import com.yixuninfo.m2.databinding.FragmentCustomerLayoutBinding
import com.yixuninfo.m2.ui.view.DrawableTextView
import com.yixuninfo.m2.ui.view.MenuLayout
import com.yixuninfo.m2.ui.view.menu.*
import com.yixuninfo.m2.utils.DimenUtils
class CustomerFragment : BaseFragment(), CustomerFragmentView {
private lateinit var mBinding: FragmentCustomerLayoutBinding
private var menuSortView: MenuView<ListBean>? = null
private var menuScreenView: MenuView<ListBean>? = null
override fun getLayoutId(): Int {
return R.layout.fragment_customer_layout
}
override fun onCreateView(inflater: LayoutInflater?, container: ViewGroup?, savedInstanceState: Bundle?): View? {
mBinding = DataBindingUtil.inflate<FragmentCustomerLayoutBinding>(inflater, layoutId, container, false)
initViews(mBinding.root, savedInstanceState)
return mBinding.root
}
override fun initViews(mFragmentView: View?, savedInstanceState: Bundle?) {
mBinding.model = CustomerFragmentViewModel()
mBinding.model.customerFragmentView = this
initMenu()
mBinding.model.domain()
}
private fun initMenu() {
mBinding.tbCustomerMenu.setDrawables(MenuLayout.MenuIconType.MENU_RIGHT_ICON).setTabIdArray(mBinding.model.ids).setImagesUp(mBinding.model.iconDown)
.setImagesDown(mBinding.model.iconUp).setTabName(mBinding.model.names.toTypedArray())
mBinding.tbCustomerMenu.setOnMenuClickListener { position, view ->
mBinding.tbCustomerMenu.updateMenu(view)
when (position) {
0 -> {
showSortList(view)
}
1 -> {
showScreenMenu(view)
}
}
}
}
private fun showSortList(view: View) {
if (null == menuSortView) {
menuSortView = MenuView(activity, view)
menuSortView?.setTopItemView(object : ItemView<ListBean> {
@SuppressLint("RtlHardcoded")
override fun BindViewHolder(holder: MenuAdapter<BaseMenuDataBean>.MenuViewHolder, item: ListBean, position: Int, itemResetView: ItemResetView) {
drawableItemView(menuSortView?.topItemViewId ?: MenuView.TOP_ITEM_ID, holder, item, position, itemResetView)
}
override fun itemTopClick(item: ListBean, position: Int) {
}
})
menuSortView?.initView(mBinding.model.listSort)
menuSortView?.menuStyles?.selectIndex = 0
}
menuSortView?.showView(menuSortView, menuScreenView, menuSortView)
}
private fun showScreenMenu(view: View) {
if (null == menuScreenView) {
menuScreenView = MenuView(activity, view)
val menuStyle = MenuStyles()
menuStyle.isOpenSecondMenu = true
menuScreenView?.menuStyles = menuStyle
menuScreenView?.setTopItemView(object : ItemView<ListBean> {
@SuppressLint("RtlHardcoded")
override fun BindViewHolder(holder: MenuAdapter<BaseMenuDataBean>.MenuViewHolder, item: ListBean, position: Int, itemResetView: ItemResetView) {
drawableItemView(menuScreenView?.topItemViewId ?: MenuView.TOP_ITEM_ID, holder, item, position, itemResetView)
}
override fun itemTopClick(item: ListBean, position: Int) {
                    print("Top-level item selected: $position")
menuScreenView?.notifySecond(mBinding.model.secondLists[position], position)
}
})
menuScreenView?.setSecondItemView(object : ItemView<ListBean> {
override fun BindViewHolder(holder: MenuAdapter<BaseMenuDataBean>.MenuViewHolder, item: ListBean, position: Int, itemResetView: ItemResetView) {
drawableItemView(menuScreenView?.topItemViewId ?: MenuView.TOP_ITEM_ID, holder, item, position, itemResetView)
}
override fun itemTopClick(item: ListBean?, position: Int) {
                    print("Second-level item selected: $position")
}
})
menuScreenView?.menuStyles?.selectIndex = 0
menuScreenView?.initView(mBinding.model.listScreen, null)
// menuScreenView?.initView(mBinding.model.listScreen, mBinding.model.secondLists)
}
menuScreenView?.showView(menuScreenView, menuScreenView, menuSortView)
}
fun drawableItemView(id: Int, holder: MenuAdapter<BaseMenuDataBean>.MenuViewHolder, item: ListBean, position: Int, itemResetView: ItemResetView) {
val itemView: View = holder.itemView
val textView: DrawableTextView = itemView.findViewById(id) as DrawableTextView
textView.gravity = Gravity.CENTER_VERTICAL
textView.setPadding(DimenUtils.dp2px(10), 0, 0, 0)
textView.text = item.name
itemResetView.reChange(position, textView)
}
}
<file_sep>package com.yixuninfo.m2.ui.main
import android.support.annotation.IdRes
import com.yixuninfo.m2.R
import com.yixuninfo.m2.base.BaseView
import com.yixuninfo.m2.data.bean.CustomerBean
import com.yixuninfo.m2.data.bean.ListBean
import com.yixuninfo.m2.data.dp.ServerInfoDP
import com.yixuninfo.m2.data.provider.DialogProgressDataProvider
import com.yixuninfo.m2.ui.view.menu.MenuSecondBean
import com.yixuninfo.m2.utils.CommonUtils
import com.yixuninfo.m2.utils.cache.CacheKey
import java.util.*
/**
* Created by tangxin on 2017/9/29.
*/
class CustomerFragmentViewModel {
@IdRes
private val sort = 503
@IdRes
private val list = 44
var serverInfoDP = ServerInfoDP()
    // page number
var pageNumber = 1
    // items per page
var pageSize = 10
var sortValue = ""
var screenType = ""
var screenValue = ""
lateinit var customerFragmentView: CustomerFragmentView
var ids: IntArray? = null
get() = intArrayOf(list, sort)
var mainData: CustomerBean = CustomerBean()
var names: MutableList<String> = mutableListOf("排序", "筛选")
var iconDown: IntArray = intArrayOf(R.mipmap.paixu_down, R.mipmap.shaixuan_down)
var iconUp: IntArray = intArrayOf(R.mipmap.paixu_up, R.mipmap.shaixuan_up)
var listSort = listOf<ListBean>(ListBean("最新创建", "1"), ListBean("最新更新", "2"),
ListBean("最新跟进", "3"), ListBean("按标星", "4"))
var listScreen = listOf<ListBean>(ListBean("客户状态", "1"), ListBean("意向等级", "2"),
ListBean("跟进时间", "3"), ListBean("首次接触方式", "4"), ListBean("归属人", "5"))
var secondLists = listOf<MenuSecondBean<ListBean>>(MenuSecondBean(listScreen[0].name, listScreen[0].id,
getCustomerStatusLists()), MenuSecondBean(listScreen[1].name, listScreen[1].id,
listOf<ListBean>(ListBean("全部", "0"), ListBean("A+", "1"),
ListBean("A", "2"), ListBean("B", "3"), ListBean("C", "4"),
ListBean("D", "5"))), MenuSecondBean(listScreen[2].name, listScreen[2].id,
listOf<ListBean>(ListBean("全部", "0"), ListBean("3天未跟进", "1"),
ListBean("一周未跟进", "2"), ListBean("两周未跟进", "3"),
ListBean("一月未跟进", "4"), ListBean("两月未跟进", "5"),
ListBean("两月未跟进2", "6"), ListBean("两月未跟进3", "7"),
ListBean("两月未跟进4", "8"))),
MenuSecondBean(listScreen[3].name, listScreen[3].id, getTypes()), MenuSecondBean(listScreen[4].name, listScreen[4].id, getConsultants()))
private fun getCustomerStatusLists(): List<ListBean> {
val lists = mutableListOf<ListBean>(ListBean("全部", "0"), ListBean("问询", "1"),
ListBean("来访", "2"), ListBean("认购", "4"), ListBean("签约", "5"),
ListBean("无意向", "6"))
// if (1 == CommonUtils.getData(CacheKey.isPayDeposit).toInt()) {
// lists.add(3, ListBean("认筹", "3"))
// }
return lists
}
fun domain() {
serverInfoDP.getCustomerFragmentData(getDictionaryListSubmit(), getConsultantListSubmit(), getCustomerSubmit(), object : DialogProgressDataProvider<BaseView, CustomerBean>(customerFragmentView) {
override fun dataSuccess(result: CustomerBean?) {
mainData = result ?: CustomerBean()
}
})
}
private fun getCustomerSubmit(): Map<String, Any> {
val map = HashMap<String, String>()
map.put("version", CommonUtils.getPackageManager(5).toString())
map.put("merchantId", CommonUtils.getData(CacheKey.merchantId))
map.put("token", CommonUtils.getData(CacheKey.loginToken))
map.put("userId", CommonUtils.getData(CacheKey.loginUserId))
map.put("terminalCode", CommonUtils.getPackageManager(2).toString())
map.put("projectCode", CommonUtils.getData(CacheKey.projectCode))
        map.put("type", "")// not passed here
map.put("sort", sortValue)
if (CommonUtils.isNotEmpty(screenType)) {
map.put("screenType", screenType)
} else {
map.put("screenType", "")
}
if (CommonUtils.isNotEmpty(screenValue)) {
map.put("screenValue", screenValue)
} else {
map.put("screenValue", "")
}
map.put("pageNumber", "$pageNumber")
map.put("pageSize", "$pageSize")
return map
}
private fun getConsultantListSubmit(): Map<String, Any> {
val map = HashMap<String, String>()
map.put("resourcesCode", "menu_0006_0001")
map.put("searchType", "0")
map.put("version", CommonUtils.getPackageManager(5).toString())
map.put("merchantId", CommonUtils.getData(CacheKey.merchantId))
map.put("token", CommonUtils.getData(CacheKey.loginToken))
map.put("userId", CommonUtils.getData(CacheKey.loginUserId))
map.put("terminalCode", CommonUtils.getPackageManager(2).toString())
map.put("projectCode", CommonUtils.getData(CacheKey.projectCode))
map.put("projectId", CommonUtils.getData(CacheKey.projectId))
return map
}
private fun getDictionaryListSubmit(): Map<String, Any> {
val map = HashMap<String, String>()
map.put("version", CommonUtils.getPackageManager(5).toString())
map.put("merchantId", CommonUtils.getData(CacheKey.merchantId))
map.put("projectId", CommonUtils.getData(CacheKey.projectId))
map.put("token", CommonUtils.getData(CacheKey.loginToken))
map.put("userId", CommonUtils.getData(CacheKey.loginUserId))
map.put("terminalCode", CommonUtils.getPackageManager(2).toString())
map.put("busNumType", "contactWay")
map.put("busNum", "01")
return map
}
private fun getTypes(): List<ListBean> {
val lists = mutableListOf<ListBean>()
mainData.getDictionaryListBean?.rqBusNumList?.forEach {
lists.add(ListBean(it.dictionaryName, it.dictionaryId))
}
return lists
}
private fun getConsultants(): List<ListBean> {
val lists = mutableListOf<ListBean>()
mainData.getConsultantListBean?.consultantList?.forEach {
lists.add(ListBean(it.consultantUserName, it.consultantUserId))
}
return lists
}
}<file_sep>package com.yixuninfo.m2.ui.view;
/**
* Created by tangxin on 2017/9/27.
*/
public interface Constant {
int RELATION_MENU_FIRST = 1;
int RELATION_MENU_SECOND_ART = 2;
}<file_sep>package com.yixuninfo.m2.data.bean
import java.util.*
/**
*
 * **Title:** Login
 *
 * **Description:**
 * @author tangxin
 *
 * @date December 9, 2016, 10:35 AM
*/
class LoginBean {
    var result: String = ""// result code
    var token: String = ""// session token
    var userId: String = ""// user ID
    var merchantId: String = ""// merchant ID
    var userName: String = ""// user name
    var errorCode: String = ""// error code
    var errorMsg: String = ""// error message
    var roleName: String = ""// role name (property consultant, sales-office manager)
    var roleType: String = ""// role type String(1): 1 = agent; 2 = sales-office manager
    var isHomePage: String = ""// menu permission: home page String(1) 0 = no, 1 = yes
    var isCustomer: String = ""// menu permission: customers String(1) 0 = no, 1 = yes
    var isHouses: String = ""// menu permission: listings String(1) 0 = no, 1 = yes
    var isAnalysis: String = ""// menu permission: analysis String(1) 0 = no, 1 = yes
    var isMine: String = "" // menu permission: mine String(1) 0 = no, 1 = yes
    var isAddCustomer: String = ""// customer-menu button permission: add customer String(1) 0 = no, 1 = yes
    var isDistributionCustomer: String = "" // customer-menu button permission: assign customer String(1)
    // 0 = no, 1 = yes
    var isGradesMine: String = ""// mine-menu button permission: my results String(1) 0 = no, 1 = yes
    var isHeroListMine: String = ""// mine-menu button permission: personal leaderboard String(1) 0 = no, 1 = yes
    var isTeamRankingMine: String = ""// mine-menu button permission: team ranking String(1) 0 = no, 1 = yes
    var isPhoneInformation: String = ""// whether the phone platform is enabled String(1)
    // 0 = not enabled; 2 = enabled
    var isPaydeposit: String = ""// whether the pre-sale subscription feature is enabled: 0 = not enabled; 1 = enabled
lateinit var loginHandImgList: ArrayList<LoginHandImgList>
class LoginHandImgList {
        var handLogoImg: String = ""// home page image URL String(255)
}
}
<file_sep>package com.yixuninfo.m2.utils.city
import java.util.*
class CityProvinceCityDistrict {
var provinceCityDistrict: ArrayList<ProvinceCityDistrict>? = null
var result: String = ""
class ProvinceCityDistrict {
var province: String = ""
var provinceId: String = ""
var cities: ArrayList<Cities>? = null
class Cities {
var city: String = ""
var cityId: String = ""
var districts: ArrayList<Districts>? = null
}
class Districts {
var district: String = ""
var districtId: String = ""
}
}
}
<file_sep>package com.yixuninfo.m2.ui.main
import com.yixuninfo.m2.base.BaseView
import com.yixuninfo.m2.data.bean.GetProjectBusinessListBean
/**
* Created by tangxin on 2017/9/28.
*/
interface MainFragmentView : BaseView {
fun resetData(getProjectBusinessListBean: GetProjectBusinessListBean)
}<file_sep>package com.yixuninfo.m2.data.bean
/**
* Created by tangxin on 2017/9/27.
*/
class MainBean {
var getProjectList: GetProjectListBean? = null
var getVersion: GetVersionBean? = null
var getProjectBusinessList: GetProjectBusinessListBean? = null
}<file_sep>package com.yixuninfo.m2.base;
import android.support.v7.widget.RecyclerView;
import android.support.v7.widget.StaggeredGridLayoutManager;
import android.view.LayoutInflater;
import android.view.View;
import android.view.ViewGroup;
import android.widget.ProgressBar;
import android.widget.TextView;
import com.yixuninfo.m2.R;
import java.util.List;
public abstract class BaseRecyclerViewAdapter<T> extends RecyclerView.Adapter<RecyclerView.ViewHolder> {
public static final int TYPE_ITEM = 0;
public static final int TYPE_FOOTER = 1;
protected boolean mIsShowFooter;
protected List<T> mList;
public int loadStatus = 0;
    public static final int LOADING_TYPE_ERROR = -1;// loading failed
    public static final int LOADING_TYPE_NO_MORE = 0;// no more data
    public static final int LOADING_TYPE_LOADING = 1;// loading
public BaseRecyclerViewAdapter(List<T> list) {
mList = list;
}
@Override
public RecyclerView.ViewHolder onCreateViewHolder(ViewGroup parent, int viewType) {
return null;
}
@Override
public void onBindViewHolder(RecyclerView.ViewHolder holder, int position) {
ViewGroup.LayoutParams layoutParams = holder.itemView.getLayoutParams();
if (holder instanceof BaseRecyclerViewAdapter.FooterViewHolder) {
FooterViewHolder footHolder = (FooterViewHolder) holder;
footHolder.changeStatus(loadStatus);
            if (layoutParams != null) {// could be merged into a single && check
if (layoutParams instanceof StaggeredGridLayoutManager.LayoutParams) {
StaggeredGridLayoutManager.LayoutParams params = (StaggeredGridLayoutManager.LayoutParams) holder.itemView
.getLayoutParams();
params.setFullSpan(true);
}
}
} else {
onBindItemViewHolder(holder, position);
}
}
public abstract void onBindItemViewHolder(RecyclerView.ViewHolder holder, int position);
protected View getView(ViewGroup parent, int layoutId) {
return LayoutInflater.from(parent.getContext()).inflate(layoutId, parent, false);
}
@Override
public int getItemCount() {
if (mList == null) {
return 0;
}
int itemSize = mList.size();
if (mIsShowFooter) {
itemSize += 1;
}
return itemSize;
}
protected boolean isFooterPosition(int position) {
return (getItemCount() - 1) == position;
}
public void add(int position, T item) {
mList.add(position, item);
notifyItemInserted(position);
}
public void addMore(List<T> data) {
int startPosition = mList.size();
mList.addAll(data);
notifyItemRangeInserted(startPosition, mList.size());
}
public void delete(int position) {
mList.remove(position);
notifyDataSetChanged();
}
public void resetAll(List<T> data) {
mList.clear();
mIsShowFooter = false;
if (data != null) {
mList.addAll(data);
}
notifyDataSetChanged();
}
@Override
public int getItemViewType(int position) {
if (mList != null) {
if (position == mList.size()) {
return TYPE_FOOTER;
} else {
return TYPE_ITEM;
}
} else {
return TYPE_ITEM;
}
}
public List<T> getList() {
return mList;
}
public void setList(List<T> items) {
mList = items;
}
public void showFooter(int loadingTypeNoMore) {
loadStatus = loadingTypeNoMore;
mIsShowFooter = true;
notifyItemInserted(getItemCount());
}
public void changeFooterStatus(int loadingTypeNoMore) {
loadStatus = loadingTypeNoMore;
notifyDataSetChanged();
}
public void hideFooter() {
mIsShowFooter = false;
notifyItemRemoved(getItemCount());
}
public class FooterViewHolder extends RecyclerView.ViewHolder {
ProgressBar progressView;
TextView tvContent;
public FooterViewHolder(View view) {
super(view);
progressView = (ProgressBar) view.findViewById(R.id.progress_view);
tvContent = (TextView) view.findViewById(R.id.tv_content);
}
public void changeStatus(int loadStatus) {
switch (loadStatus) {
case LOADING_TYPE_NO_MORE:
tvContent.setText("没有更多数据...");
progressView.setVisibility(View.GONE);
break;
case LOADING_TYPE_LOADING:
tvContent.setText("正在加载...");
progressView.setVisibility(View.VISIBLE);
break;
case LOADING_TYPE_ERROR:
tvContent.setText("加载出错,请检查网络状况");
progressView.setVisibility(View.GONE);
break;
}
}
}
}<file_sep>package com.yixuninfo.m2.ui.main
import android.support.annotation.IdRes
import com.yixuninfo.m2.R
import com.yixuninfo.m2.base.BaseView
import com.yixuninfo.m2.data.bean.GetProjectBusinessListBean
import com.yixuninfo.m2.data.bean.GetProjectListBean
import com.yixuninfo.m2.data.bean.MainBean
import com.yixuninfo.m2.data.dp.ServerInfoDP
import com.yixuninfo.m2.data.provider.DialogProgressDataProvider
import com.yixuninfo.m2.ui.view.MenuPopu.BaseMenuData
import com.yixuninfo.m2.utils.CommonUtils
import com.yixuninfo.m2.utils.cache.CacheKey
/**
* Created by tangxin on 2017/9/26.
*/
class HomePageViewModel {
companion object {
@IdRes
val TAB_ID_MAIN = 281
@IdRes
val TAB_ID_CUSTOMER = 29
@IdRes
val TAB_ID_HOUSES = 526
@IdRes
val TAB_ID_ANALYSIS = 779
@IdRes
val TAB_ID_MINE = 909
}
private var serverInfoDP = ServerInfoDP()
lateinit var homePageView: HomePageView
var projectList: List<GetProjectListBean.ProjectList> = ArrayList()
val ids: IntArray
get() = getIds(TAB_ID_MAIN, TAB_ID_CUSTOMER, TAB_ID_HOUSES, TAB_ID_ANALYSIS, TAB_ID_MINE)
var images: MutableList<Int> = mutableListOf()
var imagesUp: MutableList<Int> = mutableListOf()
var tabNames: MutableList<String> = mutableListOf()
private fun getIds(vararg id: Int): IntArray {
return id
}
fun initMenuData(names: Array<String>) {
if (isHaven(CacheKey.isHomePage)) {
images.add(R.mipmap.ic_main)
imagesUp.add(R.mipmap.ic_main_up)
tabNames.add(names[0])
}
if (isHaven(CacheKey.isCustomer)) {
images.add(R.mipmap.ic_customer)
imagesUp.add(R.mipmap.ic_customer_up)
tabNames.add(names[1])
}
if (isHaven(CacheKey.isHouses)) {
images.add(R.mipmap.ic_houses)
imagesUp.add(R.mipmap.ic_house_up)
tabNames.add(names[2])
}
if (isHaven(CacheKey.isAnalysis)) {
images.add(R.mipmap.ic_analysis)
imagesUp.add(R.mipmap.ic_analysis_up)
tabNames.add(names[3])
}
if (isHaven(CacheKey.isMine)) {
images.add(R.mipmap.ic_mine)
imagesUp.add(R.mipmap.ic_mine_up)
tabNames.add(names[4])
}
}
private fun isHaven(key: String): Boolean {
return when (CommonUtils.getData(key)) {
"0" -> false
"1" -> true
else -> false
}
}
fun doMain() {
val map = HashMap<String, Any>()
map.put("version", CommonUtils.getPackageManager(5).toString())
map.put("merchantId", CommonUtils.getData(CacheKey.merchantId))
map.put("token", CommonUtils.getData(CacheKey.loginToken))
map.put("userId", CommonUtils.getData(CacheKey.loginUserId))
map.put("terminalCode", CommonUtils.getPackageManager(2).toString())
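        // Note: projectSubmitMap and versionSubmitMap below refer to the same HashMap
        // instance, so projectCode and versionType both end up in the one map passed to getMainData.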
val projectSubmitMap = map
projectSubmitMap.put("projectCode", CommonUtils.getData(CacheKey.projectCode))
val versionSubmitMap = map
versionSubmitMap.put("versionType", "1")
serverInfoDP.getMainData(projectSubmitMap, versionSubmitMap, object : DialogProgressDataProvider<BaseView, MainBean>(homePageView) {
override fun dataSuccess(result: MainBean) {
if ("1" == result.getProjectList?.result) {
projectList = result.getProjectList?.projectList ?: ArrayList()
if (projectList.isNotEmpty()) {
homePageView.initTitle(projectList[0].projectName)
CommonUtils.setData(CacheKey.projectCode, projectList[0].projectCode)
CommonUtils.setData(CacheKey.projectId, projectList[0].projectId)
}
} else {
homePageView.showToast("项目列表数据获取失败")
}
if ("1" == result.getVersion?.result) {
                    when (result.getVersion?.versionStatus) {
                        "1" -> {// optional upgrade available
                        }
                        "2" -> {// forced upgrade
}
}
} else {
homePageView.showToast("版本信息数据获取失败")
}
if ("1" == result.getProjectBusinessList?.result) {
homePageView.initMainFragment(result.getProjectBusinessList ?: GetProjectBusinessListBean())
} else {
homePageView.showToast("业务参数列表获取失败")
}
}
})
}
fun selectProjectItem(itemData: BaseMenuData) {
CommonUtils.setData(CacheKey.projectName, itemData.menuName)
CommonUtils.setData(CacheKey.projectId, itemData.menuId)
CommonUtils.setData(CacheKey.projectCode, itemData.menuCode)
homePageView.initTitle(itemData.menuName)
homePageView.reload()
}
}<file_sep>package com.qiaofang.assistant.view
import android.content.Context
import android.os.Handler
import android.support.v7.widget.AppCompatTextView
import android.view.Gravity
import android.view.LayoutInflater
import android.view.View
import android.widget.Toast
import com.yixuninfo.m2.R
import com.yixuninfo.m2.base.MMApplication
/**
* Created by tangxin on 2017/6/9.
*/
class CenterToast {
var mCenterToast: Toast? = null
var mView: View? = null
var tvMessage: AppCompatTextView? = null
private var mHandler = Handler()
private var toastRunnable: Runnable = Runnable {
mCenterToast?.cancel()
}
fun showCenterToast(message: String) {
showCenterToast(MMApplication.sContext, message, 2000)
}
private fun initView(mContext: Context) {
mView = LayoutInflater.from(mContext).inflate(R.layout.toast_layout, null)
tvMessage = mView?.findViewById(R.id.tv_toast_message) as AppCompatTextView
}
private fun showCenterToast(mContext: Context, message: String, duration: Int) {
if (mView == null) {
initView(mContext)
}
tvMessage?.text = message
mHandler.removeCallbacks(toastRunnable)
if (mCenterToast == null) {
mCenterToast = Toast(mContext)
mCenterToast?.view = mView
mCenterToast?.setGravity(Gravity.CENTER, 0, 0)
mCenterToast?.duration = duration
} else {
mCenterToast?.view = mView
}
mHandler.postDelayed(toastRunnable, duration.toLong())
mCenterToast?.show()
}
}<file_sep>package com.yixuninfo.m2.ui.main.adapter;
import android.databinding.DataBindingUtil;
import android.support.v7.widget.RecyclerView;
import android.view.LayoutInflater;
import android.view.View;
import android.view.ViewGroup;
import com.yixuninfo.m2.R;
import com.yixuninfo.m2.base.BaseRecyclerViewAdapter;
import com.yixuninfo.m2.data.bean.GetProjectBusinessListBean;
import com.yixuninfo.m2.databinding.ItemBusinessLayoutBinding;
import com.yixuninfo.m2.ui.view.recyclerview.ItemClickListener;
import com.yixuninfo.m2.utils.log.LogUtils;
import java.util.List;
/**
* Created by tangxin on 2017/9/28.
*/
public class BusinessAdapter extends BaseRecyclerViewAdapter<GetProjectBusinessListBean.HomePageList> {
private ItemBusinessLayoutBinding mBinding;
private ItemClickListener<GetProjectBusinessListBean.HomePageList> itemClickListener;
public void setItemClickListener(ItemClickListener<GetProjectBusinessListBean.HomePageList> itemClickListener) {
this.itemClickListener = itemClickListener;
}
public BusinessAdapter(List<GetProjectBusinessListBean.HomePageList> list) {
super(list);
}
@Override
public RecyclerView.ViewHolder onCreateViewHolder(ViewGroup parent, int viewType) {
if (viewType == TYPE_FOOTER) {
return new FooterViewHolder(getView(parent, R.layout.item_load_more_footer));
} else {
mBinding = DataBindingUtil.inflate(LayoutInflater.from(parent.getContext()), R.layout.item_business_layout, parent, false);
return new BusinessAdapter.ViewHolder(mBinding);
}
}
@Override
public void onBindItemViewHolder(RecyclerView.ViewHolder holder, final int position) {
ViewHolder itemViewHolder = (ViewHolder) holder;
final GetProjectBusinessListBean.HomePageList homePageList = getList().get(position);
itemViewHolder.mBinding.setData(homePageList);
itemViewHolder.mBinding.getRoot().setOnClickListener(new View.OnClickListener() {
@Override
public void onClick(View v) {
if (itemClickListener != null) {
itemClickListener.clickItem(position, homePageList);
} else {
LogUtils.d("Business", "Business item onclick is null");
}
}
});
}
public class ViewHolder extends RecyclerView.ViewHolder {
ItemBusinessLayoutBinding mBinding;
ViewHolder(ItemBusinessLayoutBinding mBinding) {
super(mBinding.getRoot());
this.mBinding = mBinding;
}
}
}
<file_sep>package com.yixuninfo.m2.utils.permission
import android.app.Activity
import com.tbruyelle.rxpermissions.RxPermissions
/**
* Created by tangxin on 2017/8/30.
*/
class PermissionUtils(val a: Activity) {
    /** Prefer checking permissions one at a time: apart from initialization, request only the permissions needed at the point of use (to check several permissions at once, pass multiple names to requestEach — vararg String). */
fun checkPermission(permissionNames: String, permissionResult: PermissionResult) {
val rxPermissions = RxPermissions(a)
rxPermissions.requestEach(permissionNames).subscribe {
            if (it.granted) {// granted
                permissionResult.success(it.name)
            } else if (it.shouldShowRequestPermissionRationale) {// denied (without "don't ask again")
                permissionResult.refuse(it.name)
            } else {// denied with "don't ask again" checked
permissionResult.refuseNoReminders(it.name)
}
}
}
}<file_sep>package com.yixuninfo.m2.ui.login
import com.yixuninfo.m2.base.BaseData
import com.yixuninfo.m2.base.BaseView
import com.yixuninfo.m2.data.bean.LoginBean
import com.yixuninfo.m2.data.dp.ServerInfoDP
import com.yixuninfo.m2.data.http.ErrorInfo
import com.yixuninfo.m2.data.provider.DialogProgressDataProvider
import com.yixuninfo.m2.databinding.ActivityLoginBinding
import com.yixuninfo.m2.utils.CommonUtils
import com.yixuninfo.m2.utils.cache.CacheKey
import java.util.*
/**
* Created by tangxin on 2017/8/30.
*/
class LoginViewModel(var activityLoginBinding: ActivityLoginBinding, var loginView: LoginView) {
fun submit() {
if ("" == activityLoginBinding.data.loginName.get()) {
CommonUtils.showToast("用户名不能为空")
} else {
if ("" == activityLoginBinding.data.loginPwd.get()) {
CommonUtils.showToast("密码不能为空")
} else if (activityLoginBinding.data.loginPwd.get().length < 6) {
CommonUtils.showToast("密码至少6位")
} else if (activityLoginBinding.data.loginPwd.get().length > 20) {
CommonUtils.showToast("密码最多20位")
} else {
                // Local validation passed; proceed with the login request
val map = HashMap<String, Any>()
map.put("version", CommonUtils.getPackageManager(5).toString())
map.put("loginName", activityLoginBinding.data.loginName.get().trim({ it <= ' ' }))
map.put("loginPwd", activityLoginBinding.data.loginPwd.get())
map.put("terminalCode", CommonUtils.getPackageManager(2).toString())
val serverInfoDataProvider = ServerInfoDP()
serverInfoDataProvider.getLogin(map, object : DialogProgressDataProvider<BaseView, BaseData<LoginBean>>(loginView) {
override fun dataSuccess(result: BaseData<LoginBean>?) {
if ("1" == result?.resData?.result) {
CommonUtils.setData(CacheKey.isLogin, "1")
CommonUtils.setData(CacheKey.loginToken, result.resData.token)
CommonUtils.setData(CacheKey.loginUserId, result.resData.userId)
CommonUtils.setData(CacheKey.loginUserName, result.resData.userName)
CommonUtils.setData(CacheKey.merchantId, result.resData.merchantId)
CommonUtils.setData(CacheKey.isPhoneInformation, result.resData.isPhoneInformation)
CommonUtils.setData(CacheKey.isPayDeposit, result.resData.isPaydeposit)
CommonUtils.setData(CacheKey.isHomePage, result.resData.isHomePage)
CommonUtils.setData(CacheKey.isCustomer, result.resData.isCustomer)
CommonUtils.setData(CacheKey.isHouses, result.resData.isHouses)
CommonUtils.setData(CacheKey.isAnalysis, result.resData.isAnalysis)
CommonUtils.setData(CacheKey.isMine, result.resData.isMine)
CommonUtils.setData(CacheKey.isAddCustomer, result.resData.isAddCustomer)
CommonUtils.setData(CacheKey.isDistributionCustomer, result.resData.isDistributionCustomer)
CommonUtils.setData(CacheKey.isGradesMine, result.resData.isGradesMine)
CommonUtils.setData(CacheKey.isHeroListMine, result.resData.isHeroListMine)
CommonUtils.setData(CacheKey.isTeamRankingMine, result.resData.isTeamRankingMine)
CommonUtils.setData(CacheKey.projectLoginCode, activityLoginBinding.data.loginName.get())
CommonUtils.setData(CacheKey.tips, "")
try {
CommonUtils.setData(CacheKey.logoImg, result.resData.loginHandImgList[0].handLogoImg)
} catch (e: Exception) {
print(e)
}
loginView.nextStart()
} else {
loginView.showToast(result?.resData?.errorMsg ?: "接口错误,请联系开发")
}
}
override fun dataError(errorInfo: ErrorInfo?) {
loginView.showToast(errorInfo?.message ?: "接口错误,请联系开发")
}
});
}
}
}
}<file_sep>package com.yixuninfo.m2.test
import android.annotation.SuppressLint
import android.os.Bundle
import android.view.Gravity
import android.view.View
import com.yixuninfo.m2.R
import com.yixuninfo.m2.base.BaseActivity
import com.yixuninfo.m2.data.bean.ListBean
import com.yixuninfo.m2.ui.main.CustomerFragmentViewModel
import com.yixuninfo.m2.ui.view.DrawableTextView
import com.yixuninfo.m2.ui.view.menu.*
import com.yixuninfo.m2.utils.DimenUtils
class TestMainActivity : BaseActivity() {
private var menuScreenView: MenuView<ListBean>? = null
private var model = CustomerFragmentViewModel()
override fun onCreate(savedInstanceState: Bundle?) {
super.onCreate(savedInstanceState)
setContentView(R.layout.activity_test_main)
initToolbar(R.id.top_toolbar)
findViewById(R.id.menu).setOnClickListener { v ->
showScreenMenu(v)
}
}
private fun showScreenMenu(view: View) {
if (null == menuScreenView) {
menuScreenView = MenuView(this, view)
val menuStyle = MenuStyles()
menuStyle.isOpenSecondMenu = true
menuScreenView?.menuStyles = menuStyle
menuScreenView?.setTopItemView(object : ItemView<ListBean> {
@SuppressLint("RtlHardcoded")
override fun BindViewHolder(holder: MenuAdapter<BaseMenuDataBean>.MenuViewHolder, item: ListBean, position: Int, itemResetView: ItemResetView) {
drawableItemView(menuScreenView?.topItemViewId ?: MenuView.TOP_ITEM_ID, holder, item, position, itemResetView)
}
override fun itemTopClick(item: ListBean, position: Int) {
                    print("Top-level item selected: $position")
menuScreenView?.notifySecond(model.secondLists[position], position)
}
})
menuScreenView?.setSecondItemView(object : ItemView<ListBean> {
override fun BindViewHolder(holder: MenuAdapter<BaseMenuDataBean>.MenuViewHolder, item: ListBean, position: Int, itemResetView: ItemResetView) {
drawableItemView(menuScreenView?.topItemViewId ?: MenuView.TOP_ITEM_ID, holder, item, position, itemResetView)
}
override fun itemTopClick(item: ListBean?, position: Int) {
                    print("Second-level item selected: $position")
}
})
menuScreenView?.menuStyles?.selectIndex = 0
menuScreenView?.initView(model.listScreen, null)
}
menuScreenView?.showView(menuScreenView, menuScreenView)
}
fun drawableItemView(id: Int, holder: MenuAdapter<BaseMenuDataBean>.MenuViewHolder, item: ListBean, position: Int, itemResetView: ItemResetView) {
val itemView: View = holder.itemView
val textView: DrawableTextView = itemView.findViewById(id) as DrawableTextView
textView.gravity = Gravity.CENTER_VERTICAL
textView.setPadding(DimenUtils.dp2px(10), 0, 0, 0)
textView.text = item.name
itemResetView.reChange(position, textView)
}
}
<file_sep>package com.yixuninfo.m2.ui.view.menu;
import android.annotation.SuppressLint;
import android.graphics.Color;
import android.support.v7.widget.RecyclerView;
import android.util.Log;
import android.view.LayoutInflater;
import android.view.View;
import android.view.ViewGroup;
import com.yixuninfo.m2.ui.view.DrawableTextView;
import java.util.List;
/**
* Created by tangxin on 2017/9/30.
*/
public class MenuAdapter<T extends BaseMenuDataBean> extends RecyclerView.Adapter<RecyclerView.ViewHolder> {
private List<T> mData;
private int topView;
private ItemView itemView;
private MenuStyles menuStyles;
private boolean isSecond = false;
MenuAdapter(List<T> mData, int topView, ItemView itemView, MenuStyles menuStyles) {
this.mData = mData;
this.topView = topView;
this.itemView = itemView;
this.menuStyles = menuStyles;
isSecond = false;
}
MenuAdapter(List<T> mData, int topView, ItemView itemView, MenuStyles menuStyles, boolean isSecond) {
this.mData = mData;
this.topView = topView;
this.itemView = itemView;
this.menuStyles = menuStyles;
this.isSecond = isSecond;
}
public void notify(List<T> mData, boolean isSecond, int secondIndex) {
this.mData = mData;
this.isSecond = isSecond;
this.menuStyles.setSelectSecondIndex(secondIndex);
notifyDataSetChanged();
}
@Override
public MenuViewHolder onCreateViewHolder(ViewGroup parent, int viewType) {
View view = LayoutInflater.from(parent.getContext()).inflate(topView, parent, false);
return new MenuViewHolder(view);
}
@Override
public void onBindViewHolder(final RecyclerView.ViewHolder holder, @SuppressLint("RecyclerView") final int position) {
if (null != itemView) {
itemView.BindViewHolder((MenuViewHolder) holder, mData.get(position), position, new ItemResetView() {
@Override
public void reChange(int position, DrawableTextView view) {
if (isSecond) {
reChangeSecondView(view, menuStyles.getSelectSecondIndex() == position);
} else {
reChangeView(view, menuStyles.getSelectIndex() == position);
}
}
});
} else {
Log.d("onClick", "onClick: item View is null");
}
holder.itemView.setOnClickListener(new View.OnClickListener() {
@Override
public void onClick(View v) {
if (null != itemView) {
itemView.itemTopClick(mData.get(position), position);
if (isSecond) {
menuStyles.setSelectSecondIndex(position);
} else {
menuStyles.setSelectIndex(position);
}
notifyDataSetChanged();
} else {
Log.d("onClick", "onClick: item View is null");
}
}
});
if (isSecond) {
holder.itemView.setBackgroundColor(Color.parseColor(position == menuStyles.getSelectSecondIndex() ? menuStyles.getSecondSelectMenuBg() : menuStyles.getSecondMenuBg()));
} else {
holder.itemView.setBackgroundColor(Color.parseColor(position == menuStyles.getSelectIndex() ? menuStyles.getTopItemBg() : menuStyles.getTopMenuBg()));
}
}
private void reChangeView(DrawableTextView view, boolean isSelect) {
view.setTextColor(Color.parseColor(isSelect ? menuStyles.getTopSelectTextColor() : menuStyles.getTopDefTextColor()));
}
private void reChangeSecondView(DrawableTextView view, boolean isSelect) {
view.setTextColor(Color.parseColor(isSelect ? menuStyles.getSecondSelectTextColor() : menuStyles.getSecondDefTextColor()));
}
@Override
public int getItemCount() {
return mData.size();
}
public class MenuViewHolder extends RecyclerView.ViewHolder {
MenuViewHolder(View itemView) {
super(itemView);
}
}
}
<file_sep>package com.yixuninfo.m2.utils
import android.content.Context
import com.yixuninfo.m2.R
/**
* Created by tangxin on 2017/9/28.
*/
class BackUtils {
companion object {
private var sExitTime: Long = 0
fun doubleClickQuitApp(context: Context): Boolean {
return if (System.currentTimeMillis() - sExitTime > 2000) {
CommonUtils.showToast(context.getString(R.string.back_app_tips))
sExitTime = System.currentTimeMillis()
false
} else {
true
}
}
}
}<file_sep>package com.yixuninfo.m2.utils;
import android.Manifest;
import android.annotation.SuppressLint;
import android.content.Context;
import android.content.pm.PackageInfo;
import android.content.pm.PackageManager;
import android.support.v4.app.ActivityCompat;
import android.telephony.TelephonyManager;
import android.text.TextUtils;
import java.io.File;
import java.text.SimpleDateFormat;
import java.util.Date;
/**
* Created by tangxin on 2017/8/30.
*/
public class Version {
private PackageInfo getPackageInfo(Context context) {
PackageInfo packageInfo = new PackageInfo();
try {
packageInfo = context.getPackageManager().getPackageInfo(context.getPackageName(), 0);
} catch (PackageManager.NameNotFoundException e) {
e.printStackTrace();
}
return packageInfo;
}
@SuppressLint("HardwareIds")
private String getDeviceId(Context context) {
String deviceId = "";
TelephonyManager tm = (TelephonyManager) context.getSystemService(Context.TELEPHONY_SERVICE);
if (null != tm) {
            // Only read the device ID when READ_PHONE_STATE has actually been granted.
            if (ActivityCompat.checkSelfPermission(context, Manifest.permission.READ_PHONE_STATE) == PackageManager.PERMISSION_GRANTED) {
                deviceId = tm.getDeviceId();
            }
}
if (TextUtils.isEmpty(deviceId)) {
deviceId = Installation.UUID_id(context);
}
return deviceId;
}
private String getInstallTime(Context context) {
File file = new File(getPackageInfo(context).applicationInfo.sourceDir);
SimpleDateFormat formatter = new SimpleDateFormat("yyyyMMddHHmmss");
return formatter.format(new Date(file.lastModified()));
}
private String getAndroidVersionCode() {
return android.os.Build.VERSION.RELEASE;
}
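    // Builds the terminal code as deviceId|1|androidVersion|installTime.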
private String getTerminalCode(Context context) {
return String.format("%s|1|%s|%s", getDeviceId(context), getAndroidVersionCode(), getInstallTime(context));
}
}
<file_sep>package com.yixuninfo.m2.data.bean
import com.yixuninfo.m2.ui.view.MenuPopu.BaseMenuData
/**
 * **Title:** Home-page project list API
*/
class GetProjectListBean {
    var result: String = ""// result code
    var errorCode: String = ""// error code
    var errorMsg: String = ""// error message
var projectList: List<ProjectList> = ArrayList()
class ProjectList : BaseMenuData() {
        var projectCode: String = ""// project code
        var projectName: String = ""// project name
        var projectId: String = ""// project ID
fun setData() {
menuName = projectName
menuCode = projectCode
menuId = projectId
}
}
}
<file_sep>package com.yixuninfo.m2.ui.login
import com.yixuninfo.m2.base.BaseView
/**
* Created by tangxin on 2017/9/19.
*/
interface LoginView : BaseView {
fun nextStart()
}<file_sep>package com.yixuninfo.m2.ui.view.menu;
import com.yixuninfo.m2.ui.view.DrawableTextView;
/**
* Created by tangxin on 2017/10/10.
*/
public interface ItemResetView {
void reChange(int position, DrawableTextView view);
}
<file_sep>package com.yixuninfo.m2.data.bean
/**
* **Title:** 查询新版本接口
*/
class GetVersionBean {
    var result: String = ""// result code
    var errorCode: String = ""// error code
    var errorMsg: String = ""// error message
    var versionId: String = ""// version ID
    var versionNum: String = ""// new version number
    var versionName: String = ""// new version name
    var versionType: String = ""// version type
    var versionDes: String = ""// version description
    var versionStatus: String = ""// version status
    var versionUrlAddress: String = ""// version URL
}
<file_sep>package com.yixuninfo.m2.data.http;
import com.yixuninfo.m2.data.provider.DataProvider;
import java.io.IOException;
import okhttp3.ResponseBody;
import rx.Observer;
public class ScalarsSubscriber implements Observer<ResponseBody> {
protected DataProvider<String> mDataProvider;
public ScalarsSubscriber(DataProvider<String> dataProvider) {
mDataProvider = dataProvider;
}
@Override
public void onCompleted() {
}
@Override
public void onError(Throwable e) {
RetrofitFactory.processError(e, mDataProvider);
}
@Override
public void onNext(ResponseBody t) {
if (t != null) {
try {
mDataProvider.dataSuccess(t.string());
} catch (IOException e) {
e.printStackTrace();
onError(e);
}
} else {
onError(null);
}
}
}
<file_sep>package com.yixuninfo.m2.data.bean;
/**
* Created by tangxin on 2017/10/11.
*/
public class StatusBean {
    private String dictionaryId;// corresponding dictionary id
    private String dictionaryName;// corresponding dictionary name
public String getDictionaryId() {
return dictionaryId;
}
public void setDictionaryId(String dictionaryId) {
this.dictionaryId = dictionaryId;
}
public String getDictionaryName() {
return dictionaryName;
}
public void setDictionaryName(String dictionaryName) {
this.dictionaryName = dictionaryName;
}
}
<file_sep>package com.yixuninfo.m2.ui.login;
import android.content.Intent
import android.databinding.DataBindingUtil
import android.os.Bundle
import com.yixuninfo.m2.R
import com.yixuninfo.m2.base.BaseActivity
import com.yixuninfo.m2.data.bean.LoginSubmitBean
import com.yixuninfo.m2.databinding.ActivityLoginBinding
import com.yixuninfo.m2.ui.main.HomePageActivity
class LoginActivity : BaseActivity(), LoginView {
override fun onCreate(savedInstanceState: Bundle?) {
super.onCreate(savedInstanceState)
val activityLoginBinding: ActivityLoginBinding = DataBindingUtil.setContentView(this, R.layout.activity_login)
activityLoginBinding.data = LoginSubmitBean()
activityLoginBinding.model = LoginViewModel(activityLoginBinding, this)
}
override fun nextStart() {
startActivity(Intent(this@LoginActivity, HomePageActivity::class.java))
finish()
}
}
<file_sep>package com.yixuninfo.m2.utils.permission
/**
* Created by tangxin on 2017/8/30.
*/
interface PermissionResult {
    /** Permission granted. */
    fun success(permission: String)
    /** Permission denied (user has not checked "don't ask again"). */
    fun refuse(permission: String)
    /** Permission denied with "don't ask again" checked (must be re-enabled from the system settings screen). */
    fun refuseNoReminders(permission: String)
}<file_sep>package com.yixuninfo.m2.data.bean;
import java.util.ArrayList;
/**
* Created by tangxin on 2017/10/11.
*/
public class GetDictionaryListBean {
ArrayList<StatusBean> rqBusNumList;
public ArrayList<StatusBean> getRqBusNumList() {
return rqBusNumList;
}
public void setRqBusNumList(ArrayList<StatusBean> rqBusNumList) {
this.rqBusNumList = rqBusNumList;
}
}
<file_sep>package com.yixuninfo.m2.ui.view
import android.databinding.BindingAdapter
import android.view.View
import android.widget.ImageView
import android.widget.TextView
import com.yixuninfo.m2.R
/**
* Created by tangxin on 2017/9/28.
*/
@BindingAdapter("businessIcon")
fun setBusinessIcon(view: ImageView, status: String?) {
when (status) {
"逾期未签约" -> view.setImageResource(R.mipmap.hom_ico1)
"逾期未付款" -> view.setImageResource(R.mipmap.hom_ico2)
"待处理客户" -> view.setImageResource(R.mipmap.hom_ico3)
"公告通知", "公告" -> view.setImageResource(R.mipmap.hom_ico4)
"审核列表", "审核申请" -> view.setImageResource(R.mipmap.hom_ico5)
"提醒" -> view.setImageResource(R.mipmap.hom_ico6)
"下次跟进提醒" -> view.setImageResource(R.mipmap.hom_ico7_720)
"未完善信息客户" -> view.setImageResource(R.mipmap.infor)
else -> view.visibility = View.INVISIBLE
}
}
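/**
 * Renders the unread badge: hidden for null/empty values, the number itself for 0..99,
 * and "..." for anything larger.
 */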
@BindingAdapter("businessCount")
fun setBusinessCount(view: TextView, number: String?) {
when (number) {
null, "", "null" -> view.visibility = View.INVISIBLE
else -> {
when (number.toInt()) {
in 0..99 -> view.text = number
else -> view.text = "..."
}
}
}
}<file_sep>package com.yixuninfo.m2.data.http;
import com.yixuninfo.m2.utils.ConfigInfo;
public final class UriUtils {
public static String sRootUri;
public static final String sCommonUri = "/emm-openapi/appapi/";
static {
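        // Chosen at class-load time: the LAN test host when ConfigInfo.isPreview is set, otherwise the UAT host.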
if (ConfigInfo.isPreview) {
sRootUri = "http://192.168.16.187:8086";
} else {
sRootUri = "http://m2.openapi.uat.qiaofangyun.com";
}
}
}
<file_sep>package com.yixuninfo.m2.ui.view.MenuPopu;
import android.content.Context;
import android.graphics.drawable.Drawable;
import android.support.v7.widget.RecyclerView;
import android.view.LayoutInflater;
import android.view.View;
import android.view.ViewGroup;
import com.yixuninfo.m2.R;
import com.yixuninfo.m2.ui.view.DrawableTextView;
import com.yixuninfo.m2.utils.log.LogUtils;
import java.util.List;
/**
* Created by tangxin on 2017/4/25.
*/
public class MenuPopupAdapter extends RecyclerView.Adapter<MenuPopupAdapter.ViewHolder> {
private static final String TAG = "menu adapter";
private int index = 0;
private static final int BOUNDS_LEFT = 0;
private static final int BOUNDS_TOP = 0;
private Drawable drawableRight;
private Drawable drawableLeftUp;
private Drawable drawableLeftDown;
private List<BaseMenuData> mData;
private Context context;
private MyItemClickListener mItemClickListener;
private int icon;
MenuPopupAdapter(List<BaseMenuData> mData, Context context, int icon) {
this.mData = mData;
this.context = context;
this.icon = icon;
if (null == drawableRight) {
drawableRight = context.getResources().getDrawable(R.drawable.ic_right_done);
drawableRight.setBounds(BOUNDS_LEFT, BOUNDS_TOP, drawableRight.getMinimumWidth(), drawableRight.getMinimumHeight());
}
if (-1 != icon) {
if (null == drawableLeftUp || null == drawableLeftDown) {
drawableLeftUp = context.getResources().getDrawable(R.mipmap.ioc_1_cose);
drawableLeftDown = context.getResources().getDrawable(R.mipmap.iocn_project1);
drawableLeftUp.setBounds(BOUNDS_LEFT, BOUNDS_TOP, drawableRight.getMinimumWidth(), drawableRight.getMinimumHeight());
drawableLeftDown.setBounds(BOUNDS_LEFT, BOUNDS_TOP, drawableRight.getMinimumWidth(), drawableRight.getMinimumHeight());
}
}
}
public void setData(List<BaseMenuData> mData) {
this.mData = mData;
notifyDataSetChanged();
}
void setIndex(int index) {
        if (index >= mData.size()) {
            index = mData.size() - 1;
        }
this.index = index;
notifyDataSetChanged();
}
@Override
public ViewHolder onCreateViewHolder(ViewGroup parent, int viewType) {
View view = LayoutInflater.from(parent.getContext()).inflate(R.layout.item_menu, parent, false);
return new ViewHolder(view, mItemClickListener);
}
@Override
public void onBindViewHolder(ViewHolder holder, final int position) {
holder.tvType.setTextColor(index == position ? context.getResources().getColor(R.color.orange) : context.getResources().getColor(R.color.title));
holder.tvType.setCompoundDrawables(-1 != icon ? (index == position ? drawableLeftUp : drawableLeftDown) : null, null, index == position ? drawableRight : null, null);
holder.tvType.setText(String.format(" %s", mData.get(position).getMenuName()));
holder.tvType.setOnClickListener(new View.OnClickListener() {
@Override
public void onClick(View v) {
if (null != mItemClickListener) {
mItemClickListener.onItemClick(v, position, mData.get(position));
} else {
LogUtils.d(TAG, "......mItemClickListener is null......");
}
}
});
}
void setOnItemClickListener(MyItemClickListener listener) {
this.mItemClickListener = listener;
}
@Override
public int getItemCount() {
return null == mData ? 0 : mData.size();
}
public class ViewHolder extends RecyclerView.ViewHolder {
DrawableTextView tvType;
public ViewHolder(View itemView, MyItemClickListener listener) {
super(itemView);
tvType = (DrawableTextView) itemView.findViewById(R.id.tv_floor_height);
}
}
interface MyItemClickListener {
void onItemClick(View view, int position, BaseMenuData itemData);
}
}
<file_sep>package com.yixuninfo.m2.ui.main
import com.yixuninfo.m2.base.BaseView
/**
* Created by tangxin on 2017/9/29.
*/
interface CustomerFragmentView : BaseView {
}<file_sep>package com.yixuninfo.m2.utils
/**
* Created by tangxin on 2017/8/30.
*/
interface DialogCallback {
fun sure()
fun cancel()
}<file_sep>package com.yixuninfo.m2.ui.view.menu;
import java.util.List;
/**
* Created by tangxin on 2017/10/11.
*/
public class MenuSecondBean<T extends BaseMenuDataBean> {
private String key;
private String keyId;
private int index = 0;
private List<T> list;
public MenuSecondBean() {
}
public MenuSecondBean(String key, String keyId, List<T> list) {
this.key = key;
this.keyId = keyId;
this.list = list;
}
public int getIndex() {
return index;
}
public void setIndex(int index) {
this.index = index;
}
public String getKey() {
return key;
}
public void setKey(String key) {
this.key = key;
}
public String getKeyId() {
return keyId;
}
public void setKeyId(String keyId) {
this.keyId = keyId;
}
public List<T> getList() {
return list;
}
public void setList(List<T> list) {
this.list = list;
}
}
<file_sep>package com.yixuninfo.m2.data.bean
/**
* Created by tangxin on 2017/10/11.
*/
class CustomerBean {
var getDictionaryListBean: GetDictionaryListBean? = null
var getConsultantListBean: GetConsultantListBean? = null
var getCustomerListBean: GetCustomerListBean? = null
}
<file_sep>package com.yixuninfo.m2.ui.main
import android.os.Bundle
import android.view.View
import com.yixuninfo.m2.R
import com.yixuninfo.m2.base.BaseFragment
class HousesFragment : BaseFragment() {
override fun getLayoutId(): Int {
return R.layout.fragment_houses_layout
}
override fun initViews(mFragmentView: View?, savedInstanceState: Bundle?) {
}
}
<file_sep>package com.yixuninfo.m2.ui.main
import android.annotation.SuppressLint
import android.databinding.DataBindingUtil
import android.graphics.drawable.Drawable
import android.os.Bundle
import android.support.v4.app.Fragment
import android.support.v4.content.ContextCompat
import android.view.View
import android.widget.ImageView
import android.widget.RelativeLayout
import com.yixuninfo.m2.R
import com.yixuninfo.m2.base.BaseActivity
import com.yixuninfo.m2.data.bean.GetProjectBusinessListBean
import com.yixuninfo.m2.data.bean.GetProjectListBean
import com.yixuninfo.m2.databinding.ActivityHomePageBinding
import com.yixuninfo.m2.ui.view.DrawableTextView
import com.yixuninfo.m2.ui.view.MenuLayout
import com.yixuninfo.m2.ui.view.MenuPopu.BaseMenuData
import com.yixuninfo.m2.ui.view.MenuPopu.MenuPopupWindow
import com.yixuninfo.m2.utils.BackUtils
open class HomePageActivity : BaseActivity(), HomePageView {
private lateinit var tvTitle: DrawableTextView
private lateinit var defTabName: Array<String>
private lateinit var ivMore: ImageView
private lateinit var rootView: RelativeLayout
private val mFragmentArray = arrayOf<Fragment>(MainFragment(), CustomerFragment(), HousesFragment(), AnalysisFragment(), MineFragment())
private var menuWindows: MenuPopupWindow? = null
var menuIndex = 0
private var fragmentIndex = ""
override fun onCreate(savedInstanceState: Bundle?) {
super.onCreate(savedInstanceState)
val mBinding: ActivityHomePageBinding = DataBindingUtil.setContentView(this@HomePageActivity, R.layout.activity_home_page)
        defTabName = resources.getStringArray(R.array.home_tab)// load the default tab names
tvTitle = mBinding.topToolbar.findViewById(R.id.tv_title) as DrawableTextView
ivMore = mBinding.topToolbar.findViewById(R.id.iv_more) as ImageView
rootView = mBinding.topToolbar.findViewById(R.id.root_view) as RelativeLayout
mBinding.model = HomePageViewModel()
mBinding.model.homePageView = this
        mBinding.model.initMenuData(defTabName)// build the final tab data for the UI according to module permissions
        initMenu(mBinding)// initialize the bottom menu
mBinding.model.doMain()
viewBtn(mBinding)
}
private fun initMenu(mBinding: ActivityHomePageBinding) {
        mBinding.tbMenu.setDrawables(MenuLayout.MenuIconType.MENU_TOP_ICON)// set the tab icon position
                .setTabIdArray(mBinding.model.ids)// set the id of each tab
                .setImagesDown(mBinding.model.images.toIntArray())// set the default tab icons
                .setImagesUp(mBinding.model.imagesUp.toIntArray())// set the selected tab icons
                .setTabName(mBinding.model.tabNames.toTypedArray())// set the tab names
mBinding.tbMenu.setOnMenuClickListener { _, view ->
fragmentIndex = (view as DrawableTextView).text.toString()
            switchFragment(fragmentIndex)// handle the tab item click
            mBinding.tbMenu.updateMenu(view)// manual update (once manual mode is used, the default switching is disabled)
}
        val defView = mBinding.tbMenu.view.findViewById(mBinding.model.ids[0])// get the default tab view
        mBinding.tbMenu.update(defView, true, 0)// mark it as selected
        fragmentIndex = defTabName[0]
        switchFragment(fragmentIndex)// load the fragment for the default tab
}
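    // Shows the given fragment when isSwitch is true (adding it on first use) and hides it otherwise.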
private fun switchFragment(fragment: Fragment, isSwitch: Boolean) {
val frgTransaction = supportFragmentManager.beginTransaction()
if (isSwitch) {
if (!fragment.isAdded) {
frgTransaction.add(R.id.fra_empty_content, fragment)
}
frgTransaction.show(fragment).commitNowAllowingStateLoss()
} else {
frgTransaction.hide(fragment)
frgTransaction.commitNowAllowingStateLoss()
}
}
private fun showProjectWindows(mBinding: ActivityHomePageBinding, projectList: List<GetProjectListBean.ProjectList>) {
if (null == menuWindows) {
projectList.forEach {
it.setData()
}
menuWindows = MenuPopupWindow(1, this, projectList, menuIndex, tvTitle)
menuWindows?.setMenuItemClick(object : MenuPopupWindow.MenuItemClick {
override fun onItemClickListener(view: View?, position: Int, itemData: BaseMenuData) {
menuIndex = position
mBinding.model.selectProjectItem(itemData)
}
override fun onSecondaryItemClickListener(view: View?, position: Int, itemData: BaseMenuData) {
}
})
}
if (menuWindows?.popupMenu?.isShowing == true) {
menuWindows?.dismiss()
} else {
menuWindows?.showPopupMenu(menuIndex)
}
}
override fun reload() {
when (fragmentIndex) {
defTabName[0] -> (mFragmentArray[0] as MainFragment).reload()
defTabName[1] -> (mFragmentArray[1] as CustomerFragment).reload()
defTabName[2] -> (mFragmentArray[2] as HousesFragment).reload()
defTabName[3] -> (mFragmentArray[3] as AnalysisFragment).reload()
defTabName[4] -> (mFragmentArray[4] as MineFragment).reload()
}
}
private fun viewBtn(mBinding: ActivityHomePageBinding) {
tvTitle.setOnClickListener {
showProjectWindows(mBinding, mBinding.model.projectList)
}
ivMore.setOnClickListener {
when (fragmentIndex) {
defTabName[1] -> {
}
defTabName[2] -> {
}
defTabName[3] -> {
}
}
}
}
override fun initMainFragment(getProjectBusinessListBean: GetProjectBusinessListBean) {
(mFragmentArray[0] as MainFragment).initData(getProjectBusinessListBean)
}
@SuppressLint("NewApi")
    private fun switchFragment(tabNames: String) {// switch to the fragment that matches the given tab name
mFragmentArray.forEachIndexed { index, fragment ->
when (tabNames) {
defTabName[0] -> {
switchFragment(fragment, index == 0)
hideMore()
}
defTabName[1] -> {
switchFragment(fragment, index == 1)
showMore()
moreIcon(R.mipmap.ic_menu)
}
defTabName[2] -> {
switchFragment(fragment, index == 2)
showMore()
moreIcon(R.mipmap.ic_pic)
}
defTabName[3] -> {
switchFragment(fragment, index == 3)
showMore()
moreIcon(R.mipmap.ic_share)
}
defTabName[4] -> {
switchFragment(fragment, index == 4)
hideMore()
}
}
}
val iconLeft: Drawable
when (tabNames) {
defTabName[0] -> {
tvTitle.setTextColor(ContextCompat.getColor(this, R.color.album_White))
iconLeft = ContextCompat.getDrawable(this, R.drawable.ic_arrow_drop_down_white_24dp)
                iconLeft.setBounds(0, 0, iconLeft.minimumWidth, iconLeft.minimumHeight)
                tvTitle.setCompoundDrawables(null, null, iconLeft, null)
rootView.background = ContextCompat.getDrawable(this, R.drawable.shape_title_bar)
}
else -> {
tvTitle.setTextColor(ContextCompat.getColor(this, R.color.album_ColorPrimaryBlack))
iconLeft = ContextCompat.getDrawable(this, R.drawable.ic_arrow_drop_down_black_24dp)
                iconLeft.setBounds(0, 0, iconLeft.minimumWidth, iconLeft.minimumHeight)
                tvTitle.setCompoundDrawables(null, null, iconLeft, null)
rootView.background = ContextCompat.getDrawable(this, R.drawable.shape_title_bar_white)
}
}
}
override fun onBackPressed() {//back
if (BackUtils.doubleClickQuitApp(this)) {
super.onBackPressed()
}
}
override fun initTitle(content: String) {
tvTitle.text = content
}
private fun showMore() {
ivMore.visibility = View.VISIBLE
}
private fun hideMore() {
ivMore.visibility = View.GONE
}
private fun moreIcon(id: Int) {
ivMore.setImageResource(id)
}
}
<file_sep>package com.yixuninfo.m2.ui.dialog;
import android.app.Dialog;
import android.os.Bundle;
import android.support.annotation.NonNull;
import android.support.v4.app.FragmentManager;
import android.support.v7.app.AlertDialog;
import android.view.View;
import android.view.WindowManager;
import android.widget.Button;
public class CustomDialogFragment extends BaseDialogFragment {
private View customView;
private Button btnPositive;
private boolean isPositiveEnabled = false;
private boolean isShowCancelBtn = true;
public boolean isShowCancelBtn() {
return isShowCancelBtn;
}
public void setShowCancelBtn(boolean showCancelBtn) {
isShowCancelBtn = showCancelBtn;
}
public void setPositiveEnabled(boolean positiveEnabled) {
isPositiveEnabled = positiveEnabled;
}
public Button getBtnPositive() {
return btnPositive;
}
public void setBtnPositive(Button btnPositive) {
this.btnPositive = btnPositive;
}
public View getCustomView() {
return customView;
}
public void setCustomView(View customView) {
this.customView = customView;
}
@Override
public void show(FragmentManager fragmentManager) {
show(fragmentManager, "CustomDialogFragment");
}
@NonNull
@Override
public Dialog onCreateDialog(Bundle savedInstanceState) {
super.onCreateDialog(savedInstanceState);
builder.setView(customView);
setCancelable(false);
dialog = builder.create();
dialog.getWindow().clearFlags(WindowManager.LayoutParams.FLAG_NOT_FOCUSABLE | WindowManager.LayoutParams.FLAG_ALT_FOCUSABLE_IM);
dialog.getWindow().setSoftInputMode(WindowManager.LayoutParams.SOFT_INPUT_STATE_VISIBLE);
dialog.show();
btnPositive = dialog.getButton(AlertDialog.BUTTON_POSITIVE);
Button btnNegative = dialog.getButton(AlertDialog.BUTTON_NEGATIVE);
btnNegative.setVisibility(isShowCancelBtn ? View.VISIBLE : View.INVISIBLE);
btnPositive.setEnabled(isPositiveEnabled);
return dialog;
}
}
<file_sep>package com.yixuninfo.m2.data.provider;
import com.yixuninfo.m2.data.http.ErrorInfo;
public interface DataProvider<T> {
    // called before the request is made
void beforeRequest();
void dataSuccess(T result);
void dataError(ErrorInfo errorInfo);
void complete();
void dataEmpty(String errorMessage);
}
<file_sep>package com.yixuninfo.m2.utils.city;
import android.content.Context;
import android.content.res.AssetManager;
import com.alibaba.fastjson.JSONObject;
import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStreamReader;
import java.util.ArrayList;
/**
* Created by tangxin on 2017/8/29.
*/
public class CityDataBean {
private static ArrayList<addressInfo2> addressList2;
private static ArrayList<addressInfo> addressList;
/**
* 省市区集合
*/
public static ArrayList<CityProvinceCityDistrict.ProvinceCityDistrict> city_provinceCityDistricts;
public static void initJsonData(Context c) {
String str = getJson(c, "newgetProvinceCity.json");
JSONObject jsonObj = JSONObject.parseObject(str);
JSONObject getResData = JSONObject.parseObject(jsonObj.getString("resData"));
if (getResData == null) {
return;
}
CityProvinceCityDistrict mInfo = JSONObject.parseObject(getResData.toJSONString(),
CityProvinceCityDistrict.class);
if (mInfo.getResult().equals("1")) {
city_provinceCityDistricts = mInfo.getProvinceCityDistrict();
for (int i = 0; i < city_provinceCityDistricts.size(); i++) {
if (city_provinceCityDistricts.get(i).getCities().size() <= 0) {
city_provinceCityDistricts.remove(i);
i--;
}
}
for (int k = 0; k < city_provinceCityDistricts.size(); k++) {
if (null != city_provinceCityDistricts.get(k).getCities()
&& city_provinceCityDistricts.get(k).getCities().size() > 0) {
for (int j = 0; j < city_provinceCityDistricts.get(k).getCities().size(); j++) {
if (city_provinceCityDistricts.get(k).getCities().get(j).getDistricts().size() <= 0) {
city_provinceCityDistricts.get(k).getCities().remove(j);
for (int i = 0; i < city_provinceCityDistricts.size(); i++) {
if (city_provinceCityDistricts.get(i).getCities().size() <= 0) {
city_provinceCityDistricts.remove(i);
i--;
k--;
}
}
j--;
}
}
}
}
for (int i = 0; i < city_provinceCityDistricts.size(); i++) {
if ("上海市".equals(city_provinceCityDistricts.get(i).getProvince())
|| "上海".equals(city_provinceCityDistricts.get(i).getProvince())) {
ArrayList<CityProvinceCityDistrict.ProvinceCityDistrict> city = new ArrayList<CityProvinceCityDistrict.ProvinceCityDistrict>();
city.add(city_provinceCityDistricts.get(i));
city_provinceCityDistricts.addAll(0, city);
city_provinceCityDistricts.remove(i + 1);
break;
}
}
}
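        // NOTE: addressList and addressList2 are never assigned anywhere in this class,
        // so the matching loops below throw a NullPointerException unless they are populated elsewhere.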
for (int i = 0; i < city_provinceCityDistricts.size(); i++) {
for (int o = 0; o < city_provinceCityDistricts.get(i).getCities().size(); o++) {
for (int u = 0; u < addressList2.size(); u++) {
if (addressList2.get(u).getCityId()
.equals(city_provinceCityDistricts.get(i).getCities().get(o).getCityId())) {
city_provinceCityDistricts.get(i).getCities().get(o).setCity(addressList2.get(u).getCity());
}
}
for (int p = 0; p < city_provinceCityDistricts.get(i).getCities().get(o).getDistricts().size(); p++) {
for (int y = 0; y < addressList.size(); y++) {
if (addressList
.get(y)
.getDistrictId()
.equals(city_provinceCityDistricts.get(i).getCities().get(o).getDistricts().get(p)
.getDistrictId())) {
city_provinceCityDistricts.get(i).getCities().get(o).getDistricts().get(p)
.setDistrict(addressList.get(y).getDistrict());
}
}
}
}
}
}
private addressInfo setItemInfo(String name, String code) {
addressInfo info = new addressInfo();
info.setDistrict(name);
info.setDistrictId(code);
return info;
}
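    // Reads the named file from the app's assets into a single trimmed string; returns "" on I/O failure.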
private static String getJson(Context mContext, String fileName) {
StringBuilder sb = new StringBuilder();
AssetManager am = mContext.getAssets();
try {
BufferedReader br = new BufferedReader(new InputStreamReader(
am.open(fileName)));
String next = "";
while (null != (next = br.readLine())) {
sb.append(next);
}
} catch (IOException e) {
e.printStackTrace();
sb.delete(0, sb.length());
}
return sb.toString().trim();
}
public class addressInfo2 {
private String cityId;
private String city;
private String getCityId() {
return cityId;
}
private void setCityId(String cityId) {
this.cityId = cityId;
}
private String getCity() {
return city;
}
private void setCity(String city) {
this.city = city;
}
}
public class addressInfo {
private String district;
private String districtId;
private String getDistrict() {
return district;
}
private void setDistrict(String district) {
this.district = district;
}
private String getDistrictId() {
return districtId;
}
private void setDistrictId(String districtId) {
this.districtId = districtId;
}
}
}
<file_sep>package com.yixuninfo.m2.data.bean;
import com.yixuninfo.m2.ui.view.menu.BaseMenuDataBean;
/**
* Created by tangxin on 2017/9/30.
*/
public class ListBean extends BaseMenuDataBean {
private String name;
private String id;
public ListBean(String name, String id) {
this.name = name;
this.id = id;
}
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
public String getId() {
return id;
}
public void setId(String id) {
this.id = id;
}
}
<file_sep>package com.yixuninfo.m2.ui.view.recyclerview
/**
* Created by tangxin on 2017/9/28.
*/
interface ItemClickListener<in T> {
fun clickItem(position: Int, data: T)
}<file_sep>package com.yixuninfo.m2.utils.cache
/**
* Created by tangxin on 2017/9/19.
*/
class CacheKey {
companion object {
        val isLogin = "IS_LOGIN" // login flag: 1 = logged in, 0 = not logged in
        val loginToken = "<PASSWORD>TOKEN" // logged-in user's token
        val loginUserId = "LOGIN_USER_ID"// logged-in user id
        val loginUserName = "LOGIN_USER_NAME"// user name
        val projectLoginCode = "PROJECT_LOGIN_CODE"// login account
        val merchantId = "MERCHANT_ID"// merchant id
        val isPhoneInformation = "IS_PHONE_INFORMATION"// whether the phone platform is enabled; 0 = not enabled
        val isPayDeposit = "IS_PAY_DEPOSIT"// whether deposit subscription (认筹) is enabled
        val logoImg = "LOGO_IMG"// URL of the home page title logo
        val isHomePage = "IS_HOME_PAGE"// menu permission: home page, String(1), 0 = no, 1 = yes
        val isCustomer = "IS_CUSTOMER"// menu permission: customers, String(1), 0 = no, 1 = yes
        val isHouses = "IS_HOUSES"// menu permission: houses, String(1), 0 = no, 1 = yes
        val isAnalysis = "IS_ANALYSIS"// menu permission: analysis, String(1), 0 = no, 1 = yes
        val isMine = "IS_MINE"// menu permission: mine, String(1), 0 = no, 1 = yes
        val isAddCustomer = "IS_ADD_CUSTOMER"// button permission under the customer menu: add customer, String(1), 0 = no, 1 = yes
        val isGradesMine = "IS_GRADES_MINE"// button permission under the mine menu: my results, String(1), 0 = no, 1 = yes
        val isHeroListMine = "IS_HERO_LIST_MINE"// button permission under the mine menu: personal leaderboard, String(1), 0 = no, 1 = yes
        val isTeamRankingMine = "IS_TEAM_RANKING_MINE"// button permission under the mine menu: team ranking, String(1), 0 = no, 1 = yes
        val tips = "TIPS"// whether a prompt is needed (绿城/Greentown validation prompt): true = needed, false = not needed
        val isDistributionCustomer = "IS_DISTRIBUTION_CUSTOMER"// button permission under the customer menu: customer assignment, String(1), 0 = no, 1 = yes
        var KEY_INDEX = "index"
        val projectCode = "ProjectCode"
        val projectId = "projectId"
        val projectName = "projectName"
}
}<file_sep>package com.yixuninfo.m2.ui.main
import com.yixuninfo.m2.base.BaseView
import com.yixuninfo.m2.data.bean.GetProjectBusinessListBean
/**
* Created by tangxin on 2017/9/27.
*/
interface HomePageView : BaseView {
fun initTitle(content: String)
fun reload()
fun initMainFragment(getProjectBusinessListBean: GetProjectBusinessListBean)
}<file_sep>package com.yixuninfo.m2.ui.view.menu;
/**
* Created by tangxin on 2017/9/30.
*/
public class BaseMenuDataBean {
}
<file_sep>package com.yixuninfo.m2.base;
import com.alibaba.fastjson.JSONObject;
import java.util.HashMap;
import java.util.Map;
/**
* Created by tangxin on 2017/9/25.
*/
public class BaseDP {
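    // Wraps the given map under a top-level "params" key and serializes it with fastjson,
    // e.g. a map like {"userId":"42"} (hypothetical values) becomes {"params":{"userId":"42"}}.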
protected String getParams(Map<String, Object> map) {
Map<String, Object> paramsMap = new HashMap<String, Object>();
paramsMap.put("params", map);
return new JSONObject(paramsMap).toString();
}
}
<file_sep>package com.yixuninfo.m2.ui.view.MenuPopu
/**
* Created by tangxin on 2017/9/28.
*/
open class BaseMenuData {
var menuName: String = ""
var menuId: String = ""
var menuCode: String = ""
}<file_sep>package com.yixuninfo.m2.data.bean
import android.databinding.BaseObservable
import android.databinding.ObservableField
/**
* Created by tangxin on 2017/8/30.
*/
class LoginSubmitBean : BaseObservable() {
var loginName: ObservableField<String> = ObservableField("")
var loginPwd: ObservableField<String> = ObservableField("")
}<file_sep>package com.yixuninfo.m2.ui.main
import android.databinding.DataBindingUtil
import android.os.Bundle
import android.support.v7.widget.LinearLayoutManager
import android.view.LayoutInflater
import android.view.View
import android.view.ViewGroup
import com.yixuninfo.m2.R
import com.yixuninfo.m2.base.BaseFragment
import com.yixuninfo.m2.data.bean.GetProjectBusinessListBean
import com.yixuninfo.m2.databinding.FragmentMainLayoutBinding
import com.yixuninfo.m2.ui.main.adapter.BusinessAdapter
import com.yixuninfo.m2.ui.view.recyclerview.ItemClickListener
/**
* Created by tangxin on 2017/9/26.
*/
class MainFragment : BaseFragment(), MainFragmentView {
private lateinit var mBinding: FragmentMainLayoutBinding
private var adapter: BusinessAdapter? = null
override fun getLayoutId(): Int {
return R.layout.fragment_main_layout
}
override fun onCreateView(inflater: LayoutInflater?, container: ViewGroup?, savedInstanceState: Bundle?): View? {
mBinding = DataBindingUtil.inflate<FragmentMainLayoutBinding>(inflater, layoutId, container, false)
initViews(mBinding.root, savedInstanceState)
return mBinding.root
}
override fun initViews(mFragmentView: View?, savedInstanceState: Bundle?) {
mBinding.model = MainFragmentViewModel(this)
initAdapter()
}
private fun initAdapter() {
val layoutManager = LinearLayoutManager(activity)
mBinding.rvBusiness.layoutManager = layoutManager
adapter = BusinessAdapter(ArrayList<GetProjectBusinessListBean.HomePageList>())
mBinding.rvBusiness.adapter = adapter
adapter?.setItemClickListener(object : ItemClickListener<GetProjectBusinessListBean.HomePageList> {
override fun clickItem(position: Int, data: GetProjectBusinessListBean.HomePageList) {
}
})
}
override fun resetData(getProjectBusinessListBean: GetProjectBusinessListBean) {
initData(getProjectBusinessListBean)
}
fun initData(getProjectBusinessListBean: GetProjectBusinessListBean) {
adapter?.resetAll(getProjectBusinessListBean.homePageList)
}
override fun reload() {
mBinding.model.getProjectBusinessList()
}
}
<file_sep>package com.yixuninfo.m2.data.bean
/**
 * **Title:** Home page business-count API
 *
 * **Description:**
 * @author tangxin
 * @date 2016-12-09 10:32:18
*/
class GetProjectBusinessListBean {
    var result: String = ""// result code
    var errorCode: String = ""// error code
    var errorMsg: String = ""// error message
    var customerNum: String = ""// number of pending customers
    var signedNum: String = ""// number overdue for signing
    var paymentNum: String = ""// number overdue for payment (this statistic is deferred to phase 2)
    var auditNum: String = ""// number of approval requests
    var noticeNum: String = ""// number of announcements
    var remindNum: String = ""// number of reminders
    var homePageList: List<HomePageList>? = null
    class HomePageList {
        var homePageName: String = ""// home page business name, String(50)
        var homePageNoReadNum: String = ""// unread count for this business item, String(19)
}
}
<file_sep>package com.yixuninfo.m2.ui
import android.Manifest.permission.READ_PHONE_STATE
import android.content.Intent
import android.os.Bundle
import android.support.v7.app.AppCompatActivity
import android.view.View
import android.view.WindowManager
import android.view.animation.AlphaAnimation
import android.view.animation.Animation
import android.widget.TextView
import com.yixuninfo.m2.R
import com.yixuninfo.m2.ui.login.LoginActivity
import com.yixuninfo.m2.ui.main.HomePageActivity
import com.yixuninfo.m2.utils.CommonUtils
import com.yixuninfo.m2.utils.DialogCallback
import com.yixuninfo.m2.utils.DialogUtils
import com.yixuninfo.m2.utils.cache.CacheKey
import com.yixuninfo.m2.utils.permission.PermissionResult
import com.yixuninfo.m2.utils.permission.PermissionUtils
class SplashActivity : AppCompatActivity() {
internal lateinit var startView: View
var tvVersionName: TextView? = null
val alphaAnimationDuration: Long = 2000
val fromAlpha: Float = 0.5f
val toAlpha: Float = 1.0f
override fun onCreate(savedInstanceState: Bundle?) {
super.onCreate(savedInstanceState)
window.setFlags(WindowManager.LayoutParams.FLAG_FULLSCREEN, WindowManager.LayoutParams.FLAG_FULLSCREEN)
startView = View.inflate(this, R.layout.activity_splash, null)
setContentView(startView)
tvVersionName = findViewById(R.id.tv_version_name) as TextView
checkPermission()
}
fun animation() {
tvVersionName?.text = resources.getString(R.string.app_version_name)
val alphaAnimation = AlphaAnimation(fromAlpha, toAlpha)
alphaAnimation.duration = alphaAnimationDuration
startView.animation = alphaAnimation
alphaAnimation.setAnimationListener(object : Animation.AnimationListener {
override fun onAnimationStart(animation: Animation) {}
override fun onAnimationRepeat(animation: Animation) {}
override fun onAnimationEnd(animation: Animation) {
when (CommonUtils.getData(CacheKey.isLogin)) {
"1" -> {
startActivity(Intent(this@SplashActivity, HomePageActivity::class.java))
}
else -> {
startActivity(Intent(this@SplashActivity, LoginActivity::class.java))
}
}
finish()
}
})
}
private fun checkPermission() {
val permissionUtils = PermissionUtils(this@SplashActivity)
permissionUtils.checkPermission(READ_PHONE_STATE, object : PermissionResult {
override fun success(permission: String) {
animation();
}
override fun refuse(permission: String) {
showTips()
}
override fun refuseNoReminders(permission: String) {
showTips()
}
})
}
fun showTips() {
DialogUtils(this@SplashActivity).promptNoCancelDialog("权限提示",
"您已拒绝读取手机标识权限,将导致部分功能异常,请到 设置->权限管理->M2案场管理 打开权限后重试",
object : DialogCallback {
override fun cancel() {
}
override fun sure() {
finish()
}
})
}
}
<file_sep>package com.yixuninfo.m2.ui.dialog;
import android.app.Dialog;
import android.content.DialogInterface;
import android.os.Bundle;
import android.support.annotation.NonNull;
import android.support.v4.app.DialogFragment;
import android.support.v4.app.FragmentManager;
import android.support.v7.app.AlertDialog;
import android.text.TextUtils;
public abstract class BaseDialogFragment extends DialogFragment {
protected String mTitle = "Loading...";
protected String mMessage;
protected String mConfirmText;
protected String mCancelText;
protected DialogInterface.OnClickListener positiveCallback;
protected DialogInterface.OnClickListener negativeCallback;
protected AlertDialog dialog;
protected AlertDialog.Builder builder;
    private boolean isShowCancel = true; // show the cancel button by default
public String getTitle() {
return mTitle;
}
public BaseDialogFragment setTitle(String mTitle) {
this.mTitle = mTitle;
return this;
}
public String getMessage() {
return mMessage;
}
public BaseDialogFragment setMessage(String mMessage) {
this.mMessage = mMessage;
return this;
}
public String getConfirmText() {
return mConfirmText;
}
public BaseDialogFragment setConfirmText(String mConfirmText) {
this.mConfirmText = mConfirmText;
return this;
}
public String getCancelText() {
return mCancelText;
}
public BaseDialogFragment setCancelText(String mCancelText) {
this.mCancelText = mCancelText;
return this;
}
public DialogInterface.OnClickListener getPositiveCallback() {
return positiveCallback;
}
public BaseDialogFragment setPositiveCallback(DialogInterface.OnClickListener positiveCallback) {
this.positiveCallback = positiveCallback;
return this;
}
public DialogInterface.OnClickListener getNegativeCallback() {
return negativeCallback;
}
public BaseDialogFragment setNegativeCallback(DialogInterface.OnClickListener negativeCallback) {
this.negativeCallback = negativeCallback;
return this;
}
@NonNull
@Override
public Dialog onCreateDialog(Bundle savedInstanceState) {
builder = new AlertDialog.Builder(getActivity());
if (!TextUtils.isEmpty(getTitle())) {
builder.setTitle(mTitle);
}
if (!TextUtils.isEmpty(getMessage())) {
builder.setMessage(mMessage);
}
mConfirmText = mConfirmText == null ? "确定" : mConfirmText;
mCancelText = mCancelText == null ? "取消" : mCancelText;
builder.setPositiveButton(mConfirmText, positiveCallback);
if (isShowCancel) {
builder.setNegativeButton(mCancelText, negativeCallback);
}
dialog = builder.create();
return dialog;
}
public abstract void show(FragmentManager fragmentManager);
public boolean isShowCancel() {
return isShowCancel;
}
public void setShowCancel(boolean showCancel) {
isShowCancel = showCancel;
}
}
<file_sep>package com.yixuninfo.m2.base;
/**
* Created by tangxin on 2017/9/11.
*/
public class BaseData<T> {
public T resData;
}
|
2a10645c83b9efc561c27c6a624359e3899414fc
|
[
"Java",
"Kotlin"
] | 54
|
Java
|
xintanggithub/pattern
|
f143c8c37de26cbf9eaf1c3eb58659ba479017e1
|
3b66fb0ec04a2f7ec615b604661b5965f8f8b78d
|
refs/heads/master
|
<file_sep>import React from "react";
import { InformationProvider } from "./InformationContext";
import Divider from "./divider";
import Nav from "./nav";
import Gallery from "./gallery";
import { BrowserRouter as Router, Switch, Route } from "react-router-dom";
import MovieDetail from "./movieDetail";
import RandomGallery from "./random-gallery";
function App() {
return (
<InformationProvider>
      <Router>
<Switch>
<div>
<Route path="/all" component={RandomGallery} />
<Route path="/" exact component={Divider} />
<Route path="/" exact component={Nav} />
<Route path="/gallery" component={Gallery} />
<Route path="/movie/:id" component={MovieDetail} />
</div>
</Switch>
      </Router>
</InformationProvider>
);
}
export default App;
<file_sep>import React, { useState, useEffect } from "react";
import "./app.css";
import Nav from "./nav";
const MovieDetail = ({ match }) => {
const error =
"http://www.macedonrangeshalls.com.au/wp-content/uploads/2017/10/image-not-found.png";
const APP_KEY = "<KEY>";
const [title, setTitle] = useState("");
const [date, setDate] = useState("");
const [poster, setPoster] = useState("");
const [info, setInfo] = useState("");
const [id, setId] = useState("");
const [imdb, setImdb] = useState("");
const imdbLink = `https://www.imdb.com/title/${imdb}/?ref_=nv_sr_1?ref_=nv_sr_1`;
useEffect(() => {
getMovie();
}, []);
const getMovie = async () => {
try {
const response = await fetch(
`https://api.themoviedb.org/3/search/movie?api_key=${APP_KEY}&language=en-US&query=${
match.params.id
}&page=1&include_adult=false`
);
const data = await response.json();
setTitle(data.results[0].title);
setPoster(data.results[0].poster_path);
setDate(data.results[0].release_date);
setInfo(data.results[0].overview);
setId(data.results[0].id);
console.log(data);
} catch (error) {
console.log("error:", error);
      setInfo("INFORMATION NOT FOUND");
}
};
  // Fetch the IMDb id only once getMovie has populated the TMDB id.
  useEffect(() => {
    if (id) {
      getID();
    }
  }, [id]);
const getID = async () => {
const response2 = await fetch(
`https://api.themoviedb.org/3/movie/${id}/external_ids?api_key=${APP_KEY}`
);
const data2 = await response2.json();
setImdb(data2.imdb_id);
};
return (
<>
<Nav />
<div className="details-background">
<div className="details-card">
<div className="details-left">
{poster ? (
            <img
              className="details-img"
              alt={title}
              src={`https://image.tmdb.org/t/p/w500${poster}`}
            />
          ) : (
            <img
              className="details-img"
              alt="poster not available"
              src="https://media.wired.com/photos/5a0201b14834c514857a7ed7/master/w_582,c_limit/1217-WI-APHIST-01.jpg"
            />
)}
</div>
<div className="details-right">
<h1>
{title}
<a href={imdbLink}>
<h3>IMDB</h3>
</a>
</h1>
<h4>Release Date: {date}</h4>
<div className="details-info">
<h5>{info}</h5>
</div>
</div>
</div>
</div>
</>
);
};
export default MovieDetail;
<file_sep>import React, { useContext, useState } from "react";
import { informationContext } from "./InformationContext";
import Card from "react-bootstrap/Card";
import Image from "react-bootstrap/Image";
import Recomendation from "./recomendation";
import Nav from "./nav";
import "./app.css";
const Gallery = () => {
const [Information, setInformation] = useContext(informationContext);
const [On, setOn] = useState(false);
const [MovieName, setMovieName] = useState("");
const [ID, setID] = useState("");
const [painter, setPainter] = useState("");
const handleClick = (movie, paint) => {
setOn(!On);
setMovieName(movie);
setPainter(paint);
};
return (
<>
<Nav />
<Card.Body className="all-position">
<Card.Header className="gallery-header">
<div>
<h1>Paintings:</h1>
</div>
</Card.Header>
<div className="gallery">
{Information.map(info => (
<Card border="light text-white" className="card-marg">
<div className="paint-hover-card">
<Image className="shadow" src={info.src} rounded />
<Card.ImgOverlay
onClick={() => handleClick(info.movie, info.painter)}
>
<div className="paint-card-flex">
{info.movie.map(mov => (
<h3 className="paint-card-item">{mov}</h3>
))}
</div>
</Card.ImgOverlay>
</div>
</Card>
))}
</div>
</Card.Body>
<Recomendation
handleClick={handleClick}
on={On}
movieName={MovieName}
ID={ID}
painter={painter}
/>
</>
);
};
export default Gallery;
<file_sep>export const information = [
{
src:
"https://upload.wikimedia.org/wikipedia/commons/thumb/4/45/Jacques-Louis_David_-_La_Mort_de_Marat.jpg/300px-Jacques-Louis_David_-_La_Mort_de_Marat.jpg",
movie: ["About Schmidt"],
painter: "<NAME> "
},
{
src:
"https://upload.wikimedia.org/wikipedia/commons/thumb/0/0b/Sandro_Botticelli_-_La_nascita_di_Venere_-_Google_Art_Project_-_edited.jpg/400px-Sandro_Botticelli_-_La_nascita_di_Venere_-_Google_Art_Project_-_edited.jpg",
movie: ["The Adventures of Baron Munchausen "],
painter: "<NAME>"
},
{
src: "https://www.tate.org.uk/art/images/work/T/T03/T03613_10.jpg",
movie: ["<NAME>"],
painter: "<NAME>"
},
{
src:
"https://upload.wikimedia.org/wikipedia/commons/6/61/Dido_Elizabeth_Belle.jpg",
movie: ["Belle"],
painter: "<NAME>"
},
{
src: "http://www.artchive.com/artchive/b/balthus/balthus_golden_days.jpg",
movie: ["Black Moon", "My Golden Days"],
painter: "<NAME>"
},
{
src: "https://upload.wikimedia.org/wikipedia/en/a/a2/Christinasworld.jpg",
movie: ["Days of Heaven"],
painter: "<NAME>"
},
{
src:
"https://upload.wikimedia.org/wikipedia/commons/thumb/b/b6/The_Blue_Boy.jpg/800px-The_Blue_Boy.jpg",
movie: ["Django Unchained"],
painter: "<NAME>"
},
{
src:
"https://upload.wikimedia.org/wikipedia/en/9/9e/The_Empire_of_Light_Guggenheim.jpg",
movie: ["The Exorcist"],
painter: "<NAME>"
},
{
src:
"https://upload.wikimedia.org/wikipedia/commons/thumb/d/d7/Meisje_met_de_parel.jpg/800px-Meisje_met_de_parel.jpg",
movie: ["Girl with a Pearl Earring"],
painter: "<NAME>"
},
{
src:
"https://upload.wikimedia.org/wikipedia/en/6/66/Ascending_and_Descending.jpg",
movie: ["Inception"],
painter: "<NAME>"
},
{
src:
"http://www.meiselgallery.com/LKMG/imagesDB/Kacere_Jutta-2_1973_oil-on-canvas.jpg",
movie: ["Lost in Translation"],
painter: "<NAME>"
},
{
src:
"https://upload.wikimedia.org/wikipedia/commons/thumb/4/48/The_Calling_of_Saint_Matthew-Caravaggo_%281599-1600%29.jpg/1024px-The_Calling_of_Saint_Matthew-Caravaggo_%281599-1600%29.jpg",
movie: ["Mean Streets"],
painter: "<NAME>"
},
{
src:
"https://upload.wikimedia.org/wikipedia/commons/thumb/9/94/John_Everett_Millais_-_Ophelia_-_Google_Art_Project.jpg/1920px-John_Everett_Millais_-_Ophelia_-_Google_Art_Project.jpg",
movie: ["Melancholy"],
painter: "<NAME>"
},
{
src:
"https://upload.wikimedia.org/wikipedia/commons/thumb/5/50/Pieter_Bruegel_the_Elder_-_The_Tower_of_Babel_%28Vienna%29_-_Google_Art_Project.jpg/1280px-Pieter_Bruegel_the_Elder_-_The_Tower_of_Babel_%28Vienna%29_-_Google_Art_Project.jpg",
movie: ["Metropolis"],
painter: "<NAME>"
},
{
src:
"https://upload.wikimedia.org/wikipedia/commons/thumb/8/82/Francisco_de_Goya%2C_Saturno_devorando_a_su_hijo_%281819-1823%29.jpg/800px-Francisco_de_Goya%2C_Saturno_devorando_a_su_hijo_%281819-1823%29.jpg",
movie: ["Pan's Labyrinth"],
painter: "<NAME>"
},
{
src:
"https://uploads6.wikiart.org/images/edward-hopper/house-by-the-railroad.jpg",
movie: ["Psycho"],
painter: "<NAME>"
},
{
src: "https://www.edwardhopper.net/images/paintings/newyork-movie.jpg",
movie: ["Road to Perdition"],
painter: "<NAME>"
},
{
src:
"https://upload.wikimedia.org/wikipedia/commons/thumb/c/c5/Edvard_Munch%2C_1893%2C_The_Scream%2C_oil%2C_tempera_and_pastel_on_cardboard%2C_91_x_73_cm%2C_National_Gallery_of_Norway.jpg/800px-Edvard_Munch%2C_1893%2C_The_Scream%2C_oil%2C_tempera_and_pastel_on_cardboard%2C_91_x_73_cm%2C_National_Gallery_of_Norway.jpg",
movie: ["Scream"],
painter: "<NAME>"
},
{
src:
"https://upload.wikimedia.org/wikipedia/commons/thumb/e/eb/Fragonard%2C_The_Swing.jpg/800px-Fragonard%2C_The_Swing.jpg",
movie: ["Tangled"],
painter: "<NAME>"
},
{
src:
"https://upload.wikimedia.org/wikipedia/commons/b/b1/Van-willem-vincent-gogh-die-kartoffelesser-03850.jpg",
movie: ["The Turin Horse"],
painter: "<NAME>"
},
{
src:
"https://cdn.onebauer.media/one/empire-legacy/uploaded/Benjamin-Robert-Haydon-Napoleon-At-St-Helena.jpg?quality=50&format=jpg",
movie: ["The Duellists"],
painter: "<NAME>"
},
{
src:
"https://imgc.artprintimages.com/img/print/self-portrait-1889_u-l-pg53fb0.jpg?h=550&w=550",
movie: ["Loving Vincent"],
painter: "<NAME>"
},
{
src:
"https://upload.wikimedia.org/wikipedia/commons/thumb/b/bc/The_Last_Supper_Leonardo_Da_Vinci_High_Resolution_size_32x16.jpg/1920px-The_Last_Supper_Leonardo_Da_Vinci_High_Resolution_size_32x16.jpg",
movie: ["Inherent Vice"],
painter: "<NAME>"
},
{
src:
"https://upload.wikimedia.org/wikipedia/commons/2/22/Flandrin%2C_Hippolyte_%281805-1864%29_-_Jeune_homme_nu_assis.._1855_-_Louvre.jpg",
movie: ["There Will Be Blood"],
painter: "<NAME>"
},
{
src: "https://live.staticflickr.com/6120/6211000063_2271d06b9d_b.jpg",
movie: ["Passion"],
painter: "<NAME>"
},
{
src:
"https://www.nrm.org/wp2016/wp-content/uploads/2011/09/Freedom-from-Fear_web.jpg",
movie: ["Empire of the Sun"],
painter: "<NAME>"
},
{
src:
"https://upload.wikimedia.org/wikipedia/commons/thumb/d/d8/Pieter_Bruegel_the_Elder_-_Hunters_in_the_Snow_%28Winter%29_-_Google_Art_Project.jpg/1200px-Pieter_Bruegel_the_Elder_-_Hunters_in_the_Snow_%28Winter%29_-_Google_Art_Project.jpg",
movie: ["The Mirror"],
painter: "<NAME>"
},
{
src:
"https://paintingandframe.com/uploadpic/jacques_louis_david/big/napoleon_crossing_the_alps.jpg",
movie: ["<NAME>"],
painter: "<NAME>"
},
{
src:
"https://uploads1.wikiart.org/images/odd-nerdrum/dawn-1990.jpg!Large.jpg",
movie: ["The Cell"],
painter: "Odd Nerdrum"
},
{
src:
"https://i2.wp.com/paintersonpaintings.com/wp-content/uploads/2018/12/269297.jpg?resize=741%2C587&ssl=1",
movie: ["The Imaginarium of Doctor Parnassus"],
painter: "<NAME>"
},
{
src: "https://arthive.com/res/media/img/oy800/work/b7d/537925.jpg",
movie: ["<NAME>"],
painter: "<NAME>"
},
{
src:
"https://upload.wikimedia.org/wikipedia/commons/thumb/4/4e/Vel%C3%A1zquez_-_de_Breda_o_Las_Lanzas_%28Museo_del_Prado%2C_1634-35%29.jpg/1200px-Vel%C3%A1zquez_-_de_Breda_o_Las_Lanzas_%28Museo_del_Prado%2C_1634-35%29.jpg",
movie: ["Alatriste"],
painter: "<NAME>"
},
{
src:
"https://cv.vic.gov.au/media/oldmedia/6794/65071_iimage01_1360px_300dpi.jpg",
movie: ["Picnic at Hanging Rock"],
painter: "<NAME>"
},
{
src:
"https://150ans150oeuvres.uqam.ca/wp-content/uploads/2017/08/new-1965-Colville.jpg",
movie: ["Moonrise Kingdom"],
painter: "<NAME>"
},
{
src:
"https://upload.wikimedia.org/wikipedia/commons/thumb/3/32/Caspar_David_Friedrich_-_Abtei_im_Eichwald_-_Google_Art_Project.jpg/1200px-Caspar_David_Friedrich_-_Abtei_im_Eichwald_-_Google_Art_Project.jpg",
movie: ["The Revenant"],
painter: "<NAME>"
},
{
src: "https://live.staticflickr.com/4508/37520544501_63216f8e2d_b.jpg",
movie: ["The Truman Show"],
painter: "<NAME>"
},
{
src: "https://sooluciona.com/wp-content/uploads/2018/10/los_elefantes.jpg",
movie: ["Mad Max: Fury Road"],
painter: "<NAME>"
},
{
src: "http://gatesofvienna.net/wp-content/uploads/2015/01/prisoners.jpg",
movie: ["A Clockwork Orange"],
painter: "<NAME>"
},
{
src:
"https://www.vincentvangogh.org/images/paintings/wheat-field-with-crows.jpg",
movie: ["Dreams"],
painter: "<NAME>"
},
{
src:
"https://www.aci-iac.ca/content/art-books/25/alex-colville-pacific.jpg",
movie: ["Heat"],
painter: "<NAME>"
}
];
<file_sep>import React, { useState, useEffect } from "react";
import "./app.css";
import ListGroup from "react-bootstrap/ListGroup";
import Button from "react-bootstrap/Button";
import { Link } from "react-router-dom";
const Recomendation = ({ on, movieName, handleClick, painter }) => {
const wikiLink = `https://en.wikipedia.org/wiki/${painter}`;
return (
<div>
{on && (
<div className="yo">
<ListGroup variant="flush" className="list">
<ListGroup.Item className="overlay">
<div className="flex-btn">
<Button
onClick={handleClick}
className="close-btn"
variant="dark"
>
X
</Button>
</div>
{movieName.map(movie => (
                <div className="item border">
<div>
<Link
to={`/movie/${movie}`}
style={{ textDecoration: "none", color: "black" }}
>
<h2 className="rec-mov">{movie}</h2>
<Button className="rec" variant="primary">
Movie Details
</Button>
</Link>
{painter && (
<a href={wikiLink}>
<h5 className="link">Artist: {painter}</h5>
</a>
)}
</div>
</div>
))}
</ListGroup.Item>
</ListGroup>
</div>
)}
</div>
);
};
export default Recomendation;
<file_sep>import React,{useState, createContext} from 'react';
import {information} from './information'
export const informationContext = createContext();
const INFO = [...information]
export const InformationProvider = (props) =>{
const [Information, setInformation] = useState(INFO);
return(
<informationContext.Provider value={[Information,setInformation]}>
{props.children}
</informationContext.Provider>
);
}
<file_sep>import React, { useContext, useState } from "react";
import { informationContext } from "./InformationContext";
import Container from "react-bootstrap/Container";
import Row from "react-bootstrap/Row";
import Col from "react-bootstrap/Col";
import Card from "react-bootstrap/Card";
import Image from "react-bootstrap/Image";
import Button from "react-bootstrap/Button";
import Recomendation from "./recomendation";
import { Link } from "react-router-dom";
import { information_all } from "./information-all";
import "./app.css";
const Divider = () => {
const [Information, setInformation] = useContext(informationContext);
const [On, setOn] = useState(false);
const [MovieName, setMovieName] = useState("");
const [painter, setPainter] = useState("");
const [slike, setSlike] = useState(information_all);
const handleClick = (movie, paint) => {
setOn(!On);
setMovieName(movie);
setPainter(paint);
};
return (
<>
<Container fluid>
<Row>
<Col className="left">
<Card className="text-center">
<Card.Header>
<Button className="border" variant="light" size="lg" block>
<Link to="/gallery" style={{ textDecoration: "none" }}>
<h3>Paintings</h3>
</Link>
</Button>
</Card.Header>
<Card.Body>
<div className="flex1">
{Information.slice(0, 9).map(info => (
<Card border="light text-white">
<div className="paint-hover-card">
<Image className="shadow" src={info.src} rounded />
<Card.ImgOverlay
onClick={() => handleClick(info.movie, info.painter)}
>
<div className="paint-card-flex">
{info.movie.map(mov => (
<h3 className="paint-card-item">{mov}</h3>
))}
</div>
</Card.ImgOverlay>
</div>
</Card>
))}
</div>
</Card.Body>
</Card>
</Col>
<Col className="right">
<Card className="text-center">
<Card.Header>
<Button className="border" variant="light" size="lg" block>
<Link to="/all" style={{ textDecoration: "none" }}>
<h3>All</h3>
</Link>
</Button>
</Card.Header>
<Card.Body>
<div className="flex2">
{slike.slice(0, 9).map(info => (
<Card border="light text-white">
<div className="hover-card">
<Image className="shadow" src={info.src} rounded />
<Card.ImgOverlay
onClick={() => handleClick(info.movie)}
>
<div className="card-flex">
{info.movie.slice(0, 3).map(mov => (
<h3 className="card-item">{mov}</h3>
))}
</div>
</Card.ImgOverlay>
</div>
</Card>
))}
</div>
</Card.Body>
</Card>
</Col>
</Row>
</Container>
<Recomendation
handleClick={handleClick}
on={On}
movieName={MovieName}
painter={painter}
/>
</>
);
};
export default Divider;
<file_sep>export const information_all = [
{
src: "https://i.redd.it/9wurbayrdbx11.png",
movie: [
"Blade runner",
"Blade runner 2049",
"The Fifth Element",
"Cloud Atlas",
"Spirited Away"
]
},
{
src: "https://i.redd.it/9m1jvpqnrhs01.jpg",
movie: [
"The Mist",
"Valhalla Rising",
"An American Werewolf in London",
"Skyfall"
]
},
{
src: "https://i.redd.it/d39kixr1es611.png",
movie: [
"Shutter Island",
"The Light Between Oceans",
"The Ring",
"The Exorcism of Emily Rose "
]
},
{
src: "https://i.imgur.com/flA5peu.jpg",
movie: ["Drive", "Nightcrawler", "Locke", "Taxi Driver"]
},
{
src: "https://i.redd.it/7na9joo7nj0z.jpg",
movie: ["Inception", "Vanilla Sky", "Gattaca"]
},
{
src: "https://i.redd.it/2r35p5spp3311.jpg",
movie: [
"The Sandlot",
"Stand by Me",
"Little Rascals",
"The War of the Buttons"
]
},
{
src: "https://i.redd.it/eouxce95d2011.jpg",
movie: ["Casablanca", "The Third Man", "Dick Tracy"]
},
{
src: "https://i.redd.it/dhuwb0w4go611.jpg",
movie: [
"Old Boy",
"Ghost In The Shell",
"Only God Forgives ",
"Blade Runner",
"Blade Runner 2049"
]
},
{
src: "https://i.redd.it/riddz50k5xaz.jpg",
movie: [
"Fear and Loathing in Las Vegas",
"<NAME>",
"Only God Forgives ",
"Duel"
]
},
{
src: "https://i.redd.it/645rpydnmpqy.jpg",
movie: [
"Minority Report",
"Jupiter Ascending",
"Tron",
"Duel",
"Cloud Atlas"
]
},
{
src:
"https://i.pinimg.com/originals/ac/19/f5/ac19f5389aa190fe504496362402c1e2.jpg",
movie: ["The Birdcage", "Miami Vice", "Scarface"]
}
];
<file_sep>import React, { useState } from "react";
import { information_all } from "./information-all";
import Card from "react-bootstrap/Card";
import Image from "react-bootstrap/Image";
import Recomendation from "./recomendation";
import Nav from "./nav";
import "./app.css";
const RandomGallery = () => {
const [On, setOn] = useState(false);
const [MovieName, setMovieName] = useState("");
const [ID, setID] = useState("");
const [slike, setSlike] = useState(information_all);
const handleClick = (movie, id) => {
setOn(!On);
setMovieName(movie);
setID(id);
};
return (
<>
<Nav />
<Card.Body className="all-position">
<Card.Header className="gallery-header">
<div>
<h1>All:</h1>
</div>
</Card.Header>
<div className="gallery">
{slike.map(info => (
<Card border="light text-white" className="card-marg">
<div className="hover-card">
<Image className="shadow" src={info.src} rounded />
<Card.ImgOverlay onClick={() => handleClick(info.movie)}>
<div className="card-flex">
{info.movie.slice(0, 3).map(mov => (
<h3 className="card-item">{mov}</h3>
))}
</div>
</Card.ImgOverlay>
</div>
</Card>
))}
</div>
</Card.Body>
<Recomendation
handleClick={handleClick}
on={On}
movieName={MovieName}
ID={ID}
/>
</>
);
};
export default RandomGallery;
|
3762223df007140cc475d66bed83b0a5cab988e4
|
[
"JavaScript"
] | 9
|
JavaScript
|
TheMisko/OneFrame
|
c7a63a976a35a7608a028d5c2e2fad9f962a8b57
|
82932fed1d600ad9e0e24af4e4753a5516554d24
|
refs/heads/master
|
<repo_name>millerjs/yatou<file_sep>/src/yatou.cc
#include <algorithm>
#include <arpa/inet.h>
#include <cstddef>
#include <fcntl.h>
#include <iostream>
#include <limits>
#include <linux/if.h>
#include <linux/if_tun.h>
#include <map>
#include <netdb.h>
#include <netinet/in.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <sys/types.h>
#include <unistd.h>
#include "yatou.h"
#define DEBUG true
#define DEFAULT_MAX_CONNECTIONS 1024
typedef uint8_t u8;
typedef uint16_t u16;
typedef uint32_t u32;
typedef uint64_t u64;
typedef unsigned long ip4_addr_t;
/*
* set_if_ip(): Sets the ip for the interface in *ifr and sets the
* subnet mask if provided
*/
void Yatou::set_if_ip(struct ifreq *ifr,
int sockfd,
const char* inet_address,
const char* subnet)
{
struct sockaddr_in inet_addr, subnet_mask;
/// prepare address
inet_addr.sin_family = AF_INET;
if (inet_pton(AF_INET, inet_address, &(inet_addr.sin_addr)) != 1) {
throw Yatou::Error("Invalid inet_address");
}
/// prepare subnet mask
subnet_mask.sin_family = AF_INET;
if (inet_pton(AF_INET, subnet, &(subnet_mask.sin_addr)) != 1) {
throw Yatou::Error("Invalid subnet mask");
}
/// put addr in ifr structure
memcpy(&(ifr->ifr_addr), &inet_addr, sizeof (struct sockaddr));
if (ioctl(sockfd, SIOCSIFADDR, ifr) < 0) {
throw Yatou::Error("Unable to set IP address");
}
/// put mask in ifr structure
if (subnet) {
memcpy(&(ifr->ifr_addr), &subnet_mask, sizeof (struct sockaddr));
if(ioctl(sockfd, SIOCSIFNETMASK, ifr) < 0) {
throw Yatou::Error("Unable to set subnet mask");
}
}
}
/*
* set_if_up(): Marks the interface "up"
*/
void Yatou::set_if_up(struct ifreq *ifr, int sockfd)
{
ifr->ifr_flags |= IFF_UP;
ifr->ifr_flags |= IFF_RUNNING;
if (ioctl(sockfd, SIOCSIFFLAGS, ifr) < 0) {
throw Yatou::Error("SIOCSIFFLAGS");
}
}
/*
* Yatou::create_tun() - Creates a new TUN device
*/
struct ifreq Yatou::create_tun(std::string device_name)
{
char tun_device_name[IFNAMSIZ];
const char *clonedev = "/dev/net/tun";
struct ifreq ifr;
// open the clone device
yatou_fd_ = open(clonedev, O_RDWR);
if (yatou_fd_ < 0) {
throw Error("Unable to open clone device");
}
// prepare ifr struct
memset(&ifr, 0, sizeof(ifr));
// set the flags to create a TUN device
ifr.ifr_flags = IFF_TUN;
// prepare the name for the TUN device
strcpy(tun_device_name, device_name.c_str());
strncpy(ifr.ifr_name, tun_device_name, IFNAMSIZ);
// create the TUN device
if (ioctl(yatou_fd_, TUNSETIFF, (void *) &ifr) < 0) {
throw Error(std::string("Unable to create TUN device"));
}
// remove persistent status
if(ioctl(yatou_fd_, TUNSETPERSIST, 0) < 0){
throw Error(std::string("Failed to disable TUN persist"));
}
return ifr;
}
/*
 * Yatou::setup() - Creates the TUN device, assigns it 10.0.0.1/255.255.255.0,
 * and brings the interface up. The clone file descriptor is stored in yatou_fd_.
*/
void Yatou::setup()
{
// Setup has already succeeded, don't retry
if (yatou_fd_) { return; }
struct ifreq ifr = create_tun("yatou");
int sock = socket(AF_INET, SOCK_DGRAM, 0);
if (sock < 0) {
throw Yatou::Error("Cannot open udp socket");
}
    set_if_ip(&ifr, sock, "10.0.0.1", "255.255.255.0");
    set_if_up(&ifr, sock);

    // The UDP socket was only needed for the configuration ioctls above.
    close(sock);
}
void print_hex(char* buffer, size_t len)
{
int width = 0;
for (size_t i = 0; i < len; i++, width++) {
printf("%0x ", (unsigned char)buffer[i]);
if (width >= 40) {
printf("\n");
width = 0;
}
}
printf("\n");
}
void printn(char* buffer, size_t len)
{
int width = 0;
for (size_t i = 0; i < len; i++, width++) {
printf("%c", (unsigned char)buffer[i]);
if (width >= 40) {
printf("\n");
width = 0;
}
}
printf("\n");
}
void Yatou::teardown()
{
    if (yatou_fd_) {
        close(yatou_fd_);
        yatou_fd_ = 0;  // reset so a second teardown() does not close a stale descriptor
    }
}
void startServer()
{
Yatou::setup();
int nread;
char buffer[2048];
int count = 0;
std::cout << "reading from fd" << std::endl;
while(1) {
nread = read(Yatou::yatou_fd_, buffer, sizeof(buffer));
if(nread < 0) {
perror("Reading from interface");
close(Yatou::yatou_fd_);
exit(1);
}
printf("%d: Read %d bytes from device\n", count, nread);
print_hex(buffer, nread);
if (count++ > 10) {
break;
}
}
Yatou::teardown();
}
int main()
{
try {
startServer();
} catch (Yatou::Error error) {
std::cout << error.toString() << std::endl;
Yatou::teardown();
}
Yatou::teardown();
}
<file_sep>/src/yatou.h
#ifndef _YATOU_H
#define _YATOU_H

#include <cstdint>
#include <limits>
#include <string>

namespace Yatou {
typedef uint16_t port_t;
typedef uint32_t socket_id_t;
const socket_id_t MAX_SOCKET_T = std::numeric_limits<socket_id_t>::max();
/* TUN file descriptor, this is the special file descriptor that
* the caller will use to talk
*/
int yatou_fd_ = 0;
class Error {
private:
std::string message_;
public:
Error(std::string message) : message_(message) { }
std::string toString() { return message_; }
};
struct ifreq create_tun(std::string device_name);
void set_if_ip(struct ifreq *ifr,
int sockfd,
const char* inet_address,
const char* subnet);
    void set_if_up(struct ifreq *ifr, int sockfd);
void setup();
void teardown();
}
#endif // _YATOU_H
<file_sep>/Makefile
C++ := g++
APP = yatou
ifndef os
UNAME_S := $(shell uname -s)
ifeq ($(UNAME_S),Linux)
os = LINUX
endif
ifeq ($(UNAME_S),Darwin)
os = UNIX
endif
endif
################################################################################
# Paths
################################################################################
# The name of the library
SO_NAME = yatou.so
# The full instal path of the library
SO_PATH = $(abspath $(dir mkfile_path))/$(SO_NAME)
# The name of the binary
BIN_NAME = yatou
# Include directories
INC_DIRS = include/
################################################################################
# Compiler flags
################################################################################
# Construct include flags from INC_DIRS and dependencies
INCLUDES = $(patsubst %,-I%,$(INC_DIRS))
# Optimization flags
OPTFLAGS = -finline-functions -O3
# Error flags
ERRFLAGS = -Wall
# File system flags
FSFLAGS = -D_LARGE_FILE_SOURCE=1
# Misc flags
MISCFLAGS = -g -fPIC -static
# Construct compiler flags
CCFLAGS = $(INCLUDES) $(OPTFLAGS) $(FSFLAGS) $(ERRFLAGS) $(MISCFLAGS) -D$(os)
ifeq ($(os), LINUX)
# LD linking flags
LDFLAGS = -lstdc++ -lpthread -lm $(patsubst %,-L%,$(INC_DIRS))
# Other linking flags
LINK_FLAGS = -shared -Wl,-soname,lyatou.so
endif
ifeq ($(os), UNIX)
# LD linking flags
LDFLAGS = -lstdc++ $(patsubst %,-L%,$(INC_DIRS))
# Other linking flags
LINK_FLAGS = -shared -Wl,-install_name,lyatou.so
endif
OBJECTS = src/yatou.o
ifndef arch
arch = IA32
endif
ifeq ($(arch), IA32)
CCFLAGS += -DIA32
endif
ifeq ($(arch), POWERPC)
CCFLAGS += -mcpu=powerpc
endif
ifeq ($(arch), IA64)
CCFLAGS += -DIA64
endif
ifeq ($(arch), SPARC)
CCFLAGS += -DSPARC
endif
ifeq ($(os), SUNOS)
LDFLAGS += -lrt -lsocket
endif
all: $(APP)
%.o: %.cc
$(C++) $(CCFLAGS) $(LDFLAGS) $< -o $@ -c
yatou.so: $(OBJECTS) $(DEP_OBJS)
$(C++) $(LINK_FLAGS) -o $(SO_PATH) $(DEP_OBJS) $(OBJECTS) $(LIBS)
yatou: $(OBJECTS) $(DEP_OBJS)
$(C++) -o $(BIN_NAME) $(DEP_OBJS) $(OBJECTS) $(LIBS) $(LDFLAGS)
clean:
rm -f $(OBJECTS) $(DEP_OBJS) $(SO_NAME) $(BIN_NAME)
<file_sep>/README.md
# Yatou
*Yet another TCP over UDP*
As the name suggests, this is a toy implementation of TCP to refamiliarize
myself with deeper aspects of the network stack.
As the name does not suggest, I'm attempting to do this using TUN/TAP
functionality rather than UDP, but I happen to like `yatou` over `yatat`
¯\_(ツ)_/¯. The real question here, however, is why you are even
reading this README.
### Running
Currently doesn't really do anything and has no tests.
```
make
sudo ./yatou
```
|
e0ab17170505b5b538dc622a45ab0f0b3d10fe75
|
[
"Markdown",
"Makefile",
"C++"
] | 4
|
C++
|
millerjs/yatou
|
648bb03965c82aefd0a084cb4add4aeb06023efe
|
402234b37a41ea25f3df1de0c886da82ab7a189e
|
refs/heads/master
|
<file_sep>using System;
using System.Collections.Generic;
using System.Linq;
using System.Web;
using System.Web.UI;
using System.Web.UI.WebControls;
using Twilio;
using Newtonsoft.Json;
using System.Net.Http;
using System.Net;
using Newtonsoft.Json.Linq;
namespace yqltext2
{
public partial class sms : System.Web.UI.Page
{
protected void Page_Load(object sender, EventArgs e)
//public void ProcessRequest(HttpContext context)
{
string stockticker = "Hey there";
stockticker = HttpContext.Current.Request.QueryString["Body"];
if (string.IsNullOrEmpty(stockticker) == true)
{
stockticker = "Pizza 91011";
}
string[] st = stockticker.Split(' ');
string zip = "temp";
int stringLength = st.Length;
if (stringLength > 1)
{
zip = st[1];
}
string item = st[0];
string localsearchurl = "https://query.yahooapis.com/v1/public/yql?q=select%20*%20from%20local.search%20where%20zip%3D%27" + Uri.EscapeDataString(zip) + "%27%20and%20query%3D%27" + Uri.EscapeDataString(item) + "%27&format=json&env=store%3A%2F%2Fdatatables.org%2Falltableswithkeys&callback=";
Uri urlCheck = new Uri(localsearchurl);
HttpWebRequest request = (HttpWebRequest)WebRequest.Create(urlCheck);
request.Timeout = 15000;
HttpWebResponse response;
bool result = true;
try
{
response = (HttpWebResponse)request.GetResponse();
}
catch (Exception)
{
result = false;
}
string results = "";
if (result == true)
{
using (WebClient wc = new WebClient())
{
results = wc.DownloadString(localsearchurl.ToString());
}
JObject dataObject = JObject.Parse(results);
object arraynull = (object)dataObject["query"]["results"];
string arrayTwoNull = Convert.ToString(arraynull);
string jsonthreearray = "";
if (string.IsNullOrWhiteSpace(arrayTwoNull))
{
jsonthreearray = "Not valid location/zip";
}
else
{
string jsonarray = (string)dataObject["query"]["results"]["Result"][0]["Title"];
string jsontwoarray = (string)dataObject["query"]["results"]["Result"][0]["Address"];
jsonthreearray = jsonarray + " " + jsontwoarray;
}
Response.ContentType = "text/xml";
var twiml = new Twilio.TwiML.TwilioResponse();
twiml.Message(jsonthreearray);
Response.Write("<?xml version=\"1.0\" encoding=\"UTF-8\"?>" + twiml.ToString());
}
else
{
string answer = "Not valid location/zip";
Response.ContentType = "text/xml";
var twiml = new Twilio.TwiML.TwilioResponse();
twiml.Message(answer);
Response.Write("<?xml version=\"1.0\" encoding=\"UTF-8\"?>" + twiml.ToString());
}
}
}
}
<file_sep>using System;
using System.Web;
using System.Web.UI;
using System.Web.UI.WebControls;
using Twilio;
namespace yqltext2
{
public partial class WebForm1 : System.Web.UI.Page
{
protected void Page_Load(object sender, EventArgs e)
{
}
protected void SendMessage_OnClick(object sender, EventArgs e)
{
string AccountSid = "<KEY>";
string AuthToken = "5852d80434a97cb6209d26c32e26be19";
var client = new TwilioRestClient(AccountSid, AuthToken);
client.SendSmsMessage("+13238157900", ToNumber.Text, Message.Text, "");
}
}
}
|
8bb63d264ec391cdcfbe93078557ab172f1b3ce3
|
[
"C#"
] | 2
|
C#
|
kenkaohy/LocalSearchCS
|
9824337ac4cfe87bad863de6f5a56a2b04901d5a
|
cbd9116349e256b55571e7da341484dcbff4d4f1
|