RDKit
RDKit is a cheminformatics and machine learning toolkit written in C++ and Python.
RDKit provides a wide range of functionality, including various chemical I/O formats such as SMILES/SMARTS, structure-data format (SDF), Thor data tree (TDT), SYBYL line notation (SLN), Corina mol2, and Protein Data Bank (PDB); substructure searching; canonical SMILES; chirality support; chemical transformations; chemical reactions; molecular serialization; similarity/diversity picking; 2D pharmacophores; hierarchical subgraph/fragment analysis; Bemis and Murcko scaffolds; the Retrosynthetic Combinatorial Analysis Procedure (RECAP); maximum common substructure across multiple molecules; feature maps; shape-based similarity; RMSD-based molecule-molecule alignment; shape-based alignment; unsupervised molecule-molecule alignment with the Open3DALIGN algorithm; 3D visualization via integration with PyMOL; functional group filtering; a library of molecular descriptors; similarity maps; machine learning; and more.
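As a small illustrative sketch of a few of the capabilities listed above (the molecules and the SMARTS pattern here are chosen only as examples, not taken from the walkthrough below):

# Parse SMILES, write canonical SMILES, run a substructure search,
# compare Morgan fingerprints, and compute a descriptor with RDKit.
from rdkit import Chem, DataStructs
from rdkit.Chem import AllChem, Descriptors

aspirin = Chem.MolFromSmiles('CC(=O)Oc1ccccc1C(=O)O')
caffeine = Chem.MolFromSmiles('Cn1cnc2c1c(=O)n(C)c(=O)n2C')

# Canonical SMILES
print(Chem.MolToSmiles(aspirin))

# Substructure search with a SMARTS pattern (carboxylic acid)
pattern = Chem.MolFromSmarts('C(=O)[OH]')
print(aspirin.HasSubstructMatch(pattern))   # True

# Morgan fingerprints and Tanimoto similarity
fp1 = AllChem.GetMorganFingerprintAsBitVect(aspirin, 2, nBits=2048)
fp2 = AllChem.GetMorganFingerprintAsBitVect(caffeine, 2, nBits=2048)
print(DataStructs.TanimotoSimilarity(fp1, fp2))

# A molecular descriptor
print(Descriptors.TPSA(aspirin))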
mol2vec is an unsupervised machine learning approach for learning vector representations of molecular structures.
pip install git+https://github.com/samoturk/mol2vec
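For intuition, here is a minimal sketch (not part of the walkthrough below) of the representation mol2vec operates on: each molecule is decomposed into a "sentence" of Morgan substructure identifiers, and word2vec is trained on those sentences. The aspirin SMILES is used here only as an example.

# Turn a molecule into a mol2vec "sentence" of radius-1 Morgan identifiers
from rdkit import Chem
from mol2vec.features import mol2alt_sentence

mol = Chem.MolFromSmiles('CC(=O)Oc1ccccc1C(=O)O')   # aspirin, example only
sentence = mol2alt_sentence(mol, 1)                 # list of substructure identifiers ("words")
print(len(sentence), sentence[:5])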
Below, the molecular descriptors computed with RDKit and the molecular vectors generated by mol2vec are used in turn as input features for a logistic regression binary classifier of target inhibitor activity, to compare which approach produces the better input features.
# Import dependencies
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from rdkit import Chem
from rdkit.Chem import Draw
from rdkit.Chem import Descriptors
import warnings
warnings.filterwarnings("ignore")
%matplotlib inline
# Load the data and take a look
data = pd.read_csv('HIV.csv')
data.head()
# Check the distribution of the target labels
sns.countplot(data=data, x='HIV_active', orient='v')
plt.xlabel('HIV active')
plt.ylabel('Count of values')
plt.show()
# Convert SMILES to RDKit Mol objects
data['mol'] = data['smiles'].apply(lambda x: Chem.MolFromSmiles(x))
# Compute molecular descriptors
data['tpsa'] = data['mol'].apply(lambda x: Descriptors.TPSA(x))
data['mol_w'] = data['mol'].apply(lambda x: Descriptors.ExactMolWt(x))
data['num_valence_electrons'] = data['mol'].apply(lambda x: Descriptors.NumValenceElectrons(x))
data['num_heteroatoms'] = data['mol'].apply(lambda x: Descriptors.NumHeteroatoms(x))
# Preprocess the data and split it into training and test sets
from sklearn.model_selection import train_test_split
y = data.HIV_active.values
X = data.drop(columns=['smiles', 'activity', 'HIV_active', 'mol'])
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.20, random_state=1)
# Define a ROC/AUC plotting function
from sklearn.metrics import auc, roc_curve

def evaluation_class(model, X_test, y_test):
    # Predicted probability of the positive class
    preds = model.predict_proba(X_test)[:, 1]
    fpr, tpr, threshold = roc_curve(y_test, preds)
    roc_auc = auc(fpr, tpr)

    plt.title('ROC Curve')
    plt.plot(fpr, tpr, 'g', label='AUC = %0.3f' % roc_auc)
    plt.legend(loc='lower right')
    plt.plot([0, 1], [0, 1], 'r--')
    plt.xlim([0, 1])
    plt.ylim([0, 1])
    plt.ylabel('True Positive Rate')
    plt.xlabel('False Positive Rate')
    plt.show()

    print('ROC AUC score:', round(roc_auc, 4))
# Build a logistic regression model and plot the results
from sklearn.linear_model import LogisticRegression
from sklearn.preprocessing import StandardScaler
# Fit the scaler on the training set only and apply it to both sets
scaler = StandardScaler().fit(X_train)
X_train = scaler.transform(X_train)
X_test = scaler.transform(X_test)
lr = LogisticRegression()
lr.fit(X_train, y_train)
evaluation_class(lr, X_test, y_test)
# Load a pre-trained word2vec model
from gensim.models import word2vec
model = word2vec.Word2Vec.load('model_300dim.pkl')
# Import mol2vec, compute the molecular vector features, train the model, and plot the results
from mol2vec.features import mol2alt_sentence, mol2sentence, MolSentence, DfVec, sentences2vec
from gensim.models import word2vec
# Constructing sentences
data['sentence'] = data.apply(lambda x: MolSentence(mol2alt_sentence(x['mol'], 1)), axis=1)
# Extracting embeddings to a numpy.array
# Note that we should always pass unseen='UNK' to sentences2vec() so the model knows how to handle unknown substructures
data['mol2vec'] = [DfVec(x) for x in sentences2vec(data['sentence'], model, unseen='UNK')]
X_mol = np.array([x.vec for x in data['mol2vec']])
X_mol = pd.DataFrame(X_mol)
# Concatenating matrices of features
new_data = pd.concat((X, X_mol), axis=1)
X_train, X_test, y_train, y_test = train_test_split(new_data, y, test_size=.20, random_state=1)
# Again, fit the scaler on the training set only and reuse it for the test set
scaler = StandardScaler().fit(X_train)
X_train = scaler.transform(X_train)
X_test = scaler.transform(X_test)
lr = LogisticRegression()
lr.fit(X_train, y_train)
evaluation_class(lr, X_test, y_test)
References
1. Jaeger, S., Fulle, S., & Turk, S. (2018). Mol2vec: Unsupervised machine learning approach with chemical intuition. Journal of Chemical Information and Modeling, 58(1), 27-35. http://dx.doi.org/10.1021/acs.jcim.7b00616
2. https://wiki.nci.nih.gov/display/NCIDTPdata/AIDS+Antiviral+Screen+Data
3. https://mol2vec.readthedocs.io/en/latest/