Compare outcomes from differential analysis based on different imputation methods#

  • load scores based on 10_1_ald_diff_analysis

Hide code cell source

import logging
from pathlib import Path

import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
from IPython.display import display

import pimmslearn
import pimmslearn.databases.diseases

# Notebook-wide logger writing to the notebook output.
logger = pimmslearn.logging.setup_nb_logger()

# Small default figure size and font for the many comparison plots below.
plt.rcParams['figure.figsize'] = (2, 2)
fontsize = 5
pimmslearn.plotting.make_large_descriptors(fontsize)
# fontTools logs verbosely when embedding fonts; silence everything below ERROR.
logging.getLogger('fontTools').setLevel(logging.ERROR)

# catch passed parameters
# Snapshot the global names defined so far; pimmslearn.nb.get_params later uses
# this to identify parameters assigned after this point (papermill-style).
args = None
args = dict(globals()).keys()

Parameters#

Default and set parameters for the notebook.

# Default parameters; the injected "Parameters" cell below overrides them.
folder_experiment = 'runs/appl_ald_data/plasma/proteinGroups'

target = 'kleiner'
model_key = 'VAE'
baseline = 'RSN'
out_folder = 'diff_analysis'
# Statistic columns retained for the model-vs-baseline comparison tables.
selected_statistics = ['p-unc', '-Log10 pvalue', 'qvalue', 'rejected']

disease_ontology = 5082  # code from https://disease-ontology.org/
# split diseases notebook? Query gene names for proteins in file from uniprot?
annotaitons_gene_col = 'PG.Genes'  # NOTE(review): name carries a typo; kept for compatibility with callers
# Parameters
# Injected parameter values (papermill) — override the defaults above.
disease_ontology = 10652
folder_experiment = "runs/alzheimer_study"
target = "AD"
baseline = "PI"
model_key = "Median"
out_folder = "diff_analysis"
annotaitons_gene_col = "None"

Add set parameters to configuration

Hide code cell source

# Collect the parameters set above (diffed against the earlier globals snapshot
# held in `args`) into a plain dict, then rebuild `args` as a Config object.
params = pimmslearn.nb.get_params(args, globals=globals())
args = pimmslearn.nb.Config()
args.folder_experiment = Path(params["folder_experiment"])
# Output root: <experiment>/<out_folder>/<target>/<baseline>_vs_<model_key>
args = pimmslearn.nb.add_default_paths(args,
                                 out_root=(
                                     args.folder_experiment
                                     / params["out_folder"]
                                     / params["target"]
                                     / f"{params['baseline']}_vs_{params['model_key']}"))
args.update_from_dict(params)
# Folder with pickled differential-analysis scores per model
# (produced by the preceding 10_1_ald_diff_analysis notebook).
args.scores_folder = scores_folder = (args.folder_experiment
                                      / params["out_folder"]
                                      / params["target"]
                                      / 'scores')
# CSV recording in how many samples each feature was actually observed.
args.freq_features_observed = args.folder_experiment / 'freq_features_observed.csv'
args
root - INFO     Removed from global namespace: folder_experiment
root - INFO     Removed from global namespace: target
root - INFO     Removed from global namespace: model_key
root - INFO     Removed from global namespace: baseline
root - INFO     Removed from global namespace: out_folder
root - INFO     Removed from global namespace: selected_statistics
root - INFO     Removed from global namespace: disease_ontology
root - INFO     Removed from global namespace: annotaitons_gene_col
root - INFO     Already set attribute: folder_experiment has value runs/alzheimer_study
root - INFO     Already set attribute: out_folder has value diff_analysis
{'annotaitons_gene_col': 'None',
 'baseline': 'PI',
 'data': PosixPath('runs/alzheimer_study/data'),
 'disease_ontology': 10652,
 'folder_experiment': PosixPath('runs/alzheimer_study'),
 'freq_features_observed': PosixPath('runs/alzheimer_study/freq_features_observed.csv'),
 'model_key': 'Median',
 'out_figures': PosixPath('runs/alzheimer_study/figures'),
 'out_folder': PosixPath('runs/alzheimer_study/diff_analysis/AD/PI_vs_Median'),
 'out_metrics': PosixPath('runs/alzheimer_study'),
 'out_models': PosixPath('runs/alzheimer_study'),
 'out_preds': PosixPath('runs/alzheimer_study/preds'),
 'scores_folder': PosixPath('runs/alzheimer_study/diff_analysis/AD/scores'),
 'selected_statistics': ['p-unc', '-Log10 pvalue', 'qvalue', 'rejected'],
 'target': 'AD'}

Excel file for exports#

# Registry of all files written by this notebook (name -> path), shown at the end.
files_out = dict()
# Keyword arguments shared by every DataFrame.to_excel call below.
writer_args = dict(float_format='%.3f')

fname = args.out_folder / 'diff_analysis_compare_methods.xlsx'
files_out[fname.name] = fname
writer = pd.ExcelWriter(fname)
logger.info("Writing to excel file: %s", fname)
root - INFO     Writing to excel file: runs/alzheimer_study/diff_analysis/AD/PI_vs_Median/diff_analysis_compare_methods.xlsx

Load scores#

Load baseline model scores#

Show all statistics, later use selected statistics

Hide code cell source

# Differential-analysis scores of the baseline method: a pickled DataFrame with
# a (model, statistic) column MultiIndex and (feature, Source) row MultiIndex.
fname = args.scores_folder / f'diff_analysis_scores_{args.baseline}.pkl'
scores_baseline = pd.read_pickle(fname)
scores_baseline
model PI
var SS DF F p-unc np2 -Log10 pvalue qvalue rejected
protein groups Source
A0A024QZX5;A0A087X1N8;P35237 AD 0.274 1 0.399 0.528 0.002 0.277 0.670 False
age 0.212 1 0.309 0.579 0.002 0.237 0.712 False
Kiel 2.676 1 3.897 0.050 0.020 1.303 0.121 False
Magdeburg 5.765 1 8.393 0.004 0.042 2.376 0.016 True
Sweden 8.898 1 12.955 0.000 0.064 3.391 0.002 True
... ... ... ... ... ... ... ... ... ...
S4R3U6 AD 0.074 1 0.070 0.791 0.000 0.102 0.872 False
age 0.829 1 0.785 0.377 0.004 0.424 0.538 False
Kiel 0.040 1 0.038 0.846 0.000 0.073 0.909 False
Magdeburg 3.442 1 3.263 0.072 0.017 1.140 0.163 False
Sweden 6.688 1 6.340 0.013 0.032 1.899 0.041 True

7105 rows × 8 columns

Load selected comparison model scores#

Hide code cell source

# Differential-analysis scores of the selected comparison model (same layout as
# the baseline scores loaded above).
fname = args.scores_folder / f'diff_analysis_scores_{args.model_key}.pkl'
scores_model = pd.read_pickle(fname)
scores_model
model Median
var SS DF F p-unc np2 -Log10 pvalue qvalue rejected
protein groups Source
A0A024QZX5;A0A087X1N8;P35237 AD 0.830 1 6.377 0.012 0.032 1.907 0.039 True
age 0.001 1 0.006 0.939 0.000 0.027 0.966 False
Kiel 0.106 1 0.815 0.368 0.004 0.435 0.532 False
Magdeburg 0.219 1 1.680 0.197 0.009 0.707 0.343 False
Sweden 1.101 1 8.461 0.004 0.042 2.392 0.016 True
... ... ... ... ... ... ... ... ... ...
S4R3U6 AD 0.051 1 0.119 0.730 0.001 0.136 0.829 False
age 1.214 1 2.845 0.093 0.015 1.030 0.194 False
Kiel 0.861 1 2.018 0.157 0.010 0.804 0.289 False
Magdeburg 0.216 1 0.506 0.478 0.003 0.321 0.631 False
Sweden 3.965 1 9.288 0.003 0.046 2.580 0.011 True

7105 rows × 8 columns

Combined scores#

Show only the selected statistics for the comparison.

Hide code cell source

# Outer join keeps features scored by either model; then restrict the columns
# to the two models of interest and the selected statistics only.
scores = scores_model.join(scores_baseline, how='outer')[[args.baseline, args.model_key]]
scores = scores.loc[:, pd.IndexSlice[scores.columns.levels[0].to_list(),
                                     args.selected_statistics]]
scores
model Median PI
var p-unc -Log10 pvalue qvalue rejected p-unc -Log10 pvalue qvalue rejected
protein groups Source
A0A024QZX5;A0A087X1N8;P35237 AD 0.012 1.907 0.039 True 0.528 0.277 0.670 False
Kiel 0.368 0.435 0.532 False 0.050 1.303 0.121 False
Magdeburg 0.197 0.707 0.343 False 0.004 2.376 0.016 True
Sweden 0.004 2.392 0.016 True 0.000 3.391 0.002 True
age 0.939 0.027 0.966 False 0.579 0.237 0.712 False
... ... ... ... ... ... ... ... ... ...
S4R3U6 AD 0.730 0.136 0.829 False 0.791 0.102 0.872 False
Kiel 0.157 0.804 0.289 False 0.846 0.073 0.909 False
Magdeburg 0.478 0.321 0.631 False 0.072 1.140 0.163 False
Sweden 0.003 2.580 0.011 True 0.013 1.899 0.041 True
age 0.093 1.030 0.194 False 0.377 0.424 0.538 False

7105 rows × 8 columns

Models in comparison (name mapping)

Hide code cell source

# Mapping of model display name -> column key (identity here); used to iterate
# over the compared models below.
models = pimmslearn.nb.Config.from_dict(
    pimmslearn.pandas.index_to_dict(scores.columns.get_level_values(0)))
vars(models)
{'Median': 'Median', 'PI': 'PI'}

Describe scores#

Hide code cell source

# Descriptive statistics of the numeric score columns for both models.
scores.describe()
model Median PI
var p-unc -Log10 pvalue qvalue p-unc -Log10 pvalue qvalue
count 7,105.000 7,105.000 7,105.000 7,105.000 7,105.000 7,105.000
mean 0.259 2.475 0.334 0.259 2.472 0.336
std 0.303 4.536 0.332 0.301 5.305 0.328
min 0.000 0.000 0.000 0.000 0.000 0.000
25% 0.003 0.332 0.013 0.004 0.343 0.016
50% 0.114 0.943 0.228 0.123 0.910 0.246
75% 0.465 2.503 0.620 0.454 2.409 0.605
max 1.000 57.961 1.000 1.000 144.895 1.000

One-to-one comparison by feature:#

Hide code cell source

# Keep only rows for the target variable (drops covariates such as age/site),
# then export them to the 'scores' sheet of the Excel file.
scores = scores.loc[pd.IndexSlice[:, args.target], :]
scores.to_excel(writer, 'scores', **writer_args)
scores
/tmp/ipykernel_77139/3761369923.py:2: FutureWarning: Starting with pandas version 3.0 all arguments of to_excel except for the argument 'excel_writer' will be keyword-only.
  scores.to_excel(writer, 'scores', **writer_args)
model Median PI
var p-unc -Log10 pvalue qvalue rejected p-unc -Log10 pvalue qvalue rejected
protein groups Source
A0A024QZX5;A0A087X1N8;P35237 AD 0.012 1.907 0.039 True 0.528 0.277 0.670 False
A0A024R0T9;K7ER74;P02655 AD 0.033 1.478 0.087 False 0.045 1.351 0.111 False
A0A024R3W6;A0A024R412;O60462;O60462-2;O60462-3;O60462-4;O60462-5;Q7LBX6;X5D2Q8 AD 0.736 0.133 0.832 False 0.078 1.106 0.174 False
A0A024R644;A0A0A0MRU5;A0A1B0GWI2;O75503 AD 0.259 0.587 0.418 False 0.499 0.302 0.644 False
A0A075B6H7 AD 0.053 1.278 0.124 False 0.091 1.040 0.195 False
... ... ... ... ... ... ... ... ... ...
Q9Y6R7 AD 0.175 0.756 0.315 False 0.175 0.756 0.318 False
Q9Y6X5 AD 0.291 0.536 0.455 False 0.096 1.019 0.203 False
Q9Y6Y8;Q9Y6Y8-2 AD 0.083 1.079 0.178 False 0.083 1.079 0.182 False
Q9Y6Y9 AD 0.520 0.284 0.667 False 0.452 0.345 0.604 False
S4R3U6 AD 0.730 0.136 0.829 False 0.791 0.102 0.872 False

1421 rows × 8 columns

And the descriptive statistics of the numeric values:

Hide code cell source

# Descriptive statistics of the target-only numeric scores.
scores.describe()
model Median PI
var p-unc -Log10 pvalue qvalue p-unc -Log10 pvalue qvalue
count 1,421.000 1,421.000 1,421.000 1,421.000 1,421.000 1,421.000
mean 0.283 1.311 0.368 0.251 1.400 0.333
std 0.302 1.599 0.325 0.290 1.600 0.314
min 0.000 0.000 0.000 0.000 0.000 0.000
25% 0.017 0.310 0.051 0.012 0.371 0.038
50% 0.171 0.767 0.309 0.124 0.907 0.247
75% 0.490 1.760 0.640 0.426 1.934 0.583
max 1.000 14.393 1.000 0.999 21.057 1.000

and the boolean decision values

Hide code cell source

# Counts of the boolean 'rejected' decision column per model.
scores.describe(include=['bool', 'O'])
model Median PI
var rejected rejected
count 1421 1421
unique 2 2
top False False
freq 1069 1023

Load frequencies of observed features#

Hide code cell source

# Number of samples in which each feature was observed (i.e. not imputed).
freq_feat = pd.read_csv(args.freq_features_observed, index_col=0)
# Two-level column index so it can be joined onto the (model, statistic) scores.
freq_feat.columns = pd.MultiIndex.from_tuples([('data', 'frequency'),])
freq_feat
data
frequency
protein groups
A0A024QZX5;A0A087X1N8;P35237 186
A0A024R0T9;K7ER74;P02655 195
A0A024R3W6;A0A024R412;O60462;O60462-2;O60462-3;O60462-4;O60462-5;Q7LBX6;X5D2Q8 174
A0A024R644;A0A0A0MRU5;A0A1B0GWI2;O75503 196
A0A075B6H7 91
... ...
Q9Y6R7 197
Q9Y6X5 173
Q9Y6Y8;Q9Y6Y8-2 197
Q9Y6Y9 119
S4R3U6 126

1421 rows × 1 columns

Compare shared features#

Hide code cell source

# Features scored by both models: drop rows with any missing statistic, drop
# the last index level (every row is the target now) and attach observation
# frequencies per feature.
scores_common = (scores
                 .dropna()
                 .reset_index(-1, drop=True)
                 ).join(
    freq_feat, how='left'
)
scores_common
Median PI data
p-unc -Log10 pvalue qvalue rejected p-unc -Log10 pvalue qvalue rejected frequency
protein groups
A0A024QZX5;A0A087X1N8;P35237 0.012 1.907 0.039 True 0.528 0.277 0.670 False 186
A0A024R0T9;K7ER74;P02655 0.033 1.478 0.087 False 0.045 1.351 0.111 False 195
A0A024R3W6;A0A024R412;O60462;O60462-2;O60462-3;O60462-4;O60462-5;Q7LBX6;X5D2Q8 0.736 0.133 0.832 False 0.078 1.106 0.174 False 174
A0A024R644;A0A0A0MRU5;A0A1B0GWI2;O75503 0.259 0.587 0.418 False 0.499 0.302 0.644 False 196
A0A075B6H7 0.053 1.278 0.124 False 0.091 1.040 0.195 False 91
... ... ... ... ... ... ... ... ... ...
Q9Y6R7 0.175 0.756 0.315 False 0.175 0.756 0.318 False 197
Q9Y6X5 0.291 0.536 0.455 False 0.096 1.019 0.203 False 173
Q9Y6Y8;Q9Y6Y8-2 0.083 1.079 0.178 False 0.083 1.079 0.182 False 197
Q9Y6Y9 0.520 0.284 0.667 False 0.452 0.345 0.604 False 119
S4R3U6 0.730 0.136 0.829 False 0.791 0.102 0.872 False 126

1421 rows × 9 columns

Annotate decisions in Confusion Table style:#

Hide code cell source

def annotate_decision(scores, model, model_column):
    """Render a model's boolean 'rejected' column as text labels.

    Maps True -> "<model> (yes)" and False -> "<model> (no) " (trailing
    space keeps concatenated annotations aligned).
    """
    labels = {True: f'{model} (yes)', False: f'{model} (no) '}
    return scores[(model_column, 'rejected')].replace(labels)


# Build one annotation string per feature, e.g. "Median (yes) - PI (no)",
# by concatenating the decision label of every model in the comparison.
annotations = None
for model, model_column in models.items():
    if annotations is not None:
        annotations += ' - '
        annotations += annotate_decision(scores_common,
                                         model=model, model_column=model_column)
    else:
        # first model: initialise the Series of labels
        annotations = annotate_decision(
            scores_common, model=model, model_column=model_column)
annotations.name = 'Differential Analysis Comparison'
annotations.value_counts()
Differential Analysis Comparison
Median (no)  - PI (no)    964
Median (yes) - PI (yes)   293
Median (no)  - PI (yes)   105
Median (yes) - PI (no)     59
Name: count, dtype: int64

List different decisions between models#

Hide code cell source

# Features where the models disagree: at least one model rejected the null
# hypothesis, but not all of them did.
mask_different = (
    (scores_common.loc[:, pd.IndexSlice[:, 'rejected']].any(axis=1))
    & ~(scores_common.loc[:, pd.IndexSlice[:, 'rejected']].all(axis=1))
)
_to_write = scores_common.loc[mask_different]
_to_write.to_excel(writer, 'differences', **writer_args)
# Fixed typo in the log message ("Writen" -> "Written").
logger.info("Written to Excel file under sheet 'differences'.")
_to_write
/tmp/ipykernel_77139/1417621106.py:6: FutureWarning: Starting with pandas version 3.0 all arguments of to_excel except for the argument 'excel_writer' will be keyword-only.
  _to_write.to_excel(writer, 'differences', **writer_args)
root - INFO     Writen to Excel file under sheet 'differences'.
Median PI data
p-unc -Log10 pvalue qvalue rejected p-unc -Log10 pvalue qvalue rejected frequency
protein groups
A0A024QZX5;A0A087X1N8;P35237 0.012 1.907 0.039 True 0.528 0.277 0.670 False 186
A0A075B6R2 0.005 2.343 0.017 True 0.189 0.723 0.335 False 164
A0A075B7B8 0.001 3.270 0.003 True 0.406 0.391 0.565 False 57
A0A087WU43;A0A087WX17;A0A087WXI5;P12830;P12830-2 0.003 2.515 0.012 True 0.602 0.220 0.731 False 134
A0A087WWT2;Q9NPD7 0.008 2.099 0.027 True 0.037 1.435 0.095 False 193
... ... ... ... ... ... ... ... ... ...
Q9ULZ9 0.432 0.364 0.591 False 0.004 2.394 0.016 True 171
Q9UP79 0.002 2.739 0.008 True 0.420 0.376 0.578 False 135
Q9UPU3 0.171 0.767 0.309 False 0.001 2.992 0.005 True 163
Q9UQ52 0.001 2.922 0.005 True 0.050 1.302 0.121 False 188
Q9Y653;Q9Y653-2;Q9Y653-3 0.871 0.060 0.924 False 0.013 1.879 0.042 True 177

164 rows × 9 columns

Plot qvalues of both models with annotated decisions#

Prepare data for plotting (qvalues)

Hide code cell source

# Wide table for plotting: one qvalue column per model, plus the observation
# frequency and the per-feature decision annotation.
var = 'qvalue'
to_plot = [scores_common[v][var] for v in models.values()]
for s, k in zip(to_plot, models.keys()):
    # use the model name (underscores as spaces) as the column label
    s.name = k.replace('_', ' ')
to_plot.append(scores_common['data'])
to_plot.append(annotations)
to_plot = pd.concat(to_plot, axis=1)
to_plot
Median PI frequency Differential Analysis Comparison
protein groups
A0A024QZX5;A0A087X1N8;P35237 0.039 0.670 186 Median (yes) - PI (no)
A0A024R0T9;K7ER74;P02655 0.087 0.111 195 Median (no) - PI (no)
A0A024R3W6;A0A024R412;O60462;O60462-2;O60462-3;O60462-4;O60462-5;Q7LBX6;X5D2Q8 0.832 0.174 174 Median (no) - PI (no)
A0A024R644;A0A0A0MRU5;A0A1B0GWI2;O75503 0.418 0.644 196 Median (no) - PI (no)
A0A075B6H7 0.124 0.195 91 Median (no) - PI (no)
... ... ... ... ...
Q9Y6R7 0.315 0.318 197 Median (no) - PI (no)
Q9Y6X5 0.455 0.203 173 Median (no) - PI (no)
Q9Y6Y8;Q9Y6Y8-2 0.178 0.182 197 Median (no) - PI (no)
Q9Y6Y9 0.667 0.604 119 Median (no) - PI (no)
S4R3U6 0.829 0.872 126 Median (no) - PI (no)

1421 rows × 4 columns

List of features with the highest difference in qvalues

Hide code cell source

# should it be possible to run not only RSN?
to_plot['diff_qvalue'] = (to_plot[str(args.baseline)] - to_plot[str(args.model_key)]).abs()
to_plot.loc[mask_different].sort_values('diff_qvalue', ascending=False)
Median PI frequency Differential Analysis Comparison diff_qvalue
protein groups
Q6NUJ2 0.972 0.003 165 Median (no) - PI (yes) 0.969
P22748 0.042 0.999 159 Median (yes) - PI (no) 0.957
D3YTG3;H0Y897;Q7Z7G0;Q7Z7G0-2;Q7Z7G0-3;Q7Z7G0-4 0.969 0.011 58 Median (no) - PI (yes) 0.957
Q6P4E1;Q6P4E1-4;Q6P4E1-5 0.978 0.040 178 Median (no) - PI (yes) 0.938
P52758 0.937 0.000 119 Median (no) - PI (yes) 0.937
... ... ... ... ... ...
P14621;U3KPX8;U3KQL2 0.042 0.065 188 Median (yes) - PI (no) 0.022
Q6P9A2 0.067 0.047 168 Median (no) - PI (yes) 0.020
Q9P2E7;Q9P2E7-2 0.058 0.042 196 Median (no) - PI (yes) 0.016
A0A0A0MTP9;F8VZI9;Q9BWQ8 0.046 0.061 193 Median (yes) - PI (no) 0.015
Q9BUJ0 0.045 0.051 185 Median (yes) - PI (no) 0.006

164 rows × 5 columns

Differences plotted with created annotations#

Hide code cell source

# Scatter plot of the two models' qvalues, coloured by the decision annotation;
# dotted grey lines mark the 0.05 significance threshold on both axes.
figsize = (4, 4)
size = 5
fig, ax = plt.subplots(figsize=figsize)
x_col = to_plot.columns[0]
y_col = to_plot.columns[1]
ax = sns.scatterplot(data=to_plot,
                     x=x_col,
                     y=y_col,
                     s=size,
                     hue='Differential Analysis Comparison',
                     ax=ax)
_ = ax.legend(fontsize=fontsize,
              title_fontsize=fontsize,
              markerscale=0.4,
              title='',
              )
ax.set_xlabel(f"qvalue for {x_col}")
ax.set_ylabel(f"qvalue for {y_col}")
ax.hlines(0.05, 0, 1, color='grey', linestyles='dotted')
ax.vlines(0.05, 0, 1, color='grey', linestyles='dotted')
sns.move_legend(ax, "upper right")
# Register and save the figure alongside the other notebook outputs.
files_out[f'diff_analysis_comparision_1_{args.model_key}'] = (
    args.out_folder /
    f'diff_analysis_comparision_1_{args.model_key}')
fname = files_out[f'diff_analysis_comparision_1_{args.model_key}']
pimmslearn.savefig(fig, name=fname)
pimmslearn.plotting - INFO     Saved Figures to runs/alzheimer_study/diff_analysis/AD/PI_vs_Median/diff_analysis_comparision_1_Median
../../../_images/239e226a74e7ca54e31ad3c85fb91a91059c6a3d27befd270349938bed36dbbd.png
  • also showing how many features were measured (“observed”) by size of circle

Hide code cell source

# Same scatter plot as above, additionally encoding the observation frequency
# of each feature as the circle size.
fig, ax = plt.subplots(figsize=figsize)
# Pass the freshly created axes explicitly (consistent with the first plot;
# previously this relied on seaborn drawing into the implicit current axes).
ax = sns.scatterplot(data=to_plot,
                     x=to_plot.columns[0],
                     y=to_plot.columns[1],
                     size='frequency',
                     s=size,
                     sizes=(5, 20),
                     hue='Differential Analysis Comparison',
                     ax=ax)
_ = ax.legend(fontsize=fontsize,
              title_fontsize=fontsize,
              markerscale=0.6,
              title='',
              )
ax.set_xlabel(f"qvalue for {x_col}")
ax.set_ylabel(f"qvalue for {y_col}")
ax.hlines(0.05, 0, 1, color='grey', linestyles='dotted')
ax.vlines(0.05, 0, 1, color='grey', linestyles='dotted')
sns.move_legend(ax, "upper right")
files_out[f'diff_analysis_comparision_2_{args.model_key}'] = (
    args.out_folder / f'diff_analysis_comparision_2_{args.model_key}')
pimmslearn.savefig(
    fig, name=files_out[f'diff_analysis_comparision_2_{args.model_key}'])
pimmslearn.plotting - INFO     Saved Figures to runs/alzheimer_study/diff_analysis/AD/PI_vs_Median/diff_analysis_comparision_2_Median
../../../_images/c7f6bd9f06821ea479bfa2aa7df23d1c85101652837752e11602b441df7a4a52.png

Only features contained in model#

  • this block exists due to a specific part in the ALD analysis of the paper

Hide code cell source

# Features present in the comparison model's scores but not among the shared
# (complete-case) features — i.e. only testable thanks to the model's imputation.
scores_model_only = scores.reset_index(level=-1, drop=True)
_diff = scores_model_only.index.difference(scores_common.index)
if not _diff.empty:
    scores_model_only = (scores_model_only
                         .loc[
                             _diff,
                             args.model_key]
                         .sort_values(by='qvalue', ascending=True)
                         # attach observation frequency as a plain column
                         .join(freq_feat.squeeze().rename(freq_feat.columns.droplevel()[0])
                               )
                         )
    display(scores_model_only)
else:
    scores_model_only = None
    # Fixed typo in the log message ("comparision" -> "comparison").
    logger.info("No features only in new comparison model.")

if not _diff.empty:
    scores_model_only.to_excel(writer, 'only_model', **writer_args)
    display(scores_model_only.rejected.value_counts())
    # Subset of model-only features that were significant (null rejected).
    scores_model_only_rejected = scores_model_only.loc[scores_model_only.rejected]
    scores_model_only_rejected.to_excel(
        writer, 'only_model_rejected', **writer_args)
root - INFO     No features only in new comparision model.

DISEASES DB lookup#

Query diseases database for gene associations with specified disease ontology id.

Hide code cell source

# Query the DISEASES database for gene–disease associations of the configured
# disease-ontology id; index the result by gene name for joining to the scores.
data = pimmslearn.databases.diseases.get_disease_association(
    doid=args.disease_ontology, limit=10000)
data = pd.DataFrame.from_dict(data, orient='index').rename_axis('ENSP', axis=0)
data = data.rename(columns={'name': args.annotaitons_gene_col}).reset_index(
).set_index(args.annotaitons_gene_col)
data
pimmslearn.databases.diseases - WARNING  There are more associations available
ENSP score
None
APP ENSP00000284981 5.000
PSEN1 ENSP00000326366 5.000
APOE ENSP00000252486 5.000
PSEN2 ENSP00000355747 5.000
TREM2 ENSP00000362205 4.825
... ... ...
hsa-miR-760 hsa-miR-760 0.682
PCDH11Y ENSP00000355419 0.682
JPH1 ENSP00000344488 0.682
RCN1 ENSP00000054950 0.682
RNF157 ENSP00000269391 0.682

10000 rows × 2 columns

Shared features#

ToDo: new script -> DISEASES DB lookup

Hide code cell source

# Guard: the gene-level analysis below requires gene annotations in the scores
# index. If absent, stop here — sys.exit raises SystemExit, which halts the
# remaining cells when the notebook is executed as a script.
feat_name = scores.index.names[0]  # first index level is feature name
if args.annotaitons_gene_col in scores.index.names:
    logger.info(f"Found gene annotation in scores index:  {scores.index.names}")
else:
    logger.info(f"No gene annotation in scores index:  {scores.index.names}"
                " Exiting.")
    import sys
    sys.exit(0)
root - INFO     No gene annotation in scores index:  ['protein groups', 'Source'] Exiting.
/home/runner/work/pimms/pimms/project/.snakemake/conda/43fbe714d68d8fe6f9b0c93f5652adb3_/lib/python3.12/site-packages/IPython/core/interactiveshell.py:3756: UserWarning: To exit: use 'exit', 'quit', or Ctrl-D.
  warn("To exit: use 'exit', 'quit', or Ctrl-D.", stacklevel=1)
An exception has occurred, use %tb to see the full traceback.

SystemExit: 0

Hide code cell source

# Map gene name -> feature (protein group) id, built from the scores index by
# dropping every index level except the feature name and the gene column.
gene_to_PG = (scores.droplevel(
    list(set(scores.index.names) - {feat_name, args.annotaitons_gene_col})
)
    .index
    .to_frame()
    .reset_index(drop=True)
    .set_index(args.annotaitons_gene_col)
)
gene_to_PG.head()

Hide code cell source

# Disease associations restricted to genes present in the data, re-indexed by
# feature id and annotated with the per-feature decision comparison string.
disease_associations_all = data.join(
    gene_to_PG).dropna().reset_index().set_index(feat_name).join(annotations)
disease_associations_all

only by model#

Hide code cell source

# Disease associations for features only quantified by the comparison model,
# sorted by association score (strongest first).
idx = disease_associations_all.index.intersection(scores_model_only.index)
disease_assocications_new = disease_associations_all.loc[idx].sort_values(
    'score', ascending=False)
disease_assocications_new.head(20)

Hide code cell source

# Restrict to confidently associated genes (DISEASES score >= 2.0).
mask = disease_assocications_new.loc[idx, 'score'] >= 2.0
disease_assocications_new.loc[idx].loc[mask]

Only by model which were significant#

Hide code cell source

# Disease associations for model-only features that were also significant.
idx = disease_associations_all.index.intersection(
    scores_model_only_rejected.index)
disease_assocications_new_rejected = disease_associations_all.loc[idx].sort_values(
    'score', ascending=False)
disease_assocications_new_rejected.head(20)

Hide code cell source

# Restrict to confidently associated genes (DISEASES score >= 2.0).
mask = disease_assocications_new_rejected.loc[idx, 'score'] >= 2.0
disease_assocications_new_rejected.loc[idx].loc[mask]

Shared features which are only significant for the comparison model#

# Shared features that are significant for the comparison model but not for
# the baseline (intersection with the disagreement mask).
mask = (scores_common[(str(args.model_key), 'rejected')] & mask_different)
mask.sum()

Hide code cell source

# Disease associations for those model-only-significant shared features.
idx = disease_associations_all.index.intersection(mask.index[mask])
disease_assocications_shared_rejected_by_model = (disease_associations_all.loc[idx].sort_values(
    'score', ascending=False))
disease_assocications_shared_rejected_by_model.head(20)

Hide code cell source

# Restrict to confidently associated genes (DISEASES score >= 2.0).
mask = disease_assocications_shared_rejected_by_model.loc[idx, 'score'] >= 2.0
disease_assocications_shared_rejected_by_model.loc[idx].loc[mask]

Only significant by RSN#

# Shared features that are significant for the baseline only (e.g. RSN/PI).
mask = (scores_common[(str(args.baseline), 'rejected')] & mask_different)
mask.sum()

Hide code cell source

# Disease associations for those baseline-only-significant shared features.
idx = disease_associations_all.index.intersection(mask.index[mask])
disease_assocications_shared_rejected_by_RSN = (
    disease_associations_all
    .loc[idx]
    .sort_values('score', ascending=False))
disease_assocications_shared_rejected_by_RSN.head(20)

Hide code cell source

# Restrict to confidently associated genes (DISEASES score >= 2.0).
mask = disease_assocications_shared_rejected_by_RSN.loc[idx, 'score'] >= 2.0
disease_assocications_shared_rejected_by_RSN.loc[idx].loc[mask]

Write to excel#

Hide code cell source

# Persist the disease-association tables to dedicated sheets in the Excel file.
disease_associations_all.to_excel(
    writer, sheet_name='disease_assoc_all', **writer_args)
disease_assocications_new.to_excel(
    writer, sheet_name='disease_assoc_new', **writer_args)
disease_assocications_new_rejected.to_excel(
    writer, sheet_name='disease_assoc_new_rejected', **writer_args)

Outputs#

Hide code cell source

# Flush and close the Excel writer, then list all files written by the notebook.
writer.close()
files_out