# NOTE: extraction artifacts (a "File size" banner, a commit hash, and a
# line-number gutter copied from a code viewer) were removed from this span
# so the module parses as valid Python.
import os
import torch
import datetime
import numpy as np

# Import modules with alternative names
from util import set_random_seed as seed_generator
from util import poly_lr as learning_rate_adjuster
from loader import get_val_loader as acquire_validation_dataset
from config import ConfigurationManager as Configurator
from model import model as DeepLearningModel

# Configure image loading behavior
from PIL import ImageFile

ImageFile.LOAD_TRUNCATED_IMAGES = True


def generate_validation_settings():
    """Build a parsed configuration flagged for validation (not training).

    Returns:
        The options object produced by ``Configurator().parse()`` with
        ``isVal`` set and ``isTrain`` cleared.
    """
    validation_options = Configurator().parse()
    # The two flags are independent; set them both explicitly.
    validation_options.isVal = True
    validation_options.isTrain = False
    return validation_options


def _count_correct_predictions(data_loader, neural_network):
    """Return the number of correctly classified samples in *data_loader*.

    A sigmoid score > 0.5 counts as correct for label 1 and < 0.5 as
    correct for label 0; a score of exactly 0.5 is counted incorrect for
    either label, matching the original thresholding.
    """
    correct = 0
    for image_batch, target_labels in data_loader:
        image_batch = image_batch.cuda()
        target_labels = target_labels.cuda()

        predictions = neural_network(image_batch)
        prediction_scores = torch.sigmoid(predictions).flatten()

        correct_predictions = (
                ((prediction_scores > 0.5) & (target_labels == 1)) |
                ((prediction_scores < 0.5) & (target_labels == 0))
        )
        correct += correct_predictions.sum().item()
    return correct


def assess_model_performance(
        validation_datasets,
        neural_network,
        results_directory
):
    """Evaluate neural network performance across validation datasets.

    Args:
        validation_datasets: iterable of dicts with keys ``'name'``,
            ``'val_ai_loader'``, ``'ai_size'``, ``'val_nature_loader'``
            and ``'nature_size'``.
        neural_network: binary classifier producing raw logits; scores are
            obtained via sigmoid, with > 0.5 predicting label 1 (AI image).
        results_directory: currently unused; kept so the signature stays
            compatible with existing callers.

    Per-dataset and global accuracies are printed, not returned.
    """
    neural_network.eval()
    aggregate_correct = aggregate_samples = 0

    with torch.no_grad():
        for dataset in validation_datasets:
            dataset_identifier = dataset['name']
            ai_data_loader = dataset['val_ai_loader']
            ai_count = dataset['ai_size']
            natural_data_loader = dataset['val_nature_loader']
            natural_count = dataset['nature_size']

            print(f"[Evaluating dataset: {dataset_identifier}]")

            # Analyze AI-generated images (expected label 1)
            ai_correct = _count_correct_predictions(ai_data_loader, neural_network)
            ai_performance = ai_correct / ai_count
            print(f"(1) AI Classification Accuracy: {ai_performance:.4f}")

            # Analyze natural images (expected label 0)
            natural_correct = _count_correct_predictions(natural_data_loader, neural_network)
            natural_performance = natural_correct / natural_count
            print(f"(2) Natural Image Accuracy: {natural_performance:.4f}")

            # Compute dataset-level performance
            dataset_performance = (ai_correct + natural_correct) / (ai_count + natural_count)
            aggregate_correct += ai_correct + natural_correct
            aggregate_samples += ai_count + natural_count

            print(f"Subset Performance: {dataset_performance:.4f}")

    # Compute overall performance across every dataset seen
    overall_performance = aggregate_correct / aggregate_samples
    print(f"[Global Accuracy: {overall_performance:.4f}]")


def configure_computation_device(device_id):
    """Restrict CUDA to a single GPU and announce the selection.

    Args:
        device_id: identifier of the GPU to expose via the
            ``CUDA_VISIBLE_DEVICES`` environment variable. Accepts either
            a string or an int; ``os.environ`` requires string values, so
            the value is coerced explicitly (an int would otherwise raise
            ``TypeError``).
    """
    os.environ["CUDA_VISIBLE_DEVICES"] = str(device_id)
    print(f"Selected computation device: GPU {device_id}")


def execute_evaluation_procedure():
    """Main evaluation workflow execution.

    Seeds RNGs, parses configuration, loads the validation datasets,
    restores the model (optionally from a checkpoint), prepares the
    results directory, and runs the evaluation.
    """
    # Initialize random number generation for reproducibility
    seed_generator()

    # Load configuration settings: one general parse plus one flagged
    # for validation mode
    primary_config = Configurator().parse()
    validation_config = generate_validation_settings()

    # Prepare validation data
    print('Preparing validation datasets...')
    validation_datasets = acquire_validation_dataset(validation_config)

    # Configure hardware environment before moving the model to the GPU
    configure_computation_device(primary_config.gpu_id)

    # Initialize neural architecture on the selected device
    network_instance = DeepLearningModel().cuda()

    # Load pre-trained parameters if specified
    if primary_config.load is not None:
        network_instance.load_state_dict(torch.load(primary_config.load))
        print(f'Loaded model parameters from {primary_config.load}')

    # Create results storage location; exist_ok=True removes the race
    # between the previous existence check and directory creation
    results_path = primary_config.save_path
    os.makedirs(results_path, exist_ok=True)

    print("Commencing model evaluation")
    assess_model_performance(validation_datasets, network_instance, results_path)


# Script entry point: run the full evaluation workflow when executed directly.
if __name__ == '__main__':
    execute_evaluation_procedure()