taquynhnga committed on
Commit 0343e34
1 Parent(s): 63bdcb1

Update pages/2_SmoothGrad.py

Files changed (1)
  1. pages/2_SmoothGrad.py +20 -41
pages/2_SmoothGrad.py CHANGED
@@ -16,40 +16,23 @@ BACKGROUND_COLOR = '#bcd0e7'
 
 
  st.title('Feature attribution with SmoothGrad')
- st.write('Which features are responsible for the current prediction? ')
+ st.write("""> **Which features are responsible for the current prediction?**
+
+ In machine learning, it is helpful to identify the significant features of the input (e.g., pixels for images) that affect the model's prediction.
+ If the model makes an incorrect prediction, we might want to determine which features contributed to the mistake.
+ To do this, we can generate a feature importance mask, which is a grayscale image with the same size as the original image.
+ The brightness of each pixel in the mask represents the importance of that feature to the model's prediction.
+
+ There are various methods to calculate an image sensitivity mask for a specific prediction.
+ One simple way is to use the gradient of a class prediction neuron with respect to the input pixels, indicating how the prediction is affected by small pixel changes.
+ However, this method usually produces a noisy mask.
+ To reduce the noise, the [SmoothGrad](https://arxiv.org/abs/1706.03825) technique is used, which adds Gaussian noise to multiple copies of the image and averages the resulting gradients.
+ """)
 
  imagenet_df = pd.read_csv('./data/ImageNet_metadata.csv')
 
  # --------------------------- LOAD function -----------------------------
 
- # @st.cache(allow_output_mutation=True)
- # @st.cache_data
- # def load_images(image_ids):
- #     images = []
- #     for image_id in image_ids:
- #         dataset = load_dataset(image_id//10000)
- #         images.append(dataset[image_id%10000])
- #     return images
-
- # @st.cache(allow_output_mutation=True, suppress_st_warning=True, show_spinner=False)
- # @st.cache_resource
- # def load_model(model_name):
- #     with st.spinner(f"Loading {model_name} model! This process might take 1-2 minutes..."):
- #         if model_name == 'ResNet':
- #             model_file_path = 'microsoft/resnet-50'
- #             feature_extractor = AutoFeatureExtractor.from_pretrained(model_file_path, crop_pct=1.0)
- #             model = AutoModelForImageClassification.from_pretrained(model_file_path)
- #             model.eval()
- #         elif model_name == 'ConvNeXt':
- #             model_file_path = 'facebook/convnext-tiny-224'
- #             feature_extractor = AutoFeatureExtractor.from_pretrained(model_file_path, crop_pct=1.0)
- #             model = AutoModelForImageClassification.from_pretrained(model_file_path)
- #             model.eval()
- #         else:
- #             model = torch.hub.load('pytorch/vision:v0.10.0', 'mobilenet_v2', pretrained=True)
- #             model.eval()
- #             feature_extractor = None
- #     return model, feature_extractor
 
  images = []
  image_ids = []
@@ -90,18 +73,14 @@ for i, model_name in enumerate(selected_models):
 
 
  # DISPLAY ----------------------------------
- header_cols = st.columns([1, 1] + [2]*len(selected_models))
- header_cols[0].markdown(f'<div style="text-align: center;margin-bottom: 10px;background-color:{BACKGROUND_COLOR};"><b>Image ID</b></div>', unsafe_allow_html=True)
- header_cols[1].markdown(f'<div style="text-align: center;margin-bottom: 10px;background-color:{BACKGROUND_COLOR};"><b>Original Image</b></div>', unsafe_allow_html=True)
- for i, model_name in enumerate(selected_models):
-     header_cols[i + 2].markdown(f'<div style="text-align: center;margin-bottom: 10px;background-color:{BACKGROUND_COLOR};"><b>{model_name}</b></div>', unsafe_allow_html=True)
-
- grids = make_grid(cols=2+len(selected_models)*2, rows=len(image_ids)+1)
- # grids[0][0].write('Image ID')
- # grids[0][1].write('Original image')
-
- # for i, model_name in enumerate(selected_models):
- #     models[model_name], feature_extractors[model_name] = load_model(model_name)
+ if run_button:
+     header_cols = st.columns([1, 1] + [2]*len(selected_models))
+     header_cols[0].markdown(f'<div style="text-align: center;margin-bottom: 10px;background-color:{BACKGROUND_COLOR};"><b>Image ID</b></div>', unsafe_allow_html=True)
+     header_cols[1].markdown(f'<div style="text-align: center;margin-bottom: 10px;background-color:{BACKGROUND_COLOR};"><b>Original Image</b></div>', unsafe_allow_html=True)
+     for i, model_name in enumerate(selected_models):
+         header_cols[i + 2].markdown(f'<div style="text-align: center;margin-bottom: 10px;background-color:{BACKGROUND_COLOR};"><b>{model_name}</b></div>', unsafe_allow_html=True)
+
+     grids = make_grid(cols=2+len(selected_models)*2, rows=len(image_ids)+1)
 
 
  @st.cache(allow_output_mutation=True)
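
Note: the description added in this commit refers to taking the gradient of the class prediction neuron with respect to the input pixels and averaging it over noisy copies of the image. As a point of reference only, a minimal SmoothGrad sketch in PyTorch might look like the following. It is not part of this commit; the names model, image, target_class, n_samples and sigma are placeholders, and it assumes a classifier that maps a (1, C, H, W) image batch directly to logits (e.g. a torchvision model rather than the Hugging Face wrappers used in the app).

    # Illustrative SmoothGrad sketch, not taken from pages/2_SmoothGrad.py.
    import torch

    def smoothgrad_mask(model, image, target_class, n_samples=25, sigma=0.15):
        model.eval()
        # Noise standard deviation scaled to the image's value range, as in the SmoothGrad paper.
        noise_std = sigma * (image.max() - image.min())
        grads = torch.zeros_like(image)
        for _ in range(n_samples):
            # Add Gaussian noise to a copy of the image and track gradients w.r.t. its pixels.
            noisy = (image + noise_std * torch.randn_like(image)).requires_grad_(True)
            score = model(noisy)[0, target_class]   # class prediction neuron
            score.backward()                        # gradient of that neuron w.r.t. the noisy input
            grads += noisy.grad
        avg_grad = grads / n_samples
        # Collapse channels into a single grayscale importance mask.
        return avg_grad.abs().sum(dim=1).squeeze(0)

The returned mask can be min-max normalized and rendered as a grayscale image next to the original, which is what the feature importance masks described in the new page text look like.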