Hello Konstantin Komarov,

The patch aa30eccb24e5: "fs/ntfs3: Fallocate (FALLOC_FL_INSERT_RANGE)
implementation" from Jun 21, 2022, leads to the following Smatch static
checker warning:

        fs/ntfs3/attrib.c:2230 attr_insert_range()
        error: uninitialized symbol 'alloc_size'.

fs/ntfs3/attrib.c
    2089  int attr_insert_range(struct ntfs_inode *ni, u64 vbo, u64 bytes)
    2090  {
    2091          int err = 0;
    2092          struct runs_tree *run = &ni->file.run;
    2093          struct ntfs_sb_info *sbi = ni->mi.sbi;
    2094          struct ATTRIB *attr = NULL, *attr_b;
    2095          struct ATTR_LIST_ENTRY *le, *le_b;
    2096          struct mft_inode *mi, *mi_b;
    2097          CLST vcn, svcn, evcn1, len, next_svcn;
    2098          u64 data_size, alloc_size;
    2099          u32 mask;
    2100          __le16 a_flags;
    2101
    2102          if (!bytes)
    2103                  return 0;
    2104
    2105          le_b = NULL;
    2106          attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL, &mi_b);
    2107          if (!attr_b)
    2108                  return -ENOENT;
    2109
    2110          if (!is_attr_ext(attr_b)) {
    2111                  /* It was checked above. See fallocate. */
    2112                  return -EOPNOTSUPP;
    2113          }
    2114
    2115          if (!attr_b->non_res) {
    2116                  data_size = le32_to_cpu(attr_b->res.data_size);
    2117                  mask = sbi->cluster_mask; /* cluster_size - 1 */
    2118          } else {
    2119                  data_size = le64_to_cpu(attr_b->nres.data_size);
    2120                  mask = (sbi->cluster_size << attr_b->nres.c_unit) - 1;
    2121          }
    2122
    2123          if (vbo > data_size) {
    2124                  /* Insert range after the file size is not allowed. */
    2125                  return -EINVAL;
    2126          }
    2127
    2128          if ((vbo & mask) || (bytes & mask)) {
    2129                  /* Allow to insert only frame aligned ranges. */
    2130                  return -EINVAL;
    2131          }
    2132
    2133          vcn = vbo >> sbi->cluster_bits;
    2134          len = bytes >> sbi->cluster_bits;
    2135
    2136          down_write(&ni->file.run_lock);
    2137
    2138          if (!attr_b->non_res) {
    2139                  err = attr_set_size(ni, ATTR_DATA, NULL, 0, run,
    2140                                      data_size + bytes, NULL, false, &attr);
    2141                  if (err)
    2142                          goto out;
    2143                  if (!attr->non_res) {
    2144                          /* Still resident. */
    2145                          char *data = Add2Ptr(attr, attr->res.data_off);
    2146
    2147                          memmove(data + bytes, data, bytes);
    2148                          memset(data, 0, bytes);
    2149                          err = 0;
    2150                          goto out;
    2151                  }
    2152                  /* Resident files becomes nonresident. */
    2153                  le_b = NULL;
    2154                  attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL,
    2155                                        &mi_b);
    2156                  if (!attr_b)
    2157                          return -ENOENT;
    2158                  if (!attr_b->non_res) {
    2159                          err = -EINVAL;
    2160                          goto out;
    2161                  }
    2162                  data_size = le64_to_cpu(attr_b->nres.data_size);
    2163                  alloc_size = le64_to_cpu(attr_b->nres.alloc_size);

"alloc_size" is only initialized here, inside the if (!attr_b->non_res)
block.  There is no else path, so when attr_b starts out non-resident it
is never set before the use at line 2230.  (One possible fix is sketched
after the listing.)

    2164          }
    2165
    2166          /*
    2167           * Enumerate all attribute segments and shift start vcn.
    2168           */
    2169          a_flags = attr_b->flags;
    2170          svcn = le64_to_cpu(attr_b->nres.svcn);
    2171          evcn1 = le64_to_cpu(attr_b->nres.evcn) + 1;
    2172
    2173          if (svcn <= vcn && vcn < evcn1) {
    2174                  attr = attr_b;
    2175                  le = le_b;
    2176                  mi = mi_b;
    2177          } else if (!le_b) {
    2178                  err = -EINVAL;
    2179                  goto out;
    2180          } else {
    2181                  le = le_b;
    2182                  attr = ni_find_attr(ni, attr_b, &le, ATTR_DATA, NULL, 0, &vcn,
    2183                                      &mi);
    2184                  if (!attr) {
    2185                          err = -EINVAL;
    2186                          goto out;
    2187                  }
    2188
    2189                  svcn = le64_to_cpu(attr->nres.svcn);
    2190                  evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
    2191          }
    2192
    2193          run_truncate(run, 0); /* clear cached values. */
    2194          err = attr_load_runs(attr, ni, run, NULL);
    2195          if (err)
    2196                  goto out;
    2197
    2198          if (!run_insert_range(run, vcn, len)) {
    2199                  err = -ENOMEM;
    2200                  goto out;
    2201          }
    2202
    2203          /* Try to pack in current record as much as possible. */
    2204          err = mi_pack_runs(mi, attr, run, evcn1 + len - svcn);
    2205          if (err)
    2206                  goto out;
    2207
    2208          next_svcn = le64_to_cpu(attr->nres.evcn) + 1;
    2209          run_truncate_head(run, next_svcn);
    2210
    2211          while ((attr = ni_enum_attr_ex(ni, attr, &le, &mi)) &&
    2212                 attr->type == ATTR_DATA && !attr->name_len) {
    2213                  le64_add_cpu(&attr->nres.svcn, len);
    2214                  le64_add_cpu(&attr->nres.evcn, len);
    2215                  if (le) {
    2216                          le->vcn = attr->nres.svcn;
    2217                          ni->attr_list.dirty = true;
    2218                  }
    2219                  mi->dirty = true;
    2220          }
    2221
    2222          /*
    2223           * Update primary attribute segment in advance.
    2224           * pointer attr_b may become invalid (layout of mft is changed)
    2225           */
    2226          if (vbo <= ni->i_valid)
    2227                  ni->i_valid += bytes;
    2228
    2229          attr_b->nres.data_size = le64_to_cpu(data_size + bytes);
--> 2230          attr_b->nres.alloc_size = le64_to_cpu(alloc_size + bytes);
                                                        ^^^^^^^^^^
    2231
    2232          /* ni->valid may be not equal valid_size (temporary). */
    2233          if (ni->i_valid > data_size + bytes)
    2234                  attr_b->nres.valid_size = attr_b->nres.data_size;
    2235          else
    2236                  attr_b->nres.valid_size = cpu_to_le64(ni->i_valid);
    2237          mi_b->dirty = true;
    2238
    2239          if (next_svcn < evcn1 + len) {
    2240                  err = ni_insert_nonresident(ni, ATTR_DATA, NULL, 0, run,
    2241                                              next_svcn, evcn1 + len - next_svcn,
    2242                                              a_flags, NULL, NULL);
    2243                  if (err)
    2244                          goto out;
    2245          }
    2246
    2247          ni->vfs_inode.i_size += bytes;
    2248          ni->ni_flags |= NI_FLAG_UPDATE_PARENT;
    2249          mark_inode_dirty(&ni->vfs_inode);
    2250
    2251  out:
    2252          run_truncate(run, 0); /* clear cached values. */
    2253
    2254          up_write(&ni->file.run_lock);
    2255          if (err)
    2256                  make_bad_inode(&ni->vfs_inode);
    2257
    2258          return err;
    2259  }
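One way to address this would be to also read alloc_size in the
non-resident branch near the top of the function, using the same field
the function already reads at line 2163.  Just a sketch to illustrate
the idea, not tested:

        if (!attr_b->non_res) {
                data_size = le32_to_cpu(attr_b->res.data_size);
                mask = sbi->cluster_mask; /* cluster_size - 1 */
        } else {
                data_size = le64_to_cpu(attr_b->nres.data_size);
                /* attr_b is already non-resident: remember its allocated size too */
                alloc_size = le64_to_cpu(attr_b->nres.alloc_size);
                mask = (sbi->cluster_size << attr_b->nres.c_unit) - 1;
        }

The other paths look okay: the still-resident case does goto out before
line 2230, and the resident to non-resident case reads alloc_size at
line 2163.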
regards,
dan carpenter