* caller calls fsw_block_release.
*/
-fsw_status_t fsw_block_get(struct VOLSTRUCTNAME *vol, fsw_u32 phys_bno, fsw_u32 cache_level, void **buffer_out)
+fsw_status_t fsw_block_get(struct VOLSTRUCTNAME *vol, fsw_u64 phys_bno, fsw_u32 cache_level, void **buffer_out)
{
fsw_status_t status;
fsw_u32 i, discard_level, new_bcache_size;
cache_level = MAX_CACHE_LEVEL;
if (vol->bcache_size > 0 && vol->bcache == NULL) {
- /* driver set the initial cache size */
+ /* driver set the initial cache size */
status = fsw_alloc(vol->bcache_size * sizeof(struct fsw_blockcache), &vol->bcache);
- if(status)
- return status;
- for( i = 0; i < vol->bcache_size; i++) {
+ if(status)
+ return status;
+ for (i = 0; i < vol->bcache_size; i++) {
vol->bcache[i].refcount = 0;
vol->bcache[i].cache_level = 0;
- vol->bcache[i].phys_bno = (fsw_u32)FSW_INVALID_BNO;
+ vol->bcache[i].phys_bno = (fsw_u64)FSW_INVALID_BNO;
vol->bcache[i].data = NULL;
- }
- i = 0;
- goto miss;
+ }
+ i = 0;
+ goto miss;
}
// check block cache
// find a free entry in the cache table
for (i = 0; i < vol->bcache_size; i++) {
- if (vol->bcache[i].phys_bno == (fsw_u32)FSW_INVALID_BNO)
+ if (vol->bcache[i].phys_bno == (fsw_u64)FSW_INVALID_BNO)
break;
}
if (i >= vol->bcache_size) {
for (i = vol->bcache_size; i < new_bcache_size; i++) {
new_bcache[i].refcount = 0;
new_bcache[i].cache_level = 0;
- new_bcache[i].phys_bno = (fsw_u32)FSW_INVALID_BNO;
+ new_bcache[i].phys_bno = (fsw_u64)FSW_INVALID_BNO;
new_bcache[i].data = NULL;
}
i = vol->bcache_size;
vol->bcache = new_bcache;
vol->bcache_size = new_bcache_size;
}
- vol->bcache[i].phys_bno = (fsw_u32)FSW_INVALID_BNO;
+ vol->bcache[i].phys_bno = (fsw_u64)FSW_INVALID_BNO;
miss:
// read the data
* from fsw_block_get.
*/
-void fsw_block_release(struct VOLSTRUCTNAME *vol, fsw_u32 phys_bno, void *buffer)
+void fsw_block_release(struct VOLSTRUCTNAME *vol, fsw_u64 phys_bno, void *buffer)
{
fsw_u32 i;
fsw_status_t status;
struct fsw_string target_name;
struct fsw_dnode *target_dno;
+    /* Cap symlink-chain traversal at 40 levels (matches Linux MAXSYMLINKS) to avoid infinite loops */
+ int link_count = 40;
fsw_dnode_retain(dno);
- while (1) {
+ while (--link_count > 0) {
// get full information
status = fsw_dnode_fill(dno);
if (status)
fsw_dnode_release(dno);
dno = target_dno; // is already retained
}
+ if(link_count == 0)
+ status = FSW_NOT_FOUND;
errorexit:
fsw_dnode_release(dno);
struct fsw_dnode *dno = shand->dnode;
struct fsw_volume *vol = dno->vol;
fsw_u8 *buffer, *block_buffer;
- fsw_u32 buflen, copylen, pos;
- fsw_u32 log_bno, pos_in_extent, phys_bno, pos_in_physblock;
+ fsw_u64 buflen, copylen, pos;
+ fsw_u64 log_bno, pos_in_extent, phys_bno, pos_in_physblock;
fsw_u32 cache_level;
if (shand->pos >= dno->size) { // already at EOF
while (buflen > 0) {
// get extent for the current logical block
- log_bno = pos / vol->log_blocksize;
+ log_bno = FSW_U64_DIV(pos, vol->log_blocksize);
if (shand->extent.type == FSW_EXTENT_TYPE_INVALID ||
log_bno < shand->extent.log_start ||
log_bno >= shand->extent.log_start + shand->extent.log_count) {
// dispatch by extent type
if (shand->extent.type == FSW_EXTENT_TYPE_PHYSBLOCK) {
// convert to physical block number and offset
- phys_bno = shand->extent.phys_start + pos_in_extent / vol->phys_blocksize;
+ phys_bno = shand->extent.phys_start + FSW_U64_DIV(pos_in_extent, vol->phys_blocksize);
pos_in_physblock = pos_in_extent & (vol->phys_blocksize - 1);
copylen = vol->phys_blocksize - pos_in_physblock;
if (copylen > buflen)