* along with GRUB. If not, see <http://www.gnu.org/licenses/>.
*/
-//#define DPRINT(x...) Print(x)
+//#define DPRINT(x...) Print(x)
#include "fsw_core.h"
-#define uint8_t UINT8
-#define uint16_t UINT16
-#define uint32_t UINT32
-#define uint64_t UINT64
-#define int64_t INT64
-#define int32_t INT32
+#define uint8_t fsw_u8
+#define uint16_t fsw_u16
+#define uint32_t fsw_u32
+#define uint64_t fsw_u64
+#define int64_t fsw_s64
+#define int32_t fsw_s32
#ifndef DPRINT
-#define DPRINT(x...) /* */
+#define DPRINT(x...) /* */
#endif
/* no single io/element size over 2G */
#include "scandisk.c"
#define BTRFS_DEFAULT_BLOCK_SIZE 4096
+//#define BTRFS_DEFAULT_BLOCK_SIZE 8192
#define BTRFS_INITIAL_BCACHE_SIZE 1024
#define GRUB_BTRFS_SIGNATURE "_BHRfS_M"
* */
#define GRUB_BTRFS_LZO_BLOCK_SIZE 4096
#define GRUB_BTRFS_LZO_BLOCK_MAX_CSIZE (GRUB_BTRFS_LZO_BLOCK_SIZE + \
- (GRUB_BTRFS_LZO_BLOCK_SIZE / 16) + 64 + 3)
+ (GRUB_BTRFS_LZO_BLOCK_SIZE / 16) + 64 + 3)
/*
* on disk struct has prefix 'btrfs_', little endian
uint64_t total_bytes;
uint64_t bytes_used;
uint64_t root_dir_objectid;
-#define BTRFS_MAX_NUM_DEVICES 0x10000
+#define BTRFS_MAX_NUM_DEVICES 0x10000
uint64_t num_devices;
uint32_t sectorsize;
uint32_t nodesize;
unsigned allocated;
struct
{
- uint64_t addr;
- unsigned iter;
- unsigned maxiter;
- int leaf;
+ uint64_t addr;
+ unsigned iter;
+ unsigned maxiter;
+ int leaf;
} *data;
};
uint8_t type;
union
{
- char inl[0];
- struct
- {
- uint64_t laddr;
- uint64_t compressed_size;
- uint64_t offset;
- uint64_t filled;
- };
+ char inl[0];
+ struct
+ {
+ uint64_t laddr;
+ uint64_t compressed_size;
+ uint64_t offset;
+ uint64_t filled;
+ };
};
} __attribute__ ((__packed__));
struct fsw_btrfs_uuid_list *l;
for (l = master_uuid_list; l; l=l->next)
- if(uuid_eq(l->master->uuid, vol->uuid)) {
- if(master_out)
- *master_out = l->master;
- return 0;
- }
+ if(uuid_eq(l->master->uuid, vol->uuid)) {
+ if(master_out)
+ *master_out = l->master;
+ return 0;
+ }
l = AllocatePool(sizeof(struct fsw_btrfs_uuid_list));
l->master = vol;
struct fsw_btrfs_uuid_list **lp;
for (lp = &master_uuid_list; *lp; lp=&(*lp)->next)
- if((*lp)->master == vol) {
- struct fsw_btrfs_uuid_list *n = *lp;
- *lp = n->next;
- FreePool(n);
- break;
- }
+ if((*lp)->master == vol) {
+ struct fsw_btrfs_uuid_list *n = *lp;
+ *lp = n->next;
+ FreePool(n);
+ break;
+ }
}
static fsw_status_t btrfs_set_superblock_info(struct fsw_btrfs_volume *vol, struct btrfs_superblock *sb)
vol->sectorshift = 0;
vol->sectorsize = fsw_u32_le_swap(sb->sectorsize);
for(i=9; i<20; i++) {
- if((1UL<<i) == vol->sectorsize) {
- vol->sectorshift = i;
- break;
- }
+ if((1UL<<i) == vol->sectorsize) {
+ vol->sectorshift = i;
+ break;
+ }
}
if(fsw_u64_le_swap(sb->num_devices) > BTRFS_MAX_NUM_DEVICES)
- vol->num_devices = BTRFS_MAX_NUM_DEVICES;
+ vol->num_devices = BTRFS_MAX_NUM_DEVICES;
else
- vol->num_devices = fsw_u64_le_swap(sb->num_devices);
+ vol->num_devices = fsw_u64_le_swap(sb->num_devices);
fsw_memcpy(vol->bootstrap_mapping, sb->bootstrap_mapping, sizeof(vol->bootstrap_mapping));
return FSW_SUCCESS;
}
};
static fsw_status_t fsw_btrfs_read_logical(struct fsw_btrfs_volume *vol,
- uint64_t addr, void *buf, fsw_size_t size, int rdepth, int cache_level);
+ uint64_t addr, void *buf, fsw_size_t size, int rdepth, int cache_level);
static fsw_status_t btrfs_read_superblock (struct fsw_volume *vol, struct btrfs_superblock *sb_out)
{
fsw_set_blocksize(vol, BTRFS_DEFAULT_BLOCK_SIZE, BTRFS_DEFAULT_BLOCK_SIZE);
for (i = 0; i < 4; i++)
{
- uint8_t *buffer;
- struct btrfs_superblock *sb;
-
- /* Don't try additional superblocks beyond device size. */
- if (total_blocks <= superblock_pos[i])
- break;
-
- err = fsw_block_get(vol, superblock_pos[i], 0, (void **)&buffer);
- if (err == FSW_UNSUPPORTED) {
- fsw_block_release(vol, superblock_pos[i], buffer);
- break;
- }
-
- sb = (struct btrfs_superblock *)buffer;
- if (!fsw_memeq (sb->signature, GRUB_BTRFS_SIGNATURE,
- sizeof (GRUB_BTRFS_SIGNATURE) - 1))
- {
- fsw_block_release(vol, superblock_pos[i], buffer);
- break;
- }
- if (i == 0 || fsw_u64_le_swap (sb->generation) > fsw_u64_le_swap (sb_out->generation))
- {
- fsw_memcpy (sb_out, sb, sizeof (*sb));
- total_blocks = fsw_u64_le_swap (sb->this_device.size) >> 12;
- }
- fsw_block_release(vol, superblock_pos[i], buffer);
+ uint8_t *buffer;
+ struct btrfs_superblock *sb;
+
+ /* Don't try additional superblocks beyond device size. */
+ if (total_blocks <= superblock_pos[i])
+ break;
+
+ err = fsw_block_get(vol, superblock_pos[i], 0, (void **)&buffer);
+ if (err == FSW_UNSUPPORTED) {
+ fsw_block_release(vol, superblock_pos[i], buffer);
+ break;
+ }
+
+ sb = (struct btrfs_superblock *)buffer;
+ if (!fsw_memeq (sb->signature, GRUB_BTRFS_SIGNATURE,
+ sizeof (GRUB_BTRFS_SIGNATURE) - 1))
+ {
+ fsw_block_release(vol, superblock_pos[i], buffer);
+ break;
+ }
+ if (i == 0 || fsw_u64_le_swap (sb->generation) > fsw_u64_le_swap (sb_out->generation))
+ {
+ fsw_memcpy (sb_out, sb, sizeof (*sb));
+ total_blocks = fsw_u64_le_swap (sb->this_device.size) >> 12;
+ }
+ fsw_block_release(vol, superblock_pos[i], buffer);
}
if ((err == FSW_UNSUPPORTED || !err) && i == 0)
- return FSW_UNSUPPORTED;
+ return FSW_UNSUPPORTED;
if (err == FSW_UNSUPPORTED)
- err = FSW_SUCCESS;
+ err = FSW_SUCCESS;
if(err == 0)
- DPRINT(L"btrfs: UUID: %08x-%08x-%08x-%08x device id: %d\n",
- sb_out->uuid[0], sb_out->uuid[1], sb_out->uuid[2], sb_out->uuid[3],
- sb_out->this_device.device_id);
+ DPRINT(L"btrfs: UUID: %08x-%08x-%08x-%08x device id: %d\n",
+ sb_out->uuid[0], sb_out->uuid[1], sb_out->uuid[2], sb_out->uuid[3],
+ sb_out->this_device.device_id);
return err;
}
static int key_cmp (const struct btrfs_key *a, const struct btrfs_key *b)
{
if (fsw_u64_le_swap (a->object_id) < fsw_u64_le_swap (b->object_id))
- return -1;
+ return -1;
if (fsw_u64_le_swap (a->object_id) > fsw_u64_le_swap (b->object_id))
- return +1;
+ return +1;
if (a->type < b->type)
- return -1;
+ return -1;
if (a->type > b->type)
- return +1;
+ return +1;
if (fsw_u64_le_swap (a->offset) < fsw_u64_le_swap (b->offset))
- return -1;
+ return -1;
if (fsw_u64_le_swap (a->offset) > fsw_u64_le_swap (b->offset))
- return +1;
+ return +1;
return 0;
}
}
static fsw_status_t save_ref (struct fsw_btrfs_leaf_descriptor *desc,
- uint64_t addr, unsigned i, unsigned m, int l)
+ uint64_t addr, unsigned i, unsigned m, int l)
{
desc->depth++;
if (desc->allocated < desc->depth)
{
- void *newdata;
- int oldsize = sizeof (desc->data[0]) * desc->allocated;
- desc->allocated *= 2;
- newdata = AllocatePool (sizeof (desc->data[0]) * desc->allocated);
- if (!newdata)
- return FSW_OUT_OF_MEMORY;
- fsw_memcpy(newdata, desc->data, oldsize);
- FreePool(desc->data);
- desc->data = newdata;
+ void *newdata;
+ int oldsize = sizeof (desc->data[0]) * desc->allocated;
+ desc->allocated *= 2;
+ newdata = AllocatePool (sizeof (desc->data[0]) * desc->allocated);
+ if (!newdata)
+ return FSW_OUT_OF_MEMORY;
+ fsw_memcpy(newdata, desc->data, oldsize);
+ FreePool(desc->data);
+ desc->data = newdata;
}
desc->data[desc->depth - 1].addr = addr;
desc->data[desc->depth - 1].iter = i;
}
static int next (struct fsw_btrfs_volume *vol,
- struct fsw_btrfs_leaf_descriptor *desc,
- uint64_t * outaddr, fsw_size_t * outsize,
- struct btrfs_key *key_out)
+ struct fsw_btrfs_leaf_descriptor *desc,
+ uint64_t * outaddr, fsw_size_t * outsize,
+ struct btrfs_key *key_out)
{
fsw_status_t err;
struct btrfs_leaf_node leaf;
for (; desc->depth > 0; desc->depth--)
{
- desc->data[desc->depth - 1].iter++;
- if (desc->data[desc->depth - 1].iter
- < desc->data[desc->depth - 1].maxiter)
- break;
+ desc->data[desc->depth - 1].iter++;
+ if (desc->data[desc->depth - 1].iter
+ < desc->data[desc->depth - 1].maxiter)
+ break;
}
if (desc->depth == 0)
- return 0;
+ return 0;
while (!desc->data[desc->depth - 1].leaf)
{
- struct btrfs_internal_node node;
- struct btrfs_header head;
- fsw_memzero(&node, sizeof(node));
-
- err = fsw_btrfs_read_logical (vol, desc->data[desc->depth - 1].iter
- * sizeof (node)
- + sizeof (struct btrfs_header)
- + desc->data[desc->depth - 1].addr,
- &node, sizeof (node), 0, 1);
- if (err)
- return -err;
-
- err = fsw_btrfs_read_logical (vol, fsw_u64_le_swap (node.addr),
- &head, sizeof (head), 0, 1);
- if (err)
- return -err;
-
- save_ref (desc, fsw_u64_le_swap (node.addr), 0,
- fsw_u32_le_swap (head.nitems), !head.level);
+ struct btrfs_internal_node node;
+ struct btrfs_header head;
+ fsw_memzero(&node, sizeof(node));
+
+ err = fsw_btrfs_read_logical (vol, desc->data[desc->depth - 1].iter
+ * sizeof (node)
+ + sizeof (struct btrfs_header)
+ + desc->data[desc->depth - 1].addr,
+ &node, sizeof (node), 0, 1);
+ if (err)
+ return -err;
+
+ err = fsw_btrfs_read_logical (vol, fsw_u64_le_swap (node.addr),
+ &head, sizeof (head), 0, 1);
+ if (err)
+ return -err;
+
+ save_ref (desc, fsw_u64_le_swap (node.addr), 0,
+ fsw_u32_le_swap (head.nitems), !head.level);
}
err = fsw_btrfs_read_logical (vol, desc->data[desc->depth - 1].iter
- * sizeof (leaf)
- + sizeof (struct btrfs_header)
- + desc->data[desc->depth - 1].addr, &leaf,
- sizeof (leaf), 0, 1);
+ * sizeof (leaf)
+ + sizeof (struct btrfs_header)
+ + desc->data[desc->depth - 1].addr, &leaf,
+ sizeof (leaf), 0, 1);
if (err)
- return -err;
+ return -err;
*outsize = fsw_u32_le_swap (leaf.size);
*outaddr = desc->data[desc->depth - 1].addr + sizeof (struct btrfs_header)
- + fsw_u32_le_swap (leaf.offset);
+ + fsw_u32_le_swap (leaf.offset);
*key_out = leaf.key;
return 1;
}
-#define depth2cache(x) ((x) >= 4 ? 1 : 5-(x))
+#define depth2cache(x) ((x) >= 4 ? 1 : 5-(x))
static fsw_status_t lower_bound (struct fsw_btrfs_volume *vol,
- const struct btrfs_key *key_in,
- struct btrfs_key *key_out,
- uint64_t root,
- uint64_t *outaddr, fsw_size_t *outsize,
- struct fsw_btrfs_leaf_descriptor *desc,
- int rdepth)
+ const struct btrfs_key *key_in,
+ struct btrfs_key *key_out,
+ uint64_t root,
+ uint64_t *outaddr, fsw_size_t *outsize,
+ struct fsw_btrfs_leaf_descriptor *desc,
+ int rdepth)
{
uint64_t addr = fsw_u64_le_swap (root);
int depth = -1;
if (desc)
{
- desc->allocated = 16;
- desc->depth = 0;
- desc->data = AllocatePool (sizeof (desc->data[0]) * desc->allocated);
- if (!desc->data)
- return FSW_OUT_OF_MEMORY;
+ desc->allocated = 16;
+ desc->depth = 0;
+ desc->data = AllocatePool (sizeof (desc->data[0]) * desc->allocated);
+ if (!desc->data)
+ return FSW_OUT_OF_MEMORY;
}
/* > 2 would work as well but be robust and allow a bit more just in case.
*/
if (rdepth > 10)
- return FSW_VOLUME_CORRUPTED;
+ return FSW_VOLUME_CORRUPTED;
DPRINT (L"btrfs: retrieving %lx %x %lx\n",
- key_in->object_id, key_in->type, key_in->offset);
+ key_in->object_id, key_in->type, key_in->offset);
while (1)
{
- fsw_status_t err;
- struct btrfs_header head;
- fsw_memzero(&head, sizeof(head));
+ fsw_status_t err;
+ struct btrfs_header head;
+ fsw_memzero(&head, sizeof(head));
reiter:
- depth++;
- /* FIXME: preread few nodes into buffer. */
- err = fsw_btrfs_read_logical (vol, addr, &head, sizeof (head),
- rdepth + 1, depth2cache(rdepth));
- if (err)
- return err;
- addr += sizeof (head);
- if (head.level)
- {
- unsigned i;
- struct btrfs_internal_node node, node_last;
- int have_last = 0;
- fsw_memzero (&node_last, sizeof (node_last));
- for (i = 0; i < fsw_u32_le_swap (head.nitems); i++)
- {
- err = fsw_btrfs_read_logical (vol, addr + i * sizeof (node),
- &node, sizeof (node), rdepth + 1, depth2cache(rdepth));
- if (err)
- return err;
-
- DPRINT (L"btrfs: internal node (depth %d) %lx %x %lx\n", depth,
- node.key.object_id, node.key.type,
- node.key.offset);
-
- if (key_cmp (&node.key, key_in) == 0)
- {
- err = FSW_SUCCESS;
- if (desc)
- err = save_ref (desc, addr - sizeof (head), i,
- fsw_u32_le_swap (head.nitems), 0);
- if (err)
- return err;
- addr = fsw_u64_le_swap (node.addr);
- goto reiter;
- }
- if (key_cmp (&node.key, key_in) > 0)
- break;
- node_last = node;
- have_last = 1;
- }
- if (have_last)
- {
- err = FSW_SUCCESS;
- if (desc)
- err = save_ref (desc, addr - sizeof (head), i - 1,
- fsw_u32_le_swap (head.nitems), 0);
- if (err)
- return err;
- addr = fsw_u64_le_swap (node_last.addr);
- goto reiter;
- }
- *outsize = 0;
- *outaddr = 0;
- fsw_memzero (key_out, sizeof (*key_out));
- if (desc)
- return save_ref (desc, addr - sizeof (head), -1,
- fsw_u32_le_swap (head.nitems), 0);
- return FSW_SUCCESS;
- }
- {
- unsigned i;
- struct btrfs_leaf_node leaf, leaf_last;
- int have_last = 0;
- for (i = 0; i < fsw_u32_le_swap (head.nitems); i++)
- {
- err = fsw_btrfs_read_logical (vol, addr + i * sizeof (leaf),
- &leaf, sizeof (leaf), rdepth + 1, depth2cache(rdepth));
- if (err)
- return err;
-
- DPRINT (L"btrfs: leaf (depth %d) %lx %x %lx\n", depth,
- leaf.key.object_id, leaf.key.type, leaf.key.offset);
-
- if (key_cmp (&leaf.key, key_in) == 0)
- {
- fsw_memcpy (key_out, &leaf.key, sizeof (*key_out));
- *outsize = fsw_u32_le_swap (leaf.size);
- *outaddr = addr + fsw_u32_le_swap (leaf.offset);
- if (desc)
- return save_ref (desc, addr - sizeof (head), i,
- fsw_u32_le_swap (head.nitems), 1);
- return FSW_SUCCESS;
- }
-
- if (key_cmp (&leaf.key, key_in) > 0)
- break;
-
- have_last = 1;
- leaf_last = leaf;
- }
-
- if (have_last)
- {
- fsw_memcpy (key_out, &leaf_last.key, sizeof (*key_out));
- *outsize = fsw_u32_le_swap (leaf_last.size);
- *outaddr = addr + fsw_u32_le_swap (leaf_last.offset);
- if (desc)
- return save_ref (desc, addr - sizeof (head), i - 1,
- fsw_u32_le_swap (head.nitems), 1);
- return FSW_SUCCESS;
- }
- *outsize = 0;
- *outaddr = 0;
- fsw_memzero (key_out, sizeof (*key_out));
- if (desc)
- return save_ref (desc, addr - sizeof (head), -1,
- fsw_u32_le_swap (head.nitems), 1);
- return FSW_SUCCESS;
- }
+ depth++;
+ /* FIXME: preread few nodes into buffer. */
+ err = fsw_btrfs_read_logical (vol, addr, &head, sizeof (head),
+ rdepth + 1, depth2cache(rdepth));
+ if (err)
+ return err;
+ addr += sizeof (head);
+ if (head.level)
+ {
+ unsigned i;
+ struct btrfs_internal_node node, node_last;
+ int have_last = 0;
+ fsw_memzero (&node_last, sizeof (node_last));
+ for (i = 0; i < fsw_u32_le_swap (head.nitems); i++)
+ {
+ err = fsw_btrfs_read_logical (vol, addr + i * sizeof (node),
+ &node, sizeof (node), rdepth + 1, depth2cache(rdepth));
+ if (err)
+ return err;
+
+ DPRINT (L"btrfs: internal node (depth %d) %lx %x %lx\n", depth,
+ node.key.object_id, node.key.type,
+ node.key.offset);
+
+ if (key_cmp (&node.key, key_in) == 0)
+ {
+ err = FSW_SUCCESS;
+ if (desc)
+ err = save_ref (desc, addr - sizeof (head), i,
+ fsw_u32_le_swap (head.nitems), 0);
+ if (err)
+ return err;
+ addr = fsw_u64_le_swap (node.addr);
+ goto reiter;
+ }
+ if (key_cmp (&node.key, key_in) > 0)
+ break;
+ node_last = node;
+ have_last = 1;
+ }
+ if (have_last)
+ {
+ err = FSW_SUCCESS;
+ if (desc)
+ err = save_ref (desc, addr - sizeof (head), i - 1,
+ fsw_u32_le_swap (head.nitems), 0);
+ if (err)
+ return err;
+ addr = fsw_u64_le_swap (node_last.addr);
+ goto reiter;
+ }
+ *outsize = 0;
+ *outaddr = 0;
+ fsw_memzero (key_out, sizeof (*key_out));
+ if (desc)
+ return save_ref (desc, addr - sizeof (head), -1,
+ fsw_u32_le_swap (head.nitems), 0);
+ return FSW_SUCCESS;
+ }
+ {
+ unsigned i;
+ struct btrfs_leaf_node leaf, leaf_last;
+ int have_last = 0;
+ for (i = 0; i < fsw_u32_le_swap (head.nitems); i++)
+ {
+ err = fsw_btrfs_read_logical (vol, addr + i * sizeof (leaf),
+ &leaf, sizeof (leaf), rdepth + 1, depth2cache(rdepth));
+ if (err)
+ return err;
+
+ DPRINT (L"btrfs: leaf (depth %d) %lx %x %lx\n", depth,
+ leaf.key.object_id, leaf.key.type, leaf.key.offset);
+
+ if (key_cmp (&leaf.key, key_in) == 0)
+ {
+ fsw_memcpy (key_out, &leaf.key, sizeof (*key_out));
+ *outsize = fsw_u32_le_swap (leaf.size);
+ *outaddr = addr + fsw_u32_le_swap (leaf.offset);
+ if (desc)
+ return save_ref (desc, addr - sizeof (head), i,
+ fsw_u32_le_swap (head.nitems), 1);
+ return FSW_SUCCESS;
+ }
+
+ if (key_cmp (&leaf.key, key_in) > 0)
+ break;
+
+ have_last = 1;
+ leaf_last = leaf;
+ }
+
+ if (have_last)
+ {
+ fsw_memcpy (key_out, &leaf_last.key, sizeof (*key_out));
+ *outsize = fsw_u32_le_swap (leaf_last.size);
+ *outaddr = addr + fsw_u32_le_swap (leaf_last.offset);
+ if (desc)
+ return save_ref (desc, addr - sizeof (head), i - 1,
+ fsw_u32_le_swap (head.nitems), 1);
+ return FSW_SUCCESS;
+ }
+ *outsize = 0;
+ *outaddr = 0;
+ fsw_memzero (key_out, sizeof (*key_out));
+ if (desc)
+ return save_ref (desc, addr - sizeof (head), -1,
+ fsw_u32_le_swap (head.nitems), 1);
+ return FSW_SUCCESS;
+ }
}
}
{
int i;
for( i = 0; i < master->n_devices_attached; i++)
- if(sb->this_device.device_id == master->devices_attached[i].id)
- return FSW_UNSUPPORTED;
+ if(sb->this_device.device_id == master->devices_attached[i].id)
+ return FSW_UNSUPPORTED;
slave = clone_dummy_volume(slave);
if(slave == NULL)
- return FSW_OUT_OF_MEMORY;
+ return FSW_OUT_OF_MEMORY;
fsw_set_blocksize(slave, master->sectorsize, master->sectorsize);
slave->bcache_size = BTRFS_INITIAL_BCACHE_SIZE;
fsw_status_t err;
if(vol->n_devices_attached >= vol->n_devices_allocated)
- return FSW_UNSUPPORTED;
+ return FSW_UNSUPPORTED;
err = btrfs_read_superblock(slave, &sb);
if(err)
- return FSW_UNSUPPORTED;
+ return FSW_UNSUPPORTED;
if(!uuid_eq(vol->uuid, sb.uuid))
- return FSW_UNSUPPORTED;
+ return FSW_UNSUPPORTED;
return btrfs_add_multi_device(vol, slave, &sb);
}
int i;
do {
- for (i = 0; i < vol->n_devices_attached; i++)
- if (id == vol->devices_attached[i].id)
- return vol->devices_attached[i].dev;
+ for (i = 0; i < vol->n_devices_attached; i++)
+ if (id == vol->devices_attached[i].id)
+ return vol->devices_attached[i].dev;
} while(vol->n_devices_attached < vol->n_devices_allocated &&
- do_rescan-- > 0 &&
- scan_disks(scan_disks_hook, &vol->g) > 0);
+ do_rescan-- > 0 &&
+ scan_disks(scan_disks_hook, &vol->g) > 0);
DPRINT(L"sub device %d not found\n", id);
return NULL;
}
static fsw_status_t fsw_btrfs_read_logical (struct fsw_btrfs_volume *vol, uint64_t addr,
- void *buf, fsw_size_t size, int rdepth, int cache_level)
+ void *buf, fsw_size_t size, int rdepth, int cache_level)
{
while (size > 0)
{
- uint8_t *ptr;
- struct btrfs_key *key;
- struct btrfs_chunk_item *chunk;
- uint64_t csize;
- fsw_status_t err = 0;
- struct btrfs_key key_out;
- int challoc = 0;
- struct btrfs_key key_in;
- fsw_size_t chsize;
- uint64_t chaddr;
-
- for (ptr = vol->bootstrap_mapping; ptr < vol->bootstrap_mapping + sizeof (vol->bootstrap_mapping) - sizeof (struct btrfs_key);)
- {
- key = (struct btrfs_key *) ptr;
- if (key->type != GRUB_BTRFS_ITEM_TYPE_CHUNK)
- break;
- chunk = (struct btrfs_chunk_item *) (key + 1);
- if (fsw_u64_le_swap (key->offset) <= addr
- && addr < fsw_u64_le_swap (key->offset)
- + fsw_u64_le_swap (chunk->size))
- {
- goto chunk_found;
- }
- ptr += sizeof (*key) + sizeof (*chunk)
- + sizeof (struct btrfs_chunk_stripe)
- * fsw_u16_le_swap (chunk->nstripes);
- }
-
- key_in.object_id = fsw_u64_le_swap (GRUB_BTRFS_OBJECT_ID_CHUNK);
- key_in.type = GRUB_BTRFS_ITEM_TYPE_CHUNK;
- key_in.offset = fsw_u64_le_swap (addr);
- err = lower_bound (vol, &key_in, &key_out, vol->chunk_tree, &chaddr, &chsize, NULL, rdepth);
- if (err)
- return err;
- key = &key_out;
- if (key->type != GRUB_BTRFS_ITEM_TYPE_CHUNK
- || !(fsw_u64_le_swap (key->offset) <= addr))
- {
- return FSW_VOLUME_CORRUPTED;
- }
- // "couldn't find the chunk descriptor");
-
- chunk = AllocatePool (chsize);
- if (!chunk) {
- return FSW_OUT_OF_MEMORY;
- }
-
- challoc = 1;
- err = fsw_btrfs_read_logical (vol, chaddr, chunk, chsize, rdepth, cache_level < 5 ? cache_level+1 : 5);
- if (err)
- {
- if(chunk)
- FreePool (chunk);
- return err;
- }
+ uint8_t *ptr;
+ struct btrfs_key *key;
+ struct btrfs_chunk_item *chunk;
+ uint64_t csize;
+ fsw_status_t err = 0;
+ struct btrfs_key key_out;
+ int challoc = 0;
+ struct btrfs_key key_in;
+ fsw_size_t chsize;
+ uint64_t chaddr;
+
+ for (ptr = vol->bootstrap_mapping; ptr < vol->bootstrap_mapping + sizeof (vol->bootstrap_mapping) - sizeof (struct btrfs_key);)
+ {
+ key = (struct btrfs_key *) ptr;
+ if (key->type != GRUB_BTRFS_ITEM_TYPE_CHUNK)
+ break;
+ chunk = (struct btrfs_chunk_item *) (key + 1);
+ if (fsw_u64_le_swap (key->offset) <= addr
+ && addr < fsw_u64_le_swap (key->offset)
+ + fsw_u64_le_swap (chunk->size))
+ {
+ goto chunk_found;
+ }
+ ptr += sizeof (*key) + sizeof (*chunk)
+ + sizeof (struct btrfs_chunk_stripe)
+ * fsw_u16_le_swap (chunk->nstripes);
+ }
+
+ key_in.object_id = fsw_u64_le_swap (GRUB_BTRFS_OBJECT_ID_CHUNK);
+ key_in.type = GRUB_BTRFS_ITEM_TYPE_CHUNK;
+ key_in.offset = fsw_u64_le_swap (addr);
+ err = lower_bound (vol, &key_in, &key_out, vol->chunk_tree, &chaddr, &chsize, NULL, rdepth);
+ if (err)
+ return err;
+ key = &key_out;
+ if (key->type != GRUB_BTRFS_ITEM_TYPE_CHUNK
+ || !(fsw_u64_le_swap (key->offset) <= addr))
+ {
+ return FSW_VOLUME_CORRUPTED;
+ }
+ // "couldn't find the chunk descriptor");
+
+ chunk = AllocatePool (chsize);
+ if (!chunk) {
+ return FSW_OUT_OF_MEMORY;
+ }
+
+ challoc = 1;
+ err = fsw_btrfs_read_logical (vol, chaddr, chunk, chsize, rdepth, cache_level < 5 ? cache_level+1 : 5);
+ if (err)
+ {
+ if(chunk)
+ FreePool (chunk);
+ return err;
+ }
chunk_found:
- {
+ {
#ifdef __MAKEWITH_GNUEFI
-#define UINTREM UINTN
+#define UINTREM UINTN
#else
#undef DivU64x32
#define DivU64x32 DivU64x32Remainder
#define UINTREM UINT32
#endif
- UINTREM stripen;
- UINTREM stripe_offset;
- uint64_t off = addr - fsw_u64_le_swap (key->offset);
- unsigned redundancy = 1;
- unsigned i, j;
-
- if (fsw_u64_le_swap (chunk->size) <= off)
- {
- return FSW_VOLUME_CORRUPTED;
- //"couldn't find the chunk descriptor");
- }
-
- DPRINT(L"btrfs chunk 0x%lx+0xlx %d stripes (%d substripes) of %lx\n",
- fsw_u64_le_swap (key->offset),
- fsw_u64_le_swap (chunk->size),
- fsw_u16_le_swap (chunk->nstripes),
- fsw_u16_le_swap (chunk->nsubstripes),
- fsw_u64_le_swap (chunk->stripe_length));
-
- /* gnu-efi has no DivU64x64Remainder, limited to DivU64x32 */
- switch (fsw_u64_le_swap (chunk->type)
- & ~GRUB_BTRFS_CHUNK_TYPE_BITS_DONTCARE)
- {
- case GRUB_BTRFS_CHUNK_TYPE_SINGLE:
- {
- uint64_t stripe_length;
-
- stripe_length = DivU64x32 (fsw_u64_le_swap (chunk->size),
- fsw_u16_le_swap (chunk->nstripes), NULL);
-
- if(stripe_length > 1UL<<30)
- return FSW_VOLUME_CORRUPTED;
-
- stripen = DivU64x32 (off, (uint32_t)stripe_length, &stripe_offset);
- csize = (stripen + 1) * stripe_length - off;
- DPRINT(L"read_logical %d chunk_found single csize=%d\n", __LINE__, csize);
- break;
- }
- case GRUB_BTRFS_CHUNK_TYPE_DUPLICATED:
- case GRUB_BTRFS_CHUNK_TYPE_RAID1:
- {
- stripen = 0;
- stripe_offset = off;
- csize = fsw_u64_le_swap (chunk->size) - off;
- redundancy = 2;
- DPRINT(L"read_logical %d chunk_found dup/raid1 off=%lx csize=%d\n", __LINE__, stripe_offset, csize);
- break;
- }
- case GRUB_BTRFS_CHUNK_TYPE_RAID0:
- {
- uint64_t stripe_length = fsw_u64_le_swap (chunk->stripe_length);
- uint64_t middle, high;
- UINTREM low;
-
- if(stripe_length > 1UL<<30)
- return FSW_VOLUME_CORRUPTED;
-
- middle = DivU64x32 (off, (uint32_t)stripe_length, &low);
-
- high = DivU64x32 (middle, fsw_u16_le_swap (chunk->nstripes), &stripen);
- stripe_offset =
- low + fsw_u64_le_swap (chunk->stripe_length) * high;
- csize = fsw_u64_le_swap (chunk->stripe_length) - low;
- DPRINT(L"read_logical %d chunk_found raid0 csize=%d\n", __LINE__, csize);
- break;
- }
- case GRUB_BTRFS_CHUNK_TYPE_RAID10:
- {
- uint64_t stripe_length = fsw_u64_le_swap (chunk->stripe_length);
- uint64_t middle, high;
- UINTREM low;
-
- if(stripe_length > 1UL<<30)
- return FSW_VOLUME_CORRUPTED;
-
- middle = DivU64x32 (off, stripe_length, &low);
-
- high = DivU64x32 (middle,
- fsw_u16_le_swap (chunk->nstripes)
- / fsw_u16_le_swap (chunk->nsubstripes),
- &stripen);
- stripen *= fsw_u16_le_swap (chunk->nsubstripes);
- redundancy = fsw_u16_le_swap (chunk->nsubstripes);
- stripe_offset = low + fsw_u64_le_swap (chunk->stripe_length)
- * high;
- csize = fsw_u64_le_swap (chunk->stripe_length) - low;
- DPRINT(L"read_logical %d chunk_found raid01 csize=%d\n", __LINE__, csize);
- break;
- }
- default:
- DPRINT (L"btrfs: unsupported RAID\n");
- return FSW_UNSUPPORTED;
- }
- if (csize == 0)
- //"couldn't find the chunk descriptor");
- return FSW_VOLUME_CORRUPTED;
-
- if (csize > (uint64_t) size)
- csize = size;
-
- for (j = 0; j < 2; j++)
- {
- for (i = 0; i < redundancy; i++)
- {
- struct btrfs_chunk_stripe *stripe;
- uint64_t paddr;
- struct fsw_volume *dev;
-
- stripe = (struct btrfs_chunk_stripe *) (chunk + 1);
- /* Right now the redundancy handling is easy.
- With RAID5-like it will be more difficult. */
- stripe += stripen + i;
-
- paddr = fsw_u64_le_swap (stripe->offset) + stripe_offset;
-
- DPRINT (L"btrfs: chunk 0x%lx+0x%lx (%d stripes (%d substripes) of %lx) stripe %lx maps to 0x%lx\n",
- fsw_u64_le_swap (key->offset),
- fsw_u64_le_swap (chunk->size),
- fsw_u16_le_swap (chunk->nstripes),
- fsw_u16_le_swap (chunk->nsubstripes),
- fsw_u64_le_swap (chunk->stripe_length),
- stripen, stripe->offset);
- DPRINT (L"btrfs: reading paddr 0x%lx for laddr 0x%lx\n", paddr, addr);
-
- dev = find_device (vol, stripe->device_id, j);
- if (!dev)
- {
- err = FSW_VOLUME_CORRUPTED;
- continue;
- }
-
- uint32_t off = paddr & (vol->sectorsize - 1);
- paddr >>= vol->sectorshift;
- uint64_t n = 0;
- while(n < csize) {
- char *buffer;
- err = fsw_block_get(dev, paddr, cache_level, (void **)&buffer);
- if(err)
- break;
- int s = vol->sectorsize - off;
- if(s > csize - n)
- s = csize - n;
- fsw_memcpy(buf+n, buffer+off, s);
- fsw_block_release(dev, paddr, (void *)buffer);
-
- n += s;
- off = 0;
- paddr++;
- }
- DPRINT (L"read logical: err %d csize %d got %d\n",
- err, csize, n);
- if(n>=csize)
- break;
- }
- if (i != redundancy)
- break;
- }
- if (err)
- return err;
- }
- size -= csize;
- buf = (uint8_t *) buf + csize;
- addr += csize;
- if (challoc && chunk)
- FreePool (chunk);
+ UINTREM stripen;
+ UINTREM stripe_offset;
+ uint64_t off = addr - fsw_u64_le_swap (key->offset);
+ unsigned redundancy = 1;
+ unsigned i, j;
+
+ if (fsw_u64_le_swap (chunk->size) <= off)
+ {
+ return FSW_VOLUME_CORRUPTED;
+ //"couldn't find the chunk descriptor");
+ }
+
+          DPRINT(L"btrfs chunk 0x%lx+0x%lx %d stripes (%d substripes) of %lx\n",
+ fsw_u64_le_swap (key->offset),
+ fsw_u64_le_swap (chunk->size),
+ fsw_u16_le_swap (chunk->nstripes),
+ fsw_u16_le_swap (chunk->nsubstripes),
+ fsw_u64_le_swap (chunk->stripe_length));
+
+ /* gnu-efi has no DivU64x64Remainder, limited to DivU64x32 */
+ switch (fsw_u64_le_swap (chunk->type)
+ & ~GRUB_BTRFS_CHUNK_TYPE_BITS_DONTCARE)
+ {
+ case GRUB_BTRFS_CHUNK_TYPE_SINGLE:
+ {
+ uint64_t stripe_length;
+
+ stripe_length = DivU64x32 (fsw_u64_le_swap (chunk->size),
+ fsw_u16_le_swap (chunk->nstripes), NULL);
+
+ if(stripe_length > 1UL<<30)
+ return FSW_VOLUME_CORRUPTED;
+
+ stripen = DivU64x32 (off, (uint32_t)stripe_length, &stripe_offset);
+ csize = (stripen + 1) * stripe_length - off;
+ DPRINT(L"read_logical %d chunk_found single csize=%d\n", __LINE__, csize);
+ break;
+ }
+ case GRUB_BTRFS_CHUNK_TYPE_DUPLICATED:
+ case GRUB_BTRFS_CHUNK_TYPE_RAID1:
+ {
+ stripen = 0;
+ stripe_offset = off;
+ csize = fsw_u64_le_swap (chunk->size) - off;
+ redundancy = 2;
+ DPRINT(L"read_logical %d chunk_found dup/raid1 off=%lx csize=%d\n", __LINE__, stripe_offset, csize);
+ break;
+ }
+ case GRUB_BTRFS_CHUNK_TYPE_RAID0:
+ {
+ uint64_t stripe_length = fsw_u64_le_swap (chunk->stripe_length);
+ uint64_t middle, high;
+ UINTREM low;
+
+ if(stripe_length > 1UL<<30)
+ return FSW_VOLUME_CORRUPTED;
+
+ middle = DivU64x32 (off, (uint32_t)stripe_length, &low);
+
+ high = DivU64x32 (middle, fsw_u16_le_swap (chunk->nstripes), &stripen);
+ stripe_offset =
+ low + fsw_u64_le_swap (chunk->stripe_length) * high;
+ csize = fsw_u64_le_swap (chunk->stripe_length) - low;
+ DPRINT(L"read_logical %d chunk_found raid0 csize=%d\n", __LINE__, csize);
+ break;
+ }
+ case GRUB_BTRFS_CHUNK_TYPE_RAID10:
+ {
+ uint64_t stripe_length = fsw_u64_le_swap (chunk->stripe_length);
+ uint64_t middle, high;
+ UINTREM low;
+
+ if(stripe_length > 1UL<<30)
+ return FSW_VOLUME_CORRUPTED;
+
+ middle = DivU64x32 (off, stripe_length, &low);
+
+ high = DivU64x32 (middle,
+ fsw_u16_le_swap (chunk->nstripes)
+ / fsw_u16_le_swap (chunk->nsubstripes),
+ &stripen);
+ stripen *= fsw_u16_le_swap (chunk->nsubstripes);
+ redundancy = fsw_u16_le_swap (chunk->nsubstripes);
+ stripe_offset = low + fsw_u64_le_swap (chunk->stripe_length)
+ * high;
+ csize = fsw_u64_le_swap (chunk->stripe_length) - low;
+ DPRINT(L"read_logical %d chunk_found raid01 csize=%d\n", __LINE__, csize);
+ break;
+ }
+ default:
+ DPRINT (L"btrfs: unsupported RAID\n");
+ return FSW_UNSUPPORTED;
+ }
+ if (csize == 0)
+ //"couldn't find the chunk descriptor");
+ return FSW_VOLUME_CORRUPTED;
+
+ if (csize > (uint64_t) size)
+ csize = size;
+
+ for (j = 0; j < 2; j++)
+ {
+ for (i = 0; i < redundancy; i++)
+ {
+ struct btrfs_chunk_stripe *stripe;
+ uint64_t paddr;
+ struct fsw_volume *dev;
+
+ stripe = (struct btrfs_chunk_stripe *) (chunk + 1);
+ /* Right now the redundancy handling is easy.
+ With RAID5-like it will be more difficult. */
+ stripe += stripen + i;
+
+ paddr = fsw_u64_le_swap (stripe->offset) + stripe_offset;
+
+ DPRINT (L"btrfs: chunk 0x%lx+0x%lx (%d stripes (%d substripes) of %lx) stripe %lx maps to 0x%lx\n",
+ fsw_u64_le_swap (key->offset),
+ fsw_u64_le_swap (chunk->size),
+ fsw_u16_le_swap (chunk->nstripes),
+ fsw_u16_le_swap (chunk->nsubstripes),
+ fsw_u64_le_swap (chunk->stripe_length),
+ stripen, stripe->offset);
+ DPRINT (L"btrfs: reading paddr 0x%lx for laddr 0x%lx\n", paddr, addr);
+
+ dev = find_device (vol, stripe->device_id, j);
+ if (!dev)
+ {
+ err = FSW_VOLUME_CORRUPTED;
+ continue;
+ }
+
+ uint32_t off = paddr & (vol->sectorsize - 1);
+ paddr >>= vol->sectorshift;
+ uint64_t n = 0;
+ while(n < csize) {
+ char *buffer;
+ err = fsw_block_get(dev, paddr, cache_level, (void **)&buffer);
+ if(err)
+ break;
+ int s = vol->sectorsize - off;
+ if(s > csize - n)
+ s = csize - n;
+ fsw_memcpy(buf+n, buffer+off, s);
+ fsw_block_release(dev, paddr, (void *)buffer);
+
+ n += s;
+ off = 0;
+ paddr++;
+ }
+ DPRINT (L"read logical: err %d csize %d got %d\n",
+ err, csize, n);
+ if(n>=csize)
+ break;
+ }
+ if (i != redundancy)
+ break;
+ }
+ if (err)
+ return err;
+ }
+ size -= csize;
+ buf = (uint8_t *) buf + csize;
+ addr += csize;
+ if (challoc && chunk)
+ FreePool (chunk);
}
return FSW_SUCCESS;
}
err = btrfs_read_superblock (volg, &sblock);
if (err)
- return err;
+ return err;
btrfs_set_superblock_info(vol, &sblock);
if(vol->sectorshift == 0)
- return FSW_UNSUPPORTED;
+ return FSW_UNSUPPORTED;
if(vol->num_devices >= BTRFS_MAX_NUM_DEVICES)
- return FSW_UNSUPPORTED;
+ return FSW_UNSUPPORTED;
vol->is_master = master_uuid_add(vol, &master_out);
/* already mounted via other device */
if(vol->is_master == 0) {
#define FAKE_LABEL "btrfs.multi.device"
- s.type = FSW_STRING_TYPE_UTF8;
- s.size = s.len = sizeof(FAKE_LABEL)-1;
- s.data = FAKE_LABEL;
- err = fsw_strdup_coerce(&volg->label, volg->host_string_type, &s);
- if (err)
- return err;
- btrfs_add_multi_device(master_out, volg, &sblock);
- /* create fake root */
- return fsw_dnode_create_root_with_tree(volg, 0, 0, &volg->root);
+ s.type = FSW_STRING_TYPE_UTF8;
+ s.size = s.len = sizeof(FAKE_LABEL)-1;
+ s.data = FAKE_LABEL;
+ err = fsw_strdup_coerce(&volg->label, volg->host_string_type, &s);
+ if (err)
+ return err;
+ btrfs_add_multi_device(master_out, volg, &sblock);
+ /* create fake root */
+ return fsw_dnode_create_root_with_tree(volg, 0, 0, &volg->root);
}
fsw_set_blocksize(volg, vol->sectorsize, vol->sectorsize);
vol->g.bcache_size = BTRFS_INITIAL_BCACHE_SIZE;
vol->n_devices_allocated = vol->num_devices;
vol->devices_attached = AllocatePool (sizeof (vol->devices_attached[0])
- * vol->n_devices_allocated);
+ * vol->n_devices_allocated);
if (!vol->devices_attached)
- return FSW_OUT_OF_MEMORY;
+ return FSW_OUT_OF_MEMORY;
vol->n_devices_attached = 1;
vol->devices_attached[0].dev = volg;
vol->devices_attached[0].id = sblock.this_device.device_id;
for (i = 0; i < 0x100; i++)
- if (sblock.label[i] == 0)
- break;
+ if (sblock.label[i] == 0)
+ break;
s.type = FSW_STRING_TYPE_UTF8;
s.size = s.len = i;
s.data = sblock.label;
err = fsw_strdup_coerce(&volg->label, volg->host_string_type, &s);
if (err) {
- FreePool (vol->devices_attached);
- vol->devices_attached = NULL;
- return err;
+ FreePool (vol->devices_attached);
+ vol->devices_attached = NULL;
+ return err;
}
err = fsw_btrfs_get_default_root(vol, sblock.root_dir_objectid);
if (err) {
- DPRINT(L"root not found\n");
- FreePool (vol->devices_attached);
- vol->devices_attached = NULL;
- return err;
+ DPRINT(L"root not found\n");
+ FreePool (vol->devices_attached);
+ vol->devices_attached = NULL;
+ return err;
}
return FSW_SUCCESS;
struct fsw_btrfs_volume *vol = (struct fsw_btrfs_volume *)volg;
if (vol==NULL)
- return;
+ return;
if (vol->is_master)
- master_uuid_remove(vol);
+ master_uuid_remove(vol);
/* The device 0 is closed one layer upper. */
for (i = 1; i < vol->n_devices_attached; i++)
- fsw_unmount (vol->devices_attached[i].dev);
+ fsw_unmount (vol->devices_attached[i].dev);
if(vol->devices_attached)
- FreePool (vol->devices_attached);
+ FreePool (vol->devices_attached);
if(vol->extent)
- FreePool (vol->extent);
+ FreePool (vol->extent);
}
static fsw_status_t fsw_btrfs_volume_stat(struct fsw_volume *volg, struct fsw_volume_stat *sb)
}
static fsw_status_t fsw_btrfs_read_inode (struct fsw_btrfs_volume *vol,
- struct btrfs_inode *inode, uint64_t num,
- uint64_t tree)
+ struct btrfs_inode *inode, uint64_t num,
+ uint64_t tree)
{
struct btrfs_key key_in, key_out;
uint64_t elemaddr;
err = lower_bound (vol, &key_in, &key_out, tree, &elemaddr, &elemsize, NULL, 0);
if (err)
- return err;
+ return err;
if (num != key_out.object_id
- || key_out.type != GRUB_BTRFS_ITEM_TYPE_INODE_ITEM)
- return FSW_NOT_FOUND;
+ || key_out.type != GRUB_BTRFS_ITEM_TYPE_INODE_ITEM)
+ return FSW_NOT_FOUND;
return fsw_btrfs_read_logical (vol, elemaddr, inode, sizeof (*inode), 0, 2);
}
/* slave device got empty root */
if (!vol->is_master) {
- dno->g.size = 0;
- dno->g.type = FSW_DNODE_TYPE_DIR;
- return FSW_SUCCESS;
+ dno->g.size = 0;
+ dno->g.type = FSW_DNODE_TYPE_DIR;
+ return FSW_SUCCESS;
}
if (dno->raw)
- return FSW_SUCCESS;
+ return FSW_SUCCESS;
dno->raw = AllocatePool(sizeof(struct btrfs_inode));
if(dno->raw == NULL)
- return FSW_OUT_OF_MEMORY;
+ return FSW_OUT_OF_MEMORY;
err = fsw_btrfs_read_inode(vol, dno->raw, dno->g.dnode_id, dno->g.tree_id);
if (err) {
- FreePool(dno->raw);
- dno->raw = NULL;
- return err;
+ FreePool(dno->raw);
+ dno->raw = NULL;
+ return err;
}
// get info from the inode
// TODO: check docs for 64-bit sized files
mode = fsw_u32_le_swap(dno->raw->mode);
if (S_ISREG(mode))
- dno->g.type = FSW_DNODE_TYPE_FILE;
+ dno->g.type = FSW_DNODE_TYPE_FILE;
else if (S_ISDIR(mode))
- dno->g.type = FSW_DNODE_TYPE_DIR;
+ dno->g.type = FSW_DNODE_TYPE_DIR;
else if (S_ISLNK(mode))
- dno->g.type = FSW_DNODE_TYPE_SYMLINK;
+ dno->g.type = FSW_DNODE_TYPE_SYMLINK;
else
- dno->g.type = FSW_DNODE_TYPE_SPECIAL;
+ dno->g.type = FSW_DNODE_TYPE_SPECIAL;
return FSW_SUCCESS;
}
{
struct fsw_btrfs_dnode *dno = (struct fsw_btrfs_dnode *)dnog;
if (dno->raw)
- FreePool(dno->raw);
+ FreePool(dno->raw);
}
static fsw_status_t fsw_btrfs_dnode_stat(struct fsw_volume *volg, struct fsw_dnode *dnog, struct fsw_dnode_stat *sb)
/* slave device got empty root */
if(dno->raw == NULL) {
- sb->used_bytes = 0;
- sb->store_time_posix(sb, FSW_DNODE_STAT_CTIME, 0);
- sb->store_time_posix(sb, FSW_DNODE_STAT_ATIME, 0);
- sb->store_time_posix(sb, FSW_DNODE_STAT_MTIME, 0);
- return FSW_SUCCESS;
+ sb->used_bytes = 0;
+ sb->store_time_posix(sb, FSW_DNODE_STAT_CTIME, 0);
+ sb->store_time_posix(sb, FSW_DNODE_STAT_ATIME, 0);
+ sb->store_time_posix(sb, FSW_DNODE_STAT_MTIME, 0);
+ return FSW_SUCCESS;
}
sb->used_bytes = fsw_u64_le_swap(dno->raw->nbytes);
sb->store_time_posix(sb, FSW_DNODE_STAT_ATIME,
- fsw_u64_le_swap(dno->raw->atime.sec));
+ fsw_u64_le_swap(dno->raw->atime.sec));
sb->store_time_posix(sb, FSW_DNODE_STAT_CTIME,
- fsw_u64_le_swap(dno->raw->ctime.sec));
+ fsw_u64_le_swap(dno->raw->ctime.sec));
sb->store_time_posix(sb, FSW_DNODE_STAT_MTIME,
- fsw_u64_le_swap(dno->raw->mtime.sec));
+ fsw_u64_le_swap(dno->raw->mtime.sec));
sb->store_attr_posix(sb, fsw_u32_le_swap(dno->raw->mode));
return FSW_SUCCESS;
}
static fsw_ssize_t grub_btrfs_lzo_decompress(char *ibuf, fsw_size_t isize, grub_off_t off,
- char *obuf, fsw_size_t osize)
+ char *obuf, fsw_size_t osize)
{
uint32_t total_size, cblock_size;
fsw_size_t ret = 0;
ibuf += sizeof (total_size);
if (isize < total_size)
- return -1;
+ return -1;
/* Jump forward to first block with requested data. */
while (off >= GRUB_BTRFS_LZO_BLOCK_SIZE)
{
- /* Don't let following uint32_t cross the page boundary. */
- if (((ibuf - ibuf0) & 0xffc) == 0xffc)
- ibuf = ((ibuf - ibuf0 + 3) & ~3) + ibuf0;
+ /* Don't let following uint32_t cross the page boundary. */
+ if (((ibuf - ibuf0) & 0xffc) == 0xffc)
+ ibuf = ((ibuf - ibuf0 + 3) & ~3) + ibuf0;
- cblock_size = fsw_u32_le_swap (fsw_get_unaligned32 (ibuf));
- ibuf += sizeof (cblock_size);
+ cblock_size = fsw_u32_le_swap (fsw_get_unaligned32 (ibuf));
+ ibuf += sizeof (cblock_size);
- if (cblock_size > GRUB_BTRFS_LZO_BLOCK_MAX_CSIZE)
- return -1;
+ if (cblock_size > GRUB_BTRFS_LZO_BLOCK_MAX_CSIZE)
+ return -1;
- off -= GRUB_BTRFS_LZO_BLOCK_SIZE;
- ibuf += cblock_size;
+ off -= GRUB_BTRFS_LZO_BLOCK_SIZE;
+ ibuf += cblock_size;
}
while (osize > 0)
{
- lzo_uint usize = GRUB_BTRFS_LZO_BLOCK_SIZE;
+ lzo_uint usize = GRUB_BTRFS_LZO_BLOCK_SIZE;
- /* Don't let following uint32_t cross the page boundary. */
- if (((ibuf - ibuf0) & 0xffc) == 0xffc)
- ibuf = ((ibuf - ibuf0 + 3) & ~3) + ibuf0;
+ /* Don't let following uint32_t cross the page boundary. */
+ if (((ibuf - ibuf0) & 0xffc) == 0xffc)
+ ibuf = ((ibuf - ibuf0 + 3) & ~3) + ibuf0;
- cblock_size = fsw_u32_le_swap (fsw_get_unaligned32 (ibuf));
- ibuf += sizeof (cblock_size);
+ cblock_size = fsw_u32_le_swap (fsw_get_unaligned32 (ibuf));
+ ibuf += sizeof (cblock_size);
- if (cblock_size > GRUB_BTRFS_LZO_BLOCK_MAX_CSIZE)
- return -1;
+ if (cblock_size > GRUB_BTRFS_LZO_BLOCK_MAX_CSIZE)
+ return -1;
- /* Block partially filled with requested data. */
- if (off > 0 || osize < GRUB_BTRFS_LZO_BLOCK_SIZE)
- {
- fsw_size_t to_copy = GRUB_BTRFS_LZO_BLOCK_SIZE - off;
+ /* Block partially filled with requested data. */
+ if (off > 0 || osize < GRUB_BTRFS_LZO_BLOCK_SIZE)
+ {
+ fsw_size_t to_copy = GRUB_BTRFS_LZO_BLOCK_SIZE - off;
- if (to_copy > osize)
- to_copy = osize;
+ if (to_copy > osize)
+ to_copy = osize;
- if (lzo1x_decompress_safe ((lzo_bytep)ibuf, cblock_size, (lzo_bytep)buf, &usize, NULL) != 0)
- return -1;
+ if (lzo1x_decompress_safe ((lzo_bytep)ibuf, cblock_size, (lzo_bytep)buf, &usize, NULL) != 0)
+ return -1;
- if (to_copy > usize)
- to_copy = usize;
- fsw_memcpy(obuf, buf + off, to_copy);
+ if (to_copy > usize)
+ to_copy = usize;
+ fsw_memcpy(obuf, buf + off, to_copy);
- osize -= to_copy;
- ret += to_copy;
- obuf += to_copy;
- ibuf += cblock_size;
- off = 0;
- continue;
- }
+ osize -= to_copy;
+ ret += to_copy;
+ obuf += to_copy;
+ ibuf += cblock_size;
+ off = 0;
+ continue;
+ }
- /* Decompress whole block directly to output buffer. */
- if (lzo1x_decompress_safe ((lzo_bytep)ibuf, cblock_size, (lzo_bytep)obuf, &usize, NULL) != 0)
- return -1;
+ /* Decompress whole block directly to output buffer. */
+ if (lzo1x_decompress_safe ((lzo_bytep)ibuf, cblock_size, (lzo_bytep)obuf, &usize, NULL) != 0)
+ return -1;
- osize -= usize;
- ret += usize;
- obuf += usize;
- ibuf += cblock_size;
+ osize -= usize;
+ ret += usize;
+ obuf += usize;
+ ibuf += cblock_size;
}
return ret;
}
static fsw_status_t fsw_btrfs_get_extent(struct fsw_volume *volg, struct fsw_dnode *dnog,
- struct fsw_extent *extent)
+ struct fsw_extent *extent)
{
struct fsw_btrfs_volume *vol = (struct fsw_btrfs_volume *)volg;
uint64_t ino = dnog->dnode_id;
/* slave device got empty root */
if (!vol->is_master)
- return FSW_NOT_FOUND;
+ return FSW_NOT_FOUND;
if (!vol->extent || vol->extstart > pos || vol->extino != ino
- || vol->exttree != tree || vol->extend <= pos)
+ || vol->exttree != tree || vol->extend <= pos)
{
- struct btrfs_key key_in, key_out;
- uint64_t elemaddr;
- fsw_size_t elemsize;
-
- if(vol->extent) {
- FreePool (vol->extent);
- vol->extent = NULL;
- }
- key_in.object_id = ino;
- key_in.type = GRUB_BTRFS_ITEM_TYPE_EXTENT_ITEM;
- key_in.offset = fsw_u64_le_swap (pos);
- err = lower_bound (vol, &key_in, &key_out, tree, &elemaddr, &elemsize, NULL, 0);
- if (err)
- return FSW_VOLUME_CORRUPTED;
- if (key_out.object_id != ino
- || key_out.type != GRUB_BTRFS_ITEM_TYPE_EXTENT_ITEM)
- {
- return FSW_VOLUME_CORRUPTED;
- }
- if ((fsw_ssize_t) elemsize < ((char *) &vol->extent->inl
- - (char *) vol->extent))
- {
- return FSW_VOLUME_CORRUPTED;
- }
- vol->extstart = fsw_u64_le_swap (key_out.offset);
- vol->extsize = elemsize;
- vol->extent = AllocatePool (elemsize);
- vol->extino = ino;
- vol->exttree = tree;
- if (!vol->extent)
- return FSW_OUT_OF_MEMORY;
-
- err = fsw_btrfs_read_logical (vol, elemaddr, vol->extent, elemsize, 0, 1);
- if (err)
- return err;
-
- vol->extend = vol->extstart + fsw_u64_le_swap (vol->extent->size);
- if (vol->extent->type == GRUB_BTRFS_EXTENT_REGULAR
- && (char *) &vol->extent + elemsize
- >= (char *) &vol->extent->filled + sizeof (vol->extent->filled))
- vol->extend =
- vol->extstart + fsw_u64_le_swap (vol->extent->filled);
-
- DPRINT (L"btrfs: %lx +0x%lx\n", fsw_u64_le_swap (key_out.offset), fsw_u64_le_swap (vol->extent->size));
- if (vol->extend <= pos)
- {
- return FSW_VOLUME_CORRUPTED;
- }
+ struct btrfs_key key_in, key_out;
+ uint64_t elemaddr;
+ fsw_size_t elemsize;
+
+ if(vol->extent) {
+ FreePool (vol->extent);
+ vol->extent = NULL;
+ }
+ key_in.object_id = ino;
+ key_in.type = GRUB_BTRFS_ITEM_TYPE_EXTENT_ITEM;
+ key_in.offset = fsw_u64_le_swap (pos);
+ err = lower_bound (vol, &key_in, &key_out, tree, &elemaddr, &elemsize, NULL, 0);
+ if (err)
+ return FSW_VOLUME_CORRUPTED;
+ if (key_out.object_id != ino
+ || key_out.type != GRUB_BTRFS_ITEM_TYPE_EXTENT_ITEM)
+ {
+ return FSW_VOLUME_CORRUPTED;
+ }
+ if ((fsw_ssize_t) elemsize < ((char *) &vol->extent->inl
+ - (char *) vol->extent))
+ {
+ return FSW_VOLUME_CORRUPTED;
+ }
+ vol->extstart = fsw_u64_le_swap (key_out.offset);
+ vol->extsize = elemsize;
+ vol->extent = AllocatePool (elemsize);
+ vol->extino = ino;
+ vol->exttree = tree;
+ if (!vol->extent)
+ return FSW_OUT_OF_MEMORY;
+
+ err = fsw_btrfs_read_logical (vol, elemaddr, vol->extent, elemsize, 0, 1);
+ if (err)
+ return err;
+
+ vol->extend = vol->extstart + fsw_u64_le_swap (vol->extent->size);
+ if (vol->extent->type == GRUB_BTRFS_EXTENT_REGULAR
+ && (char *) vol->extent + elemsize
+ >= (char *) &vol->extent->filled + sizeof (vol->extent->filled))
+ vol->extend =
+ vol->extstart + fsw_u64_le_swap (vol->extent->filled);
+
+ DPRINT (L"btrfs: %lx +0x%lx\n", fsw_u64_le_swap (key_out.offset), fsw_u64_le_swap (vol->extent->size));
+ if (vol->extend <= pos)
+ {
+ return FSW_VOLUME_CORRUPTED;
+ }
}
csize = vol->extend - pos;
if (vol->extent->encryption ||vol->extent->encoding)
{
- return FSW_UNSUPPORTED;
+ return FSW_UNSUPPORTED;
}
switch(vol->extent->compression) {
- case GRUB_BTRFS_COMPRESSION_LZO:
- case GRUB_BTRFS_COMPRESSION_ZLIB:
- case GRUB_BTRFS_COMPRESSION_NONE:
- break;
- default:
- return FSW_UNSUPPORTED;
+ case GRUB_BTRFS_COMPRESSION_LZO:
+ case GRUB_BTRFS_COMPRESSION_ZLIB:
+ case GRUB_BTRFS_COMPRESSION_NONE:
+ break;
+ default:
+ return FSW_UNSUPPORTED;
}
count = ( csize + vol->sectorsize - 1) >> vol->sectorshift;
switch (vol->extent->type)
{
- case GRUB_BTRFS_EXTENT_INLINE:
- buf = AllocatePool( count << vol->sectorshift);
- if(!buf)
- return FSW_OUT_OF_MEMORY;
- if (vol->extent->compression == GRUB_BTRFS_COMPRESSION_ZLIB)
- {
- if (grub_zlib_decompress (vol->extent->inl, vol->extsize -
- ((uint8_t *) vol->extent->inl
- - (uint8_t *) vol->extent),
- extoff, buf, csize)
- != (fsw_ssize_t) csize)
- {
- FreePool(buf);
- return FSW_VOLUME_CORRUPTED;
- }
- }
- else if (vol->extent->compression == GRUB_BTRFS_COMPRESSION_LZO)
- {
- if (grub_btrfs_lzo_decompress(vol->extent->inl, vol->extsize -
- ((uint8_t *) vol->extent->inl
- - (uint8_t *) vol->extent),
- extoff, buf, csize)
- != (fsw_ssize_t) csize)
- {
- FreePool(buf);
- return -FSW_VOLUME_CORRUPTED;
- }
- }
- else
- fsw_memcpy (buf, vol->extent->inl + extoff, csize);
- break;
-
- case GRUB_BTRFS_EXTENT_REGULAR:
- if (!vol->extent->laddr)
- break;
-
- if (vol->extent->compression == GRUB_BTRFS_COMPRESSION_NONE)
- {
- if( count > 64 ) {
- count = 64;
- csize = count << vol->sectorshift;
- }
- buf = AllocatePool( count << vol->sectorshift);
- if(!buf)
- return FSW_OUT_OF_MEMORY;
- err = fsw_btrfs_read_logical (vol,
- fsw_u64_le_swap (vol->extent->laddr)
- + fsw_u64_le_swap (vol->extent->offset)
- + extoff, buf, csize, 0, 0);
- if (err) {
- FreePool(buf);
- return err;
- }
- break;
- }
- if (vol->extent->compression != GRUB_BTRFS_COMPRESSION_NONE)
- {
- char *tmp;
- uint64_t zsize;
- fsw_ssize_t ret;
-
- zsize = fsw_u64_le_swap (vol->extent->compressed_size);
- tmp = AllocatePool (zsize);
- if (!tmp)
- return -FSW_OUT_OF_MEMORY;
- err = fsw_btrfs_read_logical (vol, fsw_u64_le_swap (vol->extent->laddr), tmp, zsize, 0, 0);
- if (err)
- {
- FreePool (tmp);
- return -FSW_VOLUME_CORRUPTED;
- }
-
- buf = AllocatePool( count << vol->sectorshift);
- if(!buf) {
- FreePool(tmp);
- return FSW_OUT_OF_MEMORY;
- }
-
- if (vol->extent->compression == GRUB_BTRFS_COMPRESSION_ZLIB)
- {
- ret = grub_zlib_decompress (tmp, zsize, extoff
- + fsw_u64_le_swap (vol->extent->offset),
- buf, csize);
- }
- else if (vol->extent->compression == GRUB_BTRFS_COMPRESSION_LZO)
- ret = grub_btrfs_lzo_decompress (tmp, zsize, extoff
- + fsw_u64_le_swap (vol->extent->offset),
- buf, csize);
- else
- ret = -1;
-
- FreePool (tmp);
-
- if (ret != (fsw_ssize_t) csize) {
- FreePool(tmp);
- return -FSW_VOLUME_CORRUPTED;
- }
-
- break;
- }
- break;
- default:
- return -FSW_VOLUME_CORRUPTED;
+ case GRUB_BTRFS_EXTENT_INLINE:
+ buf = AllocatePool( count << vol->sectorshift);
+ if(!buf)
+ return FSW_OUT_OF_MEMORY;
+ if (vol->extent->compression == GRUB_BTRFS_COMPRESSION_ZLIB)
+ {
+ if (grub_zlib_decompress (vol->extent->inl, vol->extsize -
+ ((uint8_t *) vol->extent->inl
+ - (uint8_t *) vol->extent),
+ extoff, buf, csize)
+ != (fsw_ssize_t) csize)
+ {
+ FreePool(buf);
+ return FSW_VOLUME_CORRUPTED;
+ }
+ }
+ else if (vol->extent->compression == GRUB_BTRFS_COMPRESSION_LZO)
+ {
+ if (grub_btrfs_lzo_decompress(vol->extent->inl, vol->extsize -
+ ((uint8_t *) vol->extent->inl
+ - (uint8_t *) vol->extent),
+ extoff, buf, csize)
+ != (fsw_ssize_t) csize)
+ {
+ FreePool(buf);
+ return FSW_VOLUME_CORRUPTED;
+ }
+ }
+ else
+ fsw_memcpy (buf, vol->extent->inl + extoff, csize);
+ break;
+
+ case GRUB_BTRFS_EXTENT_REGULAR:
+ if (!vol->extent->laddr)
+ break;
+
+ if (vol->extent->compression == GRUB_BTRFS_COMPRESSION_NONE)
+ {
+ if( count > 64 ) {
+ count = 64;
+ csize = count << vol->sectorshift;
+ }
+ buf = AllocatePool( count << vol->sectorshift);
+ if(!buf)
+ return FSW_OUT_OF_MEMORY;
+ err = fsw_btrfs_read_logical (vol,
+ fsw_u64_le_swap (vol->extent->laddr)
+ + fsw_u64_le_swap (vol->extent->offset)
+ + extoff, buf, csize, 0, 0);
+ if (err) {
+ FreePool(buf);
+ return err;
+ }
+ break;
+ }
+ if (vol->extent->compression != GRUB_BTRFS_COMPRESSION_NONE)
+ {
+ char *tmp;
+ uint64_t zsize;
+ fsw_ssize_t ret;
+
+ zsize = fsw_u64_le_swap (vol->extent->compressed_size);
+ tmp = AllocatePool (zsize);
+ if (!tmp)
+ return FSW_OUT_OF_MEMORY;
+ err = fsw_btrfs_read_logical (vol, fsw_u64_le_swap (vol->extent->laddr), tmp, zsize, 0, 0);
+ if (err)
+ {
+ FreePool (tmp);
+ return FSW_VOLUME_CORRUPTED;
+ }
+
+ buf = AllocatePool( count << vol->sectorshift);
+ if(!buf) {
+ FreePool(tmp);
+ return FSW_OUT_OF_MEMORY;
+ }
+
+ if (vol->extent->compression == GRUB_BTRFS_COMPRESSION_ZLIB)
+ {
+ ret = grub_zlib_decompress (tmp, zsize, extoff
+ + fsw_u64_le_swap (vol->extent->offset),
+ buf, csize);
+ }
+ else if (vol->extent->compression == GRUB_BTRFS_COMPRESSION_LZO)
+ ret = grub_btrfs_lzo_decompress (tmp, zsize, extoff
+ + fsw_u64_le_swap (vol->extent->offset),
+ buf, csize);
+ else
+ ret = -1;
+
+ FreePool (tmp);
+
+ if (ret != (fsw_ssize_t) csize) {
+ FreePool(buf);
+ return FSW_VOLUME_CORRUPTED;
+ }
+
+ break;
+ }
+ break;
+ default:
+ return FSW_VOLUME_CORRUPTED;
}
extent->log_count = count;
if(buf) {
- if(csize < (count << vol->sectorshift))
- fsw_memzero( buf + csize, (count << vol->sectorshift) - csize);
- extent->buffer = buf;
- extent->type = FSW_EXTENT_TYPE_BUFFER;
+ if(csize < (count << vol->sectorshift))
+ fsw_memzero( buf + csize, (count << vol->sectorshift) - csize);
+ extent->buffer = buf;
+ extent->type = FSW_EXTENT_TYPE_BUFFER;
} else {
- extent->buffer = NULL;
- extent->type = FSW_EXTENT_TYPE_SPARSE;
+ extent->buffer = NULL;
+ extent->type = FSW_EXTENT_TYPE_SPARSE;
}
return FSW_SUCCESS;
}
static fsw_status_t fsw_btrfs_readlink(struct fsw_volume *volg, struct fsw_dnode *dnog,
- struct fsw_string *link_target)
+ struct fsw_string *link_target)
{
struct fsw_btrfs_volume *vol = (struct fsw_btrfs_volume *)volg;
struct fsw_btrfs_dnode *dno = (struct fsw_btrfs_dnode *)dnog;
char *tmp;
if (dno->g.size > FSW_PATH_MAX)
- return FSW_VOLUME_CORRUPTED;
+ return FSW_VOLUME_CORRUPTED;
tmp = AllocatePool(dno->g.size);
if(!tmp)
- return FSW_OUT_OF_MEMORY;
+ return FSW_OUT_OF_MEMORY;
i = 0;
do {
- struct fsw_extent extent;
- int size;
- extent.log_start = i;
- status = fsw_btrfs_get_extent(volg, dnog, &extent);
- if(status || extent.type != FSW_EXTENT_TYPE_BUFFER) {
- FreePool(tmp);
- if(extent.buffer)
- FreePool(extent.buffer);
- return FSW_VOLUME_CORRUPTED;
- }
- size = extent.log_count << vol->sectorshift;
- if(size > (dno->g.size - (i<<vol->sectorshift)))
- size = dno->g.size - (i<<vol->sectorshift);
- fsw_memcpy(tmp + (i<<vol->sectorshift), extent.buffer, size);
- FreePool(extent.buffer);
- i += extent.log_count;
+ struct fsw_extent extent;
+ int size;
+ extent.log_start = i;
+ status = fsw_btrfs_get_extent(volg, dnog, &extent);
+ if(status || extent.type != FSW_EXTENT_TYPE_BUFFER) {
+ FreePool(tmp);
+ if(extent.buffer)
+ FreePool(extent.buffer);
+ return FSW_VOLUME_CORRUPTED;
+ }
+ size = extent.log_count << vol->sectorshift;
+ if(size > (dno->g.size - (i<<vol->sectorshift)))
+ size = dno->g.size - (i<<vol->sectorshift);
+ fsw_memcpy(tmp + (i<<vol->sectorshift), extent.buffer, size);
+ FreePool(extent.buffer);
+ i += extent.log_count;
} while( (i << vol->sectorshift) < dno->g.size);
s.type = FSW_STRING_TYPE_UTF8;
}
static fsw_status_t fsw_btrfs_lookup_dir_item(struct fsw_btrfs_volume *vol,
- uint64_t tree_id, uint64_t object_id,
- struct fsw_string *lookup_name,
- struct btrfs_dir_item **direl_buf,
- struct btrfs_dir_item **direl_out
- )
+ uint64_t tree_id, uint64_t object_id,
+ struct fsw_string *lookup_name,
+ struct btrfs_dir_item **direl_buf,
+ struct btrfs_dir_item **direl_out
+ )
{
uint64_t elemaddr;
fsw_size_t elemsize;
err = lower_bound (vol, &key, &key_out, tree_id, &elemaddr, &elemsize, NULL, 0);
if (err)
- return err;
+ return err;
if (key_cmp (&key, &key_out) != 0)
- return FSW_NOT_FOUND;
+ return FSW_NOT_FOUND;
if (elemsize > allocated)
{
- allocated = 2 * elemsize;
- if(*direl_buf)
- FreePool (*direl_buf);
- *direl_buf = AllocatePool (allocated + 1);
- if (!*direl_buf)
- return FSW_OUT_OF_MEMORY;
+ allocated = 2 * elemsize;
+ if(*direl_buf)
+ FreePool (*direl_buf);
+ *direl_buf = AllocatePool (allocated + 1);
+ if (!*direl_buf)
+ return FSW_OUT_OF_MEMORY;
}
err = fsw_btrfs_read_logical (vol, elemaddr, *direl_buf, elemsize, 0, 1);
if (err)
- return err;
+ return err;
for (cdirel = *direl_buf;
- (uint8_t *) cdirel - (uint8_t *) *direl_buf < (fsw_ssize_t) elemsize;
- cdirel = (void *) ((uint8_t *) (*direl_buf + 1)
- + fsw_u16_le_swap (cdirel->n)
- + fsw_u16_le_swap (cdirel->m)))
+ (uint8_t *) cdirel - (uint8_t *) *direl_buf < (fsw_ssize_t) elemsize;
+ cdirel = (void *) ((uint8_t *) (*direl_buf + 1)
+ + fsw_u16_le_swap (cdirel->n)
+ + fsw_u16_le_swap (cdirel->m)))
{
- if (lookup_name->size == fsw_u16_le_swap (cdirel->n)
- && fsw_memeq (cdirel->name, lookup_name->data, lookup_name->size))
- break;
+ if (lookup_name->size == fsw_u16_le_swap (cdirel->n)
+ && fsw_memeq (cdirel->name, lookup_name->data, lookup_name->size))
+ break;
}
if ((uint8_t *) cdirel - (uint8_t *) *direl_buf >= (fsw_ssize_t) elemsize)
- return FSW_NOT_FOUND;
+ return FSW_NOT_FOUND;
*direl_out = cdirel;
return FSW_SUCCESS;
}
static fsw_status_t fsw_btrfs_get_root_tree(
- struct fsw_btrfs_volume *vol,
- struct btrfs_key *key_in,
- uint64_t *tree_out)
+ struct fsw_btrfs_volume *vol,
+ struct btrfs_key *key_in,
+ uint64_t *tree_out)
{
fsw_status_t err;
struct btrfs_root_item ri;
err = lower_bound (vol, key_in, &key_out, vol->root_tree, &elemaddr, &elemsize, NULL, 0);
if (err)
- return err;
+ return err;
if (key_in->object_id != key_out.object_id || key_in->type != key_out.type)
- return FSW_NOT_FOUND;
+ return FSW_NOT_FOUND;
err = fsw_btrfs_read_logical (vol, elemaddr, &ri, sizeof (ri), 0, 1);
if (err)
- return err;
+ return err;
*tree_out = ri.tree;
return FSW_SUCCESS;
}
static fsw_status_t fsw_btrfs_get_sub_dnode(
- struct fsw_btrfs_volume *vol,
- struct fsw_btrfs_dnode *dno,
- struct btrfs_dir_item *cdirel,
- struct fsw_string *name,
- struct fsw_dnode **child_dno_out)
+ struct fsw_btrfs_volume *vol,
+ struct fsw_btrfs_dnode *dno,
+ struct btrfs_dir_item *cdirel,
+ struct fsw_string *name,
+ struct fsw_dnode **child_dno_out)
{
fsw_status_t err;
int child_type;
switch (cdirel->key.type)
{
- case GRUB_BTRFS_ITEM_TYPE_ROOT_ITEM:
- err = fsw_btrfs_get_root_tree (vol, &cdirel->key, &tree_id);
- if (err)
- return err;
-
- child_type = GRUB_BTRFS_DIR_ITEM_TYPE_DIRECTORY;
- child_id = fsw_u64_le_swap(GRUB_BTRFS_OBJECT_ID_CHUNK);
- break;
- case GRUB_BTRFS_ITEM_TYPE_INODE_ITEM:
- child_type = cdirel->type;
- child_id = cdirel->key.object_id;
- break;
-
- default:
- DPRINT (L"btrfs: unrecognised object type 0x%x", cdirel->key.type);
- return FSW_VOLUME_CORRUPTED;
+ case GRUB_BTRFS_ITEM_TYPE_ROOT_ITEM:
+ err = fsw_btrfs_get_root_tree (vol, &cdirel->key, &tree_id);
+ if (err)
+ return err;
+
+ child_type = GRUB_BTRFS_DIR_ITEM_TYPE_DIRECTORY;
+ child_id = fsw_u64_le_swap(GRUB_BTRFS_OBJECT_ID_CHUNK);
+ break;
+ case GRUB_BTRFS_ITEM_TYPE_INODE_ITEM:
+ child_type = cdirel->type;
+ child_id = cdirel->key.object_id;
+ break;
+
+ default:
+ DPRINT (L"btrfs: unrecognised object type 0x%x", cdirel->key.type);
+ return FSW_VOLUME_CORRUPTED;
}
switch(child_type) {
- case GRUB_BTRFS_DIR_ITEM_TYPE_REGULAR:
- child_type = FSW_DNODE_TYPE_FILE;
- break;
- case GRUB_BTRFS_DIR_ITEM_TYPE_DIRECTORY:
- child_type = FSW_DNODE_TYPE_DIR;
- break;
- case GRUB_BTRFS_DIR_ITEM_TYPE_SYMLINK:
- child_type = FSW_DNODE_TYPE_SYMLINK;
- break;
- default:
- child_type = FSW_DNODE_TYPE_SPECIAL;
- break;
+ case GRUB_BTRFS_DIR_ITEM_TYPE_REGULAR:
+ child_type = FSW_DNODE_TYPE_FILE;
+ break;
+ case GRUB_BTRFS_DIR_ITEM_TYPE_DIRECTORY:
+ child_type = FSW_DNODE_TYPE_DIR;
+ break;
+ case GRUB_BTRFS_DIR_ITEM_TYPE_SYMLINK:
+ child_type = FSW_DNODE_TYPE_SYMLINK;
+ break;
+ default:
+ child_type = FSW_DNODE_TYPE_SPECIAL;
+ break;
}
return fsw_dnode_create_with_tree(&dno->g, tree_id, child_id, child_type, name, child_dno_out);
}
static fsw_status_t fsw_btrfs_dir_lookup(struct fsw_volume *volg, struct fsw_dnode *dnog,
- struct fsw_string *lookup_name, struct fsw_dnode **child_dno_out)
+ struct fsw_string *lookup_name, struct fsw_dnode **child_dno_out)
{
struct fsw_btrfs_volume *vol = (struct fsw_btrfs_volume *)volg;
struct fsw_btrfs_dnode *dno = (struct fsw_btrfs_dnode *)dnog;
/* slave device got empty root */
if (!vol->is_master)
- return FSW_NOT_FOUND;
+ return FSW_NOT_FOUND;
err = fsw_strdup_coerce(&s, FSW_STRING_TYPE_UTF8, lookup_name);
if(err)
- return err;
+ return err;
/* treat '...' under root as top root */
if(dnog == volg->root && s.size == 3 && ((char *)s.data)[0]=='.' && ((char *)s.data)[1]=='.' && ((char *)s.data)[2]=='.')
{
- fsw_strfree (&s);
- if(dnog->tree_id == vol->top_tree) {
- fsw_dnode_retain(dnog);
- *child_dno_out = dnog;
- return FSW_SUCCESS;
- }
- return fsw_dnode_create_with_tree(dnog,
- vol->top_tree, fsw_u64_le_swap(GRUB_BTRFS_OBJECT_ID_CHUNK),
- FSW_DNODE_TYPE_DIR, lookup_name, child_dno_out);
+ fsw_strfree (&s);
+ if(dnog->tree_id == vol->top_tree) {
+ fsw_dnode_retain(dnog);
+ *child_dno_out = dnog;
+ return FSW_SUCCESS;
+ }
+ return fsw_dnode_create_with_tree(dnog,
+ vol->top_tree, fsw_u64_le_swap(GRUB_BTRFS_OBJECT_ID_CHUNK),
+ FSW_DNODE_TYPE_DIR, lookup_name, child_dno_out);
}
struct btrfs_dir_item *direl=NULL, *cdirel;
err = fsw_btrfs_lookup_dir_item(vol, dnog->tree_id, dnog->dnode_id, &s, &direl, &cdirel);
if(!err)
- err = fsw_btrfs_get_sub_dnode(vol, dno, cdirel, lookup_name, child_dno_out);
+ err = fsw_btrfs_get_sub_dnode(vol, dno, cdirel, lookup_name, child_dno_out);
if(direl)
- FreePool (direl);
+ FreePool (direl);
fsw_strfree (&s);
return err;
}
top_root_key.offset = -1LL;
err = fsw_btrfs_get_root_tree (vol, &top_root_key, &vol->top_tree);
if (err)
- return err;
+ return err;
s.type = FSW_STRING_TYPE_UTF8;
s.data = "default";
/* if "default" is failed or invalid, use top tree */
if (err || /* failed */
- cdirel->type != GRUB_BTRFS_DIR_ITEM_TYPE_DIRECTORY || /* not dir */
- cdirel->key.type != GRUB_BTRFS_ITEM_TYPE_ROOT_ITEM || /* not tree */
- cdirel->key.object_id == fsw_u64_le_swap(5UL) || /* same as top */
- (err = fsw_btrfs_get_root_tree (vol, &cdirel->key, &default_tree_id)))
- default_tree_id = vol->top_tree;
+ cdirel->type != GRUB_BTRFS_DIR_ITEM_TYPE_DIRECTORY || /* not dir */
+ cdirel->key.type != GRUB_BTRFS_ITEM_TYPE_ROOT_ITEM || /* not tree */
+ cdirel->key.object_id == fsw_u64_le_swap(5UL) || /* same as top */
+ (err = fsw_btrfs_get_root_tree (vol, &cdirel->key, &default_tree_id)))
+ default_tree_id = vol->top_tree;
if (!err)
- err = fsw_dnode_create_root_with_tree(&vol->g, default_tree_id,
- fsw_u64_le_swap (GRUB_BTRFS_OBJECT_ID_CHUNK), &vol->g.root);
+ err = fsw_dnode_create_root_with_tree(&vol->g, default_tree_id,
+ fsw_u64_le_swap (GRUB_BTRFS_OBJECT_ID_CHUNK), &vol->g.root);
if (direl)
- FreePool (direl);
+ FreePool (direl);
return err;
}
static fsw_status_t fsw_btrfs_dir_read(struct fsw_volume *volg, struct fsw_dnode *dnog,
- struct fsw_shandle *shand, struct fsw_dnode **child_dno_out)
+ struct fsw_shandle *shand, struct fsw_dnode **child_dno_out)
{
struct fsw_btrfs_volume *vol = (struct fsw_btrfs_volume *)volg;
struct fsw_btrfs_dnode *dno = (struct fsw_btrfs_dnode *)dnog;
/* slave device got empty root */
if (!vol->is_master)
- return FSW_NOT_FOUND;
+ return FSW_NOT_FOUND;
key_in.object_id = dnog->dnode_id;
key_in.type = GRUB_BTRFS_ITEM_TYPE_DIR_ITEM;
if((int64_t)key_in.offset == -1LL)
{
- return FSW_NOT_FOUND;
+ return FSW_NOT_FOUND;
}
err = lower_bound (vol, &key_in, &key_out, tree, &elemaddr, &elemsize, &desc, 0);
if (err) {
- return err;
+ return err;
}
DPRINT(L"key_in %lx:%x:%lx out %lx:%x:%lx elem %lx+%lx\n",
- key_in.object_id, key_in.type, key_in.offset,
- key_out.object_id, key_out.type, key_out.offset,
- elemaddr, elemsize);
+ key_in.object_id, key_in.type, key_in.offset,
+ key_out.object_id, key_out.type, key_out.offset,
+ elemaddr, elemsize);
if (key_out.type != GRUB_BTRFS_ITEM_TYPE_DIR_ITEM ||
- key_out.object_id != key_in.object_id)
+ key_out.object_id != key_in.object_id)
{
- r = next (vol, &desc, &elemaddr, &elemsize, &key_out);
- if (r <= 0)
- goto out;
- DPRINT(L"next out %lx:%x:%lx\n",
- key_out.object_id, key_out.type, key_out.offset, elemaddr, elemsize);
+ r = next (vol, &desc, &elemaddr, &elemsize, &key_out);
+ if (r <= 0)
+ goto out;
+ DPRINT(L"next out %lx:%x:%lx\n",
+ key_out.object_id, key_out.type, key_out.offset, elemaddr, elemsize);
}
if (key_out.type == GRUB_BTRFS_ITEM_TYPE_DIR_ITEM &&
- key_out.object_id == key_in.object_id &&
- fsw_u64_le_swap(key_out.offset) <= fsw_u64_le_swap(key_in.offset))
+ key_out.object_id == key_in.object_id &&
+ fsw_u64_le_swap(key_out.offset) <= fsw_u64_le_swap(key_in.offset))
{
- r = next (vol, &desc, &elemaddr, &elemsize, &key_out);
- if (r <= 0)
- goto out;
- DPRINT(L"next out %lx:%x:%lx\n",
- key_out.object_id, key_out.type, key_out.offset, elemaddr, elemsize);
+ r = next (vol, &desc, &elemaddr, &elemsize, &key_out);
+ if (r <= 0)
+ goto out;
+ DPRINT(L"next out %lx:%x:%lx\n",
+ key_out.object_id, key_out.type, key_out.offset, elemaddr, elemsize);
}
do
{
- struct btrfs_dir_item *cdirel;
- if (key_out.type != GRUB_BTRFS_ITEM_TYPE_DIR_ITEM ||
- key_out.object_id != key_in.object_id)
- {
- r = 0;
- break;
- }
- if (elemsize > allocated)
- {
- allocated = 2 * elemsize;
- if(direl)
- FreePool (direl);
- direl = AllocatePool (allocated + 1);
- if (!direl)
- {
- r = -FSW_OUT_OF_MEMORY;
- break;
- }
- }
-
- err = fsw_btrfs_read_logical (vol, elemaddr, direl, elemsize, 0, 1);
- if (err)
- {
- r = -err;
- break;
- }
-
- for (cdirel = direl;
- (uint8_t *) cdirel - (uint8_t *) direl
- < (fsw_ssize_t) elemsize;
- cdirel = (void *) ((uint8_t *) (direl + 1)
- + fsw_u16_le_swap (cdirel->n)
- + fsw_u16_le_swap (cdirel->m)))
- {
- struct fsw_string s;
- s.type = FSW_STRING_TYPE_UTF8;
- s.size = s.len = fsw_u16_le_swap (cdirel->n);
- s.data = cdirel->name;
- DPRINT(L"item key %lx:%x%lx, type %lx, namelen=%lx\n",
- cdirel->key.object_id, cdirel->key.type, cdirel->key.offset, cdirel->type, s.size);
- if(!err) {
- err = fsw_btrfs_get_sub_dnode(vol, dno, cdirel, &s, child_dno_out);
- if(direl)
- FreePool (direl);
- free_iterator (&desc);
- shand->pos = key_out.offset;
- return FSW_SUCCESS;
- }
- }
- r = next (vol, &desc, &elemaddr, &elemsize, &key_out);
- DPRINT(L"next2 out %lx:%x:%lx\n",
- key_out.object_id, key_out.type, key_out.offset, elemaddr, elemsize);
+ struct btrfs_dir_item *cdirel;
+ if (key_out.type != GRUB_BTRFS_ITEM_TYPE_DIR_ITEM ||
+ key_out.object_id != key_in.object_id)
+ {
+ r = 0;
+ break;
+ }
+ if (elemsize > allocated)
+ {
+ allocated = 2 * elemsize;
+ if(direl)
+ FreePool (direl);
+ direl = AllocatePool (allocated + 1);
+ if (!direl)
+ {
+ r = -FSW_OUT_OF_MEMORY;
+ break;
+ }
+ }
+
+ err = fsw_btrfs_read_logical (vol, elemaddr, direl, elemsize, 0, 1);
+ if (err)
+ {
+ r = -err;
+ break;
+ }
+
+ for (cdirel = direl;
+ (uint8_t *) cdirel - (uint8_t *) direl
+ < (fsw_ssize_t) elemsize;
+ cdirel = (void *) ((uint8_t *) (direl + 1)
+ + fsw_u16_le_swap (cdirel->n)
+ + fsw_u16_le_swap (cdirel->m)))
+ {
+ struct fsw_string s;
+ s.type = FSW_STRING_TYPE_UTF8;
+ s.size = s.len = fsw_u16_le_swap (cdirel->n);
+ s.data = cdirel->name;
+ DPRINT(L"item key %lx:%x%lx, type %lx, namelen=%lx\n",
+ cdirel->key.object_id, cdirel->key.type, cdirel->key.offset, cdirel->type, s.size);
+ if(!err) {
+ err = fsw_btrfs_get_sub_dnode(vol, dno, cdirel, &s, child_dno_out);
+ if(direl)
+ FreePool (direl);
+ free_iterator (&desc);
+ shand->pos = key_out.offset;
+ return FSW_SUCCESS;
+ }
+ }
+ r = next (vol, &desc, &elemaddr, &elemsize, &key_out);
+ DPRINT(L"next2 out %lx:%x:%lx\n",
+ key_out.object_id, key_out.type, key_out.offset, elemaddr, elemsize);
}
while (r > 0);
out:
if(direl)
- FreePool (direl);
+ FreePool (direl);
free_iterator (&desc);
r = r < 0 ? -r : FSW_NOT_FOUND;
* This must be a power of two, and at least 32K for zip's deflate method
*/
-#define WSIZE 0x8000
+#define WSIZE 0x8000
#define INBUFSIZ 0x2000
static void initialize_tables (grub_gzio_t);
/* Little-Endian defines for the 2-byte magic numbers for gzip files. */
-#define GZIP_MAGIC grub_le_to_cpu16 (0x8B1F)
-#define OLD_GZIP_MAGIC grub_le_to_cpu16 (0x9E1F)
+#define GZIP_MAGIC grub_le_to_cpu16 (0x8B1F)
+#define OLD_GZIP_MAGIC grub_le_to_cpu16 (0x9E1F)
/* Compression methods (see algorithm.doc) */
#define STORED 0
#define MAX_METHODS 9
/* gzip flag byte */
-#define ASCII_FLAG 0x01 /* bit 0 set: file probably ascii text */
-#define CONTINUATION 0x02 /* bit 1 set: continuation of multi-part gzip file */
-#define EXTRA_FIELD 0x04 /* bit 2 set: extra field present */
-#define ORIG_NAME 0x08 /* bit 3 set: original file name present */
-#define COMMENT 0x10 /* bit 4 set: file comment present */
-#define ENCRYPTED 0x20 /* bit 5 set: file is encrypted */
-#define RESERVED 0xC0 /* bit 6,7: reserved */
+#define ASCII_FLAG 0x01 /* bit 0 set: file probably ascii text */
+#define CONTINUATION 0x02 /* bit 1 set: continuation of multi-part gzip file */
+#define EXTRA_FIELD 0x04 /* bit 2 set: extra field present */
+#define ORIG_NAME 0x08 /* bit 3 set: original file name present */
+#define COMMENT 0x10 /* bit 4 set: file comment present */
+#define ENCRYPTED 0x20 /* bit 5 set: file is encrypted */
+#define RESERVED 0xC0 /* bit 6,7: reserved */
-#define UNSUPPORTED_FLAGS (CONTINUATION | ENCRYPTED | RESERVED)
+#define UNSUPPORTED_FLAGS (CONTINUATION | ENCRYPTED | RESERVED)
/* inflate block codes */
-#define INFLATE_STORED 0
-#define INFLATE_FIXED 1
-#define INFLATE_DYNAMIC 2
+#define INFLATE_STORED 0
+#define INFLATE_FIXED 1
+#define INFLATE_DYNAMIC 2
typedef unsigned char uch;
typedef unsigned short ush;
error in the data. */
struct huft
{
- uch e; /* number of extra bits or operation */
- uch b; /* number of bits in this code or subcode */
+ uch e; /* number of extra bits or operation */
+ uch b; /* number of bits in this code or subcode */
union
{
- ush n; /* literal, length base, or distance base */
- struct huft *t; /* pointer to next level of table */
+ ush n; /* literal, length base, or distance base */
+ struct huft *t; /* pointer to next level of table */
}
v;
};
/* Tables for deflate from PKZIP's appnote.txt. */
static unsigned bitorder[] =
-{ /* Order of the bit length code lengths */
+{ /* Order of the bit length code lengths */
16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15};
static ush cplens[] =
-{ /* Copy lengths for literal codes 257..285 */
+{ /* Copy lengths for literal codes 257..285 */
3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 15, 17, 19, 23, 27, 31,
35, 43, 51, 59, 67, 83, 99, 115, 131, 163, 195, 227, 258, 0, 0};
- /* note: see note #13 above about the 258 in this list. */
+ /* note: see note #13 above about the 258 in this list. */
static ush cplext[] =
-{ /* Extra bits for literal codes 257..285 */
+{ /* Extra bits for literal codes 257..285 */
0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2,
- 3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 0, 99, 99}; /* 99==invalid */
+ 3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 0, 99, 99}; /* 99==invalid */
static ush cpdist[] =
-{ /* Copy offsets for distance codes 0..29 */
+{ /* Copy offsets for distance codes 0..29 */
1, 2, 3, 4, 5, 7, 9, 13, 17, 25, 33, 49, 65, 97, 129, 193,
257, 385, 513, 769, 1025, 1537, 2049, 3073, 4097, 6145,
8193, 12289, 16385, 24577};
static ush cpdext[] =
-{ /* Extra bits for distance codes */
+{ /* Extra bits for distance codes */
0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6,
7, 7, 8, 8, 9, 9, 10, 10, 11, 11,
12, 12, 13, 13};
*/
-static int lbits = 9; /* bits in base literal/length lookup table */
-static int dbits = 6; /* bits in base distance lookup table */
+static int lbits = 9; /* bits in base literal/length lookup table */
+static int dbits = 6; /* bits in base distance lookup table */
/* If BMAX needs to be larger than 16, then h and x[] should be ulg. */
-#define BMAX 16 /* maximum bit length of any code (16 for explode) */
-#define N_MAX 288 /* maximum number of codes in any set */
+#define BMAX 16 /* maximum bit length of any code (16 for explode) */
+#define N_MAX 288 /* maximum number of codes in any set */
/* Macros for inflate() bit peeking and grabbing.
get_byte (grub_gzio_t gzio)
{
if (gzio->mem_input_off < gzio->mem_input_size)
- return gzio->mem_input[gzio->mem_input_off++];
+ return gzio->mem_input[gzio->mem_input_off++];
return 0;
}
gzio_seek (grub_gzio_t gzio, grub_off_t off)
{
if (off > gzio->mem_input_size)
- gzio->err = -1;
+ gzio->err = -1;
else
- gzio->mem_input_off = off;
+ gzio->mem_input_off = off;
}
/* more function prototypes */
static int huft_build (unsigned *, unsigned, unsigned, ush *, ush *,
- struct huft **, int *);
+ struct huft **, int *);
static int huft_free (struct huft *);
static int inflate_codes_in_window (grub_gzio_t);
oversubscribed set of lengths), and three if not enough memory. */
static int
-huft_build (unsigned *b, /* code lengths in bits (all assumed <= BMAX) */
- unsigned n, /* number of codes (assumed <= N_MAX) */
- unsigned s, /* number of simple-valued codes (0..s-1) */
- ush * d, /* list of base values for non-simple codes */
- ush * e, /* list of extra bits for non-simple codes */
- struct huft **t, /* result: starting table */
- int *m) /* maximum lookup bits, returns actual */
+huft_build (unsigned *b, /* code lengths in bits (all assumed <= BMAX) */
+ unsigned n, /* number of codes (assumed <= N_MAX) */
+ unsigned s, /* number of simple-valued codes (0..s-1) */
+ ush * d, /* list of base values for non-simple codes */
+ ush * e, /* list of extra bits for non-simple codes */
+ struct huft **t, /* result: starting table */
+ int *m) /* maximum lookup bits, returns actual */
{
- unsigned a; /* counter for codes of length k */
- unsigned c[BMAX + 1]; /* bit length count table */
- unsigned f; /* i repeats in table every f entries */
- int g; /* maximum code length */
- int h; /* table level */
- register unsigned i; /* counter, current code */
- register unsigned j; /* counter */
- register int k; /* number of bits in current code */
- int l; /* bits per table (returned in m) */
- register unsigned *p; /* pointer into c[], b[], or v[] */
- register struct huft *q; /* points to current table */
- struct huft r; /* table entry for structure assignment */
- struct huft *u[BMAX]; /* table stack */
- unsigned v[N_MAX]; /* values in order of bit length */
- register int w; /* bits before this table == (l * h) */
- unsigned x[BMAX + 1]; /* bit offsets, then code stack */
- unsigned *xp; /* pointer into x */
- int y; /* number of dummy codes added */
- unsigned z; /* number of entries in current table */
+ unsigned a; /* counter for codes of length k */
+ unsigned c[BMAX + 1]; /* bit length count table */
+ unsigned f; /* i repeats in table every f entries */
+ int g; /* maximum code length */
+ int h; /* table level */
+ register unsigned i; /* counter, current code */
+ register unsigned j; /* counter */
+ register int k; /* number of bits in current code */
+ int l; /* bits per table (returned in m) */
+ register unsigned *p; /* pointer into c[], b[], or v[] */
+ register struct huft *q; /* points to current table */
+ struct huft r; /* table entry for structure assignment */
+ struct huft *u[BMAX]; /* table stack */
+ unsigned v[N_MAX]; /* values in order of bit length */
+ register int w; /* bits before this table == (l * h) */
+ unsigned x[BMAX + 1]; /* bit offsets, then code stack */
+ unsigned *xp; /* pointer into x */
+ int y; /* number of dummy codes added */
+ unsigned z; /* number of entries in current table */
/* Generate counts for each bit length */
fsw_memzero ((char *) c, sizeof (c));
i = n;
do
{
- c[*p]++; /* assume all entries <= BMAX */
- p++; /* Can't combine with above line (Solaris bug) */
+ c[*p]++; /* assume all entries <= BMAX */
+ p++; /* Can't combine with above line (Solaris bug) */
}
while (--i);
- if (c[0] == n) /* null input--all zero length codes */
+ if (c[0] == n) /* null input--all zero length codes */
{
*t = (struct huft *) NULL;
*m = 0;
for (j = 1; j <= BMAX; j++)
if (c[j])
break;
- k = j; /* minimum code length */
+ k = j; /* minimum code length */
if ((unsigned) l < j)
l = j;
for (i = BMAX; i; i--)
if (c[i])
break;
- g = i; /* maximum code length */
+ g = i; /* maximum code length */
if ((unsigned) l > i)
l = i;
*m = l;
/* Adjust last length count to fill out codes, if needed */
for (y = 1 << j; j < i; j++, y <<= 1)
if ((y -= c[j]) < 0)
- return 2; /* bad input: more codes than bits */
+ return 2; /* bad input: more codes than bits */
if ((y -= c[i]) < 0)
return 2;
c[i] += y;
p = c + 1;
xp = x + 2;
while (--i)
- { /* note that i == g from above */
+ { /* note that i == g from above */
*xp++ = (j += *p++);
}
do
{
if ((j = *p++) != 0)
- v[x[j]++] = i;
+ v[x[j]++] = i;
}
while (++i < n);
/* Generate the Huffman codes and for each, make the table entries */
- x[0] = i = 0; /* first Huffman code is zero */
- p = v; /* grab values in bit order */
- h = -1; /* no tables yet--level -1 */
- w = -l; /* bits decoded == (l * h) */
- u[0] = (struct huft *) NULL; /* just to keep compilers happy */
- q = (struct huft *) NULL; /* ditto */
- z = 0; /* ditto */
+ x[0] = i = 0; /* first Huffman code is zero */
+ p = v; /* grab values in bit order */
+ h = -1; /* no tables yet--level -1 */
+ w = -l; /* bits decoded == (l * h) */
+ u[0] = (struct huft *) NULL; /* just to keep compilers happy */
+ q = (struct huft *) NULL; /* ditto */
+ z = 0; /* ditto */
/* go through the bit lengths (k already is bits in shortest code) */
for (; k <= g; k++)
{
a = c[k];
while (a--)
- {
- /* here i is the Huffman code of length k bits for value *p */
- /* make tables up to required level */
- while (k > w + l)
- {
- h++;
- w += l; /* previous table always l bits */
-
- /* compute minimum size table less than or equal to l bits */
- z = (z = (unsigned) (g - w)) > (unsigned) l ? (unsigned) l : z; /* upper limit on table size */
- if ((f = 1 << (j = k - w)) > a + 1) /* try a k-w bit table */
- { /* too few codes for k-w bit table */
- f -= a + 1; /* deduct codes from patterns left */
- xp = c + k;
- while (++j < z) /* try smaller tables up to z bits */
- {
- if ((f <<= 1) <= *++xp)
- break; /* enough codes to use up j bits */
- f -= *xp; /* else deduct codes from patterns */
- }
- }
- z = 1 << j; /* table entries for j-bit table */
-
- /* allocate and link in new table */
- q = (struct huft *) AllocatePool ((z + 1) * sizeof (struct huft));
- if (! q)
- {
- if (h)
- huft_free (u[0]);
- return 3;
- }
-
- *t = q + 1; /* link to list for huft_free() */
- *(t = &(q->v.t)) = (struct huft *) NULL;
- u[h] = ++q; /* table starts after link */
-
- /* connect to last table, if there is one */
- if (h)
- {
- x[h] = i; /* save pattern for backing up */
- r.b = (uch) l; /* bits to dump before this table */
- r.e = (uch) (16 + j); /* bits in this table */
- r.v.t = q; /* pointer to this table */
- j = i >> (w - l); /* (get around Turbo C bug) */
- u[h - 1][j] = r; /* connect to last table */
- }
- }
-
- /* set up table entry in r */
- r.b = (uch) (k - w);
- if (p >= v + n)
- r.e = 99; /* out of values--invalid code */
- else if (*p < s)
- {
- r.e = (uch) (*p < 256 ? 16 : 15); /* 256 is end-of-block code */
- r.v.n = (ush) (*p); /* simple code is just the value */
- p++; /* one compiler does not like *p++ */
- }
- else
- {
- r.e = (uch) e[*p - s]; /* non-simple--look up in lists */
- r.v.n = d[*p++ - s];
- }
-
- /* fill code-like entries with r */
- f = 1 << (k - w);
- for (j = i >> w; j < z; j += f)
- q[j] = r;
-
- /* backwards increment the k-bit code i */
- for (j = 1 << (k - 1); i & j; j >>= 1)
- i ^= j;
- i ^= j;
-
- /* backup over finished tables */
- while ((i & ((1 << w) - 1)) != x[h])
- {
- h--; /* don't need to update q */
- w -= l;
- }
- }
+ {
+ /* here i is the Huffman code of length k bits for value *p */
+ /* make tables up to required level */
+ while (k > w + l)
+ {
+ h++;
+ w += l; /* previous table always l bits */
+
+ /* compute minimum size table less than or equal to l bits */
+ z = (z = (unsigned) (g - w)) > (unsigned) l ? (unsigned) l : z; /* upper limit on table size */
+ if ((f = 1 << (j = k - w)) > a + 1) /* try a k-w bit table */
+ { /* too few codes for k-w bit table */
+ f -= a + 1; /* deduct codes from patterns left */
+ xp = c + k;
+ while (++j < z) /* try smaller tables up to z bits */
+ {
+ if ((f <<= 1) <= *++xp)
+ break; /* enough codes to use up j bits */
+ f -= *xp; /* else deduct codes from patterns */
+ }
+ }
+ z = 1 << j; /* table entries for j-bit table */
+
+ /* allocate and link in new table */
+ q = (struct huft *) AllocatePool ((z + 1) * sizeof (struct huft));
+ if (! q)
+ {
+ if (h)
+ huft_free (u[0]);
+ return 3;
+ }
+
+ *t = q + 1; /* link to list for huft_free() */
+ *(t = &(q->v.t)) = (struct huft *) NULL;
+ u[h] = ++q; /* table starts after link */
+
+ /* connect to last table, if there is one */
+ if (h)
+ {
+ x[h] = i; /* save pattern for backing up */
+ r.b = (uch) l; /* bits to dump before this table */
+ r.e = (uch) (16 + j); /* bits in this table */
+ r.v.t = q; /* pointer to this table */
+ j = i >> (w - l); /* (get around Turbo C bug) */
+ u[h - 1][j] = r; /* connect to last table */
+ }
+ }
+
+ /* set up table entry in r */
+ r.b = (uch) (k - w);
+ if (p >= v + n)
+ r.e = 99; /* out of values--invalid code */
+ else if (*p < s)
+ {
+ r.e = (uch) (*p < 256 ? 16 : 15); /* 256 is end-of-block code */
+ r.v.n = (ush) (*p); /* simple code is just the value */
+ p++; /* one compiler does not like *p++ */
+ }
+ else
+ {
+ r.e = (uch) e[*p - s]; /* non-simple--look up in lists */
+ r.v.n = d[*p++ - s];
+ }
+
+ /* fill code-like entries with r */
+ f = 1 << (k - w);
+ for (j = i >> w; j < z; j += f)
+ q[j] = r;
+
+ /* backwards increment the k-bit code i */
+ for (j = 1 << (k - 1); i & j; j >>= 1)
+ i ^= j;
+ i ^= j;
+
+ /* backup over finished tables */
+ while ((i & ((1 << w) - 1)) != x[h])
+ {
+ h--; /* don't need to update q */
+ w -= l;
+ }
+ }
}
/* Return true (1) if we were given an incomplete table */
static int
inflate_codes_in_window (grub_gzio_t gzio)
{
- register unsigned e; /* table entry flag/number of extra bits */
- unsigned n, d; /* length and index for copy */
- unsigned w; /* current window position */
- struct huft *t; /* pointer to table entry */
- unsigned ml, md; /* masks for bl and bd bits */
- register ulg b; /* bit buffer */
- register unsigned k; /* number of bits in bit buffer */
+ register unsigned e; /* table entry flag/number of extra bits */
+ unsigned n, d; /* length and index for copy */
+ unsigned w; /* current window position */
+ struct huft *t; /* pointer to table entry */
+ unsigned ml, md; /* masks for bl and bd bits */
+ register ulg b; /* bit buffer */
+ register unsigned k; /* number of bits in bit buffer */
/* make local copies of globals */
d = gzio->inflate_d;
n = gzio->inflate_n;
- b = gzio->bb; /* initialize bit buffer */
+ b = gzio->bb; /* initialize bit buffer */
k = gzio->bk;
- w = gzio->wp; /* initialize window position */
+ w = gzio->wp; /* initialize window position */
/* inflate the coded data */
- ml = mask_bits[gzio->bl]; /* precompute masks for speed */
+ ml = mask_bits[gzio->bl]; /* precompute masks for speed */
md = mask_bits[gzio->bd];
- for (;;) /* do until end of block */
+ for (;;) /* do until end of block */
{
if (! gzio->code_state)
- {
- NEEDBITS ((unsigned) gzio->bl);
- if ((e = (t = gzio->tl + ((unsigned) b & ml))->e) > 16)
- do
- {
- if (e == 99)
- {
- gzio->err = -1;
- return 1;
- }
- DUMPBITS (t->b);
- e -= 16;
- NEEDBITS (e);
- }
- while ((e = (t = t->v.t + ((unsigned) b & mask_bits[e]))->e) > 16);
- DUMPBITS (t->b);
-
- if (e == 16) /* then it's a literal */
- {
- gzio->slide[w++] = (uch) t->v.n;
- if (w == WSIZE)
- break;
- }
- else
- /* it's an EOB or a length */
- {
- /* exit if end of block */
- if (e == 15)
- {
- gzio->block_len = 0;
- break;
- }
-
- /* get length of block to copy */
- NEEDBITS (e);
- n = t->v.n + ((unsigned) b & mask_bits[e]);
- DUMPBITS (e);
-
- /* decode distance of block to copy */
- NEEDBITS ((unsigned) gzio->bd);
- if ((e = (t = gzio->td + ((unsigned) b & md))->e) > 16)
- do
- {
- if (e == 99)
- {
- gzio->err = -1;
- return 1;
- }
- DUMPBITS (t->b);
- e -= 16;
- NEEDBITS (e);
- }
- while ((e = (t = t->v.t + ((unsigned) b & mask_bits[e]))->e)
- > 16);
- DUMPBITS (t->b);
- NEEDBITS (e);
- d = w - t->v.n - ((unsigned) b & mask_bits[e]);
- DUMPBITS (e);
- gzio->code_state++;
- }
- }
+ {
+ NEEDBITS ((unsigned) gzio->bl);
+ if ((e = (t = gzio->tl + ((unsigned) b & ml))->e) > 16)
+ do
+ {
+ if (e == 99)
+ {
+ gzio->err = -1;
+ return 1;
+ }
+ DUMPBITS (t->b);
+ e -= 16;
+ NEEDBITS (e);
+ }
+ while ((e = (t = t->v.t + ((unsigned) b & mask_bits[e]))->e) > 16);
+ DUMPBITS (t->b);
+
+ if (e == 16) /* then it's a literal */
+ {
+ gzio->slide[w++] = (uch) t->v.n;
+ if (w == WSIZE)
+ break;
+ }
+ else
+ /* it's an EOB or a length */
+ {
+ /* exit if end of block */
+ if (e == 15)
+ {
+ gzio->block_len = 0;
+ break;
+ }
+
+ /* get length of block to copy */
+ NEEDBITS (e);
+ n = t->v.n + ((unsigned) b & mask_bits[e]);
+ DUMPBITS (e);
+
+ /* decode distance of block to copy */
+ NEEDBITS ((unsigned) gzio->bd);
+ if ((e = (t = gzio->td + ((unsigned) b & md))->e) > 16)
+ do
+ {
+ if (e == 99)
+ {
+ gzio->err = -1;
+ return 1;
+ }
+ DUMPBITS (t->b);
+ e -= 16;
+ NEEDBITS (e);
+ }
+ while ((e = (t = t->v.t + ((unsigned) b & mask_bits[e]))->e)
+ > 16);
+ DUMPBITS (t->b);
+ NEEDBITS (e);
+ d = w - t->v.n - ((unsigned) b & mask_bits[e]);
+ DUMPBITS (e);
+ gzio->code_state++;
+ }
+ }
if (gzio->code_state)
- {
- /* do the copy */
- do
- {
- n -= (e = (e = WSIZE - ((d &= WSIZE - 1) > w ? d : w)) > n ? n
- : e);
-
- if (w - d >= e)
- {
- fsw_memcpy (gzio->slide + w, gzio->slide + d, e);
- w += e;
- d += e;
- }
- else
- /* purposefully use the overlap for extra copies here!! */
- {
- while (e--)
- gzio->slide[w++] = gzio->slide[d++];
- }
-
- if (w == WSIZE)
- break;
- }
- while (n);
-
- if (! n)
- gzio->code_state--;
-
- /* did we break from the loop too soon? */
- if (w == WSIZE)
- break;
- }
+ {
+ /* do the copy */
+ do
+ {
+ n -= (e = (e = WSIZE - ((d &= WSIZE - 1) > w ? d : w)) > n ? n
+ : e);
+
+ if (w - d >= e)
+ {
+ fsw_memcpy (gzio->slide + w, gzio->slide + d, e);
+ w += e;
+ d += e;
+ }
+ else
+ /* purposefully use the overlap for extra copies here!! */
+ {
+ while (e--)
+ gzio->slide[w++] = gzio->slide[d++];
+ }
+
+ if (w == WSIZE)
+ break;
+ }
+ while (n);
+
+ if (! n)
+ gzio->code_state--;
+
+ /* did we break from the loop too soon? */
+ if (w == WSIZE)
+ break;
+ }
}
/* restore the globals from the locals */
gzio->inflate_d = d;
gzio->inflate_n = n;
- gzio->wp = w; /* restore global window pointer */
- gzio->bb = b; /* restore global bit buffer */
+ gzio->wp = w; /* restore global window pointer */
+ gzio->bb = b; /* restore global bit buffer */
gzio->bk = k;
return ! gzio->block_len;
static void
init_stored_block (grub_gzio_t gzio)
{
- register ulg b; /* bit buffer */
- register unsigned k; /* number of bits in bit buffer */
+ register ulg b; /* bit buffer */
+ register unsigned k; /* number of bits in bit buffer */
/* make local copies of globals */
- b = gzio->bb; /* initialize bit buffer */
+ b = gzio->bb; /* initialize bit buffer */
k = gzio->bk;
/* go to byte boundary */
static void
init_fixed_block (grub_gzio_t gzio)
{
- int i; /* temporary variable */
- unsigned l[288]; /* length list for huft_build */
+ int i; /* temporary variable */
+ unsigned l[288]; /* length list for huft_build */
/* set up literal table */
for (i = 0; i < 144; i++)
l[i] = 9;
for (; i < 280; i++)
l[i] = 7;
- for (; i < 288; i++) /* make a complete, but wrong code set */
+ for (; i < 288; i++) /* make a complete, but wrong code set */
l[i] = 8;
gzio->bl = 7;
if (huft_build (l, 288, 257, cplens, cplext, &gzio->tl, &gzio->bl) != 0)
{
- gzio->err = -1;
+ gzio->err = -1;
return;
}
/* set up distance table */
- for (i = 0; i < 30; i++) /* make an incomplete code set */
+ for (i = 0; i < 30; i++) /* make an incomplete code set */
l[i] = 5;
gzio->bd = 5;
if (huft_build (l, 30, 0, cpdist, cpdext, &gzio->td, &gzio->bd) > 1)
{
- gzio->err = -1;
+ gzio->err = -1;
huft_free (gzio->tl);
gzio->tl = 0;
return;
static void
init_dynamic_block (grub_gzio_t gzio)
{
- int i; /* temporary variables */
+ int i; /* temporary variables */
unsigned j;
- unsigned l; /* last length */
- unsigned m; /* mask for bit lengths table */
- unsigned n; /* number of lengths to get */
- unsigned nb; /* number of bit length codes */
- unsigned nl; /* number of literal/length codes */
- unsigned nd; /* number of distance codes */
- unsigned ll[286 + 30]; /* literal/length and distance code lengths */
- register ulg b; /* bit buffer */
- register unsigned k; /* number of bits in bit buffer */
+ unsigned l; /* last length */
+ unsigned m; /* mask for bit lengths table */
+ unsigned n; /* number of lengths to get */
+ unsigned nb; /* number of bit length codes */
+ unsigned nl; /* number of literal/length codes */
+ unsigned nd; /* number of distance codes */
+ unsigned ll[286 + 30]; /* literal/length and distance code lengths */
+ register ulg b; /* bit buffer */
+ register unsigned k; /* number of bits in bit buffer */
/* make local bit buffer */
b = gzio->bb;
/* read in table lengths */
NEEDBITS (5);
- nl = 257 + ((unsigned) b & 0x1f); /* number of literal/length codes */
+ nl = 257 + ((unsigned) b & 0x1f); /* number of literal/length codes */
DUMPBITS (5);
NEEDBITS (5);
- nd = 1 + ((unsigned) b & 0x1f); /* number of distance codes */
+ nd = 1 + ((unsigned) b & 0x1f); /* number of distance codes */
DUMPBITS (5);
NEEDBITS (4);
- nb = 4 + ((unsigned) b & 0xf); /* number of bit length codes */
+ nb = 4 + ((unsigned) b & 0xf); /* number of bit length codes */
DUMPBITS (4);
if (nl > 286 || nd > 30)
{
j = (gzio->td = gzio->tl + ((unsigned) b & m))->b;
DUMPBITS (j);
j = gzio->td->v.n;
- if (j < 16) /* length of code in bits (0..15) */
- ll[i++] = l = j; /* save last length in l */
- else if (j == 16) /* repeat last length 3 to 6 times */
- {
- NEEDBITS (2);
- j = 3 + ((unsigned) b & 3);
- DUMPBITS (2);
- if ((unsigned) i + j > n)
- {
- gzio->err = -1;
- return;
- }
- while (j--)
- ll[i++] = l;
- }
- else if (j == 17) /* 3 to 10 zero length codes */
- {
- NEEDBITS (3);
- j = 3 + ((unsigned) b & 7);
- DUMPBITS (3);
- if ((unsigned) i + j > n)
- {
- gzio->err = -1;
- return;
- }
- while (j--)
- ll[i++] = 0;
- l = 0;
- }
+ if (j < 16) /* length of code in bits (0..15) */
+ ll[i++] = l = j; /* save last length in l */
+ else if (j == 16) /* repeat last length 3 to 6 times */
+ {
+ NEEDBITS (2);
+ j = 3 + ((unsigned) b & 3);
+ DUMPBITS (2);
+ if ((unsigned) i + j > n)
+ {
+ gzio->err = -1;
+ return;
+ }
+ while (j--)
+ ll[i++] = l;
+ }
+ else if (j == 17) /* 3 to 10 zero length codes */
+ {
+ NEEDBITS (3);
+ j = 3 + ((unsigned) b & 7);
+ DUMPBITS (3);
+ if ((unsigned) i + j > n)
+ {
+ gzio->err = -1;
+ return;
+ }
+ while (j--)
+ ll[i++] = 0;
+ l = 0;
+ }
else
- /* j == 18: 11 to 138 zero length codes */
- {
- NEEDBITS (7);
- j = 11 + ((unsigned) b & 0x7f);
- DUMPBITS (7);
- if ((unsigned) i + j > n)
- {
- gzio->err = -1;
- return;
- }
- while (j--)
- ll[i++] = 0;
- l = 0;
- }
+ /* j == 18: 11 to 138 zero length codes */
+ {
+ NEEDBITS (7);
+ j = 11 + ((unsigned) b & 0x7f);
+ DUMPBITS (7);
+ if ((unsigned) i + j > n)
+ {
+ gzio->err = -1;
+ return;
+ }
+ while (j--)
+ ll[i++] = 0;
+ l = 0;
+ }
}
/* free decoding table for trees */
static void
get_new_block (grub_gzio_t gzio)
{
- register ulg b; /* bit buffer */
- register unsigned k; /* number of bits in bit buffer */
+ register ulg b; /* bit buffer */
+ register unsigned k; /* number of bits in bit buffer */
/* make local bit buffer */
b = gzio->bb;
while (gzio->wp < WSIZE && !gzio->err)
{
if (! gzio->block_len)
- {
- if (gzio->last_block)
- break;
+ {
+ if (gzio->last_block)
+ break;
- get_new_block (gzio);
- }
+ get_new_block (gzio);
+ }
if (gzio->block_type > INFLATE_DYNAMIC)
- gzio->err = -1;
+ gzio->err = -1;
if (gzio->err)
- return;
+ return;
/*
* Expand stored block here.
*/
if (gzio->block_type == INFLATE_STORED)
- {
- int w = gzio->wp;
+ {
+ int w = gzio->wp;
- /*
- * This is basically a glorified pass-through
- */
+ /*
+ * This is basically a glorified pass-through
+ */
- while (gzio->block_len && w < WSIZE && !gzio->err)
- {
- gzio->slide[w++] = get_byte (gzio);
- gzio->block_len--;
- }
+ while (gzio->block_len && w < WSIZE && !gzio->err)
+ {
+ gzio->slide[w++] = get_byte (gzio);
+ gzio->block_len--;
+ }
- gzio->wp = w;
+ gzio->wp = w;
- continue;
- }
+ continue;
+ }
/*
* Expand other kind of block.
*/
if (inflate_codes_in_window (gzio))
- {
- huft_free (gzio->tl);
- huft_free (gzio->td);
- gzio->tl = 0;
- gzio->td = 0;
- }
+ {
+ huft_free (gzio->tl);
+ huft_free (gzio->td);
+ gzio->tl = 0;
+ gzio->td = 0;
+ }
}
gzio->saved_offset += WSIZE;
static grub_ssize_t
grub_gzio_read_real (grub_gzio_t gzio, grub_off_t offset,
- char *buf, grub_size_t len)
+ char *buf, grub_size_t len)
{
grub_ssize_t ret = 0;
register char *srcaddr;
while (offset >= gzio->saved_offset)
- inflate_window (gzio);
+ inflate_window (gzio);
srcaddr = (char *) ((offset & (WSIZE - 1)) + gzio->slide);
size = gzio->saved_offset - offset;
if (size > len)
- size = len;
+ size = len;
fsw_memcpy (buf, srcaddr, size);
grub_ssize_t
grub_zlib_decompress (char *inbuf, grub_size_t insize, grub_off_t off,
- char *outbuf, grub_size_t outsize)
+ char *outbuf, grub_size_t outsize)
{
grub_gzio_t gzio = 0;
grub_ssize_t ret;