Commit 3ea40c9e authored by Son Pham

Lab 9, Problem 2 Completed

parent 83a936af
# Name: Son Pham
# Class: CSCI 315
# Prof: Luiz Felipe Perrone
# answers.txt
[1.1] Since my memory allocator doesn't use pages, the process specifies exactly how much memory it needs case by case, and only just enough memory is allocated, the only type of fragmentation possible is EXTERNAL.
[1.2] Some of the performance metrics we could use:
- Average fragmentation (sum of allocated memory over the number of blobs).
- Standard deviation of the blob sizes.
- Variance of the blob sizes.
- Confidence interval on the average.
[1.3] double average_frag()
Example free list (blob sizes): 123 -> 456 -> 789
Pseudo-code:

double average_frag() {
    int numBlobs = 0;
    int totalSize = 0;
    struct dnode *blob = head of the free_list;

    while (blob != NULL) {
        numBlobs += 1;
        totalSize += blob->size;   /* size of this blob */
        blob = blob->next;
    }
    return (double) totalSize / numBlobs;
}
Since totalSize is always the total amount of memory held in the blobs, I only need to take that amount and divide it by numBlobs.
Since numBlobs changes every time an allocation or deallocation succeeds, I can also keep an int that tracks this number so that I don't have to count the blobs every time; a sketch of that counter-based version follows.
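A minimal sketch of that counter-based version, assuming two hypothetical globals (free_blob_count and free_bytes_total; the names are mine, not from the lab code) that allocate() and deallocate() keep up to date whenever a blob enters or leaves the free list:

#include <stddef.h>

/* Hypothetical counters maintained by allocate()/deallocate() whenever a
   blob is added to or removed from the free list. */
static int    free_blob_count  = 0;
static size_t free_bytes_total = 0;

/* Average size of a free blob; returns 0.0 when the free list is empty. */
double average_frag(void)
{
    if (free_blob_count == 0)
        return 0.0;
    return (double) free_bytes_total / free_blob_count;
}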
[2.1] Think critically about the pseudo-code given. The way the algorithm is currently structured is not typical of real programs because:
1. A program doesn't usually allocate and deallocate in a tight alternating loop like that; it usually allocates all at once and deallocates all at once.
2. It is not representative of a program's fragmentation, because when the number of requests is high you basically chop the memory up until everything is in pieces of only a few bytes.
Usually, a program ALLOCATES ALL of its MEMORY first and DEALLOCATES ALL of its MEMORY later. In order to measure a representative amount of fragmentation, it is very important to perform all of the allocations first and then the deallocations.
The pseudo-code for the algorithm would look like:

srandom(seed);
void *p[num_requests];
int r;

/* allocate everything first */
for (r = 0; r < num_requests; r++)
    p[r] = allocate(random number between 100 and 1000);

/* then deallocate everything */
for (r = 0; r < num_requests; r++)
    if (p[r] != NULL)
        deallocate(p[r]);

average_frag();
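A more complete sketch of such a driver in C, assuming the allocator API from allocator.h and an average_frag() declared somewhere visible; the pool size, seed, and request count are made-up values, not taken from the lab handout:

#include <stdio.h>
#include <stdlib.h>
#include "allocator.h"

double average_frag(void);          /* assumed to be provided by the allocator */

int main(void)
{
    int num_requests = 1000;        /* made-up request count */
    unsigned int seed = 42;         /* made-up seed */
    void *p[num_requests];

    srandom(seed);
    allocator_init(1 << 20);        /* 1 MiB pool; the size is an assumption */

    /* Phase 1: allocate everything first. */
    for (int r = 0; r < num_requests; r++)
        p[r] = allocate((size_t)(100 + random() % 901));   /* 100..1000 bytes */

    /* Phase 2: deallocate everything afterwards. */
    for (int r = 0; r < num_requests; r++)
        if (p[r] != NULL)
            deallocate(p[r]);

    printf("average fragmentation: %f\n", average_frag());
    return 0;
}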
CC = gcc -I ./include
CFLAGS = -std=gnu99 -Wall -g #-DDEBUG
INC = ./include
SRC = ./src
OBJ = ./obj
DOC = ./doc
BIN = ./bin
vpath %.h ./include
vpath %.c ./src
EXECS = dlisttest frag-eval
all: $(EXECS)
doc:
doxygen
$(OBJ)/allocator.o: dlist.h dnode.h allocator.h allocator.c
$(CC) $(CFLAGS) -c $(SRC)/allocator.c -o $(OBJ)/allocator.o
$(OBJ)/dnode.o: dnode.h dnode.c
$(CC) $(CFLAGS) -c $(SRC)/dnode.c -o $(OBJ)/dnode.o
$(OBJ)/dlist.o: dlist.h dlist.c dnode.h
$(CC) $(CFLAGS) -c $(SRC)/dlist.c -o $(OBJ)/dlist.o
dlisttest: $(SRC)/dlisttest.c $(OBJ)/dnode.o $(OBJ)/dlist.o
$(CC) $(CFLAGS) $(OBJ)/dnode.o $(OBJ)/dlist.o $(SRC)/dlisttest.c -o $(BIN)/dlisttest
frag-eval: $(SRC)/frag-eval.c $(OBJ)/allocator.o $(OBJ)/dnode.o $(OBJ)/dlist.o
$(CC) $(CFLAGS) $(OBJ)/allocator.o $(OBJ)/dnode.o $(OBJ)/dlist.o $(SRC)/frag-eval.c -o $(BIN)/frag-eval
.PHONY: clean
clean:
/bin/rm -rf $(BIN)/* $(OBJ)/* core* *~
CC = gcc -I ./include
CFLAGS = -std=gnu99 -Wall -g #-DDEBUG
INC = ./include
SRC = ./src
OBJ = ./obj
DOC = ./doc
BIN = ./bin
vpath %.h ./include
vpath %.c ./src
EXECS = dlisttest extra-credit-test
all: $(EXECS)
doc:
doxygen
$(OBJ)/allocator.o: dlist.h dnode.h allocator.h allocator.c
$(CC) $(CFLAGS) -c $(SRC)/allocator.c -o $(OBJ)/allocator.o
$(OBJ)/dnode.o: dnode.h dnode.c
$(CC) $(CFLAGS) -c $(SRC)/dnode.c -o $(OBJ)/dnode.o
$(OBJ)/dlist.o: dlist.h dlist.c dnode.h
$(CC) $(CFLAGS) -c $(SRC)/dlist.c -o $(OBJ)/dlist.o
dlisttest: $(SRC)/dlisttest.c $(OBJ)/dnode.o $(OBJ)/dlist.o
$(CC) $(CFLAGS) $(OBJ)/dnode.o $(OBJ)/dlist.o $(SRC)/dlisttest.c -o $(BIN)/dlisttest
extra-credit-test: $(SRC)/extra-credit-test.c $(OBJ)/allocator.o $(OBJ)/dnode.o $(OBJ)/dlist.o
$(CC) $(CFLAGS) $(OBJ)/allocator.o $(OBJ)/dnode.o $(OBJ)/dlist.o $(SRC)/extra-credit-test.c -o $(BIN)/extra-credit-test
.PHONY: clean
clean:
/bin/rm -rf $(BIN)/* $(OBJ)/* core* *~
Name: Son Pham
Class: CSCI 208
Prof: Luiz Felipe Perrone
Lab 8 - answers.txt
=========
Problem 1
=========
1. A segmentation fault occurs when a process tries to access a memory address that lies outside the boundaries of the pages that the process encapsulates.
2. A doubly linked list has a lot of advantages for this kind of problem:
+ O(n) space complexity: you only need three pieces of information for each block.
+ Since each node only needs pointers to link to the others, the list can be stored virtually anywhere as long as those pieces of information stay intact. Other data structures, such as arrays, require spatial locality.
+ O(1) allocation: adding a new node to a linked list takes almost no effort, and a linked list doesn't need a fixed bound. In contrast, an array may need an O(n) pass to double its size.
Negatives:
+ O(n) access time: you have to traverse the whole list to find out where a block is.
With all of these benefits and an acceptable access time, this data structure is ideal for the problem; a small sketch of the node layout and the O(1) insertion follows.
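A minimal sketch of that idea, assuming the three pieces of information are the block's start address, its size, and the links to the neighbouring nodes (the field names are mine, not the lab's dnode.h):

#include <stddef.h>

/* Illustrative node layout; field names are assumptions. */
struct node {
    void        *block;   /* start address of the memory block */
    size_t       size;    /* size of the block in bytes */
    struct node *prev;
    struct node *next;
};

/* O(1): link a node in at the front of the list, no shifting or resizing. */
static void push_front(struct node **head, struct node *n)
{
    n->prev = NULL;
    n->next = *head;
    if (*head != NULL)
        (*head)->prev = n;
    *head = n;
}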
3. malloc is a general-purpose memory allocator. It adds a "memory node" to the allocated list and assigns the requested amount of memory to that node; the amount of memory in the free list decreases accordingly.
free basically grabs a node from the allocated list and adds the same node back to the free list.
A doubly linked list is a very suitable data structure for this problem because every operation runs in linear time or better and there is no requirement for spatial locality.
A heap structure can also be very good if the user specifically wants either best-fit or worst-fit, as it is slightly more efficient at adding a node (O(log n) time). However, compacting nodes in a heap requires O(n log n) time, and the benefit isn't worth the cost because the memory has to be compacted very regularly.
4. External fragmentation is a piece of memory OUTSIDE of the process that contains no meaningful data.
5. Internal fragmentation is a piece of memory INSIDE of the process that contains no meaningful data. It arises because the process doesn't occupy the whole memory page used to store its data, leaving some internal residue.
6. First-fit: go to the first memory blob that is available and has enough capacity, and allocate the memory there. This is obviously the most efficient in terms of allocation speed since it requires the least traversal. However, we have no control over whether the placement is good or bad.
7. Best-fit: go to the smallest memory blob that still has enough memory to hold the process. It tries to make the best use of small pieces of memory so that big pieces remain available for memory-intensive processes. However, it leaves a lot of small residue.
8. Worst-fit: go to the biggest memory blob possible and allocate a chunk of it to the process. This has the effect of "averaging out" the amount of memory available in each chunk and reduces the amount of small residue. However, it does not leave a lot of room for big processes.
# Name: Son Pham
# Class: CSCI 315
# Prof: Luiz Felipe Perrone
# designAPI.txt
Testing:
--------
My four test files are:
memory-test.c
worst-fit-test.c
best-fit-test.c
first-fit-test.c
dlist.c:
--------
I add the function dlist_find_node so that I can find the node that corresponds to a given piece of memory. I feel that it is a lot easier to deal with the node than with the memory address alone. This becomes important when I have to grab the size of the node that I want to deallocate.
I also add two functions, dlist_get_front_node and dlist_get_back_node, to get the addresses of the nodes at the two ends of the list.
I add the dlist_print function to print the list in a convenient and readable manner.
dnode.h:
--------
I add the size of the memory chunk as one of the pieces of information stored in the node, to make the chunk easier to deal with.
First-fit policy:
-----------------
I simply traverse the entire list and find the first node that is available and has enough capacity to hold the requested size.
Best-fit policy:
----------------
I traverse the list and find the smallest chunk of memory that still has enough capacity to hold the requested size (a sketch of this traversal follows the policy descriptions).
Worst-fit policy:
-----------------
I traverse the list and find the largest chunk of memory that still has enough capacity to hold the requested size.
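As an illustration of the best-fit traversal described above, here is a minimal sketch using an illustrative free-list node (the field names are assumptions; the lab's dlist/dnode API may differ):

#include <stddef.h>

/* Illustrative free-list node; field names are assumptions. */
struct node {
    void        *block;
    size_t       size;
    struct node *prev;
    struct node *next;
};

/* Best fit: walk the whole free list and remember the smallest node that is
   still large enough to satisfy the request; return NULL if nothing fits. */
static struct node *find_best_fit(struct node *head, size_t size)
{
    struct node *best = NULL;

    for (struct node *cur = head; cur != NULL; cur = cur->next)
        if (cur->size >= size && (best == NULL || cur->size < best->size))
            best = cur;
    return best;
}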
Extra-credit:
-------------
The strategy is to check every single node and see whether any node is adjacent to the one we just added back to free_list. For this I write the function is_close_together() to check whether two nodes are adjacent and merge_two_dnodes() to merge them. The two functions follow the convention that nodes must be checked properly before they are merged, and that the node that remains is the one with the smaller memory address; a sketch of the two helpers follows.
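A sketch of what those two helpers could look like, matching the signatures in allocator.h but assuming hypothetical dnode fields (ptr for the block's start address, size for its length); the real dnode.h may differ:

#include <stdbool.h>
#include <stddef.h>

/* Assumed fields; the lab's real struct dnode may differ. */
struct dnode {
    void         *ptr;    /* start address of the free block */
    size_t        size;   /* length of the free block in bytes */
    struct dnode *prev;
    struct dnode *next;
};

/* Two free blocks are adjacent when one ends exactly where the other begins. */
bool is_close_together(struct dnode *node1, struct dnode *node2)
{
    return (char *) node1->ptr + node1->size == node2->ptr ||
           (char *) node2->ptr + node2->size == node1->ptr;
}

/* Merge the two blocks into the node with the smaller address, growing its
   size; return 0 on success, -1 if the nodes are not adjacent. The caller is
   expected to unlink the higher-address node from the free list afterwards. */
int merge_two_dnodes(struct dnode *node1, struct dnode *node2)
{
    if (!is_close_together(node1, node2))
        return -1;
    if ((char *) node2->ptr < (char *) node1->ptr) {   /* keep lower address */
        struct dnode *tmp = node1;
        node1 = node2;
        node2 = tmp;
    }
    node1->size += node2->size;                        /* absorb the neighbour */
    return 0;
}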
# Name: Son Pham
# Class: CSCI 315
# Prof: Luiz Felipe Perrone
# designAPI.txt
Testing:
--------
My four test files are:
memory-test.c
worst-fit-test.c
best-fit-test.c
first-fit-test.c
dlist.c:
--------
I add function dlist_find_node so that I can find the node that corresponds to the memory. I feel that it is a lot easier to deal with the node than the memory of the node. This becomes important when I have to grab the size of the node that I want to deallocate
I also add two functions: dlist_get_front_node and dlist_get_back_node to get the address of the two nodes at the two ends of the list.
I add the dlist_print function to print the list in a convenient and tractable manner.
dnode.h:
--------
I add the size of the memory chunk as one of the piece of information of the node to make it easier to deal with.
First-fit policy:
-----------------
I simply traverse the entire list and find the first node that is available and has enough capacity to carry the whole size
Best-fit policy:
----------------
I traverse the list and find the smallest chunk of memory that still has enough capacity to carry the allocated size
Worst-fit policy:
-----------------
I traverse the list and find the largest chunk of memory that still has enough capacity to carry the allocated size.
CC = gcc -I ./include
CFLAGS = -std=gnu99 -Wall -g #-DDEBUG
INC = ./include
SRC = ./src
OBJ = ./obj
DOC = ./doc
BIN = ./bin
vpath %.h ./include
vpath %.c ./src
EXECS = dlisttest memory-test best-fit-test worst-fit-test first-fit-test
all: $(EXECS)
doc:
doxygen
$(OBJ)/allocator.o: dlist.h dnode.h allocator.h allocator.c
$(CC) $(CFLAGS) -c $(SRC)/allocator.c -o $(OBJ)/allocator.o
$(OBJ)/dnode.o: dnode.h dnode.c
$(CC) $(CFLAGS) -c $(SRC)/dnode.c -o $(OBJ)/dnode.o
$(OBJ)/dlist.o: dlist.h dlist.c dnode.h
$(CC) $(CFLAGS) -c $(SRC)/dlist.c -o $(OBJ)/dlist.o
dlisttest: $(SRC)/dlisttest.c $(OBJ)/dnode.o $(OBJ)/dlist.o
$(CC) $(CFLAGS) $(OBJ)/dnode.o $(OBJ)/dlist.o $(SRC)/dlisttest.c -o $(BIN)/dlisttest
memory-test: $(SRC)/memory-test.c $(OBJ)/allocator.o $(OBJ)/dnode.o $(OBJ)/dlist.o
$(CC) $(CFLAGS) $(OBJ)/allocator.o $(OBJ)/dnode.o $(OBJ)/dlist.o $(SRC)/memory-test.c -o $(BIN)/memory-test
first-fit-test: $(SRC)/first-fit-test.c $(OBJ)/allocator.o $(OBJ)/dnode.o $(OBJ)/dlist.o
$(CC) $(CFLAGS) $(OBJ)/allocator.o $(OBJ)/dnode.o $(OBJ)/dlist.o $(SRC)/first-fit-test.c -o $(BIN)/first-fit-test
worst-fit-test: $(SRC)/worst-fit-test.c $(OBJ)/allocator.o $(OBJ)/dnode.o $(OBJ)/dlist.o
$(CC) $(CFLAGS) $(OBJ)/allocator.o $(OBJ)/dnode.o $(OBJ)/dlist.o $(SRC)/worst-fit-test.c -o $(BIN)/worst-fit-test
best-fit-test: $(SRC)/best-fit-test.c $(OBJ)/allocator.o $(OBJ)/dnode.o $(OBJ)/dlist.o
$(CC) $(CFLAGS) $(OBJ)/allocator.o $(OBJ)/dnode.o $(OBJ)/dlist.o $(SRC)/best-fit-test.c -o $(BIN)/best-fit-test
.PHONY: clean
clean:
/bin/rm -rf $(BIN)/* $(OBJ)/* $(DOC)/* core* *~
/**
* Name: Son Pham
* Class: CSCI 315
* Prof: Luiz Felipe Perrone
* allocator.h
*/
#ifndef _ALLOCATOR_H_
#define _ALLOCATOR_H_
#define FIRST_FIT 0
#define WORST_FIT 1
#define BEST_FIT 2

#include <stdbool.h>  /* bool */
#include <stddef.h>   /* size_t */
#include "dlist.h"    /* struct dlist */
#include "dnode.h"    /* struct dnode */
/* Necessary information about the doubly linked list */
struct dlist* free_list;
struct dlist* allocate_list;
int policy;
void* mem;
/**
* Initialize the memory allocator with a pool of the given size
* @param size the size of the memory pool in bytes
*/
int allocator_init(size_t size);
/**
* Allocate a size amount of memory and return a pointer to the memory
* Return NULL on error.
* @param: size indicating the size of the memory the user wants to allocate
*/
void* allocate(size_t size);
/**
* Allocate a size amount of memory based on the first-fit scheme
* and return a pointer to the memory
* Return NULL on error.
* @param: size indicating the size of the memory the user wants to allocate
*/
void* allocate_first_fit(size_t size);
/**
* Allocate a size amount of memory based on the best-fit scheme
* and return a pointer to the memory
* Return NULL on error.
* @param: size indicating the size of the memory the user wants to allocate
*/
void* allocate_best_fit(size_t size);
/**
* Allocate a size amount of memory based on the worst-fit scheme
* and return a pointer to the memory
* Return NULL on error.
* @param: size indicating the size of the memory the user wants to allocate
*/
void* allocate_worst_fit(size_t size);
/**
* Deallocate the memory that ptr points to and return the corresponding
* block to the free list.
* Return a status code indicating success or failure.
* @param: ptr pointer to the memory the user wants to deallocate
*/
int deallocate(void* ptr);
/**
* Merge the two supposedly adjacent free nodes into one node
* @param: node1, node2 are two adjacent, free nodes.
*/
int merge_two_dnodes(struct dnode* node1, struct dnode* node2);
/**
* Check if the two nodes are close together
* @param: node1, node2 are two nodes in check
* @return: true if they are adjacent, false otherwise.
*/
bool is_close_together(struct dnode* node1, struct dnode* node2);
#endif /* _ALLOCATOR_H_ */
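A small usage sketch of this API. The pool size is an example value, and selecting the policy by assigning the global declared in allocator.h is my assumption about how the policy is chosen (allocator_init() may instead set a default):

#include <stdio.h>
#include "allocator.h"

int main(void)
{
    allocator_init(1024);     /* pool size is an example value */

    /* Assumption: pick one of FIRST_FIT, BEST_FIT, WORST_FIT by assigning
       the global policy variable declared in allocator.h. */
    policy = BEST_FIT;

    void *a = allocate(100);  /* returns NULL on error, per allocator.h */
    void *b = allocate(200);

    if (b != NULL)
        deallocate(b);        /* hand the blocks back to the free list */
    if (a != NULL)
        deallocate(a);

    printf("done\n");
    return 0;
}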