thesis,bib: add paper about linux capabilities
parent c389731c68
commit 786e228fce
2 changed files with 45 additions and 32 deletions
@@ -125,6 +125,7 @@ Collectively, they represent the context of a process, and changes to resources
}

\subsubsection{Capabilities}
+\Glspl{lxcaps} \cite{Hallyn2008}
% TODO

\subsubsection{Control Groups}
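The added line combines two LaTeX mechanisms: \Glspl{lxcaps} prints the capitalised plural form of a glossary entry (glossaries package), and \cite{Hallyn2008} resolves against the BibTeX entry introduced in the second file below. The lxcaps glossary definition and the bibliography file name are not part of this commit; the following is a minimal, hypothetical sketch of how such a definition and the new citation could fit together:

% Hypothetical sketch -- the lxcaps definition, its description text and the
% bibliography file name are assumptions, not content of this commit.
\documentclass{article}
\usepackage{glossaries}
\makeglossaries
\newglossaryentry{lxcaps}{
  name={Linux capability},
  plural={Linux capabilities},
  description={per-thread privilege units into which Linux splits the rights of the superuser}
}
\begin{document}
% Prints the capitalised plural of the glossary entry and cites the paper added by this commit.
\Glspl{lxcaps} \cite{Hallyn2008}
\printglossaries
\bibliographystyle{plain}
\bibliography{thesis} % assumed name of the Mendeley-generated .bib file
\end{document}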
@@ -3,27 +3,17 @@ Any changes to this file will be lost if it is regenerated by Mendeley.

BibTeX export options can be customized via Options -> BibTeX in Mendeley Desktop

-@article{Menage2007,
-abstract = {While Linux provides copious monitoring and control options for individual processes, it has less support for applying the same operations efficiently to related groups of processes. This has led to multiple proposals for subtly different mechanisms for process aggregation for resource control and isolation. Even though some of these efforts could conceptually operate well together, merging each of them in their current states would lead to duplication in core kernel data structures/routines. The Containers framework, based on the existing cpusets mechanism, provides the generic process group- ing features required by the various different resource controllers and other process-affecting subsystems. The result is to reduce the code (and kernel impact) required for such subsystems, and provide a common interface with greater scope for co-operation. This paper looks at the challenges in meeting the needs of all the stakeholders, which include low overhead, feature richness, completeness and flexible groupings. We demonstrate how to extend containers by writing resource control and monitoring components, we also look at how to implement namespaces and cpusets on top of the framework.},
-author = {Menage, Paul B},
-file = {:home/steveej/src/github/steveej/msc-thesis/papers/Adding Generic Process Containers to the Linux Kernel.pdf:pdf},
-journal = {Proceedings of the Ottawa Linux Symposium},
-pages = {45--58},
-title = {{Adding Generic Process Containers to the Linux Kernel}},
-url = {http://www.kernel.org/doc/ols/2007/ols2007v2-pages-45-58.pdf},
-year = {2007}
-}
-@article{Fink2014,
-abstract = {Docker is a relatively new method of virtualization available natively for 64-bit Linux. Compared to more traditional virtualization techniques, Docker is lighter on system resources, offers a git-like system of commits and tags, and can be scaled from your laptop to the cloud.},
-author = {Fink, John},
-file = {:home/steveej/src/github/steveej/msc-thesis/papers/Docker - a Software as a Service, Operating System-Level Virtualization Framework.pdf:pdf},
-journal = {Code4Lib},
-number = {25},
-pages = {3--5},
-title = {{Docker: a Software as a Service, Operating System-Level Virtualization Framework}},
-url = {http://journal.code4lib.org/articles/9669},
+@inproceedings{Hallyn2008,
+author = {Hallyn, S.E. and Morgan, A.G.},
+booktitle = {Linux Symposium},
+file = {:home/steveej/src/github/steveej/msc-thesis/papers/Linux Capabilities$\backslash$: making them work.pdf:pdf},
+issn = {1440-1746},
+keywords = {Animals,Gastroenterology,Gastrointestinal Diseases,Humans},
+pmid = {21751466},
+title = {{Linux Capabilities: making them work}},
+url = {http://kernel.org/doc/mirror/ols2008v1.pdf{\#}page=163},
volume = {1},
-year = {2014}
+year = {2008}
}
@book{Utrecht2006,
abstract = {Software deployment is the set of activities related to getting$\backslash$r$\backslash$nsoftware components to work on the machines of end users. It includes$\backslash$r$\backslash$nactivities such as installation, upgrading, uninstallation, and so on.$\backslash$r$\backslash$nMany tools have been developed to support deployment, but they all$\backslash$r$\backslash$nhave serious limitations with respect to correctness. For instance,$\backslash$r$\backslash$nthe installation of a component can lead to the failure of previously$\backslash$r$\backslash$ninstalled components; a component might require other components that$\backslash$r$\backslash$nare not present; and it is generally difficult to undo deployment$\backslash$r$\backslash$nactions. The fundamental causes of these problems are a lack of$\backslash$r$\backslash$nisolation between components, the difficulty in identifying the$\backslash$r$\backslash$ndependencies between components, and incompatibilities between$\backslash$r$\backslash$nversions and variants of components.$\backslash$r$\backslash$n $\backslash$r$\backslash$nThis thesis describes a better approach based on a purely functional$\backslash$r$\backslash$ndeployment model, implemented in a deployment system called Nix.$\backslash$r$\backslash$nComponents are stored in isolation from each other in a Nix store.$\backslash$r$\backslash$nEach component has a name that contains a cryptographic hash of all$\backslash$r$\backslash$ninputs that contributed to its build process, and the content of a$\backslash$r$\backslash$ncomponent never changes after it has been built. Hence the model is$\backslash$r$\backslash$npurely functional.$\backslash$r$\backslash$n $\backslash$r$\backslash$nThis storage scheme provides several important advantages. First, it$\backslash$r$\backslash$nensures isolation between components: if two components differ in any$\backslash$r$\backslash$nway, they will be stored in different locations and will not overwrite$\backslash$r$\backslash$neach other. Second, it allows us to identify component dependencies.$\backslash$r$\backslash$nUndeclared build time dependencies are prevented due to the absence of$\backslash$r$\backslash$n"global" component directories used in other deployment systems.$\backslash$r$\backslash$nRuntime dependencies can be found by scanning for cryptographic hashes$\backslash$r$\backslash$nin the binary contents of components, a technique analogous to$\backslash$r$\backslash$nconservative garbage collection in programming language$\backslash$r$\backslash$nimplementation. Since dependency information is complete, complete$\backslash$r$\backslash$ndeployment can be performed by copying closures of components under$\backslash$r$\backslash$nthe dependency relation.$\backslash$r$\backslash$n $\backslash$r$\backslash$nDevelopers and users are not confronted with components' cryptographic$\backslash$r$\backslash$nhashes directly. 
Components are built automatically from Nix$\backslash$r$\backslash$nexpressions, which describe how to build and compose arbitrary$\backslash$r$\backslash$nsoftware components; hashes are computed as part of this process.$\backslash$r$\backslash$nComponents are automatically made available to users through "user$\backslash$r$\backslash$nenvironments", which are synthesised sets of activated components.$\backslash$r$\backslash$nUser environments enable atomic upgrades and rollbacks, as well as$\backslash$r$\backslash$ndifferent sets of activated components for different users.$\backslash$r$\backslash$n $\backslash$r$\backslash$nNix expressions provide a source-based deployment model. However,$\backslash$r$\backslash$nsource-based deployment can be transparently optimised into binary$\backslash$r$\backslash$ndeployment by making pre-built binaries (keyed on their cryptographic$\backslash$r$\backslash$nhashes) available in a shared location such as a network server. This$\backslash$r$\backslash$nis referred to as transparent source/binary deployment.$\backslash$r$\backslash$n $\backslash$r$\backslash$nThe purely functional deployment model has been validated by applying$\backslash$r$\backslash$nit to the deployment of more than 278 existing Unix packages. In$\backslash$r$\backslash$naddition, this thesis shows that the model can be applied naturally to$\backslash$r$\backslash$nthe related activities of continuous integration using build farms,$\backslash$r$\backslash$nservice deployment and build management.},
@@ -41,18 +31,16 @@ url = {http://www.st.ewi.tudelft.nl/{~}dolstra/pubs/phd-thesis.pdf},
volume = {56},
year = {2006}
}
-@article{Felter2014,
-abstract = {IBM Research Report Isolation and resource control for cloud applications has traditionally been achieve through the use of virtual machines. Deploying applications in a VM results in reduced performance due to the extra levels of abstraction. In a cloud environment, this results in loss efficiency for the infrastructure. Newer advances in container-based virtualization simplifies the deployment of applications while isolating them from one another. In this paper, we explore the performance of traditional virtual machine deployments, and contrast them with the use of Linux containers. We use a suite of workloads that stress the CPU, memory, storage and networking resources. Our results show that containers result in equal or better performance than VM in almost all cases. Both VMs and containers require tuning to support I/O-intensive applicaions. We also discuss the implications of our performance results for future cloud architecture.},
-author = {Felter, Wes and Ferreira, Alexandre and Rajamony, Ram and Rubio, Juan},
-doi = {10.1109/ISPASS.2015.7095802},
-file = {:home/steveej/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Felter et al. - 2014 - An Updated Performance Comparison of Virtual Machines and Linux Containers(3).pdf:pdf},
-isbn = {9781479919574},
-journal = {Technology},
-keywords = {cloud computing,performance,virtualization},
-pages = {171--172},
-title = {{An Updated Performance Comparison of Virtual Machines and Linux Containers}},
-url = {http://domino.research.ibm.com/library/CyberDig.nsf/papers/0929052195DD819C85257D2300681E7B/{\$}File/rc25482.pdf},
-volume = {25482},
+@article{Fink2014,
+abstract = {Docker is a relatively new method of virtualization available natively for 64-bit Linux. Compared to more traditional virtualization techniques, Docker is lighter on system resources, offers a git-like system of commits and tags, and can be scaled from your laptop to the cloud.},
+author = {Fink, John},
+file = {:home/steveej/src/github/steveej/msc-thesis/papers/Docker - a Software as a Service, Operating System-Level Virtualization Framework.pdf:pdf},
+journal = {Code4Lib},
+number = {25},
+pages = {3--5},
+title = {{Docker: a Software as a Service, Operating System-Level Virtualization Framework}},
+url = {http://journal.code4lib.org/articles/9669},
+volume = {1},
year = {2014}
}
@book{Sarton1975,
@@ -64,6 +52,16 @@ pages = {145},
title = {{Introduction to the history of science.}},
year = {1975}
}
+@article{Menage2007,
+abstract = {While Linux provides copious monitoring and control options for individual processes, it has less support for applying the same operations efficiently to related groups of processes. This has led to multiple proposals for subtly different mechanisms for process aggregation for resource control and isolation. Even though some of these efforts could conceptually operate well together, merging each of them in their current states would lead to duplication in core kernel data structures/routines. The Containers framework, based on the existing cpusets mechanism, provides the generic process group- ing features required by the various different resource controllers and other process-affecting subsystems. The result is to reduce the code (and kernel impact) required for such subsystems, and provide a common interface with greater scope for co-operation. This paper looks at the challenges in meeting the needs of all the stakeholders, which include low overhead, feature richness, completeness and flexible groupings. We demonstrate how to extend containers by writing resource control and monitoring components, we also look at how to implement namespaces and cpusets on top of the framework.},
+author = {Menage, Paul B},
+file = {:home/steveej/src/github/steveej/msc-thesis/papers/Adding Generic Process Containers to the Linux Kernel.pdf:pdf},
+journal = {Proceedings of the Ottawa Linux Symposium},
+pages = {45--58},
+title = {{Adding Generic Process Containers to the Linux Kernel}},
+url = {http://www.kernel.org/doc/ols/2007/ols2007v2-pages-45-58.pdf},
+year = {2007}
+}
@inproceedings{Reshetova2014,
abstract = {The need for flexible, low-overhead virtualization is evident on many fronts ranging from high-density cloud servers to mobile devices. During the past decade OS-level virtualization has emerged as a new, efficient approach for virtualization, with implementations in multiple different Unix-based systems. Despite its popularity, there has been no systematic study of OS-level virtualization from the point of view of security. In this report, we conduct a comparative study of several OS-level virtualization systems, discuss their security and identify some gaps in current solutions.},
archivePrefix = {arXiv},
@@ -80,3 +78,17 @@ title = {{Security of OS-level virtualization technologies}},
volume = {8788},
year = {2014}
}
+@article{Felter2014,
+abstract = {IBM Research Report Isolation and resource control for cloud applications has traditionally been achieve through the use of virtual machines. Deploying applications in a VM results in reduced performance due to the extra levels of abstraction. In a cloud environment, this results in loss efficiency for the infrastructure. Newer advances in container-based virtualization simplifies the deployment of applications while isolating them from one another. In this paper, we explore the performance of traditional virtual machine deployments, and contrast them with the use of Linux containers. We use a suite of workloads that stress the CPU, memory, storage and networking resources. Our results show that containers result in equal or better performance than VM in almost all cases. Both VMs and containers require tuning to support I/O-intensive applicaions. We also discuss the implications of our performance results for future cloud architecture.},
+author = {Felter, Wes and Ferreira, Alexandre and Rajamony, Ram and Rubio, Juan},
+doi = {10.1109/ISPASS.2015.7095802},
+file = {:home/steveej/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Felter et al. - 2014 - An Updated Performance Comparison of Virtual Machines and Linux Containers(3).pdf:pdf},
+isbn = {9781479919574},
+journal = {Technology},
+keywords = {cloud computing,performance,virtualization},
+pages = {171--172},
+title = {{An Updated Performance Comparison of Virtual Machines and Linux Containers}},
+url = {http://domino.research.ibm.com/library/CyberDig.nsf/papers/0929052195DD819C85257D2300681E7B/{\$}File/rc25482.pdf},
+volume = {25482},
+year = {2014}
+}