find cloud-init on cdrom by label

Signed-off-by: Avi Deitcher <avi@deitcher.net>
Avi Deitcher
2020-04-24 11:23:51 +03:00
parent 0b488d805e
commit 3678adeca8
492 changed files with 194724 additions and 12 deletions


@@ -13,7 +13,7 @@ onboot:
     image: linuxkit/dhcpcd:2f8a9b670aa6e96a09db56ec45c9f07ef2a811ee
     command: ["/sbin/dhcpcd", "--nobackground", "-f", "/dhcpcd.conf", "-1"]
   - name: metadata
-    image: linuxkit/metadata:04ce7519c2ea2eaf99bbdc76bb01fc036eed7ab0
+    image: linuxkit/metadata:dc5bcfa45946053145391ceabe33729c8b9507a1
 services:
   - name: rngd
     image: linuxkit/rngd:7fab61cca793113280397dcee8159f35dc37adcb


@@ -11,7 +11,7 @@ init:
 onboot:
   # support metadata for optional config in /run/config
   - name: metadata
-    image: linuxkit/metadata:04ce7519c2ea2eaf99bbdc76bb01fc036eed7ab0
+    image: linuxkit/metadata:dc5bcfa45946053145391ceabe33729c8b9507a1
   - name: sysctl
     image: linuxkit/sysctl:541f60fe3676611328e89e8bac251fc636b1a6aa
   - name: sysfs


@@ -13,7 +13,7 @@ onboot:
     image: linuxkit/dhcpcd:2f8a9b670aa6e96a09db56ec45c9f07ef2a811ee
     command: ["/sbin/dhcpcd", "--nobackground", "-f", "/dhcpcd.conf", "-1"]
   - name: metadata
-    image: linuxkit/metadata:04ce7519c2ea2eaf99bbdc76bb01fc036eed7ab0
+    image: linuxkit/metadata:dc5bcfa45946053145391ceabe33729c8b9507a1
 services:
   - name: getty
     image: linuxkit/getty:48f66df198981e692084bf70ab72b9fe2be0f880


@@ -18,7 +18,7 @@ onboot:
     image: linuxkit/dhcpcd:2f8a9b670aa6e96a09db56ec45c9f07ef2a811ee
     command: ["/sbin/dhcpcd", "--nobackground", "-f", "/dhcpcd.conf", "-1"]
   - name: metadata
-    image: linuxkit/metadata:04ce7519c2ea2eaf99bbdc76bb01fc036eed7ab0
+    image: linuxkit/metadata:dc5bcfa45946053145391ceabe33729c8b9507a1
     command: ["/usr/bin/metadata", "hetzner"]
 services:
   - name: rngd


@@ -13,7 +13,7 @@ onboot:
     image: linuxkit/dhcpcd:2f8a9b670aa6e96a09db56ec45c9f07ef2a811ee
     command: ["/sbin/dhcpcd", "--nobackground", "-f", "/dhcpcd.conf", "-1"]
   - name: metadata
-    image: linuxkit/metadata:04ce7519c2ea2eaf99bbdc76bb01fc036eed7ab0
+    image: linuxkit/metadata:dc5bcfa45946053145391ceabe33729c8b9507a1
     command: ["/usr/bin/metadata", "openstack"]
 services:
   - name: rngd


@@ -18,7 +18,7 @@ onboot:
     image: linuxkit/dhcpcd:2f8a9b670aa6e96a09db56ec45c9f07ef2a811ee
     command: ["/sbin/dhcpcd", "--nobackground", "-f", "/dhcpcd.conf", "-1"]
   - name: metadata
-    image: linuxkit/metadata:04ce7519c2ea2eaf99bbdc76bb01fc036eed7ab0
+    image: linuxkit/metadata:dc5bcfa45946053145391ceabe33729c8b9507a1
     command: ["/usr/bin/metadata", "packet"]
 services:
   - name: rngd


@@ -16,7 +16,7 @@ onboot:
     image: linuxkit/dhcpcd:2f8a9b670aa6e96a09db56ec45c9f07ef2a811ee
     command: ["/sbin/dhcpcd", "--nobackground", "-f", "/dhcpcd.conf", "-1"]
   - name: metadata
-    image: linuxkit/metadata:04ce7519c2ea2eaf99bbdc76bb01fc036eed7ab0
+    image: linuxkit/metadata:dc5bcfa45946053145391ceabe33729c8b9507a1
 services:
   - name: getty
     image: linuxkit/getty:48f66df198981e692084bf70ab72b9fe2be0f880


@@ -13,7 +13,7 @@ onboot:
     image: linuxkit/dhcpcd:2f8a9b670aa6e96a09db56ec45c9f07ef2a811ee
     command: ["/sbin/dhcpcd", "--nobackground", "-f", "/dhcpcd.conf", "-1"]
   - name: metadata
-    image: linuxkit/metadata:04ce7519c2ea2eaf99bbdc76bb01fc036eed7ab0
+    image: linuxkit/metadata:dc5bcfa45946053145391ceabe33729c8b9507a1
     command: ["/usr/bin/metadata", "vultr"]
 services:
   - name: getty


@@ -1,4 +1,3 @@
dev
proc
sys
usr


@@ -2,13 +2,15 @@ package main
 import (
 	"encoding/json"
 	"flag"
 	"io/ioutil"
-	"log"
 	"os"
 	"path"
 	"strconv"
 	"strings"
 	"syscall"
+
+	log "github.com/sirupsen/logrus"
 )
 
 const (
@@ -26,6 +28,22 @@ const (
 	metaDataURL = "http://169.254.169.254/latest/meta-data/"
 )
+
+var (
+	defaultLogFormatter = &log.TextFormatter{}
+)
+
+// infoFormatter overrides the default format for Info() log events to
+// provide an easier to read output
+type infoFormatter struct {
+}
+
+func (f *infoFormatter) Format(entry *log.Entry) ([]byte, error) {
+	if entry.Level == log.InfoLevel {
+		return append([]byte(entry.Message), '\n'), nil
+	}
+	return defaultLogFormatter.Format(entry)
+}
 
 // Provider is a generic interface for metadata/userdata providers.
 type Provider interface {
 	// String should return a unique name for the Provider
@@ -49,9 +67,21 @@ var cdromProviders []Provider
 var fileProviders []Provider
 
 func main() {
+	log.SetFormatter(new(infoFormatter))
+	log.SetLevel(log.InfoLevel)
+	flagVerbose := flag.Bool("v", false, "Verbose execution")
+	flag.Parse()
+	if *flagVerbose {
+		// Switch back to the standard formatter
+		log.SetFormatter(defaultLogFormatter)
+		log.SetLevel(log.DebugLevel)
+	}
 	providers := []string{"aws", "gcp", "hetzner", "openstack", "scaleway", "vultr", "packet", "cdrom"}
-	if len(os.Args) > 1 {
-		providers = os.Args[1:]
+	args := flag.Args()
+	if len(args) > 0 {
+		providers = args
 	}
for _, p := range providers {
switch {


@@ -5,7 +5,11 @@ import (
"io/ioutil"
"path"
"path/filepath"
"strings"
"syscall"
"github.com/diskfs/go-diskfs"
log "github.com/sirupsen/logrus"
)
const (
@@ -13,6 +17,7 @@ const (
 	userdataFile     = "user-data"
 	userdataFallback = "config"
 	cdromDevs        = "/dev/sr[0-9]*"
+	blockDevs        = "/sys/class/block/*"
 )
var (
@@ -35,6 +40,14 @@ func ListCDROMs() []Provider {
 		// Glob can only error on invalid pattern
 		panic(fmt.Sprintf("Invalid glob pattern: %s", cdromDevs))
 	}
+	log.Debugf("cdrom devices to be checked: %v", cdroms)
+	// get the devices that match the cloud-init spec
+	cidevs := FindCIs()
+	log.Debugf("CIDATA devices to be checked: %v", cidevs)
+	// merge the two, ensuring that the list is unique
+	cdroms = append(cidevs, cdroms...)
+	cdroms = uniqueString(cdroms)
+	log.Debugf("unique devices to be checked: %v", cdroms)
 	providers := []Provider{}
 	for _, device := range cdroms {
 		providers = append(providers, NewCDROM(device))
@@ -42,6 +55,49 @@ func ListCDROMs() []Provider {
 	return providers
 }
+
+// FindCIs goes through all known devices. Returns any that are either fat32 or
+// iso9660 and have a filesystem label "CIDATA" or "cidata", per the spec
+// here https://github.com/canonical/cloud-init/blob/master/doc/rtd/topics/datasources/nocloud.rst
+func FindCIs() []string {
+	devs, err := filepath.Glob(blockDevs)
+	log.Debugf("block devices found: %v", devs)
+	if err != nil {
+		// Glob can only error on invalid pattern
+		panic(fmt.Sprintf("Invalid glob pattern: %s", blockDevs))
+	}
+	foundDevices := []string{}
+	for _, device := range devs {
+		// get the base device name
+		dev := filepath.Base(device)
+		// ignore loop and ram devices
+		if strings.HasPrefix(dev, "loop") || strings.HasPrefix(dev, "ram") {
+			log.Debugf("ignoring loop or ram device: %s", dev)
+			continue
+		}
+		dev = fmt.Sprintf("/dev/%s", dev)
+		log.Debugf("checking device: %s", dev)
+		// open readonly, ignore errors
+		disk, err := diskfs.OpenWithMode(dev, diskfs.ReadOnly)
+		if err != nil {
+			log.Debugf("failed to open device read-only: %s: %v", dev, err)
+			continue
+		}
+		fs, err := disk.GetFilesystem(0)
+		if err != nil {
+			log.Debugf("failed to get filesystem on partition 0 for device: %s: %v", dev, err)
+			continue
+		}
+		// get the label
+		label := strings.TrimSpace(fs.Label())
+		log.Debugf("found trimmed filesystem label for device: %s: '%s'", dev, label)
+		if label == "cidata" || label == "CIDATA" {
+			log.Debugf("adding device: %s", dev)
+			foundDevices = append(foundDevices, dev)
+		}
+	}
+	return foundDevices
+}
 
 // NewCDROM returns a new ProviderCDROM
 func NewCDROM(device string) *ProviderCDROM {
 	mountPoint, err := ioutil.TempDir("", "cd")
@@ -97,3 +153,18 @@ func (p *ProviderCDROM) mount() error {
 func (p *ProviderCDROM) unmount() {
 	_ = syscall.Unmount(p.mountPoint, 0)
 }
+
+// uniqueString returns a unique subset of the string slice provided.
+func uniqueString(input []string) []string {
+	u := make([]string, 0, len(input))
+	m := make(map[string]bool)
+	for _, val := range input {
+		if _, ok := m[val]; !ok {
+			m[val] = true
+			u = append(u, val)
+		}
+	}
+	return u
+}


@@ -1,3 +1,9 @@
github.com/diskfs/go-diskfs 29b62ddcc13e0d45cf3ce57e586585b0998bcac1
gopkg.in/djherbis/times.v1 v1.2.0
github.com/google/uuid v1.1.1
github.com/packethost/packngo 131798f2804a1b3e895ca98047d56f0d7e094e2a
github.com/sirupsen/logrus v1.0.3
github.com/vishvananda/netlink f5a6f697a596c788d474984a38a0ac4ba0719e93
github.com/vishvananda/netns 86bef332bfc3b59b7624a600bd53009ce91a9829
golang.org/x/crypto 1a580b3eff7814fc9b40602fd35256c63b50f491
golang.org/x/sys 1957bb5e6d1f523308b49060df02171d06ddfc77


@@ -0,0 +1,21 @@
MIT License
Copyright (c) 2017 Avi Deitcher
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.


@@ -0,0 +1,158 @@
# go-diskfs
go-diskfs is a [go](https://golang.org) library for manipulating disks, disk images and filesystems natively in go.
You can do nearly everything that go-diskfs provides using shell tools like gdisk/fdisk/mkfs.vfat/mtools/sgdisk/sfdisk/dd. However, these have the following limitations:
* they need to be installed on your system
* you need to fork/exec to the command (and possibly a shell) to run them
* some are difficult to run without mounting disks, which may not be possible or may be risky in your environment, and almost certainly will require root privileges
* you do not want to launch a VM to run the excellent [libguestfs](https://libguestfs.org) and it may not be installed
go-diskfs performs all modifications _natively_ in go, without mounting any disks.
## Usage
Note: detailed go documentation is available at [godoc.org](https://godoc.org/github.com/diskfs/go-diskfs).
### Concepts
`go-diskfs` has a few basic concepts:
* Disk
* Partition
* Filesystem
#### Disk
A disk represents either a file or block device that you access and manipulate. With access to the disk, you can:
* read, modify or create a partition table
* open an existing or create a new filesystem
#### Partition
A partition is a slice of a disk, beginning at one point and ending at a later one. You can have multiple partitions on a disk, and a partition table that describes how partitions are laid out on the disk.
#### Filesystem
A filesystem is a construct that gives you access to create, read and write directories and files.
You do *not* need a partitioned disk to work with a filesystem; a filesystem can span an entire `disk` or block device, or it can live inside a partition of a `disk`.
### Working With a Disk
Before you can do anything with a disk - partitions or filesystems - you need to access it.
* If you have an existing disk or image file, you `Open()` it
* If you are creating a new one, usually just disk image files, you `Create()` it
The disk will be opened read-write, with exclusive access; if either is unavailable, the open fails.
Once you have a `Disk`, you can work with partitions or filesystems in it.
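For example, a minimal sketch of the entry points (the paths and sizes here are illustrative):

```go
import diskfs "github.com/diskfs/go-diskfs"

// create a new 10 MB raw disk image; the file must not exist yet
newDisk, err := diskfs.Create("/tmp/new.img", 10*1024*1024, diskfs.Raw)

// open an existing disk or image read-write, with exclusive access
rwDisk, err := diskfs.Open("/dev/sda")

// open read-only, e.g. when you only want to inspect a device
roDisk, err := diskfs.OpenWithMode("/dev/sda", diskfs.ReadOnly)
```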
#### Partitions on a Disk
The following are the partition actions you can take on a disk:
* `GetPartitionTable()` - if one exists. Will report the table layout and type.
* `Partition()` - partition the disk, overwriting any previous table if it exists
As of this writing, supported partition formats are Master Boot Record (`mbr`) and GUID Partition Table (`gpt`).
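As a sketch, here is how you might open an existing image read-only and print its partition layout (the image path is illustrative):

```go
d, err := diskfs.OpenWithMode("/tmp/disk.img", diskfs.ReadOnly)
if err != nil {
	log.Fatal(err)
}
table, err := d.GetPartitionTable()
if err != nil {
	log.Fatal(err)
}
// the returned partition.Table exposes its individual partitions
for i, p := range table.GetPartitions() {
	fmt.Printf("partition %d: start byte %d, size %d\n", i+1, p.GetStart(), p.GetSize())
}
```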
#### Filesystems on a Disk
Once you have a valid disk, and optionally partition, you can access filesystems on that disk image or partition.
* `CreateFilesystem()` - create a filesystem in an individual partition or the entire disk
* `GetFilesystem()` - access an existing filesystem in a partition or the entire disk
As of this writing, supported filesystems include `FAT32` and `ISO9660` (a.k.a. `.iso`).
With a filesystem in hand, you can create, access and modify directories and files.
* `Mkdir()` - make a directory in a filesystem
* `Readdir()` - read all of the entries in a directory
* `OpenFile()` - open a file for read, optionally write, create and append
Note that `OpenFile()` is intended to match [os.OpenFile](https://golang.org/pkg/os/#OpenFile) and returns a `godiskfs.File` that closely matches [os.File](https://golang.org/pkg/os/#File).
With a `File` in hand, you then can:
* `Write(p []byte)` to the file
* `Read(b []byte)` from the file
* `Seek(offset int64, whence int)` to set the next read or write to an offset in the file
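Putting the read path together, here is a sketch that pulls a single file out of an existing image; the device path and file name are illustrative (this is essentially what a cloud-init `cidata` reader does):

```go
d, err := diskfs.OpenWithMode("/dev/sr0", diskfs.ReadOnly)
if err != nil {
	log.Fatal(err)
}
// partition 0 means "the whole device", i.e. no partition table
fs, err := d.GetFilesystem(0)
if err != nil {
	log.Fatal(err)
}
f, err := fs.OpenFile("/user-data", os.O_RDONLY)
if err != nil {
	log.Fatal(err)
}
data, err := ioutil.ReadAll(f) // the returned file satisfies io.Reader
fmt.Printf("%s\n", data)
```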
### Read-Only Filesystems
Some filesystem types are intended to be created once, after which they are read-only, for example `ISO9660`/`.iso` and `squashfs`.
`godiskfs` recognizes read-only filesystems and limits working with them to the following:
* You can `GetFilesystem()` a read-only filesystem and do all read activities, but cannot write to them. Any attempt to `Mkdir()` or `OpenFile()` in write/append/create modes or `Write()` to the file will result in an error.
* You can `CreateFilesystem()` a read-only filesystem and write anything to it that you want. It will do all of its work in a "scratch" area, or temporary "workspace" directory on your local filesystem. When you are ready to complete it, you call `Finalize()`, after which it becomes read-only. If you forget to `Finalize()` it, you get... nothing. The `Finalize()` function exists only on read-only filesystems.
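A sketch of that create-then-finalize flow for an `.iso` follows; the `FinalizeOptions` literal and the concrete `*iso9660.FileSystem` type assertion are assumptions about the `iso9660` package, so check its godoc:

```go
d, err := diskfs.Create("/tmp/cidata.iso", 5*1024*1024, diskfs.Raw)
fs, err := d.CreateFilesystem(disk.FilesystemSpec{Partition: 0, FSType: filesystem.TypeISO9660})
rw, err := fs.OpenFile("/user-data", os.O_CREATE|os.O_RDWR)
_, err = rw.Write([]byte("#cloud-config\n"))
// Finalize() lives on the concrete read-only filesystem type (assumption)
if iso, ok := fs.(*iso9660.FileSystem); ok {
	err = iso.Finalize(iso9660.FinalizeOptions{})
}
```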
### Example
There are examples in the [examples/](./examples/) directory. Here is one to get you started.
The following example will create a fully bootable EFI disk image. It assumes you have a bootable EFI file (any modern Linux kernel compiled with `CONFIG_EFI_STUB=y` will work) available.
```go
import (
	"io/ioutil"
	"os"

	diskfs "github.com/diskfs/go-diskfs"
	"github.com/diskfs/go-diskfs/disk"
	"github.com/diskfs/go-diskfs/filesystem"
	"github.com/diskfs/go-diskfs/partition/gpt"
)

espSize := 100 * 1024 * 1024             // 100 MB
diskSize := int64(espSize + 4*1024*1024) // 104 MB

// create a disk image
diskImg := "/tmp/disk.img"
d, err := diskfs.Create(diskImg, diskSize, diskfs.Raw)

// create a partition table
blkSize := 512
partitionSectors := espSize / blkSize
partitionStart := 2048
partitionEnd := partitionSectors - partitionStart + 1
table := &gpt.Table{
	Partitions: []*gpt.Partition{
		{Start: uint64(partitionStart), End: uint64(partitionEnd), Type: gpt.EFISystemPartition, Name: "EFI System"},
	},
}

// apply the partition table
err = d.Partition(table)

/*
 * create an ESP partition with some contents
 */
kernel, err := ioutil.ReadFile("/some/kernel/file")

fs, err := d.CreateFilesystem(disk.FilesystemSpec{Partition: 1, FSType: filesystem.TypeFat32})

// make our directories
err = fs.Mkdir("/EFI/BOOT")
rw, err := fs.OpenFile("/EFI/BOOT/BOOTX64.EFI", os.O_CREATE|os.O_RDWR)

_, err = rw.Write(kernel)
```
## Tests
There are two ways to run tests: unit and integration (somewhat loosely defined).
* Unit: these tests run entirely within the go process, primarily test unexported and some exported functions, and may use pre-defined test fixtures in a directory's `testdata/` subdirectory. By default, these are run by running `go test ./...` or just `make unit_test`.
* Integration: these test the exported functions and their ability to create or manipulate correct files. They are validated by running a [docker](https://docker.com) container with the right utilities to validate the output. These are run by running `TEST_IMAGE=diskfs/godiskfs go test ./...` or just `make test`. The value of `TEST_IMAGE` will be the image to use to run tests.
For integration tests to work, the correct docker image must be available. You can create it by running `make image`. Check the [Makefile](./Makefile) to see the `docker build` command used to create it. Running `make test` automatically creates the image for you.
### Integration Test Image
The integration test image contains the various tools necessary to test images: `mtools`, `fdisk`, `gdisk`, etc. It works on precisely one file at a time. In order to avoid docker volume mounting limitations with various OSes, instead of mounting the image `-v`, it expects to receive the image as a `stdin` stream, and saves it internally to the container as `/file.img`.
For example, to test the existence of directory `/abc` on file `$PWD/foo.img`:
```
cat $PWD/foo.img | docker run -i --rm $INT_IMAGE mdir -i /file.img /abc
```
## Plans
Future plans are to add the following:
* embed boot code in `mbr` e.g. `altmbr.bin` (no need for `gpt` since an ESP with `/EFI/BOOT/BOOT<arch>.EFI` will boot)
* `ext4` filesystem
* `Joliet` extensions to `iso9660`
* `Rock Ridge` sparse file support - supports the flag, but not yet reading or writing
* `squashfs` filesystem
* `qcow` disk format


@@ -0,0 +1,233 @@
// Package disk provides utilities for working directly with a disk
//
// Most of the provided functions are intelligent wrappers around implementations of
// github.com/diskfs/go-diskfs/partition and github.com/diskfs/go-diskfs/filesystem
package disk
import (
"errors"
"fmt"
"io"
"os"
log "github.com/sirupsen/logrus"
"github.com/diskfs/go-diskfs/filesystem"
"github.com/diskfs/go-diskfs/filesystem/fat32"
"github.com/diskfs/go-diskfs/filesystem/iso9660"
"github.com/diskfs/go-diskfs/partition"
)
// Disk is a reference to a single disk block device or image that has been Create() or Open()
type Disk struct {
File *os.File
Info os.FileInfo
Type Type
Size int64
LogicalBlocksize int64
PhysicalBlocksize int64
Table partition.Table
Writable bool
DefaultBlocks bool
}
// Type represents the type of disk this is
type Type int
const (
// File is a file-based disk image
File Type = iota
// Device is an OS-managed block device
Device
)
var (
errIncorrectOpenMode = errors.New("disk file or device not open for write")
)
// GetPartitionTable retrieves a PartitionTable for a Disk
//
// returns an error if the Disk is invalid or does not exist, or the partition table is unknown
func (d *Disk) GetPartitionTable() (partition.Table, error) {
return partition.Read(d.File, int(d.LogicalBlocksize), int(d.PhysicalBlocksize))
}
// Partition applies a partition.Table implementation to a Disk
//
// The Table can have zero, one or more Partitions, each of which is unique to its
// implementation. E.g. MBR partitions in mbr.Table look different from GPT partitions in gpt.Table
//
// Actual writing of the table is delegated to the individual implementation
func (d *Disk) Partition(table partition.Table) error {
if !d.Writable {
return errIncorrectOpenMode
}
// fill in the uuid
err := table.Write(d.File, d.Size)
if err != nil {
return fmt.Errorf("Failed to write partition table: %v", err)
}
d.Table = table
// the partition table needs to be re-read only if
// the disk file is an actual block device
if d.Type == Device {
err = d.ReReadPartitionTable()
if err != nil {
return fmt.Errorf("Unable to re-read the partition table. Kernel still uses old partition table: %v", err)
}
}
return nil
}
// WritePartitionContents writes the contents of an io.Reader to a given partition
//
// if successful, returns the number of bytes written
//
// returns an error if there was an error writing to the disk, reading from the reader, the table
// is invalid, or the partition is invalid
func (d *Disk) WritePartitionContents(partition int, reader io.Reader) (int64, error) {
if !d.Writable {
return -1, errIncorrectOpenMode
}
if d.Table == nil {
return -1, fmt.Errorf("cannot write contents of a partition on a disk without a partition table")
}
if partition < 0 {
return -1, fmt.Errorf("cannot write contents of a partition without specifying a partition")
}
partitions := d.Table.GetPartitions()
// API indexes from 1, but slice from 0
if partition > len(partitions) {
return -1, fmt.Errorf("cannot write contents of partition %d which is greater than max partition %d", partition, len(partitions))
}
written, err := partitions[partition-1].WriteContents(d.File, reader)
return int64(written), err
}
// ReadPartitionContents reads the contents of a partition to an io.Writer
//
// if successful, returns the number of bytes read
//
// returns an error if there was an error reading from the disk, writing to the writer, the table
// is invalid, or the partition is invalid
func (d *Disk) ReadPartitionContents(partition int, writer io.Writer) (int64, error) {
if d.Table == nil {
return -1, fmt.Errorf("cannot read contents of a partition on a disk without a partition table")
}
if partition < 0 {
return -1, fmt.Errorf("cannot read contents of a partition without specifying a partition")
}
partitions := d.Table.GetPartitions()
// API indexes from 1, but slice from 0
if partition > len(partitions) {
return -1, fmt.Errorf("cannot read contents of partition %d which is greater than max partition %d", partition, len(partitions))
}
return partitions[partition-1].ReadContents(d.File, writer)
}
// FilesystemSpec represents the specification of a filesystem to be created
type FilesystemSpec struct {
Partition int
FSType filesystem.Type
VolumeLabel string
}
// CreateFilesystem creates a filesystem on a disk image, the equivalent of mkfs.
//
// Required:
// * desired partition number, or 0 to create the filesystem on the entire block device or
// disk image,
// * the filesystem type from github.com/diskfs/go-diskfs/filesystem
//
// Optional:
// * volume label for those filesystems that support it; under Linux this shows
// in '/dev/disk/by-label/<label>'
//
// if successful, returns a filesystem-implementing structure for the given filesystem type
//
// returns error if there was an error creating the filesystem, or the partition table is invalid and did not
// request the entire disk.
func (d *Disk) CreateFilesystem(spec FilesystemSpec) (filesystem.FileSystem, error) {
// find out where the partition starts and ends, or if it is the entire disk
var (
size, start int64
)
switch {
case !d.Writable:
return nil, errIncorrectOpenMode
case spec.Partition == 0:
size = d.Size
start = 0
case d.Table == nil:
return nil, fmt.Errorf("cannot create filesystem on a partition without a partition table")
default:
partitions := d.Table.GetPartitions()
// API indexes from 1, but slice from 0
partition := spec.Partition - 1
if spec.Partition > len(partitions) {
return nil, fmt.Errorf("cannot create filesystem on partition %d greater than maximum partition %d", spec.Partition, len(partitions))
}
size = partitions[partition].GetSize()
start = partitions[partition].GetStart()
}
switch spec.FSType {
case filesystem.TypeFat32:
return fat32.Create(d.File, size, start, d.LogicalBlocksize, spec.VolumeLabel)
case filesystem.TypeISO9660:
return iso9660.Create(d.File, size, start, d.LogicalBlocksize)
default:
return nil, errors.New("Unknown filesystem type requested")
}
}
// GetFilesystem gets the filesystem that already exists on a disk image
//
// pass the desired partition number, or 0 to create the filesystem on the entire block device / disk image,
//
// if successful, returns a filesystem-implementing structure for the given filesystem type
//
// returns error if there was an error reading the filesystem, or the partition table is invalid and did not
// request the entire disk.
func (d *Disk) GetFilesystem(partition int) (filesystem.FileSystem, error) {
// find out where the partition starts and ends, or if it is the entire disk
var (
size, start int64
err error
)
switch {
case partition == 0:
size = d.Size
start = 0
case d.Table == nil:
return nil, fmt.Errorf("cannot read filesystem on a partition without a partition table")
default:
partitions := d.Table.GetPartitions()
// API indexes from 1, but slice from 0
if partition > len(partitions) {
return nil, fmt.Errorf("cannot get filesystem on partition %d greater than maximum partition %d", partition, len(partitions))
}
size = partitions[partition-1].GetSize()
start = partitions[partition-1].GetStart()
}
// just try each type
log.Debug("trying fat32")
fat32FS, err := fat32.Read(d.File, size, start, d.LogicalBlocksize)
if err == nil {
return fat32FS, nil
}
log.Debugf("fat32 failed: %v", err)
pbs := d.PhysicalBlocksize
if d.DefaultBlocks {
pbs = 0
}
log.Debugf("trying iso9660 with physical block size %d", pbs)
iso9660FS, err := iso9660.Read(d.File, size, start, pbs)
if err == nil {
return iso9660FS, nil
}
log.Debugf("iso9660 failed: %v", err)
return nil, fmt.Errorf("Unknown filesystem on partition %d", partition)
}


@@ -0,0 +1,26 @@
// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris
package disk
import (
"fmt"
"golang.org/x/sys/unix"
)
const (
BLKRRPART = 0x125f
)
// ReReadPartitionTable forces the kernel to re-read the partition table
// on the disk.
//
// It is done via an ioctl call with request as BLKRRPART.
func (d *Disk) ReReadPartitionTable() error {
fd := d.File.Fd()
_, err := unix.IoctlGetInt(int(fd), BLKRRPART)
if err != nil {
return fmt.Errorf("Unable to re-read partition table: %v", err)
}
return nil
}


@@ -0,0 +1,10 @@
package disk
// ReReadPartitionTable is used to re-read the partition table
// on the disk.
//
// On Windows a forced re-read is not done; the method simply returns nil when
// invoked
func (d *Disk) ReReadPartitionTable() error {
return nil
}


@@ -0,0 +1,322 @@
// Package diskfs implements methods for creating and manipulating disks and filesystems
//
// It provides methods for creating and manipulating disks and filesystems, whether block devices
// in /dev or direct disk images. This does **not**
// mount any disks or filesystems, neither directly locally nor via a VM. Instead, it manipulates the
// bytes directly.
//
// This is not intended as a replacement for operating system filesystem and disk drivers. Instead,
// it is intended to make it easy to work with partitions, partition tables and filesystems directly
// without requiring operating system mounts.
//
// Some examples:
//
// 1. Create a disk image of size 10MB with a FAT32 filesystem spanning the entire disk.
//
// 	import (
// 		diskfs "github.com/diskfs/go-diskfs"
// 		"github.com/diskfs/go-diskfs/disk"
// 		"github.com/diskfs/go-diskfs/filesystem"
// 	)
// 	size := int64(10 * 1024 * 1024) // 10 MB
//
// 	diskImg := "/tmp/disk.img"
// 	d, err := diskfs.Create(diskImg, size, diskfs.Raw)
//
// 	fs, err := d.CreateFilesystem(disk.FilesystemSpec{Partition: 0, FSType: filesystem.TypeFat32})
//
// 2. Create a disk of size 20MB with an MBR partition table, a single partition beginning at block 2048 (1MB),
// of size 10MB filled with a FAT32 filesystem.
//
// 	import (
// 		diskfs "github.com/diskfs/go-diskfs"
// 		"github.com/diskfs/go-diskfs/disk"
// 		"github.com/diskfs/go-diskfs/filesystem"
// 		"github.com/diskfs/go-diskfs/partition/mbr"
// 	)
//
// 	diskSize := int64(20 * 1024 * 1024) // 20 MB
//
// 	diskImg := "/tmp/disk.img"
// 	d, err := diskfs.Create(diskImg, diskSize, diskfs.Raw)
//
// 	table := &mbr.Table{
// 		LogicalSectorSize:  512,
// 		PhysicalSectorSize: 512,
// 		Partitions: []*mbr.Partition{
// 			{
// 				Bootable: false,
// 				Type:     mbr.Linux,
// 				Start:    2048,
// 				Size:     20480,
// 			},
// 		},
// 	}
// 	err = d.Partition(table)
//
// 	fs, err := d.CreateFilesystem(disk.FilesystemSpec{Partition: 1, FSType: filesystem.TypeFat32})
//
// 3. Create a disk of size 20MB with a GPT partition table, a single partition beginning at block 2048 (1MB),
// of size 10MB, and fill with the contents from the 10MB file "/root/contents.dat"
//
// 	import (
// 		diskfs "github.com/diskfs/go-diskfs"
// 		"github.com/diskfs/go-diskfs/partition/gpt"
// 	)
//
// 	diskSize := int64(20 * 1024 * 1024) // 20 MB
//
// 	diskImg := "/tmp/disk.img"
// 	d, err := diskfs.Create(diskImg, diskSize, diskfs.Raw)
//
// 	table := &gpt.Table{
// 		LogicalSectorSize:  512,
// 		PhysicalSectorSize: 512,
// 		ProtectiveMBR:      true,
// 		Partitions: []*gpt.Partition{
// 			{
// 				Start: 2048,
// 				End:   22527, // 10 MB of 512-byte sectors
// 				Type:  gpt.LinuxFilesystem,
// 				Name:  "data",
// 			},
// 		},
// 	}
// 	err = d.Partition(table)
//
// 	f, err := os.Open("/root/contents.dat")
// 	written, err := d.WritePartitionContents(1, f)
//
// 4. Create a disk of size 20MB with an MBR partition table, a single partition beginning at block 2048 (1MB),
// of size 10MB filled with a FAT32 filesystem, and create some directories and files in that filesystem.
//
// 	import (
// 		diskfs "github.com/diskfs/go-diskfs"
// 		"github.com/diskfs/go-diskfs/disk"
// 		"github.com/diskfs/go-diskfs/filesystem"
// 		"github.com/diskfs/go-diskfs/partition/mbr"
// 	)
//
// 	diskSize := int64(20 * 1024 * 1024) // 20 MB
//
// 	diskImg := "/tmp/disk.img"
// 	d, err := diskfs.Create(diskImg, diskSize, diskfs.Raw)
//
// 	table := &mbr.Table{
// 		LogicalSectorSize:  512,
// 		PhysicalSectorSize: 512,
// 		Partitions: []*mbr.Partition{
// 			{
// 				Bootable: false,
// 				Type:     mbr.Linux,
// 				Start:    2048,
// 				Size:     20480,
// 			},
// 		},
// 	}
// 	err = d.Partition(table)
//
// 	fs, err := d.CreateFilesystem(disk.FilesystemSpec{Partition: 1, FSType: filesystem.TypeFat32})
// 	err = fs.Mkdir("/FOO/BAR")
// 	rw, err := fs.OpenFile("/FOO/BAR/AFILE.EXE", os.O_CREATE|os.O_RDWR)
// 	b := make([]byte, 1024)
// 	rand.Read(b)
// 	_, err = rw.Write(b)
//
package diskfs
import (
"errors"
"fmt"
"io"
"os"
log "github.com/sirupsen/logrus"
"golang.org/x/sys/unix"
"github.com/diskfs/go-diskfs/disk"
)
// when we use a disk image with a GPT, we cannot get the logical sector size from the disk via the kernel
// so we use the default sector size of 512, per Rod Smith
const (
defaultBlocksize, firstblock int = 512, 2048
blksszGet = 0x1268
blkbszGet = 0x80081270
)
// Format represents the format of the disk
type Format int
const (
// Raw disk format for basic raw disk
Raw Format = iota
)
// OpenModeOption represents file open modes
type OpenModeOption int
const (
// ReadOnly open file in read only mode
ReadOnly OpenModeOption = iota
// ReadWriteExclusive open file in read-write exclusive mode
ReadWriteExclusive
)
// String returns a human-readable name for an OpenModeOption
func (m OpenModeOption) String() string {
switch m {
case ReadOnly:
return "read-only"
case ReadWriteExclusive:
return "read-write exclusive"
default:
return "unknown"
}
}
var openModeOptions = map[OpenModeOption]int{
ReadOnly: os.O_RDONLY,
ReadWriteExclusive: os.O_RDWR | os.O_EXCL,
}
func writableMode(mode OpenModeOption) bool {
m, ok := openModeOptions[mode]
if ok {
if m&os.O_RDWR != 0 || m&os.O_WRONLY != 0 {
return true
}
}
return false
}
func initDisk(f *os.File, openMode OpenModeOption) (*disk.Disk, error) {
var (
diskType disk.Type
size int64
lblksize = int64(defaultBlocksize)
pblksize = int64(defaultBlocksize)
defaultBlocks = true
)
log.Debug("initDisk(): start")
// get device information
devInfo, err := f.Stat()
if err != nil {
return nil, fmt.Errorf("could not get info for device %s: %x", f.Name(), err)
}
mode := devInfo.Mode()
switch {
case mode.IsRegular():
log.Debug("initDisk(): regular file")
diskType = disk.File
size = devInfo.Size()
if size <= 0 {
return nil, fmt.Errorf("could not get file size for device %s", f.Name())
}
case mode&os.ModeDevice != 0:
log.Debug("initDisk(): block device")
diskType = disk.Device
file, err := os.Open(f.Name())
if err != nil {
return nil, fmt.Errorf("error opening block device %s: %s\n", f.Name(), err)
}
size, err = file.Seek(0, io.SeekEnd)
if err != nil {
return nil, fmt.Errorf("error seeking to end of block device %s: %s\n", f.Name(), err)
}
lblksize, pblksize, err = getSectorSizes(f)
log.Debugf("initDisk(): logical block size %d, physical block size %d", lblksize, pblksize)
defaultBlocks = false
if err != nil {
return nil, fmt.Errorf("Unable to get block sizes for device %s: %v", f.Name(), err)
}
default:
return nil, fmt.Errorf("device %s is neither a block device nor a regular file", f.Name())
}
// how many good blocks do we have?
//var goodBlocks, orphanedBlocks int
//goodBlocks = size / lblksize
writable := writableMode(openMode)
return &disk.Disk{
File: f,
Info: devInfo,
Type: diskType,
Size: size,
LogicalBlocksize: lblksize,
PhysicalBlocksize: pblksize,
Writable: writable,
DefaultBlocks: defaultBlocks,
}, nil
}
func checkDevice(device string) error {
if device == "" {
return errors.New("must pass device name")
}
if _, err := os.Stat(device); os.IsNotExist(err) {
return fmt.Errorf("provided device %s does not exist", device)
}
return nil
}
// Open a Disk from a path to a device in read-write exclusive mode
// Should pass a path to a block device e.g. /dev/sda or a path to a file /tmp/foo.img
// The provided device must exist at the time you call Open()
func Open(device string) (*disk.Disk, error) {
err := checkDevice(device)
if err != nil {
return nil, err
}
f, err := os.OpenFile(device, os.O_RDWR|os.O_EXCL, 0600)
if err != nil {
return nil, fmt.Errorf("Could not open device %s exclusively for writing", device)
}
// return our disk
return initDisk(f, ReadWriteExclusive)
}
// OpenWithMode open a Disk from a path to a device with a given open mode
// If the device is open in read-only mode, operations to change disk partitioning will
// return an error
// Should pass a path to a block device e.g. /dev/sda or a path to a file /tmp/foo.img
// The provided device must exist at the time you call OpenWithMode()
func OpenWithMode(device string, mode OpenModeOption) (*disk.Disk, error) {
err := checkDevice(device)
if err != nil {
return nil, err
}
m, ok := openModeOptions[mode]
if !ok {
return nil, errors.New("unsupported file open mode")
}
f, err := os.OpenFile(device, m, 0600)
if err != nil {
return nil, fmt.Errorf("Could not open device %s with mode %v: %v", device, mode, err)
}
// return our disk
return initDisk(f, mode)
}
// Create a Disk from a path to a device
// Should pass a path to a block device e.g. /dev/sda or a path to a file /tmp/foo.img
// The provided device must not exist at the time you call Create()
func Create(device string, size int64, format Format) (*disk.Disk, error) {
if device == "" {
return nil, errors.New("must pass device name")
}
if size <= 0 {
return nil, errors.New("must pass valid device size to create")
}
f, err := os.OpenFile(device, os.O_RDWR|os.O_EXCL|os.O_CREATE, 0666)
if err != nil {
return nil, fmt.Errorf("Could not create device %s", device)
}
err = os.Truncate(device, size)
if err != nil {
return nil, fmt.Errorf("Could not expand device %s to size %d", device, size)
}
// return our disk
return initDisk(f, ReadWriteExclusive)
}
// to get the logical and physical sector sizes
func getSectorSizes(f *os.File) (int64, int64, error) {
/*
ioctl(fd, BLKBSZGET, &physicalsectsize);
*/
fd := f.Fd()
logicalSectorSize, err := unix.IoctlGetInt(int(fd), blksszGet)
if err != nil {
return 0, 0, fmt.Errorf("Unable to get device logical sector size: %v", err)
}
physicalSectorSize, err := unix.IoctlGetInt(int(fd), blkbszGet)
if err != nil {
return 0, 0, fmt.Errorf("Unable to get device physical sector size: %v", err)
}
return int64(logicalSectorSize), int64(physicalSectorSize), nil
}


@@ -0,0 +1,94 @@
package fat32
import (
"time"
)
// Directory represents a single directory in a FAT32 filesystem
type Directory struct {
directoryEntry
entries []*directoryEntry
}
// dirEntriesFromBytes loads the directory entries from the raw bytes
func (d *Directory) entriesFromBytes(b []byte, f *FileSystem) error {
entries, err := parseDirEntries(b, f)
if err != nil {
return err
}
d.entries = entries
return nil
}
// entriesToBytes convert our entries to raw bytes
func (d *Directory) entriesToBytes(bytesPerCluster int) ([]byte, error) {
b := make([]byte, 0)
for _, de := range d.entries {
b2, err := de.toBytes()
if err != nil {
return nil, err
}
b = append(b, b2...)
}
remainder := len(b) % bytesPerCluster
extra := bytesPerCluster - remainder
zeroes := make([]byte, extra, extra)
b = append(b, zeroes...)
return b, nil
}
// createEntry creates an entry in the given directory, and returns the handle to it
func (d *Directory) createEntry(name string, cluster uint32, dir bool) (*directoryEntry, error) {
// is it a long filename or a short filename?
var isLFN bool
// TODO: convertLfnSfn does not calculate if the short name conflicts and thus should increment the last character
// that should happen here, once we can look in the directory entry
shortName, extension, isLFN, _ := convertLfnSfn(name)
lfn := ""
if isLFN {
lfn = name
}
// allocate a slot for the new filename in the existing directory
entry := directoryEntry{
filenameLong: lfn,
longFilenameSlots: -1, // indicate that we do not know how many slots, which will force a recalculation
filenameShort: shortName,
fileExtension: extension,
fileSize: uint32(0),
clusterLocation: cluster,
filesystem: d.filesystem,
createTime: time.Now(),
modifyTime: time.Now(),
accessTime: time.Now(),
isSubdirectory: dir,
isNew: true,
}
entry.longFilenameSlots = calculateSlots(entry.filenameLong)
d.entries = append(d.entries, &entry)
return &entry, nil
}
// createVolumeLabel create a volume label entry in the given directory, and return the handle to it
func (d *Directory) createVolumeLabel(name string) (*directoryEntry, error) {
// allocate a slot for the new filename in the existing directory
entry := directoryEntry{
filenameLong: "",
longFilenameSlots: -1, // indicate that we do not know how many slots, which will force a recalculation
filenameShort: name[:8],
fileExtension: name[8:11],
fileSize: uint32(0),
clusterLocation: 0,
filesystem: d.filesystem,
createTime: time.Now(),
modifyTime: time.Now(),
accessTime: time.Now(),
isSubdirectory: false,
isNew: true,
isVolumeLabel: true,
}
d.entries = append(d.entries, &entry)
return &entry, nil
}


@@ -0,0 +1,447 @@
package fat32
import (
"encoding/binary"
"fmt"
"regexp"
"strings"
"time"
)
// AccessRights is the byte mask representing access rights to a FAT file
type accessRights uint16
// AccessRightsUnlimited represents unrestricted access
const (
accessRightsUnlimited accessRights = 0x0000
charsPerSlot int = 13
)
// directoryEntry is a single directory entry
type directoryEntry struct {
filenameShort string
fileExtension string
filenameLong string
isReadOnly bool
isHidden bool
isSystem bool
isVolumeLabel bool
isSubdirectory bool
isArchiveDirty bool
isDevice bool
lowercaseShortname bool
lowercaseExtension bool
createTime time.Time
modifyTime time.Time
accessTime time.Time
acccessRights accessRights
clusterLocation uint32
fileSize uint32
filesystem *FileSystem
longFilenameSlots int
isNew bool
}
func (de *directoryEntry) toBytes() ([]byte, error) {
b := make([]byte, 0, bytesPerSlot)
// do we have a long filename?
if de.filenameLong != "" {
lfnBytes, err := longFilenameBytes(de.filenameLong, de.filenameShort, de.fileExtension)
if err != nil {
return nil, fmt.Errorf("Could not convert long filename to directory entries: %v", err)
}
b = append(b, lfnBytes...)
}
// this is for the regular 8.3 entry
dosBytes := make([]byte, bytesPerSlot, bytesPerSlot)
createDate, createTime := timeToDateTime(de.createTime)
modifyDate, modifyTime := timeToDateTime(de.modifyTime)
accessDate, _ := timeToDateTime(de.accessTime)
binary.LittleEndian.PutUint16(dosBytes[14:16], createTime)
binary.LittleEndian.PutUint16(dosBytes[16:18], createDate)
binary.LittleEndian.PutUint16(dosBytes[18:20], accessDate)
binary.LittleEndian.PutUint16(dosBytes[22:24], modifyTime)
binary.LittleEndian.PutUint16(dosBytes[24:26], modifyDate)
// convert the short filename and extension to ascii bytes
shortName, err := stringToASCIIBytes(fmt.Sprintf("% -8s", de.filenameShort))
if err != nil {
return nil, fmt.Errorf("Error converting short filename to bytes: %v", err)
}
// convert the short filename and extension to ascii bytes
extension, err := stringToASCIIBytes(fmt.Sprintf("% -3s", de.fileExtension))
if err != nil {
return nil, fmt.Errorf("Error converting file extension to bytes: %v", err)
}
copy(dosBytes[0:8], shortName)
copy(dosBytes[8:11], extension)
binary.LittleEndian.PutUint32(dosBytes[28:32], de.fileSize)
clusterLocation := make([]byte, 4, 4)
binary.LittleEndian.PutUint32(clusterLocation, de.clusterLocation)
dosBytes[26] = clusterLocation[0]
dosBytes[27] = clusterLocation[1]
dosBytes[20] = clusterLocation[2]
dosBytes[21] = clusterLocation[3]
// set the flags
if de.isVolumeLabel {
dosBytes[11] = dosBytes[11] | 0x08
}
if de.isSubdirectory {
dosBytes[11] = dosBytes[11] | 0x10
}
if de.isArchiveDirty {
dosBytes[11] = dosBytes[11] | 0x20
}
if de.lowercaseExtension {
dosBytes[12] = dosBytes[12] | 0x04
}
if de.lowercaseShortname {
dosBytes[12] = dosBytes[12] | 0x08
}
b = append(b, dosBytes...)
return b, nil
}
// parseDirEntries takes all of the bytes in a special file (i.e. a directory)
// and gets all of the DirectoryEntry for that directory
// this is, essentially, the equivalent of `ls -l` or if you prefer `dir`
func parseDirEntries(b []byte, f *FileSystem) ([]*directoryEntry, error) {
dirEntries := make([]*directoryEntry, 0, 20)
// parse the data into Fat32DirectoryEntry
lfn := ""
// this should be used to count the LFN entries and that they make sense
//lfnCount := 0
byteLoop:
for i := 0; i < len(b); i += 32 {
// is this the beginning of all empty entries?
switch b[i+0] {
case 0:
// need to break "byteLoop", else break would only break out of the switch
break byteLoop
case 0xe5:
continue
}
// is this an LFN entry?
if b[i+11] == 0x0f {
// check if this is the last logical / first physical and how many there are
if b[i]&0x40 == 0x40 {
lfn = ""
//lfnCount = int(b[i] & 0xf)
}
// parse the long filename
tmpLfn, err := longFilenameEntryFromBytes(b[i : i+32])
// an error is impossible since we pass exactly 32, but we leave the handler here anyways
if err != nil {
return nil, fmt.Errorf("Error parsing long filename at position %d: %v", i, err)
}
lfn = tmpLfn + lfn
continue
}
// not LFN, so parse regularly
createTime := binary.LittleEndian.Uint16(b[i+14 : i+16])
createDate := binary.LittleEndian.Uint16(b[i+16 : i+18])
accessDate := binary.LittleEndian.Uint16(b[i+18 : i+20])
modifyTime := binary.LittleEndian.Uint16(b[i+22 : i+24])
modifyDate := binary.LittleEndian.Uint16(b[i+24 : i+26])
re := regexp.MustCompile("[ ]+$")
sfn := re.ReplaceAllString(string(b[i:i+8]), "")
extension := re.ReplaceAllString(string(b[i+8:i+11]), "")
isSubdirectory := b[i+11]&0x10 == 0x10
isArchiveDirty := b[i+11]&0x20 == 0x20
isVolumeLabel := b[i+11]&0x08 == 0x08
lowercaseShortname := b[i+12]&0x08 == 0x08
lowercaseExtension := b[i+12]&0x04 == 0x04
entry := directoryEntry{
filenameLong: lfn,
longFilenameSlots: calculateSlots(lfn),
filenameShort: sfn,
fileExtension: extension,
fileSize: binary.LittleEndian.Uint32(b[i+28 : i+32]),
clusterLocation: binary.LittleEndian.Uint32(append(b[i+26:i+28], b[i+20:i+22]...)),
createTime: dateTimeToTime(createDate, createTime),
modifyTime: dateTimeToTime(modifyDate, modifyTime),
accessTime: dateTimeToTime(accessDate, 0),
isSubdirectory: isSubdirectory,
isArchiveDirty: isArchiveDirty,
isVolumeLabel: isVolumeLabel,
lowercaseShortname: lowercaseShortname,
lowercaseExtension: lowercaseExtension,
}
lfn = ""
dirEntries = append(dirEntries, &entry)
}
return dirEntries, nil
}
func dateTimeToTime(d uint16, t uint16) time.Time {
year := int(d>>9) + 1980
month := time.Month((d >> 5) & 0x0f)
date := int(d & 0x1f)
second := int((t & 0x1f) * 2)
minute := int((t >> 5) & 0x3f)
hour := int(t >> 11)
return time.Date(year, month, date, hour, minute, second, 0, time.UTC)
}
func timeToDateTime(t time.Time) (uint16, uint16) {
year := t.Year()
month := int(t.Month())
date := t.Day()
second := t.Second()
minute := t.Minute()
hour := t.Hour()
retDate := (year-1980)<<9 + (month << 5) + date
retTime := hour<<11 + minute<<5 + (second / 2)
return uint16(retDate), uint16(retTime)
}
func longFilenameBytes(s, shortName, extension string) ([]byte, error) {
// we need the checksum of the short name
checksum, err := lfnChecksum(shortName, extension)
if err != nil {
return nil, fmt.Errorf("Could not calculate checksum for 8.3 filename: %v", err)
}
// should be multiple of exactly 32 bytes
slots := calculateSlots(s)
// convert our string into runes
r := []rune(s)
b2SlotLength := maxCharsLongFilename * 2
maxChars := slots * maxCharsLongFilename
b2 := make([]byte, 0, maxChars*2)
// convert the rune slice into a byte slice with 2 bytes per rune
// vfat long filenames support UCS-2 *only*
// so it is *very* important we do not try to parse them otherwise
for i := 0; i < maxChars; i++ {
// do we have a rune at this point?
var tmpb []byte
switch {
case i == len(r):
tmpb = []byte{0x00, 0x00}
case i > len(r):
tmpb = []byte{0xff, 0xff}
default:
val := uint16(r[i])
// little endian
tmpb = []byte{byte(val & 0x00ff), byte(val >> 8)}
}
b2 = append(b2, tmpb...)
}
// this makes our byte array
maxBytes := slots * bytesPerSlot
b := make([]byte, 0, maxBytes)
// now just place the bytes in the right places
for count := slots; count > 0; count-- {
// how far from the start of the byte slice?
offset := (count - 1) * b2SlotLength
// enter the right bytes in the right places
tmpb := make([]byte, 0, 32)
// first byte is our index
tmpb = append(tmpb, byte(count))
// next 10 bytes are 5 chars of data
tmpb = append(tmpb, b2[offset:offset+10]...)
// next is a single byte indicating LFN, followed by single byte 0x00
tmpb = append(tmpb, 0x0f, 0x00)
// next is checksum
tmpb = append(tmpb, checksum)
// next 12 bytes are 6 chars of data
tmpb = append(tmpb, b2[offset+10:offset+22]...)
// next are 2 bytes of 0x00
tmpb = append(tmpb, 0x00, 0x00)
// next are 4 bytes, last 2 chars of LFN
tmpb = append(tmpb, b2[offset+22:offset+26]...)
b = append(b, tmpb...)
}
// the first byte should have bit 6 set
b[0] = b[0] | 0x40
return b, nil
}
// longFilenameEntryFromBytes takes a single slice of 32 bytes and extracts the long filename component from it
func longFilenameEntryFromBytes(b []byte) (string, error) {
// should be exactly 32 bytes
bLen := len(b)
if bLen != 32 {
return "", fmt.Errorf("longFilenameEntryFromBytes only can parse byte of length 32, not %d", bLen)
}
b2 := make([]byte, 0, maxCharsLongFilename*2)
// strip out the unused ones
b2 = append(b2, b[1:11]...)
b2 = append(b2, b[14:26]...)
b2 = append(b2, b[28:32]...)
// parse the bytes of the long filename
// vfat long filenames support UCS-2 *only*
// so it is *very* important we do not try to parse them otherwise
r := make([]rune, 0, maxCharsLongFilename)
// now we can iterate
for i := 0; i < maxCharsLongFilename; i++ {
// little endian
val := uint16(b2[2*i+1])<<8 + uint16(b2[2*i])
// stop at all 0
if val == 0 {
break
}
r = append(r, rune(val))
}
return string(r), nil
}
// takes the short form of the name and checksums it
// the period between the 8 characters and the 3 character extension is dropped
// any unused chars are replaced by space ASCII 0x20
func lfnChecksum(name, extension string) (byte, error) {
nameBytes, err := stringToValidASCIIBytes(name)
if err != nil {
return 0x00, fmt.Errorf("Invalid shortname character in filename: %s", name)
}
extensionBytes, err := stringToValidASCIIBytes(extension)
if err != nil {
return 0x00, fmt.Errorf("Invalid shortname character in extension: %s", extension)
}
// now make sure we don't have too many - and fill in blanks
length := len(nameBytes)
if length > 8 {
return 0x00, fmt.Errorf("Short name for file is longer than allowed 8 bytes: %s", name)
}
for i := 8; i > length; i-- {
nameBytes = append(nameBytes, 0x20)
}
length = len(extensionBytes)
if length > 3 {
return 0x00, fmt.Errorf("Extension for file is longer than allowed 3 bytes: %s", extension)
}
for i := 3; i > length; i-- {
extensionBytes = append(extensionBytes, 0x20)
}
b := append(nameBytes, extensionBytes...)
// calculate the checksum
var sum byte = 0x00
for i := 11; i > 0; i-- {
sum = ((sum & 0x01) << 7) + (sum >> 1) + b[11-i]
}
return sum, nil
}
// convert a string to ascii bytes, but only accept valid 8.3 bytes
func stringToValidASCIIBytes(s string) ([]byte, error) {
b, err := stringToASCIIBytes(s)
if err != nil {
return b, err
}
// now make sure every byte is valid
for _, b2 := range b {
// only valid chars - 0-9, A-Z, _, ~
if (0x30 <= b2 && b2 <= 0x39) || (0x41 <= b2 && b2 <= 0x5a) || (b2 == 0x5f) || (b2 == 0x7e) {
continue
}
return nil, fmt.Errorf("Invalid 8.3 character")
}
return b, nil
}
// convert a string to a byte array, if all characters are valid ascii
func stringToASCIIBytes(s string) ([]byte, error) {
length := len(s)
b := make([]byte, length, length)
// convert the name into 11 bytes
r := []rune(s)
// take the first 8 characters
for i := 0; i < length; i++ {
val := int(r[i])
// we only can handle values less than max byte = 255
if val > 255 {
return nil, fmt.Errorf("Non-ASCII character in name: %s", s)
}
b[i] = byte(val)
}
return b, nil
}
// calculate how many vfat slots a long filename takes up
// this does NOT include the slot for the true DOS 8.3 entry
func calculateSlots(s string) int {
sLen := len(s)
slots := sLen / charsPerSlot
if sLen%charsPerSlot != 0 {
slots++
}
return slots
}
// convert LFN to short name
// returns shortName, extension, isLFN, isTruncated
// isLFN : was there an LFN that had to be converted
// isTruncated : was the shortname longer than 8 chars and had to be converted?
func convertLfnSfn(name string) (string, string, bool, bool) {
isLFN, isTruncated := false, false
// get last period in name
lastDot := strings.LastIndex(name, ".")
// now convert it
var shortName, extension, rawShortName, rawExtension string
rawShortName = name
// get the extension
if lastDot > -1 {
rawExtension = name[lastDot+1:]
// too long?
if len(rawExtension) > 3 {
rawExtension = rawExtension[0:3]
isLFN = true
}
// convert the extension
extension = uCaseValid(rawExtension)
}
if extension != rawExtension {
isLFN = true
}
// convert the short name
if lastDot > -1 {
rawShortName = name[:lastDot]
}
shortName = uCaseValid(rawShortName)
if rawShortName != shortName {
isLFN = true
}
// convert shortName to 8 chars
if len(shortName) > 8 {
isLFN = true
isTruncated = true
shortName = shortName[:6] + "~" + "1"
}
return shortName, extension, isLFN, isTruncated
}
// converts a string into upper-case with only valid characters
func uCaseValid(name string) string {
// easiest way to do this is to go through the name one char at a time
r := []rune(name)
r2 := make([]rune, 0, len(r))
for _, val := range r {
switch {
case (0x30 <= val && val <= 0x39) || (0x41 <= val && val <= 0x5a) || (val == 0x7e):
// naturally valid characters
r2 = append(r2, val)
case (0x61 <= val && val <= 0x7a):
// lower-case characters should be upper-cased
r2 = append(r2, val-32)
case val == ' ' || val == '.':
// remove spaces and periods
continue
default:
// replace the rest with _
r2 = append(r2, '_')
}
}
return string(r2)
}


@@ -0,0 +1,5 @@
// Package fat32 provides utilities to interact with, manipulate and create a FAT32 filesystem on a block device or
// a disk image.
//
//
package fat32


@@ -0,0 +1,55 @@
package fat32
import (
"encoding/binary"
"errors"
"fmt"
)
// Dos20BPB is a DOS 2.0 BIOS Parameter Block structure
type dos20BPB struct {
bytesPerSector SectorSize // BytesPerSector is bytes in each sector - always should be 512
sectorsPerCluster uint8 // SectorsPerCluster is number of sectors per cluster
reservedSectors uint16 // ReservedSectors is number of reserved sectors
fatCount uint8 // FatCount is total number of FAT tables in the filesystem
rootDirectoryEntries uint16 // RootDirectoryEntries is maximum number of FAT12 or FAT16 root directory entries; must be 0 for FAT32
totalSectors uint16 // TotalSectors is total number of sectors in the filesystem
mediaType uint8 // MediaType is the type of media, mostly unused
sectorsPerFat uint16 // SectorsPerFat is number of sectors per each table
}
// Dos20BPBFromBytes reads the DOS 2.0 BIOS Parameter Block from a slice of exactly 13 bytes
func dos20BPBFromBytes(b []byte) (*dos20BPB, error) {
if b == nil || len(b) != 13 {
return nil, errors.New("cannot read DOS 2.0 BPB from invalid byte slice, must be precisely 13 bytes ")
}
bpb := dos20BPB{}
// make sure we have a valid sector size
sectorSize := binary.LittleEndian.Uint16(b[0:2])
if sectorSize != uint16(SectorSize512) {
return nil, fmt.Errorf("Invalid sector size %d provided in DOS 2.0 BPB. Must be %d", sectorSize, SectorSize512)
}
bpb.bytesPerSector = SectorSize512
bpb.sectorsPerCluster = uint8(b[2])
bpb.reservedSectors = binary.LittleEndian.Uint16(b[3:5])
bpb.fatCount = uint8(b[5])
bpb.rootDirectoryEntries = binary.LittleEndian.Uint16(b[6:8])
bpb.totalSectors = binary.LittleEndian.Uint16(b[8:10])
bpb.mediaType = uint8(b[10])
bpb.sectorsPerFat = binary.LittleEndian.Uint16(b[11:13])
return &bpb, nil
}
// ToBytes returns the bytes for a DOS 2.0 BIOS Parameter Block, ready to be written to disk
func (bpb *dos20BPB) toBytes() ([]byte, error) {
b := make([]byte, 13, 13)
binary.LittleEndian.PutUint16(b[0:2], uint16(bpb.bytesPerSector))
b[2] = bpb.sectorsPerCluster
binary.LittleEndian.PutUint16(b[3:5], bpb.reservedSectors)
b[5] = bpb.fatCount
binary.LittleEndian.PutUint16(b[6:8], bpb.rootDirectoryEntries)
binary.LittleEndian.PutUint16(b[8:10], bpb.totalSectors)
b[10] = bpb.mediaType
binary.LittleEndian.PutUint16(b[11:13], bpb.sectorsPerFat)
return b, nil
}


@@ -0,0 +1,63 @@
package fat32
import (
"encoding/binary"
"errors"
"fmt"
)
// dos331BPB is the DOS 3.31 BIOS Parameter Block
type dos331BPB struct {
dos20BPB *dos20BPB // Dos20BPB holds the embedded DOS 2.0 BPB
sectorsPerTrack uint16 // SectorsPerTrack is number of sectors per track. May be unused when LBA-only access is in place, but should store some value for safety.
heads uint16 // Heads is the number of heads. May be unused when LBA-only access is in place, but should store some value for safety. Maximum 255.
hiddenSectors uint32 // HiddenSectors is the number of hidden sectors preceding the partition that contains the FAT volume. Should be 0 on non-partitioned media.
totalSectors uint32 // TotalSectors is the total sectors if too many to fit into the DOS 2.0 BPB TotalSectors. In practice, if the DOS 2.0 TotalSectors is 0 and this is non-zero, use this one. For partitioned media, this and the DOS 2.0 BPB entry may be zero, and should retrieve information from each partition. For FAT32 systems, both also can be zero, even on non-partitioned, and use FileSystemType in DOS 7.1 EBPB as a 64-bit TotalSectors instead.
}
func (bpb *dos331BPB) equal(a *dos331BPB) bool {
if (bpb == nil && a != nil) || (a == nil && bpb != nil) {
return false
}
if bpb == nil && a == nil {
return true
}
return *bpb.dos20BPB == *a.dos20BPB &&
bpb.sectorsPerTrack == a.sectorsPerTrack &&
bpb.heads == a.heads &&
bpb.hiddenSectors == a.hiddenSectors &&
bpb.totalSectors == a.totalSectors
}
// dos331BPBFromBytes reads the DOS 3.31 BIOS Parameter Block from a slice of exactly 25 bytes
func dos331BPBFromBytes(b []byte) (*dos331BPB, error) {
if b == nil || len(b) != 25 {
return nil, errors.New("cannot read DOS 3.31 BPB from invalid byte slice, must be precisely 25 bytes ")
}
bpb := dos331BPB{}
dos20bpb, err := dos20BPBFromBytes(b[0:13])
if err != nil {
return nil, fmt.Errorf("Error reading embedded DOS 2.0 BPB: %v", err)
}
bpb.dos20BPB = dos20bpb
bpb.sectorsPerTrack = binary.LittleEndian.Uint16(b[13:15])
bpb.heads = binary.LittleEndian.Uint16(b[15:17])
bpb.hiddenSectors = binary.LittleEndian.Uint32(b[17:21])
bpb.totalSectors = binary.LittleEndian.Uint32(b[21:25])
return &bpb, nil
}
// ToBytes returns the bytes for a DOS 3.31 BIOS Parameter Block, ready to be written to disk
func (bpb *dos331BPB) toBytes() ([]byte, error) {
b := make([]byte, 25, 25)
dos20Bytes, err := bpb.dos20BPB.toBytes()
if err != nil {
return nil, fmt.Errorf("Error converting embedded DOS 2.0 BPB to bytes: %v", err)
}
copy(b[0:13], dos20Bytes)
binary.LittleEndian.PutUint16(b[13:15], bpb.sectorsPerTrack)
binary.LittleEndian.PutUint16(b[15:17], bpb.heads)
binary.LittleEndian.PutUint32(b[17:21], bpb.hiddenSectors)
binary.LittleEndian.PutUint32(b[21:25], bpb.totalSectors)
return b, nil
}


@@ -0,0 +1,184 @@
package fat32
import (
"encoding/binary"
"errors"
"fmt"
"regexp"
)
const (
// ShortDos71EBPB indicates that a DOS 7.1 EBPB is of the short 60-byte format
shortDos71EBPB uint8 = 0x28
// LongDos71EBPB indicates that a DOS 7.1 EBPB is of the long 79-byte format
longDos71EBPB uint8 = 0x29
)
const (
// FileSystemTypeFAT32 is the fixed string representation for the FAT32 filesystem type
fileSystemTypeFAT32 string = "FAT32 "
)
// FatVersion is the version of the FAT filesystem
type fatVersion uint16
const (
// FatVersion0 represents version 0 of FAT, the only acceptable version
fatVersion0 fatVersion = 0
)
const (
// FirstRemovableDrive is first removable drive
firstRemovableDrive uint8 = 0x00
// FirstFixedDrive is first fixed drive
firstFixedDrive uint8 = 0x80
)
// Dos71EBPB is the DOS 7.1 Extended BIOS Parameter Block
type dos71EBPB struct {
dos331BPB *dos331BPB // Dos331BPB holds the embedded DOS 3.31 BIOS Parameter Block
sectorsPerFat uint32 // SectorsPerFat is number of sectors per each table
mirrorFlags uint16 // MirrorFlags determines how FAT mirroring is done. If bit 7 is set, use bits 3-0 to determine active number of FATs (zero-based); if bit 7 is clear, use normal FAT mirroring
version fatVersion // Version is the version of the FAT, must be 0
rootDirectoryCluster uint32 // RootDirectoryCluster is the cluster containing the filesystem root directory, normally 2
fsInformationSector uint16 // FSInformationSector holds the sector which contains the primary DOS 7.1 Filesystem Information Cluster
backupFSInfoSector uint16 // BackupFSInfoSector holds the sector which contains the backup DOS 7.1 Filesystem Information Cluster
bootFileName [12]byte // BootFileName is reserved and should be all 0x00
driveNumber uint8 // DriveNumber is the code for the relative position and type of this drive in the system
reservedFlags uint8 // ReservedFlags are flags used by the operating system and/or BIOS for various purposes, e.g. Windows NT CHKDSK status, OS/2 desired drive letter, etc.
extendedBootSignature uint8 // ExtendedBootSignature contains the flag as to whether this is a short (60-byte) or long (79-byte) DOS 7.1 EBPB
volumeSerialNumber uint32 // VolumeSerialNumber usually generated by some form of date and time
volumeLabel string // VolumeLabel, an arbitrary 11-byte string
fileSystemType string // FileSystemType is the 8-byte string holding the name of the file system type
}
func (bpb *dos71EBPB) equal(a *dos71EBPB) bool {
if (bpb == nil && a != nil) || (a == nil && bpb != nil) {
return false
}
if bpb == nil && a == nil {
return true
}
return bpb.dos331BPB.equal(a.dos331BPB) &&
bpb.sectorsPerFat == a.sectorsPerFat &&
bpb.mirrorFlags == a.mirrorFlags &&
bpb.version == a.version &&
bpb.rootDirectoryCluster == a.rootDirectoryCluster &&
bpb.fsInformationSector == a.fsInformationSector &&
bpb.backupFSInfoSector == a.backupFSInfoSector &&
bpb.bootFileName == a.bootFileName &&
bpb.driveNumber == a.driveNumber &&
bpb.reservedFlags == a.reservedFlags &&
bpb.extendedBootSignature == a.extendedBootSignature &&
bpb.volumeSerialNumber == a.volumeSerialNumber &&
bpb.volumeLabel == a.volumeLabel &&
bpb.fileSystemType == a.fileSystemType
}
// Dos71EBPBFromBytes reads the FAT32 Extended BIOS Parameter Block from a slice of bytes
// these bytes are assumed to start at the beginning of the EBPB and must be exactly 60 or 79 bytes long
// the parsed size is returned as well, because the calling function knows where the EBPB starts, but not necessarily where it ends
func dos71EBPBFromBytes(b []byte) (*dos71EBPB, int, error) {
if b == nil || (len(b) != 60 && len(b) != 79) {
return nil, 0, errors.New("cannot read DOS 7.1 EBPB from invalid byte slice, must be precisely 60 or 79 bytes")
}
bpb := dos71EBPB{}
size := 0
// extract the embedded DOS 3.31 BPB
dos331bpb, err := dos331BPBFromBytes(b[0:25])
if err != nil {
return nil, 0, fmt.Errorf("Could not read embedded DOS 3.31 BPB: %v", err)
}
bpb.dos331BPB = dos331bpb
bpb.sectorsPerFat = binary.LittleEndian.Uint32(b[25:29])
bpb.mirrorFlags = binary.LittleEndian.Uint16(b[29:31])
version := binary.LittleEndian.Uint16(b[31:33])
if version != uint16(fatVersion0) {
return nil, size, fmt.Errorf("Invalid FAT32 version found: %v", version)
}
bpb.version = fatVersion0
bpb.rootDirectoryCluster = binary.LittleEndian.Uint32(b[33:37])
bpb.fsInformationSector = binary.LittleEndian.Uint16(b[37:39])
bpb.backupFSInfoSector = binary.LittleEndian.Uint16(b[39:41])
bootFileName := b[41:53]
copy(bpb.bootFileName[:], bootFileName)
bpb.driveNumber = uint8(b[53])
bpb.reservedFlags = uint8(b[54])
extendedSignature := uint8(b[55])
bpb.extendedBootSignature = extendedSignature
// is this a longer or shorter one
bpb.volumeSerialNumber = binary.BigEndian.Uint32(b[56:60])
switch extendedSignature {
case shortDos71EBPB:
size = 60
case longDos71EBPB:
size = 79
// remove padding from each
re := regexp.MustCompile("[ ]+$")
bpb.volumeLabel = re.ReplaceAllString(string(b[60:71]), "")
bpb.fileSystemType = re.ReplaceAllString(string(b[71:79]), "")
default:
return nil, size, fmt.Errorf("Unknown DOS 7.1 EBPB Signature: %v", extendedSignature)
}
return &bpb, size, nil
}
// ToBytes returns the Extended BIOS Parameter Block in a slice of bytes directly ready to
// write to disk
func (bpb *dos71EBPB) toBytes() ([]byte, error) {
var b []byte
// how many bytes is it? for extended, add the extended-specific stuff
switch bpb.extendedBootSignature {
case shortDos71EBPB:
b = make([]byte, 60, 60)
case longDos71EBPB:
b = make([]byte, 79, 79)
// do we have a valid volume label?
label := bpb.volumeLabel
if len(label) > 11 {
return nil, fmt.Errorf("Invalid volume label: too long at %d characters, maximum is %d", len(label), 11)
}
labelR := []rune(label)
if len(label) != len(labelR) {
return nil, fmt.Errorf("Invalid volume label: non-ascii characters")
}
// pad with 0x20 = " "
copy(b[60:71], []byte(fmt.Sprintf("%-11s", label)))
// do we have a valid filesystem type?
fstype := bpb.fileSystemType
if len(fstype) > 8 {
return nil, fmt.Errorf("Invalid filesystem type: too long at %d characters, maximum is %d", len(fstype), 8)
}
fstypeR := []rune(fstype)
if len(fstype) != len(fstypeR) {
return nil, fmt.Errorf("Invalid filesystem type: non-ascii characters")
}
// pad with 0x20 = " "
copy(b[71:79], []byte(fmt.Sprintf("%-8s", fstype)))
default:
return nil, fmt.Errorf("Unknown DOS 7.1 EBPB Signature: %v", bpb.extendedBootSignature)
}
// fill in the common parts
dos331Bytes, err := bpb.dos331BPB.toBytes()
if err != nil {
return nil, fmt.Errorf("Error converting embedded DOS 3.31 BPB to bytes: %v", err)
}
copy(b[0:25], dos331Bytes)
binary.LittleEndian.PutUint32(b[25:29], bpb.sectorsPerFat)
binary.LittleEndian.PutUint16(b[29:31], bpb.mirrorFlags)
binary.LittleEndian.PutUint16(b[31:33], uint16(bpb.version))
binary.LittleEndian.PutUint32(b[33:37], bpb.rootDirectoryCluster)
binary.LittleEndian.PutUint16(b[37:39], bpb.fsInformationSector)
binary.LittleEndian.PutUint16(b[39:41], bpb.backupFSInfoSector)
copy(b[41:53], bpb.bootFileName[:])
b[53] = bpb.driveNumber
b[54] = bpb.reservedFlags
b[55] = bpb.extendedBootSignature
binary.BigEndian.PutUint32(b[56:60], bpb.volumeSerialNumber)
return b, nil
}
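Because the EBPB length is only knowable from the signature byte at offset 55, a caller slicing it out of a boot sector can peek at that byte first. A minimal hedged sketch (hypothetical in-package helper):

// hypothetical helper: select the 60- or 79-byte EBPB slice from a
// 512-byte boot sector; the EBPB begins at offset 11
func ebpbSlice(sector []byte) ([]byte, error) {
	switch sector[11+55] {
	case shortDos71EBPB:
		return sector[11 : 11+60], nil
	case longDos71EBPB:
		return sector[11 : 11+79], nil
	default:
		return nil, fmt.Errorf("unknown DOS 7.1 EBPB signature: %v", sector[11+55])
	}
}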

View File

@@ -0,0 +1,850 @@
package fat32
import (
"errors"
"fmt"
"os"
"path"
"sort"
"strings"
"time"
"github.com/diskfs/go-diskfs/filesystem"
"github.com/diskfs/go-diskfs/util"
)
// MsdosMediaType is the (mostly unused) media type. However, we provide and export the known constants for it.
type MsdosMediaType uint8
const (
// Media8InchDrDos for single-sided 250KB DR-DOS disks
Media8InchDrDos MsdosMediaType = 0xe5
// Media525InchTandy for 5.25 inch floppy disks for Tandy
Media525InchTandy MsdosMediaType = 0xed
// MediaCustomPartitionsDrDos for non-standard custom DR-DOS partitions utilizing non-standard BPB formats
MediaCustomPartitionsDrDos MsdosMediaType = 0xee
// MediaCustomSuperFloppyDrDos for non-standard custom superfloppy disks for DR-DOS
MediaCustomSuperFloppyDrDos MsdosMediaType = 0xef
// Media35Inch for standard 1.44MB and 2.88MB 3.5 inch floppy disks
Media35Inch MsdosMediaType = 0xf0
// MediaDoubleDensityAltos for double-density floppy disks for Altos only
MediaDoubleDensityAltos MsdosMediaType = 0xf4
// MediaFixedDiskAltos for fixed disk 1.95MB for Altos only
MediaFixedDiskAltos MsdosMediaType = 0xf5
// MediaFixedDisk for standard fixed disks - can be used for any partitioned fixed or removable media where the geometry is defined in the BPB
MediaFixedDisk MsdosMediaType = 0xf8
)
// SectorSize indicates what the sector size in bytes is
type SectorSize uint16
const (
// SectorSize512 is a sector size of 512 bytes, used as the logical size for all FAT filesystems
SectorSize512 SectorSize = 512
maxClusterSize int = 128
minClusterSize int = 65529
bytesPerSlot int = 32
maxCharsLongFilename int = 13
)
// FileSystem implements the FileSystem interface
type FileSystem struct {
bootSector msDosBootSector
fsis FSInformationSector
table table
dataStart uint32
bytesPerCluster int
size int64
start int64
file util.File
}
// Equal compare if two filesystems are equal
func (fs *FileSystem) Equal(a *FileSystem) bool {
localMatch := fs.file == a.file && fs.dataStart == a.dataStart && fs.bytesPerCluster == a.bytesPerCluster
tableMatch := fs.table.equal(&a.table)
bsMatch := fs.bootSector.equal(&a.bootSector)
fsisMatch := fs.fsis == a.fsis
return localMatch && tableMatch && bsMatch && fsisMatch
}
// Create creates a FAT32 filesystem in a given file or device
//
// requires the util.File where to create the filesystem, size is the size of the filesystem in bytes,
// start is how far in bytes from the beginning of the util.File to create the filesystem,
// and blocksize is the logical blocksize to use for creating the filesystem
//
// note that you are *not* required to create the filesystem on the entire disk. You could have a disk of size
// 20GB, and create a small filesystem of size 50MB that begins 2GB into the disk.
// This is extremely useful for creating filesystems on disk partitions.
//
// Note, however, that it is much easier to do this using the higher-level APIs at github.com/diskfs/go-diskfs
// which allow you to work directly with partitions, rather than having to calculate (and hopefully not make any errors)
// where a partition starts and ends.
//
// If the provided blocksize is 0, it will use the default of 512 bytes. If it is any number other than 0
// or 512, it will return an error.
func Create(f util.File, size int64, start int64, blocksize int64, volumeLabel string) (*FileSystem, error) {
if volumeLabel == "" {
volumeLabel = "NO NAME"
}
// ensure the volumeLabel is proper sized
volumeLabel = fmt.Sprintf("%-11.11s", volumeLabel)
// blocksize must be <= 0 (meaning use the default) or exactly SectorSize512; anything else is an error
if blocksize != int64(SectorSize512) && blocksize > 0 {
return nil, fmt.Errorf("blocksize for FAT32 must be either 512 bytes or 0, not %d", blocksize)
}
if size > Fat32MaxSize {
return nil, fmt.Errorf("requested size is larger than maximum allowed FAT32, requested %d, maximum %d", size, Fat32MaxSize)
}
if size < blocksize*4 {
return nil, fmt.Errorf("requested size is smaller than minimum allowed FAT32, requested %d minimum %d", size, blocksize*4)
}
// FAT filesystems use time-of-day of creation as a volume ID
now := time.Now()
// because we like the fudges other people did for uniqueness
volid := uint32(now.Unix()<<20 | (now.UnixNano() / 1000000))
fsisPrimarySector := uint16(1)
fsisBackupSector := uint16(6)
/*
size calculations
we have the total size of the disk from `size uint64`
we have the blocksize fixed at SectorSize512
so we can calculate diskSectors = size/512
we know the number of reserved sectors is 32
so the number of non-reserved sectors: data + FAT = diskSectors - 32
now we need to figure out cluster size. The allowed number of:
sectors per cluster: 1, 2, 4, 8, 16, 32, 64, 128
bytes per cluster: 512, 1024, 2048, 4096, 8192, 16384, 32768, 65536
since FAT32 uses the least significant 28 bits of a 4-byte entry (uint32) as pointers to a cluster,
the maximum cluster pointer address of a FAT32 entry is 268,435,456. However, several
entries are reserved, notably 0x0FFFFFF7-0x0FFFFFFF flag bad cluster to end of file,
0x0000000 flags an empty cluster, and 0x0000001 is not used, so we only have
a potential 268,435,444 pointer entries
the maximum size of a disk for FAT32 is 16 sectors per cluster = 8KB/cluster * 268435444 = ~2TB
Follow Microsoft's `format` command as per http://www.win.tue.nl/~aeb/linux/fs/fat/fatgen103.pdf p. 20.
Thanks to github.com/dosfstools/dosfstools for the link
Filesystem size / cluster size
<= 260M / 1 sector = 512 bytes
<= 8G / 8 sector = 4096 bytes
<= 16G / 32 sector = 16384 bytes
<= 32G / 64 sector = 32768 bytes
> 32G / 128 sector = 65536 bytes
*/
var sectorsPerCluster uint8
switch {
case size <= 260*MB:
sectorsPerCluster = 1
case size <= 8*GB:
sectorsPerCluster = 8
case size <= 16*GB:
sectorsPerCluster = 32
case size <= 32*GB:
sectorsPerCluster = 64
case size <= Fat32MaxSize:
sectorsPerCluster = 128
}
// stick with uint32 and round down
totalSectors := uint32(size / int64(SectorSize512))
reservedSectors := uint16(32)
dataSectors := totalSectors - uint32(reservedSectors)
totalClusters := dataSectors / uint32(sectorsPerCluster)
// FAT uses 4 bytes per cluster pointer
// so a 512 byte sector can store 512/4 = 128 pointer entries
// therefore sectors per FAT = totalClusters / 128, rounded up so the last clusters are not left unmapped
sectorsPerFat := uint16((totalClusters + 127) / 128)
// what is our FAT ID / Media Type?
mediaType := uint8(MediaFixedDisk)
fatIDbase := uint32(0x0f << 24)
fatID := fatIDbase + 0xffff00 + uint32(mediaType)
// we need an Extended BIOS Parameter Block
dos20bpb := dos20BPB{
sectorsPerCluster: sectorsPerCluster,
reservedSectors: reservedSectors,
fatCount: 2,
totalSectors: 0,
mediaType: mediaType,
bytesPerSector: SectorSize512,
rootDirectoryEntries: 0,
sectorsPerFat: 0,
}
// some fake logic for heads, since everything is LBA access anyways
dos331bpb := dos331BPB{
dos20BPB: &dos20bpb,
totalSectors: totalSectors,
heads: 1,
sectorsPerTrack: 1,
hiddenSectors: 0,
}
ebpb := dos71EBPB{
dos331BPB: &dos331bpb,
version: fatVersion0,
rootDirectoryCluster: 2,
fsInformationSector: fsisPrimarySector,
backupFSInfoSector: fsisBackupSector,
bootFileName: [12]byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
extendedBootSignature: longDos71EBPB,
volumeSerialNumber: volid,
volumeLabel: fmt.Sprintf("%-11.11s", volumeLabel), // "NO NAME "
fileSystemType: fileSystemTypeFAT32,
mirrorFlags: 0,
reservedFlags: 0,
driveNumber: 128,
sectorsPerFat: uint32(sectorsPerFat),
}
// we need a new boot sector
bs := msDosBootSector{
oemName: "godiskfs",
jumpInstruction: [3]byte{0xeb, 0x58, 0x90},
bootCode: []byte{},
biosParameterBlock: &ebpb,
}
/*
err := bs.write(f)
if err != nil {
return nil, fmt.Errorf("Error writing MS-DOS Boot Sector: %v", err)
}
*/
b, err := bs.toBytes()
if err != nil {
return nil, fmt.Errorf("Error converting MS-DOS Boot Sector to bytes: %v", err)
}
// write to the file
count, err := f.WriteAt(b, 0+int64(start))
if err != nil {
return nil, fmt.Errorf("Error writing MS-DOS Boot Sector to disk: %v", err)
}
if count != int(SectorSize512) {
return nil, fmt.Errorf("Wrote %d bytes of MS-DOS Boot Sector to disk instead of expected %d", count, SectorSize512)
}
// boot sector is in place
// create and allocate FAT32 FSInformationSector
fsis := FSInformationSector{
lastAllocatedCluster: 0xffffffff,
freeDataClustersCount: 0xffffffff,
}
fsisBytes, err := fsis.toBytes()
if err != nil {
return nil, fmt.Errorf("Could not create a valid byte stream for a FAT32 Filesystem Information Sector: %v", err)
}
fsisPrimary := int64(fsisPrimarySector * uint16(SectorSize512))
fsisBackup := int64(fsisBackupSector * uint16(SectorSize512))
if _, err := f.WriteAt(fsisBytes, fsisPrimary+int64(start)); err != nil {
return nil, fmt.Errorf("Error writing primary FS Information Sector to disk: %v", err)
}
if _, err := f.WriteAt(fsisBytes, fsisBackup+int64(start)); err != nil {
return nil, fmt.Errorf("Error writing backup FS Information Sector to disk: %v", err)
}
// write FAT tables
eocMarker := uint32(0x0fffffff)
fatPrimaryStart := reservedSectors * uint16(SectorSize512)
fatSize := uint32(sectorsPerFat) * uint32(SectorSize512)
fatSecondaryStart := uint64(fatPrimaryStart) + uint64(fatSize)
maxCluster := fatSize / 4
rootDirCluster := uint32(2)
fat := table{
fatID: fatID,
eocMarker: eocMarker,
size: fatSize,
rootDirCluster: rootDirCluster,
clusters: map[uint32]uint32{
// when we start, there is just one directory with a single cluster
rootDirCluster: eocMarker,
},
maxCluster: maxCluster,
}
fatBytes, err := fat.bytes()
if err != nil {
return nil, fmt.Errorf("Error converting FAT32 table into bytes: %v", err)
}
_, err = f.WriteAt(fatBytes, int64(fatPrimaryStart)+int64(start))
if err != nil {
return nil, fmt.Errorf("Unable to write primary FAT table: %v", err)
}
_, err = f.WriteAt(fatBytes, int64(fatSecondaryStart)+int64(start))
if err != nil {
return nil, fmt.Errorf("Unable to write backup FAT table: %v", err)
}
// where does our data start?
dataStart := uint32(fatSecondaryStart) + fatSize
// create root directory
// there is nothing in there
fs := &FileSystem{
bootSector: bs,
fsis: fsis,
table: fat,
dataStart: dataStart,
bytesPerCluster: int(sectorsPerCluster) * int(SectorSize512),
start: start,
size: size,
file: f,
}
// be sure to zero out the root cluster, so we do not pick up phantom
// entries.
clusterStart := fs.start + int64(fs.dataStart)
// length of cluster in bytes
tmpb := make([]byte, fs.bytesPerCluster)
// zero out the root directory cluster
written, err := f.WriteAt(tmpb, clusterStart)
if err != nil {
return nil, fmt.Errorf("failed to zero out root directory: %v", err)
}
if written != len(tmpb) {
return nil, fmt.Errorf("incomplete zero out of root directory, wrote %d bytes instead of expected %d for cluster size %d", written, len(tmpb), fs.bytesPerCluster)
}
// create a volumelabel entry in the root directory
rootDir := &Directory{
directoryEntry: directoryEntry{
clusterLocation: uint32(fs.table.rootDirCluster),
isSubdirectory: true,
filesystem: fs,
},
}
_, err = fs.mkLabel(rootDir, volumeLabel)
if err != nil {
return nil, fmt.Errorf("failed to create volume label root directory entry '%s': %v", volumeLabel, err)
}
// write the root directory entries to disk
err = fs.writeDirectoryEntries(rootDir)
if err != nil {
return nil, fmt.Errorf("Error writing root directory to disk: %v", err)
}
return fs, nil
}
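A hedged usage sketch for Create: the image name and 100MB size are illustrative, and *os.File is assumed to satisfy util.File since it provides ReadAt, WriteAt, and Seek:

package main

import (
	"log"
	"os"

	"github.com/diskfs/go-diskfs/filesystem/fat32"
)

func main() {
	size := int64(100 * 1024 * 1024) // illustrative 100MB image
	f, err := os.OpenFile("disk.img", os.O_RDWR|os.O_CREATE, 0644)
	if err != nil {
		log.Fatalf("could not open image: %v", err)
	}
	defer f.Close()
	if err := f.Truncate(size); err != nil {
		log.Fatalf("could not size image: %v", err)
	}
	// filesystem starts at byte 0 of the image; blocksize 0 selects the 512-byte default
	fs, err := fat32.Create(f, size, 0, 0, "MYLABEL")
	if err != nil {
		log.Fatalf("could not create filesystem: %v", err)
	}
	if err := fs.Mkdir("/boot/grub"); err != nil {
		log.Fatalf("could not make directory: %v", err)
	}
}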
// Read reads a filesystem from a given disk.
//
// requires the util.File where to read the filesystem, size is the size of the filesystem in bytes,
// start is how far in bytes from the beginning of the util.File the filesystem is expected to begin,
// and blocksize is the logical blocksize to use for reading the filesystem
//
// note that you are *not* required to read a filesystem on the entire disk. You could have a disk of size
// 20GB, and a small filesystem of size 50MB that begins 2GB into the disk.
// This is extremely useful for working with filesystems on disk partitions.
//
// Note, however, that it is much easier to do this using the higher-level APIs at github.com/diskfs/go-diskfs
// which allow you to work directly with partitions, rather than having to calculate (and hopefully not make any errors)
// where a partition starts and ends.
//
// If the provided blocksize is 0, it will use the default of 512 bytes. If it is any number other than 0
// or 512, it will return an error.
func Read(file util.File, size int64, start int64, blocksize int64) (*FileSystem, error) {
// blocksize must be <= 0 (meaning use the default) or exactly SectorSize512; anything else is an error
if blocksize != int64(SectorSize512) && blocksize > 0 {
return nil, fmt.Errorf("blocksize for FAT32 must be either 512 bytes or 0, not %d", blocksize)
}
if size > Fat32MaxSize {
return nil, fmt.Errorf("requested size is larger than maximum allowed FAT32 size %d", Fat32MaxSize)
}
if size < blocksize*4 {
return nil, fmt.Errorf("requested size is smaller than minimum allowed FAT32 size %d", blocksize*4)
}
// load the information from the disk
// read first 512 bytes from the file
bsb := make([]byte, SectorSize512, SectorSize512)
n, err := file.ReadAt(bsb, start)
if err != nil {
return nil, fmt.Errorf("Could not read bytes from file: %v", err)
}
if uint16(n) < uint16(SectorSize512) {
return nil, fmt.Errorf("Only could read %d bytes from file", n)
}
bs, err := msDosBootSectorFromBytes(bsb)
if err != nil {
return nil, fmt.Errorf("Error reading MS-DOS Boot Sector: %v", err)
}
sectorsPerFat := bs.biosParameterBlock.sectorsPerFat
fatSize := uint32(sectorsPerFat) * uint32(SectorSize512)
reservedSectors := bs.biosParameterBlock.dos331BPB.dos20BPB.reservedSectors
sectorsPerCluster := bs.biosParameterBlock.dos331BPB.dos20BPB.sectorsPerCluster
fatPrimaryStart := uint64(reservedSectors) * uint64(SectorSize512)
fatSecondaryStart := uint64(fatPrimaryStart) + uint64(fatSize)
fsisBytes := make([]byte, 512, 512)
// the FS Information Sector offset is in 512-byte logical sectors, independent of the blocksize parameter
read, err := file.ReadAt(fsisBytes, int64(bs.biosParameterBlock.fsInformationSector)*int64(SectorSize512)+int64(start))
if err != nil {
return nil, fmt.Errorf("Unable to read bytes for FSInformationSector: %v", err)
}
if read != 512 {
return nil, fmt.Errorf("Read %d bytes instead of expected %d for FS Information Sector", read, 512)
}
fsis, err := fsInformationSectorFromBytes(fsisBytes)
if err != nil {
return nil, fmt.Errorf("Error reading FileSystem Information Sector: %v", err)
}
b := make([]byte, fatSize, fatSize)
if _, err := file.ReadAt(b, int64(fatPrimaryStart)+int64(start)); err != nil {
return nil, fmt.Errorf("Unable to read bytes for primary FAT32 table: %v", err)
}
fat, err := tableFromBytes(b)
if err != nil {
return nil, fmt.Errorf("Error reading primary FAT32 Table: %v", err)
}
if _, err := file.ReadAt(b, int64(fatSecondaryStart)+int64(start)); err != nil {
return nil, fmt.Errorf("Unable to read bytes for backup FAT32 table: %v", err)
}
_, err = tableFromBytes(b)
if err != nil {
return nil, fmt.Errorf("Error reading backup FAT32 Table: %v", err)
}
dataStart := uint32(fatSecondaryStart) + fat.size
return &FileSystem{
bootSector: *bs,
fsis: *fsis,
table: *fat,
dataStart: dataStart,
bytesPerCluster: int(sectorsPerCluster) * int(SectorSize512),
start: start,
size: size,
file: file,
}, nil
}
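And the matching read-side sketch, listing the root directory of an existing image (hypothetical helper; assumes fmt and os are imported alongside the fat32 package):

// hypothetical helper built on the Create sketch above
func listRoot(f *os.File, size int64) error {
	fs, err := fat32.Read(f, size, 0, 0) // start at byte 0, default blocksize
	if err != nil {
		return err
	}
	infos, err := fs.ReadDir("/")
	if err != nil {
		return err
	}
	for _, fi := range infos {
		fmt.Printf("%s\t%d\t%v\n", fi.Name(), fi.Size(), fi.IsDir())
	}
	return nil
}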
// Type returns the type code for the filesystem. Always returns filesystem.TypeFat32
func (fs *FileSystem) Type() filesystem.Type {
return filesystem.TypeFat32
}
// Mkdir makes a directory at the given path. It is equivalent to `mkdir -p`, i.e. idempotent, in that:
//
// * It will make the entire tree path if it does not exist
// * It will not return an error if the path already exists
func (fs *FileSystem) Mkdir(p string) error {
_, _, err := fs.readDirWithMkdir(p, true)
// we are not interested in returning the entries
return err
}
// ReadDir returns the contents of a given directory in a given filesystem.
//
// Returns a slice of os.FileInfo with all of the entries in the directory.
//
// Will return an error if the directory does not exist or is a regular file and not a directory
func (fs *FileSystem) ReadDir(p string) ([]os.FileInfo, error) {
_, entries, err := fs.readDirWithMkdir(p, false)
if err != nil {
return nil, fmt.Errorf("Error reading directory %s: %v", p, err)
}
// once we have made it here, looping is done. We have found the final entry
// we need to return all of the file info
count := len(entries)
ret := make([]os.FileInfo, count, count)
for i, e := range entries {
shortName := e.filenameShort
if e.lowercaseShortname {
shortName = strings.ToLower(shortName)
}
fileExtension := e.fileExtension
if e.lowercaseExtension {
fileExtension = strings.ToLower(fileExtension)
}
if fileExtension != "" {
shortName = fmt.Sprintf("%s.%s", shortName, fileExtension)
}
ret[i] = FileInfo{
modTime: e.modifyTime,
name: e.filenameLong,
shortName: shortName,
size: int64(e.fileSize),
isDir: e.isSubdirectory,
}
}
return ret, nil
}
// OpenFile returns an io.ReadWriter from which you can read the contents of a file
// or write contents to the file
//
// accepts normal os.OpenFile flags
//
// returns an error if the file does not exist
func (fs *FileSystem) OpenFile(p string, flag int) (filesystem.File, error) {
// get the path
dir := path.Dir(p)
filename := path.Base(p)
// if the dir == filename, then it is just /
if dir == filename {
return nil, fmt.Errorf("Cannot open directory %s as file", p)
}
// get the directory entries
parentDir, entries, err := fs.readDirWithMkdir(dir, false)
if err != nil {
return nil, fmt.Errorf("Could not read directory entries for %s", dir)
}
// we now know that the directory exists, see if the file exists
var targetEntry *directoryEntry
for _, e := range entries {
shortName := e.filenameShort
if e.fileExtension != "" {
shortName += "." + e.fileExtension
}
if e.filenameLong != filename && shortName != filename {
continue
}
// cannot do anything with directories
if e.isSubdirectory {
return nil, fmt.Errorf("Cannot open directory %s as file", p)
}
// if we got this far, we have found the file
targetEntry = e
}
// see if the file exists
// if the file does not exist, and is not opened for os.O_CREATE, return an error
if targetEntry == nil {
if flag&os.O_CREATE == 0 {
return nil, fmt.Errorf("Target file %s does not exist and was not asked to create", p)
}
// else create it
targetEntry, err = fs.mkFile(parentDir, filename)
if err != nil {
return nil, fmt.Errorf("failed to create file %s: %v", p, err)
}
// write the directory entries to disk
err = fs.writeDirectoryEntries(parentDir)
if err != nil {
return nil, fmt.Errorf("Error writing directory file %s to disk: %v", p, err)
}
}
offset := int64(0)
if flag&os.O_APPEND == os.O_APPEND {
offset = int64(targetEntry.fileSize)
}
return &File{
directoryEntry: targetEntry,
isReadWrite: flag&os.O_RDWR != 0,
isAppend: flag&os.O_APPEND != 0,
offset: offset,
filesystem: fs,
parent: parentDir,
}, nil
}
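A short hedged sketch of the OpenFile semantics just described; the path and payload are illustrative, and os.O_CREATE is required for files that do not yet exist:

// hypothetical helper: create /hello.txt on fs and write to it
func writeHello(fs *fat32.FileSystem) error {
	fw, err := fs.OpenFile("/hello.txt", os.O_CREATE|os.O_RDWR)
	if err != nil {
		return err
	}
	_, err = fw.Write([]byte("hello, fat32"))
	return err
}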
// Label returns the label of the filesystem
func (fs *FileSystem) Label() string {
// be sane about everything existing
bpb := fs.bootSector.biosParameterBlock
if bpb == nil {
return ""
}
return bpb.volumeLabel
}
// read directory entries for a given cluster
func (fs *FileSystem) getClusterList(firstCluster uint32) ([]uint32, error) {
// first, get the chain of clusters
complete := false
cluster := firstCluster
clusters := fs.table.clusters
// do we even have a valid cluster?
if _, ok := clusters[cluster]; !ok {
return nil, fmt.Errorf("Invalid start cluster: %d", cluster)
}
clusterList := make([]uint32, 0, 5)
for !complete {
// save the current cluster
clusterList = append(clusterList, cluster)
// get the next cluster
newCluster := clusters[cluster]
// if it is EOC, we are done
switch {
case fs.table.isEoc(newCluster):
complete = true
case newCluster < 2:
return nil, fmt.Errorf("Invalid cluster chain at %d", newCluster)
}
cluster = newCluster
}
return clusterList, nil
}
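To see what the chain walk above does, consider a toy FAT where a file's entries read 5 -> 6 -> 9 -> EOC: the resulting list is [5 6 9]. A standalone restatement of the same logic:

// standalone illustration of FAT chain walking, not part of the vendored source
func walkChain(clusters map[uint32]uint32, first uint32) []uint32 {
	list := []uint32{}
	c := first
	for {
		list = append(list, c)
		next := clusters[c]
		// same end-of-chain test as table.isEoc, plus a guard against broken chains
		if next&0xFFFFFF8 == 0xFFFFFF8 || next < 2 {
			break
		}
		c = next
	}
	return list
}

// walkChain(map[uint32]uint32{5: 6, 6: 9, 9: 0x0fffffff}, 5) yields [5 6 9]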
// read directory entries for a given cluster
func (fs *FileSystem) readDirectory(dir *Directory) ([]*directoryEntry, error) {
clusterList, err := fs.getClusterList(dir.clusterLocation)
if err != nil {
return nil, fmt.Errorf("Could not read cluster list: %v", err)
}
// read the data from all of the cluster entries in the list
byteCount := len(clusterList) * fs.bytesPerCluster
b := make([]byte, 0, byteCount)
for _, cluster := range clusterList {
// bytes where the cluster starts
clusterStart := fs.start + int64(fs.dataStart) + int64(cluster-2)*int64(fs.bytesPerCluster)
// length of cluster in bytes
tmpb := make([]byte, fs.bytesPerCluster, fs.bytesPerCluster)
// read the entire cluster
if _, err := fs.file.ReadAt(tmpb, clusterStart); err != nil {
return nil, fmt.Errorf("Error reading directory cluster %d: %v", cluster, err)
}
b = append(b, tmpb...)
}
// get the directory
err = dir.entriesFromBytes(b, fs)
if err != nil {
return nil, err
}
return dir.entries, nil
}
// make a subdirectory
func (fs *FileSystem) mkSubdir(parent *Directory, name string) (*directoryEntry, error) {
// get a cluster chain for the file
clusters, err := fs.allocateSpace(1, 0)
if err != nil {
return nil, fmt.Errorf("Could not allocate disk space for file %s: %v", name, err)
}
// create a directory entry for the file
return parent.createEntry(name, clusters[0], true)
}
func (fs *FileSystem) writeDirectoryEntries(dir *Directory) error {
// we need to save the entries of the parent
b, err := dir.entriesToBytes(fs.bytesPerCluster)
if err != nil {
return fmt.Errorf("Could not create a valid byte stream for a FAT32 Entries: %v", err)
}
// now have to expand with zeros to a multiple of cluster lengths
// how many clusters do we need, how many do we have?
clusterList, err := fs.getClusterList(dir.clusterLocation)
if err != nil {
return fmt.Errorf("Unable to get clusters for directory: %v", err)
}
extraClusters := len(b)/fs.bytesPerCluster - len(clusterList)
if extraClusters > 0 {
// allocateSpace takes a total size in bytes, and extends the chain ending at the given cluster
clusters, err := fs.allocateSpace(uint64(len(b)), clusterList[len(clusterList)-1])
if err != nil {
return fmt.Errorf("Unable to allocate space for directory entries: %v", err)
}
clusterList = clusters
}
// now write everything out to the cluster list
// read the data from all of the cluster entries in the list
for i, cluster := range clusterList {
// bytes where the cluster starts
clusterStart := fs.start + int64(fs.dataStart) + int64(cluster-2)*int64(fs.bytesPerCluster)
bStart := i * fs.bytesPerCluster
written, err := fs.file.WriteAt(b[bStart:bStart+fs.bytesPerCluster], clusterStart)
if err != nil {
return fmt.Errorf("Error writing directory entries: %v", err)
}
if written != fs.bytesPerCluster {
return fmt.Errorf("Wrote %d bytes to cluster %d instead of expected %d", written, cluster, fs.bytesPerCluster)
}
}
return nil
}
// mkFile makes a file in a directory
func (fs *FileSystem) mkFile(parent *Directory, name string) (*directoryEntry, error) {
// get a cluster chain for the file
clusters, err := fs.allocateSpace(1, 0)
if err != nil {
return nil, fmt.Errorf("Could not allocate disk space for directory %s: %v", name, err)
}
// create a directory entry for the file
return parent.createEntry(name, clusters[0], false)
}
// mkLabel makes a volume label in a directory
func (fs *FileSystem) mkLabel(parent *Directory, name string) (*directoryEntry, error) {
// create a directory entry for the file
return parent.createVolumeLabel(name)
}
// readDirWithMkdir - walks down a directory tree to the last entry
// if an intermediate directory does not exist, it creates it when doMake is true, and errors otherwise
func (fs *FileSystem) readDirWithMkdir(p string, doMake bool) (*Directory, []*directoryEntry, error) {
paths, err := splitPath(p)
if err != nil {
return nil, nil, err
}
// walk down the directory tree until all paths have been walked or we cannot find something
// start with the root directory
var entries []*directoryEntry
currentDir := &Directory{
directoryEntry: directoryEntry{
clusterLocation: uint32(fs.table.rootDirCluster),
isSubdirectory: true,
filesystem: fs,
},
}
entries, err = fs.readDirectory(currentDir)
if err != nil {
return nil, nil, fmt.Errorf("Failed to read directory %s", "/")
}
for i, subp := range paths {
// do we have an entry whose name is the same as this name?
found := false
for _, e := range entries {
if e.filenameLong != subp && e.filenameShort != subp && (!e.lowercaseShortname || (e.lowercaseShortname && strings.ToLower(e.filenameShort) != subp)) {
continue
}
if !e.isSubdirectory {
return nil, nil, fmt.Errorf("Cannot create directory at %s since it is a file", "/"+strings.Join(paths[0:i+1], "/"))
}
// the filename matches, and it is a subdirectory, so we can break after saving the cluster
found = true
currentDir = &Directory{
directoryEntry: *e,
}
break
}
// if not, either make it, retrieve its cluster and entries, and loop;
// or error out
if !found {
if doMake {
var subdirEntry *directoryEntry
subdirEntry, err = fs.mkSubdir(currentDir, subp)
if err != nil {
return nil, nil, fmt.Errorf("Failed to create subdirectory %s", "/"+strings.Join(paths[0:i+1], "/"))
}
// make a basic entry for the new subdir
dir := &Directory{
directoryEntry: directoryEntry{clusterLocation: subdirEntry.clusterLocation},
entries: []*directoryEntry{
{filenameShort: ".", isSubdirectory: true, clusterLocation: subdirEntry.clusterLocation},
{filenameShort: "..", isSubdirectory: true, clusterLocation: currentDir.clusterLocation},
},
}
// write the new directory entries to disk
err = fs.writeDirectoryEntries(dir)
if err != nil {
return nil, nil, fmt.Errorf("Error writing new directory entries to disk: %v", err)
}
// write the parent directory entries to disk
err = fs.writeDirectoryEntries(currentDir)
if err != nil {
return nil, nil, fmt.Errorf("Error writing directory entries to disk: %v", err)
}
// save where we are to search next
currentDir = &Directory{
directoryEntry: *subdirEntry,
}
} else {
return nil, nil, fmt.Errorf("Path %s not found", "/"+strings.Join(paths[0:i+1], "/"))
}
}
// get all of the entries in this directory
entries, err = fs.readDirectory(currentDir)
if err != nil {
return nil, nil, fmt.Errorf("Failed to read directory %s", "/"+strings.Join(paths[0:i+1], "/"))
}
}
// once we have made it here, looping is done; we have found the final entry
return currentDir, entries, nil
}
// allocateSpace ensures that a cluster chain exists to handle a file of a given size.
// arguments are file size in bytes and starting cluster of the chain
// if starting is 0, then we are not (re)sizing an existing chain but creating a new one
// returns the indexes of clusters to be used in order
func (fs *FileSystem) allocateSpace(size uint64, previous uint32) ([]uint32, error) {
var (
clusters []uint32
err error
)
// 1- calculate how many clusters needed
// 2- see how many clusters already are allocated
// 3- if needed, allocate new clusters and extend the chain in the FAT table
allocated := make([]uint32, 0, 20)
// what is the total count of clusters needed?
count := int(size / uint64(fs.bytesPerCluster))
if size%uint64(fs.bytesPerCluster) > 0 {
count++
}
extraClusterCount := count
clusters = make([]uint32, 0, 20)
// are we extending an existing chain, or creating a new one?
if previous >= 2 {
clusters, err = fs.getClusterList(previous)
if err != nil {
return nil, fmt.Errorf("Unable to get cluster list: %v", err)
}
originalClusterCount := len(clusters)
extraClusterCount = count - originalClusterCount
}
// what if we do not need to allocate any?
if extraClusterCount < 1 {
return clusters, nil
}
// get a list of allocated clusters, so we can know which ones are unallocated and therefore allocatable
allClusters := fs.table.clusters
maxCluster := fs.table.maxCluster
for i := uint32(2); i < maxCluster && len(allocated) < extraClusterCount; i++ {
if _, ok := allClusters[i]; !ok {
// cluster i is unallocated, so take it
allocated = append(allocated, i)
}
}
// did we allocate them all?
if len(allocated) < extraClusterCount {
return nil, errors.New("No space left on device")
}
// mark last allocated one as EOC
lastAlloc := len(allocated) - 1
// extend the chain and fill them in
if previous > 0 {
allClusters[previous] = allocated[0]
}
for i := 0; i < lastAlloc; i++ {
allClusters[allocated[i]] = allocated[i+1]
}
allClusters[allocated[lastAlloc]] = fs.table.eocMarker
// update the FSIS
fs.fsis.lastAllocatedCluster = allocated[len(allocated)-1]
// write them all
b, err := fs.table.bytes()
if err != nil {
return nil, fmt.Errorf("Error converting FAT table to bytes: %v", err)
}
primaryFatStart := int64(fs.bootSector.biosParameterBlock.dos331BPB.dos20BPB.reservedSectors) * int64(SectorSize512)
if _, err := fs.file.WriteAt(b, primaryFatStart+fs.start); err != nil {
return nil, fmt.Errorf("Unable to write primary FAT table: %v", err)
}
// keep the backup FAT in sync as well
if _, err := fs.file.WriteAt(b, primaryFatStart+int64(fs.table.size)+fs.start); err != nil {
return nil, fmt.Errorf("Unable to write backup FAT table: %v", err)
}
fsisBytes, err := fs.fsis.toBytes()
if err != nil {
return nil, fmt.Errorf("Could not create a valid byte stream for a FAT32 Filesystem Information Sector: %v", err)
}
fsisPrimary := fs.bootSector.biosParameterBlock.fsInformationSector
fsisBackup := fs.bootSector.biosParameterBlock.backupFSInfoSector
if _, err := fs.file.WriteAt(fsisBytes, int64(fsisPrimary)*int64(SectorSize512)+fs.start); err != nil {
return nil, fmt.Errorf("Unable to write primary FS Information Sector: %v", err)
}
if _, err := fs.file.WriteAt(fsisBytes, int64(fsisBackup)*int64(SectorSize512)+fs.start); err != nil {
return nil, fmt.Errorf("Unable to write backup FS Information Sector: %v", err)
}
// return all of the clusters
return append(clusters, allocated...), nil
}
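The byte-to-cluster arithmetic at the top of allocateSpace is a plain ceiling division; for example, with 4,096-byte clusters a 10,000-byte file needs three clusters. Restated in isolation:

// standalone restatement of the cluster-count computation in allocateSpace
func clustersNeeded(size uint64, bytesPerCluster int) int {
	count := int(size / uint64(bytesPerCluster))
	if size%uint64(bytesPerCluster) > 0 {
		count++ // a partial final cluster still occupies a whole cluster
	}
	return count // e.g. clustersNeeded(10000, 4096) == 3
}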

View File

@@ -0,0 +1,182 @@
package fat32
import (
"fmt"
"io"
)
// File represents a single file in a FAT32 filesystem
type File struct {
*directoryEntry
isReadWrite bool
isAppend bool
offset int64
parent *Directory
filesystem *FileSystem
}
// Read reads up to len(b) bytes from the File.
// It returns the number of bytes read and any error encountered.
// At end of file, Read returns 0, io.EOF
// reads from the last known offset in the file from last read or write
// use Seek() to set at a particular point
func (fl *File) Read(b []byte) (int, error) {
// we have the DirectoryEntry, so we can get the starting cluster location
// we then get a list of the clusters, and read the data from all of those clusters
totalRead := 0
fs := fl.filesystem
bytesPerCluster := fs.bytesPerCluster
start := int(fs.dataStart)
size := int(fl.fileSize) - int(fl.offset)
maxRead := size
file := fs.file
clusters, err := fs.getClusterList(fl.clusterLocation)
if err != nil {
return totalRead, fmt.Errorf("Unable to get list of clusters for file: %v", err)
}
var lastCluster uint32
clusterIndex := 0
// if there is nothing left to read, just return EOF
if size <= 0 {
return totalRead, io.EOF
}
// we stop when we hit the lesser of
// 1- len(b)
// 2- file end
if len(b) < maxRead {
maxRead = len(b)
}
// figure out which cluster we start with
if fl.offset > 0 {
clusterIndex = int(fl.offset / int64(bytesPerCluster))
lastCluster = clusters[clusterIndex]
// read any partials, if needed
remainder := fl.offset % int64(bytesPerCluster)
if remainder != 0 {
offset := int64(start) + int64(lastCluster-2)*int64(bytesPerCluster) + remainder
toRead := int64(bytesPerCluster) - remainder
if toRead > int64(len(b)) {
toRead = int64(len(b))
}
if _, err := file.ReadAt(b[0:toRead], offset+fs.start); err != nil {
return totalRead, fmt.Errorf("Error reading partial first cluster: %v", err)
}
totalRead += int(toRead)
clusterIndex++
}
}
for i := clusterIndex; i < len(clusters); i++ {
left := maxRead - totalRead
toRead := bytesPerCluster
if toRead > left {
toRead = left
}
offset := uint32(start) + (clusters[i]-2)*uint32(bytesPerCluster)
if _, err := file.ReadAt(b[totalRead:totalRead+toRead], int64(offset)+fs.start); err != nil {
return totalRead, fmt.Errorf("Error reading cluster %d: %v", clusters[i], err)
}
totalRead += toRead
if totalRead >= maxRead {
break
}
}
fl.offset = fl.offset + int64(totalRead)
var retErr error
if fl.offset >= int64(fl.fileSize) {
retErr = io.EOF
}
return totalRead, retErr
}
// Write writes len(b) bytes to the File.
// It returns the number of bytes written and an error, if any.
// returns a non-nil error when n != len(b)
// writes to the last known offset in the file from last read or write
// use Seek() to set at a particular point
func (fl *File) Write(p []byte) (int, error) {
totalWritten := 0
fs := fl.filesystem
// if the file was not opened RDWR, nothing we can do
if !fl.isReadWrite {
return totalWritten, fmt.Errorf("Cannot write to file opened read-only")
}
// what is the new file size?
writeSize := len(p)
oldSize := int64(fl.fileSize)
newSize := fl.offset + int64(writeSize)
if newSize < oldSize {
newSize = oldSize
}
// 1- ensure we have space and clusters
clusters, err := fs.allocateSpace(uint64(newSize), fl.clusterLocation)
if err != nil {
return 0, fmt.Errorf("Unable to allocate clusters for file: %v", err)
}
// update the directory entry size for the file
if oldSize != newSize {
fl.fileSize = uint32(newSize)
}
// write the content for the file
bytesPerCluster := fl.filesystem.bytesPerCluster
file := fl.filesystem.file
start := int(fl.filesystem.dataStart)
clusterIndex := 0
// figure out which cluster we start with
if fl.offset > 0 {
clusterIndex = int(fl.offset) / bytesPerCluster
lastCluster := clusters[clusterIndex]
// write any partials, if needed
remainder := fl.offset % int64(bytesPerCluster)
if remainder != 0 {
offset := int64(start) + int64(lastCluster-2)*int64(bytesPerCluster) + remainder
toWrite := int64(bytesPerCluster) - remainder
// max we can write
if toWrite > int64(len(p)) {
toWrite = int64(len(p))
}
if _, err := file.WriteAt(p[0:toWrite], offset+fs.start); err != nil {
return totalWritten, fmt.Errorf("Error writing partial first cluster: %v", err)
}
totalWritten += int(toWrite)
clusterIndex++
}
}
for i := clusterIndex; i < len(clusters); i++ {
left := len(p) - totalWritten
toWrite := bytesPerCluster
if toWrite > left {
toWrite = left
}
offset := uint32(start) + (clusters[i]-2)*uint32(bytesPerCluster)
if _, err := file.WriteAt(p[totalWritten:totalWritten+toWrite], int64(offset)+fs.start); err != nil {
return totalWritten, fmt.Errorf("Error writing cluster %d: %v", clusters[i], err)
}
totalWritten += toWrite
}
// advance the file offset past what was just written
fl.offset += int64(totalWritten)
// update the parent that we have changed the file size
err = fs.writeDirectoryEntries(fl.parent)
if err != nil {
return 0, fmt.Errorf("Error writing directory entries to disk: %v", err)
}
return totalWritten, nil
}
// Seek sets the offset to a particular point in the file
func (fl *File) Seek(offset int64, whence int) (int64, error) {
newOffset := int64(0)
switch whence {
case io.SeekStart:
newOffset = offset
case io.SeekEnd:
newOffset = int64(fl.fileSize) + offset
case io.SeekCurrent:
newOffset = fl.offset + offset
}
if newOffset < 0 {
return fl.offset, fmt.Errorf("Cannot set offset %d before start of file", offset)
}
fl.offset = newOffset
return fl.offset, nil
}
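A hedged sketch tying Read and Seek together; fl is assumed to come from OpenFile, filesystem.File is the io.ReadWriteSeeker interface defined later in this commit, and io is imported:

// hypothetical helper: read the final n bytes of an open file
func readTail(fl filesystem.File, fileSize int64, n int64) ([]byte, error) {
	if n > fileSize {
		n = fileSize
	}
	// position n bytes before end-of-file; SeekEnd offsets are added to the file size
	if _, err := fl.Seek(-n, io.SeekEnd); err != nil {
		return nil, err
	}
	b := make([]byte, n)
	read, err := fl.Read(b)
	if err != nil && err != io.EOF {
		return nil, err
	}
	return b[:read], nil
}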

View File

@@ -0,0 +1,56 @@
package fat32
import (
"os"
"time"
)
// FileInfo represents the information for an individual file
// it fulfills the os.FileInfo interface
type FileInfo struct {
modTime time.Time
mode os.FileMode
name string
shortName string
size int64
isDir bool
}
// IsDir abbreviation for Mode().IsDir()
func (fi FileInfo) IsDir() bool {
return fi.isDir
}
// ModTime modification time
func (fi FileInfo) ModTime() time.Time {
return fi.modTime
}
// Mode returns file mode
func (fi FileInfo) Mode() os.FileMode {
return fi.mode
}
// Name base name of the file
// will return the long name of the file. If none exists, returns the shortname and extension
func (fi FileInfo) Name() string {
if fi.name != "" {
return fi.name
}
return fi.shortName
}
// ShortName just the 8.3 short name of the file
func (fi FileInfo) ShortName() string {
return fi.shortName
}
// Size length in bytes for regular files
func (fi FileInfo) Size() int64 {
return fi.size
}
// Sys underlying data source - not supported yet and so will return nil
func (fi FileInfo) Sys() interface{} {
return nil
}

View File

@@ -0,0 +1,81 @@
package fat32
import (
"encoding/binary"
"fmt"
)
// FSInfoSectorSignature is the signature for every FAT32 FSInformationSector
type fsInfoSectorSignature uint32
const (
// FSInfoSectorSignatureStart is the 4 bytes that signify the beginning of a FAT32 FS Information Sector
fsInfoSectorSignatureStart fsInfoSectorSignature = 0x52526141
// FSInfoSectorSignatureMid is the 4 bytes that signify the middle bytes 484-487 of a FAT32 FS Information Sector
fsInfoSectorSignatureMid fsInfoSectorSignature = 0x72724161
// FSInfoSectorSignatureEnd is the 4 bytes that signify the end of a FAT32 FS Information Sector
fsInfoSectorSignatureEnd fsInfoSectorSignature = 0x000055AA
)
const (
// UnknownFreeDataClusterCount is the fixed flag for unknown number of free data clusters
unknownFreeDataClusterCount uint32 = 0xffffffff
// UnknownlastAllocatedCluster is the fixed flag for unknown most recently allocated cluster
unknownlastAllocatedCluster uint32 = 0xffffffff
)
// FSInformationSector is a structure holding the FAT32 filesystem information sector
type FSInformationSector struct {
freeDataClustersCount uint32
lastAllocatedCluster uint32
}
// FSInformationSectorFromBytes creates an FSInformationSector struct from bytes
func fsInformationSectorFromBytes(b []byte) (*FSInformationSector, error) {
bLen := len(b)
if bLen != int(SectorSize512) {
return nil, fmt.Errorf("Cannot read FAT32 FS Information Sector from %d bytes instead of expected %d", bLen, SectorSize512)
}
fsis := FSInformationSector{}
// validate the signatures
signatureStart := binary.BigEndian.Uint32(b[0:4])
signatureMid := binary.BigEndian.Uint32(b[484:488])
signatureEnd := binary.BigEndian.Uint32(b[508:512])
if signatureStart != uint32(fsInfoSectorSignatureStart) {
return nil, fmt.Errorf("Invalid signature at beginning of FAT 32 Filesystem Information Sector: %x", signatureStart)
}
if signatureMid != uint32(fsInfoSectorSignatureMid) {
return nil, fmt.Errorf("Invalid signature at middle of FAT 32 Filesystem Information Sector: %x", signatureMid)
}
if signatureEnd != uint32(fsInfoSectorSignatureEnd) {
return nil, fmt.Errorf("Invalid signature at end of FAT 32 Filesystem Information Sector: %x", signatureEnd)
}
// validated, so just read the data
fsis.freeDataClustersCount = binary.LittleEndian.Uint32(b[488:492])
fsis.lastAllocatedCluster = binary.LittleEndian.Uint32(b[492:496])
return &fsis, nil
}
// ToBytes returns a FAT32 Filesystem Information Sector ready to be written to disk
func (fsis *FSInformationSector) toBytes() ([]byte, error) {
b := make([]byte, SectorSize512, SectorSize512)
// signatures
binary.BigEndian.PutUint32(b[0:4], uint32(fsInfoSectorSignatureStart))
binary.BigEndian.PutUint32(b[484:488], uint32(fsInfoSectorSignatureMid))
binary.BigEndian.PutUint32(b[508:512], uint32(fsInfoSectorSignatureEnd))
// reserved 0x00
// these are set to 0 by default, so not much to do
// actual data
binary.LittleEndian.PutUint32(b[488:492], fsis.freeDataClustersCount)
binary.LittleEndian.PutUint32(b[492:496], fsis.lastAllocatedCluster)
return b, nil
}

View File

@@ -0,0 +1,101 @@
package fat32
import (
"bytes"
"encoding/binary"
"fmt"
)
// MsDosBootSectorSignature is the required last 2 bytes of the MS-DOS boot sector
const msDosBootSectorSignature uint16 = 0x55aa
// MsDosBootSector is the structure representing an msdos boot structure
type msDosBootSector struct {
jumpInstruction [3]byte // JumpInstruction is the instruction set to jump to for booting
oemName string // OEMName is the 8-byte OEM Name
biosParameterBlock *dos71EBPB // BIOSParameterBlock is the FAT32 Extended BIOS Parameter Block
bootCode []byte // BootCode represents the actual boot code
}
func (m *msDosBootSector) equal(a *msDosBootSector) bool {
if (m == nil && a != nil) || (a == nil && m != nil) {
return false
}
if m == nil && a == nil {
return true
}
return m.biosParameterBlock.equal(a.biosParameterBlock) &&
m.oemName == a.oemName &&
m.jumpInstruction == a.jumpInstruction &&
bytes.Equal(m.bootCode, a.bootCode)
}
// MsDosBootSectorFromBytes creates an MsDosBootSector from a byte slice
func msDosBootSectorFromBytes(b []byte) (*msDosBootSector, error) {
if len(b) != int(SectorSize512) {
return nil, fmt.Errorf("Cannot parse MS-DOS Boot Sector from %d bytes, must be exactly %d", len(b), SectorSize512)
}
bs := msDosBootSector{}
// extract the jump instruction
copy(bs.jumpInstruction[:], b[0:3])
// extract the OEM name
bs.oemName = string(b[3:11])
// extract the EBPB and its size
bpb, bpbSize, err := dos71EBPBFromBytes(b[11:90])
if err != nil {
return nil, fmt.Errorf("Could not read FAT32 BIOS Parameter Block from boot sector: %v", err)
}
bs.biosParameterBlock = bpb
// we have the size of the EBPB, we can figure out the size of the boot code
bootSectorStart := 11 + bpbSize
bootSectorEnd := SectorSize512 - 2
bs.bootCode = b[bootSectorStart:bootSectorEnd]
// validate boot sector signature
if bsSignature := binary.BigEndian.Uint16(b[bootSectorEnd:]); bsSignature != msDosBootSectorSignature {
return nil, fmt.Errorf("Invalid signature in last 2 bytes of boot sector: %v", bsSignature)
}
return &bs, nil
}
// ToBytes outputs a byte slice representing the boot sector
func (m *msDosBootSector) toBytes() ([]byte, error) {
// exactly one sector
b := make([]byte, SectorSize512)
// copy the 3-byte jump instruction
copy(b[0:3], m.jumpInstruction[:])
// make sure OEMName is <= 8 bytes
name := m.oemName
if len(name) > 8 {
return nil, fmt.Errorf("Cannot use OEM Name > 8 bytes long: %s", m.oemName)
}
nameR := []rune(name)
if len(nameR) != len(name) {
return nil, fmt.Errorf("Invalid OEM Name: non-ascii characters")
}
oemName := fmt.Sprintf("%-8s", m.oemName)
copy(b[3:11], []byte(oemName))
// bytes for the EBPB
bpbBytes, err := m.biosParameterBlock.toBytes()
if err != nil {
return nil, fmt.Errorf("Error getting FAT32 EBPB: %v", err)
}
copy(b[11:], bpbBytes)
bpbLen := len(bpbBytes)
// bytes for the boot sector
if len(m.bootCode) > int(SectorSize512)-2-(11+bpbLen) {
return nil, fmt.Errorf("boot code too long at %d bytes", len(m.bootCode))
}
copy(b[11+bpbLen:SectorSize512-2], m.bootCode)
// bytes for the signature
binary.BigEndian.PutUint16(b[SectorSize512-2:], msDosBootSectorSignature)
return b, nil
}
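Since every field parsed by msDosBootSectorFromBytes is written back symmetrically by toBytes (with labels re-padded to their fixed widths), a parse/serialize round trip should reproduce the sector byte for byte. A test-style in-package sketch (assumes bytes and errors are imported):

// hypothetical in-package check, not part of the vendored source
func checkBootSectorRoundTrip(sector []byte) error {
	bs, err := msDosBootSectorFromBytes(sector)
	if err != nil {
		return err
	}
	out, err := bs.toBytes()
	if err != nil {
		return err
	}
	if !bytes.Equal(sector, out) {
		return errors.New("boot sector round trip mismatch")
	}
	return nil
}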

View File

@@ -0,0 +1,85 @@
package fat32
import (
"encoding/binary"
"reflect"
)
// table a FAT32 table
type table struct {
fatID uint32
eocMarker uint32
clusters map[uint32]uint32
rootDirCluster uint32
size uint32
maxCluster uint32
}
func (t *table) equal(a *table) bool {
if (t == nil && a != nil) || (t != nil && a == nil) {
return false
}
if t == nil && a == nil {
return true
}
return t.fatID == a.fatID &&
t.eocMarker == a.eocMarker &&
t.rootDirCluster == a.rootDirCluster &&
t.size == a.size &&
t.maxCluster == a.maxCluster &&
reflect.DeepEqual(t.clusters, a.clusters)
}
/*
when reading from disk, remember that *any* of the following is a valid eocMarker:
0x?ffffff8 - 0x?fffffff
*/
func tableFromBytes(b []byte) (*table, error) {
t := table{
fatID: binary.LittleEndian.Uint32(b[0:4]),
eocMarker: binary.LittleEndian.Uint32(b[4:8]),
size: uint32(len(b)),
clusters: map[uint32]uint32{},
maxCluster: uint32(len(b) / 4),
rootDirCluster: 2, // always 2 for FAT32
}
// just need to map the clusters in
for i := uint32(2); i < t.maxCluster; i++ {
bStart := i * 4
bEnd := bStart + 4
val := binary.LittleEndian.Uint32(b[bStart:bEnd])
// 0 indicates an empty cluster, so we can ignore
if val != 0 {
t.clusters[i] = val
}
}
return &t, nil
}
// bytes returns a FAT32 table as bytes ready to be written to disk
func (t *table) bytes() ([]byte, error) {
b := make([]byte, t.size, t.size)
// FAT ID and fixed values
binary.LittleEndian.PutUint32(b[0:4], t.fatID)
// End-of-Cluster marker
binary.LittleEndian.PutUint32(b[4:8], t.eocMarker)
// now just clusters
numClusters := t.maxCluster
for i := uint32(2); i < numClusters; i++ {
bStart := i * 4
bEnd := bStart + 4
val := uint32(0)
if cluster, ok := t.clusters[i]; ok {
val = cluster
}
binary.LittleEndian.PutUint32(b[bStart:bEnd], val)
}
return b, nil
}
func (t *table) isEoc(cluster uint32) bool {
return cluster&0xFFFFFF8 == 0xFFFFFF8
}
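For orientation, bytes() simply places entry i at byte offset i*4; a freshly created filesystem whose root directory occupies a single cluster would serialize as below (in-package sketch, values matching those used by Create earlier in this commit):

// hypothetical in-package sketch, not part of the vendored source
func exampleFreshTable() []byte {
	t := table{
		fatID:          0x0ffffff8, // media type 0xf8 plus the 0x0fffff00 filler
		eocMarker:      0x0fffffff,
		size:           512, // one sector worth of FAT entries
		maxCluster:     128, // 512 bytes / 4 bytes per entry
		rootDirCluster: 2,
		clusters:       map[uint32]uint32{2: 0x0fffffff}, // root directory: one cluster, EOC
	}
	b, _ := t.bytes()
	// b[0:4] = fatID, b[4:8] = eocMarker, b[8:12] = entry for cluster 2 (EOC),
	// and every entry after that is zero, i.e. a free cluster
	return b
}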

View File

@@ -0,0 +1,44 @@
package fat32
import (
"errors"
"strings"
)
const (
// KB represents one KB
KB int64 = 1024
// MB represents one MB
MB int64 = 1024 * KB
// GB represents one GB
GB int64 = 1024 * MB
// TB represents one TB
TB int64 = 1024 * GB
// Fat32MaxSize is maximum size of a FAT32 filesystem in bytes
Fat32MaxSize int64 = 2198754099200
)
func universalizePath(p string) (string, error) {
// normalize all separators to "/"
ps := strings.Replace(p, "\\", "/", -1)
if len(ps) == 0 || ps[0] != '/' {
return "", errors.New("Must use absolute paths")
}
return ps, nil
}
func splitPath(p string) ([]string, error) {
ps, err := universalizePath(p)
if err != nil {
return nil, err
}
// split the path into its components, dropping any empty ones
parts := strings.Split(ps, "/")
// eliminate empty parts
ret := make([]string, 0)
for _, sub := range parts {
if sub != "" {
ret = append(ret, sub)
}
}
return ret, nil
}
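So, for example, /a/b/c, \a\b\c, and /a//b/c/ all normalize to the components [a b c]; a quick in-package illustration:

// hypothetical in-package snippet
func exampleSplit() {
	parts, err := splitPath(`\a\b\c`)
	// err == nil, parts == []string{"a", "b", "c"}
	_, _ = parts, err
}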

View File

@@ -0,0 +1,10 @@
package filesystem
import "io"
// File a reference to a single file on disk
type File interface {
io.ReadWriteSeeker
//io.ReaderAt
//io.WriterAt
}

View File

@@ -0,0 +1,32 @@
// Package filesystem provides interfaces and constants required for filesystem implementations.
// All interesting implementations are in subpackages, e.g. github.com/diskfs/go-diskfs/filesystem/fat32
package filesystem
import (
"os"
)
// FileSystem is a reference to a single filesystem on a disk
type FileSystem interface {
// Type return the type of filesystem
Type() Type
// Mkdir make a directory
Mkdir(string) error
// ReadDir read the contents of a directory
ReadDir(string) ([]os.FileInfo, error)
// OpenFile open a handle to read or write to a file
OpenFile(string, int) (File, error)
// Label get the label for the filesystem, or "" if none. Be careful to trim it, as it may contain
// leading or following whitespace. The label is passed as-is and not cleaned up at all.
Label() string
}
// Type represents the type of disk this is
type Type int
const (
// TypeFat32 is a FAT32 compatible filesystem
TypeFat32 Type = iota
// TypeISO9660 is an iso filesystem
TypeISO9660
)

View File

@@ -0,0 +1,53 @@
package iso9660
// Directory represents a single directory in an ISO9660 filesystem
type Directory struct {
directoryEntry
entries []*directoryEntry
}
// dirEntriesFromBytes loads the directory entries from the raw bytes
func (d *Directory) entriesFromBytes(b []byte, f *FileSystem) error {
entries, err := parseDirEntries(b, f)
if err != nil {
return err
}
d.entries = entries
return nil
}
// entriesToBytes converts our entries to raw bytes
func (d *Directory) entriesToBytes(ceBlockLocations []uint32) ([][]byte, error) {
b := make([]byte, 0)
ceBlocks := make([][]byte, 0)
blocksize := int(d.filesystem.blocksize)
for _, de := range d.entries {
b2, err := de.toBytes(false, ceBlockLocations)
if err != nil {
return nil, err
}
recBytes := b2[0]
// a directory entry cannot cross a block boundary
// so if adding this puts us past it, then pad it
// but only if we are not already exactly at the boundary
newlength := len(b) + len(recBytes)
left := blocksize - len(b)%blocksize
if left != 0 && newlength/blocksize > len(b)/blocksize {
b = append(b, make([]byte, left)...)
}
b = append(b, recBytes...)
if len(b2) > 1 {
ceBlocks = append(ceBlocks, b2[1:]...)
}
}
// in the end, must pad to exact blocks
left := blocksize - len(b)%blocksize
if left > 0 {
b = append(b, make([]byte, left)...)
}
ret := [][]byte{b}
if len(ceBlocks) > 0 {
ret = append(ret, ceBlocks...)
}
return ret, nil
}

View File

@@ -0,0 +1,596 @@
package iso9660
import (
"encoding/binary"
"fmt"
"os"
"path"
"regexp"
"strings"
"time"
)
const (
directoryEntryMinSize uint8 = 34 // min size is all the required fields (33 bytes) plus 1 byte for the filename
directoryEntryMaxSize int = 254 // max size allowed
)
// directoryEntry is a single directory entry
// also fulfills os.FileInfo
// Name() string // base name of the file
// Size() int64 // length in bytes for regular files; system-dependent for others
// Mode() FileMode // file mode bits
// ModTime() time.Time // modification time
// IsDir() bool // abbreviation for Mode().IsDir()
// Sys() interface{} // underlying data source (can return nil)
type directoryEntry struct {
extAttrSize uint8
location uint32
size uint32
creation time.Time
isHidden bool
isSubdirectory bool
isAssociated bool
hasExtendedAttrs bool
hasOwnerGroupPermissions bool
hasMoreEntries bool
isSelf bool
isParent bool
volumeSequence uint16
filesystem *FileSystem
filename string
extensions []directoryEntrySystemUseExtension
}
func (de *directoryEntry) countNamelenBytes() int {
// size includes the ";1" at the end as two bytes if a filename
var namelen int
switch {
case de.isSelf:
namelen = 1
case de.isParent:
namelen = 1
default:
namelen = len(de.filename)
}
return namelen
}
func (de *directoryEntry) countBaseBytes() int {
namelen := de.countNamelenBytes()
// if namelen is even, add one byte of padding so the total record length (33 + namelen) stays even
if namelen%2 == 0 {
namelen++
}
return 33 + namelen
}
func (de *directoryEntry) toBytes(skipExt bool, ceBlocks []uint32) ([][]byte, error) {
baseRecordSize := de.countBaseBytes()
namelen := de.countNamelenBytes()
b := make([]byte, baseRecordSize)
b[1] = de.extAttrSize
binary.LittleEndian.PutUint32(b[2:6], de.location)
binary.BigEndian.PutUint32(b[6:10], de.location)
binary.LittleEndian.PutUint32(b[10:14], de.size)
binary.BigEndian.PutUint32(b[14:18], de.size)
copy(b[18:25], timeToBytes(de.creation))
// set the flags
var flagByte byte = 0x00
if de.isHidden {
flagByte = flagByte | 0x01
}
if de.isSubdirectory {
flagByte = flagByte | 0x02
}
if de.isAssociated {
flagByte = flagByte | 0x04
}
if de.hasExtendedAttrs {
flagByte = flagByte | 0x08
}
if de.hasOwnerGroupPermissions {
flagByte = flagByte | 0x10
}
if de.hasMoreEntries {
flagByte = flagByte | 0x80
}
b[25] = flagByte
// volume sequence number - uint16 in both endian
binary.LittleEndian.PutUint16(b[28:30], de.volumeSequence)
binary.BigEndian.PutUint16(b[30:32], de.volumeSequence)
b[32] = uint8(namelen)
// save the filename
var filenameBytes []byte
var err error
switch {
case de.isSelf:
filenameBytes = []byte{0x00}
case de.isParent:
filenameBytes = []byte{0x01}
default:
// first validate the filename
err = validateFilename(de.filename, de.isSubdirectory)
if err != nil {
nametype := "filename"
if de.isSubdirectory {
nametype = "directory"
}
return nil, fmt.Errorf("Invalid %s %s: %v", nametype, de.filename, err)
}
filenameBytes, err = stringToASCIIBytes(de.filename)
if err != nil {
return nil, fmt.Errorf("Error converting filename to bytes: %v", err)
}
}
// copy it over
copy(b[33:], filenameBytes)
// output directory entry extensions - but only if we did not skip it
var extBytes [][]byte
if !skipExt {
extBytes, err = dirEntryExtensionsToBytes(de.extensions, directoryEntryMaxSize-len(b), de.filesystem.blocksize, ceBlocks)
if err != nil {
return nil, fmt.Errorf("Unable to convert directory entry SUSP extensions to bytes: %v", err)
}
b = append(b, extBytes[0]...)
}
// always end on an even byte boundary
if len(b)%2 != 0 {
b = append(b, 0x00)
}
// update the record size
b[0] = uint8(len(b))
recWithCE := [][]byte{b}
if len(extBytes) > 1 {
recWithCE = append(recWithCE, extBytes[1:]...)
}
return recWithCE, nil
}
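The flag byte at offset 25 is a plain bitmask; for instance, a hidden subdirectory whose record continues in a follow-on entry carries 0x01|0x02|0x80 = 0x83. Restated minimally:

// standalone illustration of the flag byte composition above
func exampleFlagByte() byte {
	var flags byte
	flags |= 0x01 // isHidden
	flags |= 0x02 // isSubdirectory
	flags |= 0x80 // hasMoreEntries
	return flags // 0x83
}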
// dirEntryExtensionsToBytes converts slice of SUSP extensions to slice ot []byte: first is dir entry, rest are continuation areas
// returns:
// slice of []byte
func dirEntryExtensionsToBytes(extensions []directoryEntrySystemUseExtension, maxSize int, blocksize int64, ceBlocks []uint32) ([][]byte, error) {
// output directory entries
var (
err error
b []byte
continuedBytes [][]byte
)
ret := make([][]byte, 0)
for i, e := range extensions {
b2 := e.Bytes()
// do we overrun the size
if len(b)+len(b2) > maxSize {
// we need an extension, so pop the first one off the slice, use it as a pointer, and pass the rest
nextCeBlock := ceBlocks[0]
continuedBytes, err = dirEntryExtensionsToBytes(extensions[i:], int(blocksize), blocksize, ceBlocks[1:])
if err != nil {
return nil, err
}
// use a continuation entry to point at the overflow area
ce := &directoryEntrySystemUseContinuation{
offset: 0,
location: nextCeBlock,
continuationLength: uint32(len(continuedBytes[0])),
}
b = append(b, ce.Bytes()...)
break
} else {
b = append(b, b2...)
}
}
ret = append(ret, b)
if len(continuedBytes) > 0 {
ret = append(ret, continuedBytes...)
}
return ret, nil
}
func dirEntryFromBytes(b []byte, ext []suspExtension) (*directoryEntry, error) {
// has to be at least 34 bytes
if len(b) < int(directoryEntryMinSize) {
return nil, fmt.Errorf("Cannot read directoryEntry from %d bytes, fewer than minimum of %d bytes", len(b), directoryEntryMinSize)
}
recordSize := b[0]
// what if it is not the right size?
if len(b) != int(recordSize) {
return nil, fmt.Errorf("directoryEntry should be size %d bytes according to first byte, but have %d bytes", recordSize, len(b))
}
extAttrSize := b[1]
location := binary.LittleEndian.Uint32(b[2:6])
size := binary.LittleEndian.Uint32(b[10:14])
creation := bytesToTime(b[18:25])
// get the flags
flagByte := b[25]
isHidden := flagByte&0x01 == 0x01
isSubdirectory := flagByte&0x02 == 0x02
isAssociated := flagByte&0x04 == 0x04
hasExtendedAttrs := flagByte&0x08 == 0x08
hasOwnerGroupPermissions := flagByte&0x10 == 0x10
hasMoreEntries := flagByte&0x80 == 0x80
volumeSequence := binary.LittleEndian.Uint16(b[28:30])
// the name length includes the ";1" suffix; a padding byte follows the name when its length is even
namelen := b[32]
nameLenWithPadding := namelen
// get the filename itself
nameBytes := b[33 : 33+namelen]
if namelen > 1 && namelen%2 == 0 {
nameLenWithPadding++
}
var filename string
var isSelf, isParent bool
switch {
case namelen == 1 && nameBytes[0] == 0x00:
filename = ""
isSelf = true
case namelen == 1 && nameBytes[0] == 0x01:
filename = ""
isParent = true
default:
filename = string(nameBytes)
}
// and now for extensions in the system use area
suspFields := make([]directoryEntrySystemUseExtension, 0)
if len(b) > 33+int(nameLenWithPadding) {
var err error
suspFields, err = parseDirectoryEntryExtensions(b[33+nameLenWithPadding:], ext)
if err != nil {
return nil, fmt.Errorf("Unable to parse directory entry extensions: %v", err)
}
}
return &directoryEntry{
extAttrSize: extAttrSize,
location: location,
size: size,
creation: creation,
isHidden: isHidden,
isSubdirectory: isSubdirectory,
isAssociated: isAssociated,
hasExtendedAttrs: hasExtendedAttrs,
hasOwnerGroupPermissions: hasOwnerGroupPermissions,
hasMoreEntries: hasMoreEntries,
isSelf: isSelf,
isParent: isParent,
volumeSequence: volumeSequence,
filename: filename,
extensions: suspFields,
}, nil
}
// parseDirEntry takes the bytes of a single directory entry
// and parses it, including pulling in continuation entry bytes
func parseDirEntry(b []byte, f *FileSystem) (*directoryEntry, error) {
// empty entry means nothing more to read - this might not actually be accurate, but work with it for now
entryLen := int(b[0])
if entryLen == 0 {
return nil, nil
}
// get the bytes
de, err := dirEntryFromBytes(b[:entryLen], f.suspExtensions)
if err != nil {
return nil, fmt.Errorf("Invalid directory entry : %v", err)
}
de.filesystem = f
if f.suspEnabled && len(de.extensions) > 0 {
// if the last entry is a continuation SUSP entry and SUSP is enabled, we need to follow and parse it,
// because the extensions can form a linked list: directory entry -> CE area -> CE area ...
// keep looping until no continuation entry remains
for {
if ce, ok := de.extensions[len(de.extensions)-1].(directoryEntrySystemUseContinuation); ok {
location := int64(ce.Location())
size := int(ce.ContinuationLength())
offset := int64(ce.Offset())
// read it from disk
continuationBytes := make([]byte, size)
read, err := f.file.ReadAt(continuationBytes, location*f.blocksize+offset)
if err != nil {
return nil, fmt.Errorf("Error reading continuation entry data at %d: %v", location, err)
}
if read != size {
return nil, fmt.Errorf("Read continuation entry data %d bytes instead of expected %d", read, size)
}
// parse and append
entries, err := parseDirectoryEntryExtensions(continuationBytes, f.suspExtensions)
if err != nil {
return nil, fmt.Errorf("Error parsing continuation entry data at %d: %v", location, err)
}
// remove the CE one from the extensions array and append our new ones
de.extensions = append(de.extensions[:len(de.extensions)-1], entries...)
} else {
break
}
}
}
return de, nil
}
// parseDirEntries takes all of the bytes in a special file (i.e. a directory)
// and gets all of the DirectoryEntry for that directory
// this is, essentially, the equivalent of `ls -l` or if you prefer `dir`
func parseDirEntries(b []byte, f *FileSystem) ([]*directoryEntry, error) {
dirEntries := make([]*directoryEntry, 0, 20)
count := 0
for i := 0; i < len(b); count++ {
// empty entry means nothing more to read - this might not actually be accurate, but work with it for now
entryLen := int(b[i+0])
if entryLen == 0 {
i += (int(f.blocksize) - i%int(f.blocksize))
continue
}
de, err := parseDirEntry(b[i+0:i+entryLen], f)
if err != nil {
return nil, fmt.Errorf("Invalid directory entry %d at byte %d: %v", count, i, err)
}
// some extensions do directory relocation, so check if we should ignore this entry
if f.suspEnabled {
for _, e := range f.suspExtensions {
if e.Relocated(de) {
de = nil
break
}
}
}
if de != nil {
dirEntries = append(dirEntries, de)
}
i += entryLen
}
return dirEntries, nil
}
// get the location of a particular path relative to this directory
func (de *directoryEntry) getLocation(p string) (uint32, uint32, error) {
// break path down into parts and levels
parts, err := splitPath(p)
if err != nil {
return 0, 0, fmt.Errorf("Could not parse path: %v", err)
}
var location, size uint32
if len(parts) == 0 {
location = de.location
size = de.size
} else {
current := parts[0]
// read the directory bytes
dirb := make([]byte, de.size, de.size)
n, err := de.filesystem.file.ReadAt(dirb, int64(de.location)*de.filesystem.blocksize)
if err != nil {
return 0, 0, fmt.Errorf("Could not read directory: %v", err)
}
if n != len(dirb) {
return 0, 0, fmt.Errorf("Read %d bytes instead of expected %d", n, len(dirb))
}
// parse those entries
dirEntries, err := parseDirEntries(dirb, de.filesystem)
if err != nil {
return 0, 0, fmt.Errorf("Could not parse directory: %v", err)
}
// find the entry among the children that has the desired name
for _, entry := range dirEntries {
// do we have an alternate name?
// only care if not self or parent entry
checkFilename := entry.filename
if de.filesystem.suspEnabled && !entry.isSelf && !entry.isParent {
for _, e := range de.filesystem.suspExtensions {
filename, err2 := e.GetFilename(entry)
switch {
case err2 != nil && err2 == ErrSuspFilenameUnsupported:
continue
case err2 != nil:
return 0, 0, fmt.Errorf("Extension %s count not find a filename property: %v", e.ID(), err2)
default:
checkFilename = filename
break
}
}
}
if checkFilename == current {
if len(parts) > 1 {
// just dig down further - what if it looks like a file, but is a relocated directory?
if !entry.isSubdirectory && de.filesystem.suspEnabled && !entry.isSelf && !entry.isParent {
for _, e := range de.filesystem.suspExtensions {
location2 := e.GetDirectoryLocation(entry)
if location2 != 0 {
// need to get the directory entry for the child
dirb := make([]byte, de.filesystem.blocksize)
n, err2 := de.filesystem.file.ReadAt(dirb, int64(location2)*de.filesystem.blocksize)
if err2 != nil {
return 0, 0, fmt.Errorf("Could not read bytes of relocated directory %s from block %d: %v", checkFilename, location2, err2)
}
if n != len(dirb) {
return 0, 0, fmt.Errorf("Read %d bytes instead of expected %d for relocated directory %s from block %d: %v", n, len(dirb), checkFilename, location2, err)
}
// get the size of the actual directory entry
size2 := dirb[0]
entry, err2 = parseDirEntry(dirb[:size2], de.filesystem)
if err2 != nil {
return 0, 0, fmt.Errorf("Error converting bytes to a directory entry for relocated directory %s from block %d: %v", checkFilename, location2, err2)
}
break
}
}
}
location, size, err = entry.getLocation(path.Join(parts[1:]...))
if err != nil {
return 0, 0, fmt.Errorf("Could not get location: %v", err)
}
} else {
// this is the final one, we found it, keep it
location = entry.location
size = entry.size
}
break
}
}
}
return location, size, nil
}
// Name() string // base name of the file
func (de *directoryEntry) Name() string {
name := de.filename
if de.filesystem.suspEnabled {
for _, e := range de.filesystem.suspExtensions {
filename, err := e.GetFilename(de)
switch {
case err != nil:
continue
default:
name = filename
break
}
}
}
// plain iso9660 filenames have the ';1' stripped off, as well as any leading or trailing '.'
if !de.IsDir() {
name = strings.TrimSuffix(name, ";1")
name = strings.TrimSuffix(name, ".")
name = strings.TrimPrefix(name, ".")
}
return name
}
// Size() int64 // length in bytes for regular files; system-dependent for others
func (de *directoryEntry) Size() int64 {
return int64(de.size)
}
// Mode() FileMode // file mode bits
func (de *directoryEntry) Mode() os.FileMode {
return 0755
}
// ModTime() time.Time // modification time
func (de *directoryEntry) ModTime() time.Time {
return de.creation
}
// IsDir() bool // abbreviation for Mode().IsDir()
func (de *directoryEntry) IsDir() bool {
return de.isSubdirectory
}
// Sys() interface{} // underlying data source (can return nil)
func (de *directoryEntry) Sys() interface{} {
return nil
}
// utilities
func bytesToTime(b []byte) time.Time {
year := int(b[0])
month := time.Month(b[1])
date := int(b[2])
hour := int(b[3])
minute := int(b[4])
second := int(b[5])
offset := int(int8(b[6]))
location := time.FixedZone("iso", offset*15*60)
return time.Date(year+1900, month, date, hour, minute, second, 0, location)
}
func timeToBytes(t time.Time) []byte {
year := t.Year()
month := t.Month()
date := t.Day()
second := t.Second()
minute := t.Minute()
hour := t.Hour()
_, offset := t.Zone()
b := make([]byte, 7, 7)
b[0] = byte(year - 1900)
b[1] = byte(month)
b[2] = byte(date)
b[3] = byte(hour)
b[4] = byte(minute)
b[5] = byte(second)
b[6] = byte(int8(offset / 60 / 15))
return b
}
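// A quick sketch of the 7-byte timestamp round trip above. The zone is an
// arbitrary example (+03:00, i.e. twelve 15-minute units); the function is
// illustrative and not part of the original package.
func exampleTimestampRoundTrip() {
	t := time.Date(2020, time.April, 24, 11, 23, 51, 0, time.FixedZone("iso", 3*60*60))
	b := timeToBytes(t) // [120 4 24 11 23 51 12]: year-1900, month, day, hour, minute, second, offset in 15-minute units
	_ = bytesToTime(b)  // reconstructs the same wall-clock time in a fixed zone
}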
// validateFilename checks that a name contains only valid d-characters
func validateFilename(s string, isDir bool) error {
	var err error
	if isDir {
		// directory names allow up to 30 characters of A-Z,0-9,_
		re := regexp.MustCompile("^[A-Z0-9_]{1,30}$")
		if !re.MatchString(s) {
			err = fmt.Errorf("Directory name must be of up to 30 characters from A-Z0-9_")
		}
	} else {
		// filenames allow characters of A-Z,0-9,_, plus an optional '.' and an extension of the same characters, and must end with ";1"
		re := regexp.MustCompile(`^[A-Z0-9_]+(\.[A-Z0-9_]*)?;1$`)
switch {
case !re.MatchString(s):
err = fmt.Errorf("File name must be of characters from A-Z0-9_, followed by an optional '.' and an extension of the same characters")
case len(strings.Replace(s, ".", "", -1)) > 30:
err = fmt.Errorf("File name must be at most 30 characters, not including the separator '.'")
}
}
return err
}
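// Illustrative calls and their expected results under the rules above; the
// names are arbitrary examples, and the function is not part of the package.
func exampleValidateFilenames() {
	_ = validateFilename("README.TXT;1", false) // nil: d-characters plus the ";1" suffix
	_ = validateFilename("readme.txt;1", false) // error: lower-case is not a valid d-character
	_ = validateFilename("BOOT", true)          // nil: valid directory name
	_ = validateFilename("BOOT.CAT", true)      // error: '.' is not allowed in a directory name
}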
// convert a string to a byte array, if all characters are valid ascii
func stringToASCIIBytes(s string) ([]byte, error) {
length := len(s)
b := make([]byte, length, length)
	// work through the string one rune at a time
	r := []rune(s)
for i := 0; i < length; i++ {
val := int(r[i])
// we can only handle values that fit in a single byte
if val > 255 {
return nil, fmt.Errorf("Non-ASCII character in name: %s", s)
}
b[i] = byte(val)
}
return b, nil
}
// converts a string into upper-case with only valid characters
func uCaseValid(name string) string {
// easiest way to do this is to go through the name one char at a time
r := []rune(name)
r2 := make([]rune, 0, len(r))
for _, val := range r {
switch {
case (0x30 <= val && val <= 0x39) || (0x41 <= val && val <= 0x5a) || (val == 0x7e):
// naturally valid characters
r2 = append(r2, val)
case (0x61 <= val && val <= 0x7a):
// lower-case characters should be upper-cased
r2 = append(r2, val-32)
case val == ' ' || val == '.':
// remove spaces and periods
continue
default:
// replace the rest with _
r2 = append(r2, '_')
}
}
return string(r2)
}
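// For example, uCaseValid("my-file v1.2") returns "MY_FILEV12": letters are
// upper-cased, '-' becomes '_', and the space and '.' are dropped.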

View File

@@ -0,0 +1,558 @@
package iso9660
import (
"bytes"
"encoding/binary"
"errors"
"fmt"
)
const (
suspExtensionContinuationArea = "CE"
suspExtensionPaddingField = "PD"
suspExtensionSharingProtocolIndicator = "SP"
suspExtensionSharingProtocolTerminator = "ST"
suspExtensionExtensionsReference = "ER"
suspExtensionExtensionsSelector = "ES"
suspExtensionCheckBytes = 0xbeef
)
var (
// ErrSuspNoHandler error to show gracefully that we do not have a handler for this signature. Opposed to processing error
ErrSuspNoHandler = errors.New("NoHandler")
// ErrSuspFilenameUnsupported error to show that this extension does not support searching by path
ErrSuspFilenameUnsupported = errors.New("FilenameUnsupported")
// ErrSuspRelocatedDirectoryUnsupported error to indicate that this extension does not support relocated directories
ErrSuspRelocatedDirectoryUnsupported = errors.New("RelocatedDirectoryUnsupported")
)
// suspExtension master for an extension that is registered with an "ER" entry
type suspExtension interface {
ID() string
Process(string, []byte) (directoryEntrySystemUseExtension, error)
GetFilename(*directoryEntry) (string, error)
Relocated(*directoryEntry) bool
UsePathtable() bool
GetDirectoryLocation(*directoryEntry) uint32
Descriptor() string
Source() string
Version() uint8
GetFileExtensions(string, bool, bool) ([]directoryEntrySystemUseExtension, error)
GetFinalizeExtensions(*finalizeFileInfo) ([]directoryEntrySystemUseExtension, error)
Relocatable() bool
Relocate(map[string]*finalizeFileInfo) ([]*finalizeFileInfo, map[string]*finalizeFileInfo, error)
}
type directoryEntrySystemUseExtension interface {
Equal(directoryEntrySystemUseExtension) bool
Signature() string
Length() int
Version() uint8
Data() []byte
Bytes() []byte
Continuable() bool // if this one is continuable to the next one of the same signature
Merge([]directoryEntrySystemUseExtension) directoryEntrySystemUseExtension // merge
}
// directoryEntrySystemUseExtensionRaw raw holder, common to all
type directoryEntrySystemUseExtensionRaw struct {
signature string
length uint8
version uint8
data []byte
}
func (d directoryEntrySystemUseExtensionRaw) Equal(o directoryEntrySystemUseExtension) bool {
t, ok := o.(directoryEntrySystemUseExtensionRaw)
return ok && t.signature == d.signature && t.length == d.length && t.version == d.version && bytes.Compare(d.data, t.data) == 0
}
func (d directoryEntrySystemUseExtensionRaw) Signature() string {
return d.signature
}
func (d directoryEntrySystemUseExtensionRaw) Length() int {
return int(d.length)
}
func (d directoryEntrySystemUseExtensionRaw) Version() uint8 {
return d.version
}
func (d directoryEntrySystemUseExtensionRaw) Data() []byte {
return d.data
}
func (d directoryEntrySystemUseExtensionRaw) Bytes() []byte {
ret := make([]byte, 4)
copy(ret[0:2], []byte(d.Signature()))
ret[2] = d.length
ret[3] = d.Version()
ret = append(ret, d.Data()...)
return ret
}
func (d directoryEntrySystemUseExtensionRaw) Continuable() bool {
return false
}
func (d directoryEntrySystemUseExtensionRaw) Merge([]directoryEntrySystemUseExtension) directoryEntrySystemUseExtension {
return nil
}
func parseSystemUseExtensionRaw(b []byte) (directoryEntrySystemUseExtension, error) {
size := len(b)
signature := string(b[:2])
version := b[3]
data := make([]byte, 0)
if size > 4 {
data = b[4:]
}
return directoryEntrySystemUseExtensionRaw{
signature: signature,
length: uint8(size),
version: version,
data: data,
}, nil
}
// directoryEntrySystemUseExtensionSharingProtocolIndicator single appearance in root entry
type directoryEntrySystemUseExtensionSharingProtocolIndicator struct {
skipBytes uint8
}
func (d directoryEntrySystemUseExtensionSharingProtocolIndicator) Equal(o directoryEntrySystemUseExtension) bool {
t, ok := o.(directoryEntrySystemUseExtensionSharingProtocolIndicator)
return ok && t == d
}
func (d directoryEntrySystemUseExtensionSharingProtocolIndicator) Signature() string {
return suspExtensionSharingProtocolIndicator
}
func (d directoryEntrySystemUseExtensionSharingProtocolIndicator) Length() int {
return 7
}
func (d directoryEntrySystemUseExtensionSharingProtocolIndicator) Version() uint8 {
return 1
}
func (d directoryEntrySystemUseExtensionSharingProtocolIndicator) Data() []byte {
ret := make([]byte, 3)
binary.BigEndian.PutUint16(ret[0:2], suspExtensionCheckBytes)
ret[2] = d.skipBytes
return ret
}
func (d directoryEntrySystemUseExtensionSharingProtocolIndicator) Bytes() []byte {
ret := make([]byte, 4)
copy(ret[0:2], []byte(suspExtensionSharingProtocolIndicator))
ret[2] = uint8(d.Length())
ret[3] = d.Version()
ret = append(ret, d.Data()...)
return ret
}
func (d directoryEntrySystemUseExtensionSharingProtocolIndicator) SkipBytes() uint8 {
return d.skipBytes
}
func (d directoryEntrySystemUseExtensionSharingProtocolIndicator) Continuable() bool {
return false
}
func (d directoryEntrySystemUseExtensionSharingProtocolIndicator) Merge([]directoryEntrySystemUseExtension) directoryEntrySystemUseExtension {
return nil
}
func parseSystemUseExtensionSharingProtocolIndicator(b []byte) (directoryEntrySystemUseExtension, error) {
targetSize := 7
if len(b) != targetSize {
return nil, fmt.Errorf("SP extension must be %d bytes, but received %d", targetSize, len(b))
}
size := b[2]
if size != uint8(targetSize) {
return nil, fmt.Errorf("SP extension must be %d bytes, but byte 2 indicated %d", targetSize, size)
}
version := b[3]
if version != 1 {
return nil, fmt.Errorf("SP extension must be version 1, was %d", version)
}
checkBytes := binary.BigEndian.Uint16(b[4:6])
if checkBytes != suspExtensionCheckBytes {
return nil, fmt.Errorf("SP extension must had mismatched check bytes, received % x instead of % x", checkBytes, suspExtensionCheckBytes)
}
return directoryEntrySystemUseExtensionSharingProtocolIndicator{
skipBytes: b[6],
}, nil
}
// directoryEntrySystemUseExtensionPadding padding
type directoryEntrySystemUseExtensionPadding struct {
length uint8
}
func (d directoryEntrySystemUseExtensionPadding) Equal(o directoryEntrySystemUseExtension) bool {
t, ok := o.(directoryEntrySystemUseExtensionPadding)
return ok && t == d
}
func (d directoryEntrySystemUseExtensionPadding) Signature() string {
return suspExtensionPaddingField
}
func (d directoryEntrySystemUseExtensionPadding) Length() int {
return int(d.length)
}
func (d directoryEntrySystemUseExtensionPadding) Version() uint8 {
return 1
}
func (d directoryEntrySystemUseExtensionPadding) Data() []byte {
ret := make([]byte, d.Length()-4)
return ret
}
func (d directoryEntrySystemUseExtensionPadding) Bytes() []byte {
ret := make([]byte, 4)
copy(ret[0:2], []byte(suspExtensionPaddingField))
ret[2] = d.length
ret[3] = d.Version()
ret = append(ret, d.Data()...)
return ret
}
func (d directoryEntrySystemUseExtensionPadding) Continuable() bool {
return false
}
func (d directoryEntrySystemUseExtensionPadding) Merge([]directoryEntrySystemUseExtension) directoryEntrySystemUseExtension {
return nil
}
func parseSystemUseExtensionPadding(b []byte) (directoryEntrySystemUseExtension, error) {
size := b[2]
if int(size) != len(b) {
return nil, fmt.Errorf("PD extension received %d bytes, but byte 2 indicated %d", len(b), size)
}
version := b[3]
if version != 1 {
return nil, fmt.Errorf("PD extension must be version 1, was %d", version)
}
return directoryEntrySystemUseExtensionPadding{
length: size,
}, nil
}
// directoryEntrySystemUseTerminator termination
type directoryEntrySystemUseTerminator struct {
}
func (d directoryEntrySystemUseTerminator) Equal(o directoryEntrySystemUseExtension) bool {
t, ok := o.(directoryEntrySystemUseTerminator)
return ok && t == d
}
func (d directoryEntrySystemUseTerminator) Signature() string {
return suspExtensionSharingProtocolTerminator
}
func (d directoryEntrySystemUseTerminator) Length() int {
return 4
}
func (d directoryEntrySystemUseTerminator) Version() uint8 {
return 1
}
func (d directoryEntrySystemUseTerminator) Data() []byte {
return []byte{}
}
func (d directoryEntrySystemUseTerminator) Bytes() []byte {
ret := make([]byte, 4)
copy(ret[0:2], []byte(suspExtensionSharingProtocolTerminator))
ret[2] = uint8(d.Length())
ret[3] = d.Version()
return ret
}
func (d directoryEntrySystemUseTerminator) Continuable() bool {
return false
}
func (d directoryEntrySystemUseTerminator) Merge([]directoryEntrySystemUseExtension) directoryEntrySystemUseExtension {
return nil
}
func parseSystemUseExtensionTerminator(b []byte) (directoryEntrySystemUseExtension, error) {
targetSize := 4
if len(b) != targetSize {
return nil, fmt.Errorf("ST extension must be %d bytes, but received %d", targetSize, len(b))
}
size := b[2]
if size != uint8(targetSize) {
return nil, fmt.Errorf("ST extension must be %d bytes, but byte 2 indicated %d", targetSize, size)
}
version := b[3]
if version != 1 {
return nil, fmt.Errorf("ST extension must be version 1, was %d", version)
}
return directoryEntrySystemUseTerminator{}, nil
}
// directoryEntrySystemUseContinuation continuation area pointer
type directoryEntrySystemUseContinuation struct {
location uint32
offset uint32
continuationLength uint32
}
func (d directoryEntrySystemUseContinuation) Equal(o directoryEntrySystemUseExtension) bool {
t, ok := o.(directoryEntrySystemUseContinuation)
return ok && t == d
}
func (d directoryEntrySystemUseContinuation) Signature() string {
return suspExtensionContinuationArea
}
func (d directoryEntrySystemUseContinuation) Length() int {
return 28
}
func (d directoryEntrySystemUseContinuation) Version() uint8 {
return 1
}
func (d directoryEntrySystemUseContinuation) Data() []byte {
b := make([]byte, 24)
binary.LittleEndian.PutUint32(b[0:4], d.location)
binary.BigEndian.PutUint32(b[4:8], d.location)
binary.LittleEndian.PutUint32(b[8:12], d.offset)
binary.BigEndian.PutUint32(b[12:16], d.offset)
binary.LittleEndian.PutUint32(b[16:20], d.continuationLength)
binary.BigEndian.PutUint32(b[20:24], d.continuationLength)
return b
}
func (d directoryEntrySystemUseContinuation) Bytes() []byte {
ret := make([]byte, 4)
copy(ret[0:2], []byte(suspExtensionContinuationArea))
ret[2] = uint8(d.Length())
ret[3] = d.Version()
ret = append(ret, d.Data()...)
return ret
}
func (d directoryEntrySystemUseContinuation) Location() uint32 {
return d.location
}
func (d directoryEntrySystemUseContinuation) Offset() uint32 {
return d.offset
}
func (d directoryEntrySystemUseContinuation) ContinuationLength() uint32 {
return d.continuationLength
}
func (d directoryEntrySystemUseContinuation) Continuable() bool {
return false
}
func (d directoryEntrySystemUseContinuation) Merge([]directoryEntrySystemUseExtension) directoryEntrySystemUseExtension {
return nil
}
func parseSystemUseExtensionContinuationArea(b []byte) (directoryEntrySystemUseExtension, error) {
targetSize := 28
if len(b) != targetSize {
return nil, fmt.Errorf("CE extension must be %d bytes, but received %d", targetSize, len(b))
}
size := b[2]
if size != uint8(targetSize) {
return nil, fmt.Errorf("CE extension must be %d bytes, but byte 2 indicated %d", targetSize, size)
}
version := b[3]
if version != 1 {
return nil, fmt.Errorf("CE extension must be version 1, was %d", version)
}
location := binary.LittleEndian.Uint32(b[4:8])
offset := binary.LittleEndian.Uint32(b[12:16])
continuationLength := binary.LittleEndian.Uint32(b[20:24])
return directoryEntrySystemUseContinuation{
location: location,
offset: offset,
continuationLength: continuationLength,
}, nil
}
// directoryEntrySystemUseExtensionSelector extensions selector
type directoryEntrySystemUseExtensionSelector struct {
sequence uint8
}
func (d directoryEntrySystemUseExtensionSelector) Equal(o directoryEntrySystemUseExtension) bool {
t, ok := o.(directoryEntrySystemUseExtensionSelector)
return ok && t == d
}
func (d directoryEntrySystemUseExtensionSelector) Signature() string {
return suspExtensionExtensionsSelector
}
func (d directoryEntrySystemUseExtensionSelector) Length() int {
return 5
}
func (d directoryEntrySystemUseExtensionSelector) Version() uint8 {
return 1
}
func (d directoryEntrySystemUseExtensionSelector) Data() []byte {
return []byte{d.sequence}
}
func (d directoryEntrySystemUseExtensionSelector) Bytes() []byte {
ret := make([]byte, 4)
copy(ret[0:2], []byte(suspExtensionExtensionsSelector))
ret[2] = uint8(d.Length())
ret[3] = d.Version()
ret = append(ret, d.Data()...)
return ret
}
func (d directoryEntrySystemUseExtensionSelector) Sequence() uint8 {
return d.sequence
}
func (d directoryEntrySystemUseExtensionSelector) Continuable() bool {
return false
}
func (d directoryEntrySystemUseExtensionSelector) Merge([]directoryEntrySystemUseExtension) directoryEntrySystemUseExtension {
return nil
}
func parseSystemUseExtensionExtensionsSelector(b []byte) (directoryEntrySystemUseExtension, error) {
targetSize := 5
if len(b) != targetSize {
return nil, fmt.Errorf("ES extension must be %d bytes, but received %d", targetSize, len(b))
}
size := b[2]
if size != uint8(targetSize) {
return nil, fmt.Errorf("ES extension must be %d bytes, but byte 2 indicated %d", targetSize, size)
}
version := b[3]
if version != 1 {
return nil, fmt.Errorf("ES extension must be version 1, was %d", version)
}
sequence := b[4]
return directoryEntrySystemUseExtensionSelector{
sequence: sequence,
}, nil
}
// directoryEntrySystemUseExtensionReference extensions reference
type directoryEntrySystemUseExtensionReference struct {
id string
descriptor string
source string
extensionVersion uint8
}
func (d directoryEntrySystemUseExtensionReference) Equal(o directoryEntrySystemUseExtension) bool {
t, ok := o.(directoryEntrySystemUseExtensionReference)
return ok && t == d
}
func (d directoryEntrySystemUseExtensionReference) Signature() string {
return suspExtensionExtensionsReference
}
func (d directoryEntrySystemUseExtensionReference) Length() int {
return 8 + len(d.id) + len(d.descriptor) + len(d.source)
}
func (d directoryEntrySystemUseExtensionReference) Version() uint8 {
return 1
}
func (d directoryEntrySystemUseExtensionReference) Data() []byte {
ret := make([]byte, 4)
ret[0] = uint8(len(d.id))
ret[1] = uint8(len(d.descriptor))
ret[2] = uint8(len(d.source))
ret[3] = d.extensionVersion
ret = append(ret, []byte(d.id)...)
ret = append(ret, []byte(d.descriptor)...)
ret = append(ret, []byte(d.source)...)
return ret
}
func (d directoryEntrySystemUseExtensionReference) Bytes() []byte {
ret := make([]byte, 4)
copy(ret[0:2], []byte(suspExtensionExtensionsReference))
ret[2] = uint8(d.Length())
ret[3] = d.Version()
ret = append(ret, d.Data()...)
return ret
}
func (d directoryEntrySystemUseExtensionReference) ExtensionVersion() uint8 {
return d.extensionVersion
}
func (d directoryEntrySystemUseExtensionReference) ExtensionID() string {
return d.id
}
func (d directoryEntrySystemUseExtensionReference) ExtensionDescriptor() string {
return d.descriptor
}
func (d directoryEntrySystemUseExtensionReference) ExtensionSource() string {
return d.source
}
func (d directoryEntrySystemUseExtensionReference) Continuable() bool {
return false
}
func (d directoryEntrySystemUseExtensionReference) Merge([]directoryEntrySystemUseExtension) directoryEntrySystemUseExtension {
return nil
}
func parseSystemUseExtensionExtensionsReference(b []byte) (directoryEntrySystemUseExtension, error) {
size := b[2]
if len(b) != int(size) {
return nil, fmt.Errorf("ER extension byte 2 indicated size of %d bytes, but received %d", size, len(b))
}
version := b[3]
if version != 1 {
return nil, fmt.Errorf("EE extension must be version 1, was %d", version)
}
idSize := int(b[4])
descriptorSize := int(b[5])
sourceSize := int(b[6])
extVersion := b[7]
idStart := 8
descriptorStart := 8 + idSize
sourceStart := 8 + idSize + descriptorSize
id := string(b[idStart : idStart+idSize])
descriptor := string(b[descriptorStart : descriptorStart+descriptorSize])
source := string(b[sourceStart : sourceStart+sourceSize])
return directoryEntrySystemUseExtensionReference{
id: id,
descriptor: descriptor,
source: source,
extensionVersion: extVersion,
}, nil
}
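// An illustrative round trip of an ER entry through the writer and parser
// above. "RRIP_1991A" is the identifier commonly used for Rock Ridge 1.09;
// the descriptor and source strings here are placeholders, not the official
// text.
func exampleExtensionsReferenceRoundTrip() error {
	er := directoryEntrySystemUseExtensionReference{
		id:               "RRIP_1991A",
		descriptor:       "ROCK RIDGE EXAMPLE DESCRIPTOR",
		source:           "EXAMPLE SOURCE",
		extensionVersion: 1,
	}
	parsed, err := parseSystemUseExtensionExtensionsReference(er.Bytes())
	if err != nil {
		return err
	}
	_ = parsed // equal to er
	return nil
}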
var suspExtensionParser = map[string]func([]byte) (directoryEntrySystemUseExtension, error){
// base extensions
suspExtensionSharingProtocolIndicator: parseSystemUseExtensionSharingProtocolIndicator,
suspExtensionSharingProtocolTerminator: parseSystemUseExtensionTerminator,
suspExtensionExtensionsSelector: parseSystemUseExtensionExtensionsSelector,
suspExtensionExtensionsReference: parseSystemUseExtensionExtensionsReference,
suspExtensionPaddingField: parseSystemUseExtensionPadding,
suspExtensionContinuationArea: parseSystemUseExtensionContinuationArea,
}
// parseDirectoryEntryExtensions parse system use extensions area of a directory entry
func parseDirectoryEntryExtensions(b []byte, handlers []suspExtension) ([]directoryEntrySystemUseExtension, error) {
// and now for extensions in the system use area
entries := make([]directoryEntrySystemUseExtension, 0)
lastEntryBySignature := map[string]directoryEntrySystemUseExtension{}
// minimum size of 4 bytes for any SUSP entry
for i := 0; i+4 < len(b); {
// get the indicator
signature := string(b[i : i+2])
size := b[i+2]
suspBytes := b[i : i+int(size)]
var (
entry directoryEntrySystemUseExtension
err error
)
// if we have a parser, use it, else use the raw parser
if parser, ok := suspExtensionParser[signature]; ok {
entry, err = parser(suspBytes)
if err != nil {
return nil, fmt.Errorf("Error parsing %s extension at byte position %d: %v", signature, i, err)
}
} else {
// go through each extension we have and see if it can process
for _, ext := range handlers {
entry, err = ext.Process(signature, suspBytes)
if err != nil && err != ErrSuspNoHandler {
return nil, fmt.Errorf("SUSP Extension handler %s error processing extension %s: %v", ext.ID(), signature, err)
}
if err == nil {
break
}
}
if entry == nil {
entry, _ = parseSystemUseExtensionRaw(suspBytes)
}
}
		// we now have the entry - if there is a prior continuable one of the same signature, merge into it in place
		if last, ok := lastEntryBySignature[signature]; ok {
			merged := last.Merge([]directoryEntrySystemUseExtension{entry})
			// replace the prior partial entry rather than appending a duplicate
			for j := len(entries) - 1; j >= 0; j-- {
				if entries[j] == last {
					entries[j] = merged
					break
				}
			}
			if merged.Continuable() {
				lastEntryBySignature[signature] = merged
			} else {
				delete(lastEntryBySignature, signature)
			}
		} else {
			if entry.Continuable() {
				// remember the first continuable entry of a signature so later ones can merge into it
				lastEntryBySignature[signature] = entry
			}
			entries = append(entries, entry)
		}
		i += int(size)
}
return entries, nil
}
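// A minimal sketch of feeding a raw system use area through the parser above:
// an SP indicator followed by an ST terminator, with no extension handlers
// registered. Illustrative only, not part of the original package.
func exampleParseSystemUseArea() error {
	area := append(
		directoryEntrySystemUseExtensionSharingProtocolIndicator{skipBytes: 0}.Bytes(),
		directoryEntrySystemUseTerminator{}.Bytes()...,
	)
	entries, err := parseDirectoryEntryExtensions(area, nil)
	if err != nil {
		return err
	}
	_ = entries // entries[0] is the SP entry, entries[1] the ST terminator
	return nil
}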

View File

@@ -0,0 +1,10 @@
// Package iso9660 provides utilities to interact with, manipulate and create an iso9660 filesystem on a block device or
// a disk image.
//
// Reference documentation
// ISO9660 https://wiki.osdev.org/ISO_9660
// ISO9660 / ECMA-119 http://www.ecma-international.org/publications/files/ECMA-ST/Ecma-119.pdf
// System Use Sharing Protocol http://cdrtools.sourceforge.net/private/RRIP/susp.ps
// Rock Ridge http://cdrtools.sourceforge.net/private/RRIP/rrip.ps
// El Torito https://wiki.osdev.org/El-Torito
package iso9660

View File

@@ -0,0 +1,136 @@
package iso9660
import (
"encoding/binary"
"github.com/diskfs/go-diskfs/partition/mbr"
"github.com/diskfs/go-diskfs/util"
)
const (
elToritoSector = 0x11
elToritoDefaultBlocks = 4
)
// Platform target booting system for a bootable iso
type Platform uint8
const (
// BIOS classic PC-BIOS x86
BIOS Platform = 0x0
// PPC PowerPC
PPC Platform = 0x1
// Mac Macintosh systems
Mac Platform = 0x2
// EFI newer extensible firmware interface
EFI Platform = 0xef
// default name for a boot catalog
elToritoDefaultCatalog = "BOOT.CAT"
elToritoDefaultCatalogRR = "boot.catalog"
)
// Emulation what emulation should be used for booting, normally none
type Emulation uint8
const (
// NoEmulation do not do any emulation, the normal mode
NoEmulation Emulation = 0
// Floppy12Emulation emulate a 1.2 M floppy
Floppy12Emulation Emulation = 1
// Floppy144Emulation emulate a 1.44 M floppy
Floppy144Emulation Emulation = 2
// Floppy288Emulation emulate a 2.88 M floppy
Floppy288Emulation Emulation = 3
// HardDiskEmulation emulate a hard disk
HardDiskEmulation Emulation = 4
)
// ElTorito boot structure for a disk
type ElTorito struct {
// BootCatalog path to save the boot catalog in the file structure. Defaults to "/BOOT.CAT" in iso9660 and "/boot.catalog" in Rock Ridge
BootCatalog string
// HideBootCatalog if the boot catalog should be hidden in the file system. Defaults to false
HideBootCatalog bool
// Entries list of ElToritoEntry boot entries
Entries []*ElToritoEntry
// Platform supported platform
Platform Platform
}
// ElToritoEntry single entry in an el torito boot catalog
type ElToritoEntry struct {
Platform Platform
Emulation Emulation
BootFile string
HideBootFile bool
LoadSegment uint16
// SystemType type of system the partition is, according to the MBR standard
SystemType mbr.Type
size uint16
location uint32
}
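// A hedged configuration sketch for a BIOS-bootable image; the boot file and
// catalog paths are hypothetical and must exist in the workspace when the
// filesystem is finalized.
func exampleElToritoConfig() *ElTorito {
	return &ElTorito{
		BootCatalog: "/boot.catalog",
		Platform:    BIOS,
		Entries: []*ElToritoEntry{
			{
				Platform:  BIOS,
				Emulation: NoEmulation,
				BootFile:  "/isolinux/isolinux.bin",
			},
		},
	}
}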
// generateCatalog generate the el torito boot catalog file
func (et *ElTorito) generateCatalog() ([]byte, error) {
b := make([]byte, 0)
b = append(b, et.validationEntry()...)
for i, e := range et.Entries {
// only subsequent entries have a header, not the first
if i != 0 {
b = append(b, e.headerBytes(i == len(et.Entries)-1, 1)...)
}
b = append(b, e.entryBytes()...)
}
return b, nil
}
func (et *ElTorito) validationEntry() []byte {
b := make([]byte, 0x20)
b[0] = 1
b[1] = byte(et.Platform)
copy(b[4:0x1c], []byte(util.AppNameVersion))
b[0x1e] = 0x55
b[0x1f] = 0xaa
// calculate checksum
checksum := uint16(0x0)
for i := 0; i < len(b); i += 2 {
checksum += binary.LittleEndian.Uint16(b[i : i+2])
}
binary.LittleEndian.PutUint16(b[0x1c:0x1e], -checksum)
return b
}
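// The checksum word above is chosen so that all sixteen little-endian uint16
// words of the 32-byte validation entry sum to zero mod 2^16; -checksum is
// two's-complement negation of an unsigned value, which Go permits. A sketch
// of the verification side (illustrative, not part of this package):
func validValidationEntry(b []byte) bool {
	var sum uint16
	for i := 0; i+2 <= len(b); i += 2 {
		sum += binary.LittleEndian.Uint16(b[i : i+2])
	}
	return sum == 0
}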
// toHeaderBytes provide header bytes
func (e *ElToritoEntry) headerBytes(last bool, entries uint16) []byte {
b := make([]byte, 0x20)
b[0] = 0x90
if last {
b[0] = 0x91
}
b[1] = byte(e.Platform)
binary.LittleEndian.PutUint16(b[2:4], entries)
// we do not use the section identifier for now
return b
}
// toBytes convert ElToritoEntry to appropriate entry bytes
func (e *ElToritoEntry) entryBytes() []byte {
	// number of emulated 512-byte sectors, rounded up for any partial sector
	blocks := e.size / 512
	if e.size%512 > 0 {
		blocks++
	}
b := make([]byte, 0x20)
b[0] = 0x88
b[1] = byte(e.Emulation)
binary.LittleEndian.PutUint16(b[2:4], e.LoadSegment)
// b[4] is system type, taken from byte 5 in the partition table in the boot image
b[4] = byte(e.SystemType)
// b[5] is unused and must be 0
// b[6:8] is the number of emulated (512-byte) sectors, i.e. the size of the file
binary.LittleEndian.PutUint16(b[6:8], blocks)
// b[8:0xc] is the location of the boot image on disk, in disk (2048) sectors
binary.LittleEndian.PutUint32(b[8:12], e.location)
// b[0xc] is selection criteria type. We do not yet support it, so leave as 0.
// b[0xd:] is vendor unique selection criteria. We do not yet support it, so leave as 0.
return b
}

View File

@@ -0,0 +1,77 @@
package iso9660
import (
"fmt"
"io"
)
// File represents a single file in an iso9660 filesystem
// it is NOT used when working in a workspace, where we just use the underlying OS
type File struct {
*directoryEntry
isReadWrite bool
isAppend bool
offset int64
}
// Read reads up to len(b) bytes from the File.
// It returns the number of bytes read and any error encountered.
// At end of file, Read returns 0, io.EOF.
// Read continues from the last known offset in the file from the previous Read or Seek;
// use Seek() to set the offset to a particular point.
func (fl *File) Read(b []byte) (int, error) {
// we have the DirectoryEntry, so we can get the starting location and size
// since iso9660 files are contiguous, we only need the starting location and size
// to get the entire file
fs := fl.filesystem
size := int(fl.size) - int(fl.offset)
location := int(fl.location)
maxRead := size
file := fs.file
// if there is nothing left to read, just return EOF
if size <= 0 {
return 0, io.EOF
}
// we stop when we hit the lesser of
// 1- len(b)
// 2- file end
if len(b) < maxRead {
maxRead = len(b)
}
// just read the requested number of bytes and change our offset
	n, err := file.ReadAt(b[0:maxRead], int64(location)*fs.blocksize+int64(fl.offset))
	if err != nil && err != io.EOF {
		return n, err
	}
	fl.offset += int64(n)
	var retErr error
	if fl.offset >= int64(fl.size) {
		retErr = io.EOF
	}
	return n, retErr
}
// Write writes len(b) bytes to the File.
// you cannot write to an iso, so this returns an error
func (fl *File) Write(p []byte) (int, error) {
return 0, fmt.Errorf("Cannot write to a read-only iso filesystem")
}
// Seek set the offset to a particular point in the file
func (fl *File) Seek(offset int64, whence int) (int64, error) {
newOffset := int64(0)
switch whence {
case io.SeekStart:
newOffset = offset
case io.SeekEnd:
newOffset = int64(fl.size) + offset
case io.SeekCurrent:
newOffset = fl.offset + offset
}
if newOffset < 0 {
return fl.offset, fmt.Errorf("Cannot set offset %d before start of file", offset)
}
fl.offset = newOffset
return fl.offset, nil
}
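// A usage sketch, assuming the FileSystem type in this package exposes
// OpenFile(path string, flag int) returning a file backed by the File type
// above; the path, header size, and buffer size are arbitrary examples.
func exampleReadFile(fs *FileSystem) ([]byte, error) {
	f, err := fs.OpenFile("/KERNEL;1", 0) // 0 == os.O_RDONLY
	if err != nil {
		return nil, err
	}
	// skip a hypothetical 2-byte header, then read up to 1KB
	if _, err := f.Seek(2, io.SeekStart); err != nil {
		return nil, err
	}
	buf := make([]byte, 1024)
	n, err := f.Read(buf)
	if err != nil && err != io.EOF {
		return nil, err
	}
	return buf[:n], nil
}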

View File

@@ -0,0 +1,855 @@
package iso9660
import (
"fmt"
"io"
"os"
"path"
"path/filepath"
"regexp"
"sort"
"strings"
"time"
"github.com/diskfs/go-diskfs/util"
)
const (
dataStartSector = 16
defaultVolumeIdentifier = "ISOIMAGE"
)
// fileInfoFinder a struct that represents an ability to find a path and return its entry
type fileInfoFinder interface {
findEntry(string) (*finalizeFileInfo, error)
}
// FinalizeOptions options to pass to finalize
type FinalizeOptions struct {
// RockRidge enable Rock Ridge extensions
RockRidge bool
// DeepDirectories allow directories deeper than 8
DeepDirectories bool
// ElTorito slice of el torito entry configs
ElTorito *ElTorito
// VolumeIdentifier custom volume name, defaults to "ISOIMAGE"
VolumeIdentifier string
}
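// A hedged usage sketch; the disk and filesystem creation calls outside this
// package are assumptions based on the surrounding go-diskfs library:
//
//	fs, err := d.CreateFilesystem(disk.FilesystemSpec{
//		Partition:   0,
//		FSType:      filesystem.TypeISO9660,
//		VolumeLabel: "cidata",
//	})
//	// ... copy files into the workspace via fs.OpenFile and io.Copy ...
//	iso := fs.(*FileSystem)
//	err = iso.Finalize(FinalizeOptions{RockRidge: true, VolumeIdentifier: "cidata"})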
// finalizeFileInfo is a file info useful for finalization
// fulfills os.FileInfo
// Name() string // base name of the file
// Size() int64 // length in bytes for regular files; system-dependent for others
// Mode() FileMode // file mode bits
// ModTime() time.Time // modification time
// IsDir() bool // abbreviation for Mode().IsDir()
// Sys() interface{} // underlying data source (can return nil)
type finalizeFileInfo struct {
path string
target string
shortname string
extension string
location uint32
blocks uint32 // blocks for the directory itself and its entries
continuationBlocks uint32 // blocks for CE entries
recordSize uint8
depth int
name string
size int64
mode os.FileMode
modTime time.Time
isDir bool
isRoot bool
bytes [][]byte
parent *finalizeFileInfo
children []*finalizeFileInfo
trueParent *finalizeFileInfo
trueChild *finalizeFileInfo
content []byte
}
func (fi *finalizeFileInfo) Name() string {
// we are using plain iso9660 (without extensions), so just shortname possibly with extension
ret := fi.shortname
if !fi.isDir {
ret = fmt.Sprintf("%s.%s;1", fi.shortname, fi.extension)
}
// shortname already is ucased
return ret
}
func (fi *finalizeFileInfo) Size() int64 {
return fi.size
}
func (fi *finalizeFileInfo) Mode() os.FileMode {
return fi.mode
}
func (fi *finalizeFileInfo) ModTime() time.Time {
return fi.modTime
}
func (fi *finalizeFileInfo) IsDir() bool {
return fi.isDir
}
func (fi *finalizeFileInfo) Sys() interface{} {
return nil
}
func (fi *finalizeFileInfo) updateDepth(depth int) {
fi.depth = depth
if fi.isDir {
for _, e := range fi.children {
e.updateDepth(depth + 1)
}
}
}
func (fi *finalizeFileInfo) toDirectoryEntry(fs *FileSystem, isSelf, isParent bool) (*directoryEntry, error) {
de := &directoryEntry{
extAttrSize: 0,
location: fi.location,
size: uint32(fi.Size()),
creation: fi.ModTime(),
isHidden: false,
isSubdirectory: fi.IsDir(),
isAssociated: false,
hasExtendedAttrs: false,
hasOwnerGroupPermissions: false,
hasMoreEntries: false,
isSelf: isSelf,
isParent: isParent,
volumeSequence: 1,
filesystem: fs,
// we keep the full filename until after processing
filename: fi.Name(),
}
// if it is root, and we have susp enabled, add the necessary entries
if fs.suspEnabled {
if fi.isRoot && isSelf {
de.extensions = append(de.extensions, directoryEntrySystemUseExtensionSharingProtocolIndicator{skipBytes: 0})
}
// add appropriate PX, TF, SL, NM extensions
for _, e := range fs.suspExtensions {
ext, err := e.GetFileExtensions(path.Join(fs.workspace, fi.path), isSelf, isParent)
if err != nil {
return nil, fmt.Errorf("Error getting extensions for %s at path %s: %v", e.ID(), fi.path, err)
}
ext2, err := e.GetFinalizeExtensions(fi)
if err != nil {
return nil, fmt.Errorf("Error getting finalize extensions for %s at path %s: %v", e.ID(), fi.path, err)
}
ext = append(ext, ext2...)
de.extensions = append(de.extensions, ext...)
}
if fi.isRoot && isSelf {
for _, e := range fs.suspExtensions {
de.extensions = append(de.extensions, directoryEntrySystemUseExtensionReference{id: e.ID(), descriptor: e.Descriptor(), source: e.Source(), extensionVersion: e.Version()})
}
}
}
return de, nil
}
func (fi *finalizeFileInfo) toDirectory(fs *FileSystem) (*Directory, error) {
// also need to add self and parent to it
var (
self, parent, dirEntry *directoryEntry
err error
)
if !fi.IsDir() {
return nil, fmt.Errorf("Cannot convert a file entry to a directtory")
}
self, err = fi.toDirectoryEntry(fs, true, false)
if err != nil {
return nil, fmt.Errorf("Could not convert self entry %s to dirEntry: %v", fi.path, err)
}
// if we have no parent, we are the root entry
// we also need to put in the SUSP if it is enabled
parentEntry := fi.parent
if fi.isRoot {
parentEntry = fi
}
parent, err = parentEntry.toDirectoryEntry(fs, false, true)
if err != nil {
return nil, fmt.Errorf("Could not convert parent entry %s to dirEntry: %v", fi.parent.path, err)
}
entries := []*directoryEntry{self, parent}
for _, child := range fi.children {
dirEntry, err = child.toDirectoryEntry(fs, false, false)
if err != nil {
return nil, fmt.Errorf("Could not convert child entry %s to dirEntry: %v", child.path, err)
}
entries = append(entries, dirEntry)
}
d := &Directory{
directoryEntry: *self,
entries: entries,
}
return d, nil
}
// calculate the size of a directory entry single record
func (fi *finalizeFileInfo) calculateRecordSize(fs *FileSystem, isSelf, isParent bool) (int, int, error) {
// we do not actually need real continuation block locations to calculate the size, just enough placeholders, so use a temporary slice
extTmpBlocks := make([]uint32, 100)
dirEntry, err := fi.toDirectoryEntry(fs, isSelf, isParent)
if err != nil {
return 0, 0, fmt.Errorf("Could not convert to dirEntry: %v", err)
}
dirBytes, err := dirEntry.toBytes(false, extTmpBlocks)
if err != nil {
return 0, 0, fmt.Errorf("Could not convert dirEntry to bytes: %v", err)
}
// first entry is the bytes to store in the directory
// rest are continuation blocks
return len(dirBytes[0]), len(dirBytes) - 1, nil
}
// calculate the size of a directory, similar to a file size
func (fi *finalizeFileInfo) calculateDirectorySize(fs *FileSystem) (int, int, error) {
var (
recSize, recCE int
err error
)
if !fi.IsDir() {
return 0, 0, fmt.Errorf("Cannot convert a file entry to a directtory")
}
ceBlocks := 0
size := 0
recSize, recCE, err = fi.calculateRecordSize(fs, true, false)
if err != nil {
return 0, 0, fmt.Errorf("Could not calculate self entry size %s: %v", fi.path, err)
}
size += recSize
ceBlocks += recCE
recSize, recCE, err = fi.calculateRecordSize(fs, false, true)
if err != nil {
return 0, 0, fmt.Errorf("Could not calculate parent entry size %s: %v", fi.path, err)
}
size += recSize
ceBlocks += recCE
for _, e := range fi.children {
// get size of data and CE blocks
recSize, recCE, err = e.calculateRecordSize(fs, false, false)
if err != nil {
return 0, 0, fmt.Errorf("Could not calculate child %s entry size %s: %v", e.path, fi.path, err)
}
// do not go over a block boundary; pad if necessary
newSize := size + recSize
blocksize := int(fs.blocksize)
left := blocksize - size%blocksize
if left != 0 && newSize/blocksize > size/blocksize {
size += left
}
ceBlocks += recCE
size += recSize
}
return size, ceBlocks, nil
}
// add depth to all children
func (fi *finalizeFileInfo) addProperties(depth int) {
fi.depth = depth
for _, e := range fi.children {
e.parent = fi
e.addProperties(depth + 1)
}
}
// sort all of the directory children recursively - this is for ordering into blocks
func (fi *finalizeFileInfo) collapseAndSortChildren() ([]*finalizeFileInfo, []*finalizeFileInfo) {
dirs := make([]*finalizeFileInfo, 0)
files := make([]*finalizeFileInfo, 0)
// first extract all of the directories
for _, e := range fi.children {
if e.IsDir() {
dirs = append(dirs, e)
} else {
files = append(files, e)
}
}
// next sort them
sort.Slice(dirs, func(i, j int) bool {
// just sort by filename; as good as anything else
return dirs[i].Name() < dirs[j].Name()
})
sort.Slice(files, func(i, j int) bool {
// just sort by filename; as good as anything else
return files[i].Name() < files[j].Name()
})
// finally add in the children going down
finalDirs := make([]*finalizeFileInfo, 0)
finalFiles := files
for _, e := range dirs {
finalDirs = append(finalDirs, e)
// now get any children
d, f := e.collapseAndSortChildren()
finalDirs = append(finalDirs, d...)
finalFiles = append(finalFiles, f...)
}
return finalDirs, finalFiles
}
func (fi *finalizeFileInfo) findEntry(p string) (*finalizeFileInfo, error) {
// break path down into parts and levels
parts, err := splitPath(p)
if err != nil {
return nil, fmt.Errorf("Could not parse path: %v", err)
}
var target *finalizeFileInfo
if len(parts) == 0 {
target = fi
} else {
current := parts[0]
// read the directory bytes
for _, e := range fi.children {
// do we have an alternate name?
// only care if not self or parent entry
checkFilename := e.name
if checkFilename == current {
if len(parts) > 1 {
target, err = e.findEntry(path.Join(parts[1:]...))
if err != nil {
return nil, fmt.Errorf("Could not get entry: %v", err)
}
} else {
// this is the final one, we found it, keep it
target = e
}
break
}
}
}
return target, nil
}
func (fi *finalizeFileInfo) removeChild(p string) *finalizeFileInfo {
var removed *finalizeFileInfo
children := make([]*finalizeFileInfo, 0)
for _, e := range fi.children {
if e.name != p {
children = append(children, e)
} else {
removed = e
}
}
fi.children = children
return removed
}
func (fi *finalizeFileInfo) addChild(entry *finalizeFileInfo) {
fi.children = append(fi.children, entry)
}
func finalizeFileInfoNames(fi []*finalizeFileInfo) []string {
ret := make([]string, len(fi))
for i, v := range fi {
ret[i] = v.name
}
return ret
}
// Finalize finalize a read-only filesystem by writing it out to a read-only format
func (fs *FileSystem) Finalize(options FinalizeOptions) error {
if fs.workspace == "" {
return fmt.Errorf("Cannot finalize an already finalized filesystem")
}
// did we ask for susp?
if options.RockRidge {
fs.suspEnabled = true
fs.suspExtensions = append(fs.suspExtensions, getRockRidgeExtension(rockRidge112))
}
/*
There is nothing in the iso9660 spec about the order of directories and files,
other than that they must be accessible in the location specified in directory entry and/or path table
However, most implementations seem to do it as follows:
- each directory follows its parent
- data (i.e. file) sectors in each directory are immediately after its directory and immediately before the next sibling directory to its parent
to keep it simple, we will follow what xorriso/mkisofs on linux does, in the following order:
- volume descriptor set, beginning at sector 16
- root directory entry
- all other directory entries, sorted alphabetically, depth first
- L path table
- M path table
- data sectors for files, sorted alphabetically, matching order of directories
this is where we build our filesystem
1- blank out sectors 0-15 for system use
2- skip sectors 16-17 for PVD and terminator (fill later)
3- calculate how many sectors required for root directory
4- calculate each child directory, working our way down, including number of sectors and location
5- write path tables (L & M)
6- write files for root directory
7- write root directory entry into its sector (18)
8- repeat steps 6&7 for all other directories
9- write PVD
10- write volume descriptor set terminator
*/
f := fs.file
blocksize := int(fs.blocksize)
// 1- blank out sectors 0-15
b := make([]byte, dataStartSector*fs.blocksize)
n, err := f.WriteAt(b, 0)
if err != nil {
return fmt.Errorf("Could not write blank system area: %v", err)
}
if n != len(b) {
return fmt.Errorf("Only wrote %d bytes instead of expected %d to system area", n, len(b))
}
// 3- build out file tree
fileList, dirList, err := walkTree(fs.Workspace())
if err != nil {
return fmt.Errorf("Error walking tree: %v", err)
}
// starting point
root := dirList["."]
root.addProperties(1)
// if we need to relocate directories, must do them here, before finalizing order and sizes
// do not bother if DeepDirectories is enabled, i.e. we accept non-ISO9660-compliant depth
if !options.DeepDirectories {
if fs.suspEnabled {
var handler suspExtension
for _, e := range fs.suspExtensions {
if e.Relocatable() {
handler = e
break
}
}
		// guard against no relocatable extension being registered
		if handler != nil {
			var relocateFiles []*finalizeFileInfo
			relocateFiles, dirList, err = handler.Relocate(dirList)
			if err != nil {
				return fmt.Errorf("Unable to use extension %s to relocate directories from depth > 8: %v", handler.ID(), err)
			}
			fileList = append(fileList, relocateFiles...)
		}
	}
// check if any directories remain deeper than 8
for _, e := range dirList {
if e.depth > 8 {
return fmt.Errorf("directory %s deeper than 8 deep and DeepDirectories override not enabled", e.path)
}
}
}
// convert sizes to required blocks for files
for _, e := range fileList {
e.blocks = calculateBlocks(e.size, fs.blocksize)
}
// we now have list of all of the files and directories and their properties, as well as children of every directory
// store them in a flat sorted slice, beginning with root so we can write them out in order to blocks after
dirs := make([]*finalizeFileInfo, 0, 20)
dirs = append(dirs, root)
subdirs, files := root.collapseAndSortChildren()
dirs = append(dirs, subdirs...)
// calculate the sizes and locations of the directories from the flat list and assign blocks
rootLocation := uint32(dataStartSector + 2)
// if el torito was enabled, use one sector for boot volume entry
if options.ElTorito != nil {
rootLocation++
}
location := rootLocation
var size, ceBlocks int
for _, dir := range dirs {
dir.location = location
size, ceBlocks, err = dir.calculateDirectorySize(fs)
if err != nil {
return fmt.Errorf("Unable to calculate size of directory for %s: %v", dir.path, err)
}
dir.size = int64(size)
dir.blocks = calculateBlocks(int64(size), int64(blocksize))
dir.continuationBlocks = uint32(ceBlocks)
location += dir.blocks + dir.continuationBlocks
}
// we now have sorted list of block order, with sizes and number of blocks on each
// next assign the blocks to each, and then we can enter the data in the directory entries
// create the pathtables (L & M)
// with the list of directories, we can make a path table
pathTable := createPathTable(dirs)
// how big is the path table? we will take LSB for now, because they are the same size
pathTableLBytes := pathTable.toLBytes()
pathTableMBytes := pathTable.toMBytes()
pathTableSize := len(pathTableLBytes)
pathTableBlocks := uint32(pathTableSize / blocksize)
if pathTableSize%blocksize > 0 {
pathTableBlocks++
}
// we do not do optional path tables yet
pathTableLLocation := location
location += pathTableBlocks
pathTableMLocation := location
location += pathTableBlocks
// if we asked for ElTorito, need to generate the boot catalog and save it
var (
catEntry *finalizeFileInfo
bootcat []byte
volIdentifier string = defaultVolumeIdentifier
)
if options.VolumeIdentifier != "" {
volIdentifier = options.VolumeIdentifier
}
if options.ElTorito != nil {
bootcat, err = options.ElTorito.generateCatalog()
if err != nil {
return fmt.Errorf("Unable to generate El Torito boot catalog: %v", err)
}
// figure out where to save it on disk
catname := options.ElTorito.BootCatalog
switch {
case catname == "" && options.RockRidge:
catname = elToritoDefaultCatalogRR
case catname == "":
catname = elToritoDefaultCatalog
}
shortname, extension := calculateShortnameExtension(path.Base(catname))
// break down the catalog basename from the parent dir
catEntry = &finalizeFileInfo{
content: bootcat,
size: int64(len(bootcat)),
path: catname,
name: path.Base(catname),
shortname: shortname,
extension: extension,
}
catEntry.location = location
catEntry.blocks = calculateBlocks(catEntry.size, fs.blocksize)
location += catEntry.blocks
// make it the first file
files = append([]*finalizeFileInfo{catEntry}, files...)
// if we were not told to hide the catalog, add it to its parent
if !options.ElTorito.HideBootCatalog {
var parent *finalizeFileInfo
parent, err = root.findEntry(path.Dir(catname))
if err != nil {
return fmt.Errorf("Error finding parent for boot catalog %s: %v", catname, err)
}
parent.addChild(catEntry)
}
for _, e := range options.ElTorito.Entries {
var parent, child *finalizeFileInfo
parent, err = root.findEntry(path.Dir(e.BootFile))
if err != nil {
return fmt.Errorf("Error finding parent for boot image file %s: %v", e.BootFile, err)
}
// did we ask to hide any image files?
if e.HideBootFile {
child = parent.removeChild(path.Base(e.BootFile))
} else {
child, err = parent.findEntry(path.Base(e.BootFile))
if err != nil {
return fmt.Errorf("Unable to find image child %s: %v", e.BootFile, err)
}
}
e.size = uint16(child.size)
e.location = child.location
}
}
for _, e := range files {
e.location = location
location += e.blocks
}
// now that we have all of the files with their locations, we can rebuild the boot catalog using the correct data
if catEntry != nil {
bootcat, err = options.ElTorito.generateCatalog()
if err != nil {
return fmt.Errorf("Unable to generate El Torito boot catalog: %v", err)
}
catEntry.content = bootcat
}
// now we can write each one out - dirs first then files
for _, e := range dirs {
writeAt := int64(e.location) * int64(blocksize)
var d *Directory
d, err = e.toDirectory(fs)
if err != nil {
return fmt.Errorf("Unable to convert entry to directory: %v", err)
}
// Directory.toBytes() always returns whole blocks
// get the continuation entry locations
ceLocations := make([]uint32, 0)
ceLocationStart := e.location + e.blocks
for i := 0; i < int(e.continuationBlocks); i++ {
ceLocations = append(ceLocations, ceLocationStart+uint32(i))
}
var p [][]byte
p, err = d.entriesToBytes(ceLocations)
if err != nil {
return fmt.Errorf("Could not convert directory to bytes: %v", err)
}
for i, e := range p {
f.WriteAt(e, writeAt+int64(i*blocksize))
}
}
// now write out the path tables, L & M
writeAt := int64(pathTableLLocation) * int64(blocksize)
f.WriteAt(pathTableLBytes, writeAt)
writeAt = int64(pathTableMLocation) * int64(blocksize)
f.WriteAt(pathTableMBytes, writeAt)
var (
from *os.File
copied int
)
for _, e := range files {
writeAt := int64(e.location) * int64(blocksize)
if e.content == nil {
// for file, just copy the data across
from, err = os.Open(path.Join(fs.workspace, e.path))
if err != nil {
return fmt.Errorf("failed to open file for reading %s: %v", e.path, err)
}
defer from.Close()
copied, err = copyFileData(from, f, 0, writeAt)
if err != nil {
return fmt.Errorf("failed to copy file to disk %s: %v", e.path, err)
}
if copied != int(e.Size()) {
return fmt.Errorf("error copying file %s to disk, copied %d bytes, expected %d", e.path, copied, e.Size())
}
} else {
copied = len(e.content)
if _, err = f.WriteAt(e.content, writeAt); err != nil {
return fmt.Errorf("Failed to write content of %s to disk: %v", e.path, err)
}
}
		// zero-fill the remainder of the last block, if the content did not end on a block boundary
		if remainder := copied % blocksize; remainder > 0 {
			b2 := make([]byte, blocksize-remainder)
			f.WriteAt(b2, writeAt+int64(copied))
		}
}
totalSize := location
location = dataStartSector
// create and write the primary volume descriptor, supplementary and boot, and volume descriptor set terminator
now := time.Now()
rootDE, err := root.toDirectoryEntry(fs, true, false)
if err != nil {
return fmt.Errorf("Could not convert root entry for primary volume descriptor to dirEntry: %v", err)
}
pvd := &primaryVolumeDescriptor{
systemIdentifier: "",
volumeIdentifier: volIdentifier,
volumeSize: totalSize,
setSize: 1,
sequenceNumber: 1,
blocksize: uint16(fs.blocksize),
pathTableSize: uint32(pathTableSize),
pathTableLLocation: pathTableLLocation,
pathTableLOptionalLocation: 0,
pathTableMLocation: pathTableMLocation,
pathTableMOptionalLocation: 0,
volumeSetIdentifier: "",
publisherIdentifier: "",
preparerIdentifier: util.AppNameVersion,
applicationIdentifier: "",
copyrightFile: "", // 37 bytes
abstractFile: "", // 37 bytes
bibliographicFile: "", // 37 bytes
creation: now,
modification: now,
expiration: now,
effective: now,
rootDirectoryEntry: rootDE,
}
b = pvd.toBytes()
f.WriteAt(b, int64(location)*int64(blocksize))
location++
// do we have a boot sector?
if options.ElTorito != nil {
bvd := &bootVolumeDescriptor{location: catEntry.location}
b = bvd.toBytes()
f.WriteAt(b, int64(location)*int64(blocksize))
location++
}
terminator := &terminatorVolumeDescriptor{}
b = terminator.toBytes()
f.WriteAt(b, int64(location)*int64(blocksize))
// finish by setting as finalized
fs.workspace = ""
return nil
}
func copyFileData(from, to util.File, fromOffset, toOffset int64) (int, error) {
buf := make([]byte, 2048)
copied := 0
for {
n, err := from.ReadAt(buf, fromOffset+int64(copied))
if err != nil && err != io.EOF {
return copied, err
}
if n == 0 {
break
}
if _, err := to.WriteAt(buf[:n], toOffset+int64(copied)); err != nil {
return copied, err
}
copied += n
}
return copied, nil
}
// sort path table entries
func sortFinalizeFileInfoPathTable(left, right *finalizeFileInfo) bool {
switch {
case left.parent == right.parent:
// same parents = same depth, just sort on name
lname := left.Name()
rname := right.Name()
maxLen := maxInt(len(lname), len(rname))
format := fmt.Sprintf("%%-%ds", maxLen)
return fmt.Sprintf(format, lname) < fmt.Sprintf(format, rname)
case left.depth < right.depth:
// different parents with different depth, lower first
return true
case right.depth > left.depth:
return false
case left.parent == nil && right.parent != nil:
return true
case left.parent != nil && right.parent == nil:
return false
default:
// same depth, different parents, it depends on the sort order of the parents
return sortFinalizeFileInfoPathTable(left.parent, right.parent)
}
}
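// As an illustration of the ordering rules: given directories /B, /A and /A/C, the sort
// yields root, /A, /B, /A/C - first by depth, then by parent, then by name,
// which is the ordering ISO9660 requires for path table records.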
// create a path table from a slice of *finalizeFileInfo that are directories
func createPathTable(fi []*finalizeFileInfo) *pathTable {
// copy so we do not modify the original
fs := make([]*finalizeFileInfo, len(fi))
copy(fs, fi)
// sort via the rules
sort.Slice(fs, func(i, j int) bool {
return sortFinalizeFileInfoPathTable(fs[i], fs[j])
})
indexMap := make(map[*finalizeFileInfo]int)
// now that it is sorted, create the ordered path table entries
entries := make([]*pathTableEntry, 0)
for i, e := range fs {
name := e.Name()
nameSize := len(name)
size := 8 + uint16(nameSize)
if nameSize%2 != 0 {
size++
}
ownIndex := i + 1
indexMap[e] = ownIndex
// root just points to itself
parentIndex := ownIndex
if ip, ok := indexMap[e.parent]; ok {
parentIndex = ip
}
pte := &pathTableEntry{
nameSize: uint8(nameSize),
size: size,
extAttrLength: 0,
location: e.location,
parentIndex: uint16(parentIndex),
dirname: name,
}
entries = append(entries, pte)
}
return &pathTable{
records: entries,
}
}
func walkTree(workspace string) ([]*finalizeFileInfo, map[string]*finalizeFileInfo, error) {
cwd, err := os.Getwd()
if err != nil {
return nil, nil, fmt.Errorf("Could not get pwd: %v", err)
}
// make everything relative to the workspace
if err = os.Chdir(workspace); err != nil {
return nil, nil, fmt.Errorf("Could not cd to workspace %s: %v", workspace, err)
}
dirList := make(map[string]*finalizeFileInfo)
fileList := make([]*finalizeFileInfo, 0)
var entry *finalizeFileInfo
err = filepath.Walk(".", func(fp string, fi os.FileInfo, err error) error {
if err != nil {
return err
}
isRoot := fp == "."
name := fi.Name()
shortname, extension := calculateShortnameExtension(name)
if isRoot {
name = string([]byte{0x00})
shortname = name
}
entry = &finalizeFileInfo{path: fp, name: name, isDir: fi.IsDir(), isRoot: isRoot, modTime: fi.ModTime(), mode: fi.Mode(), size: fi.Size(), shortname: shortname}
// we will have to save it as its parent
parentDir := filepath.Dir(fp)
parentDirInfo := dirList[parentDir]
if fi.IsDir() {
entry.children = make([]*finalizeFileInfo, 0, 20)
dirList[fp] = entry
if !isRoot {
parentDirInfo.children = append(parentDirInfo.children, entry)
dirList[parentDir] = parentDirInfo
}
} else {
entry.extension = extension
parentDirInfo.children = append(parentDirInfo.children, entry)
dirList[parentDir] = parentDirInfo
fileList = append(fileList, entry)
}
return nil
})
// reset the workspace
os.Chdir(cwd)
if err != nil {
return nil, nil, fmt.Errorf("Error walking workspace %s: %v", workspace, err)
}
return fileList, dirList, nil
}
func calculateBlocks(size, blocksize int64) uint32 {
blocks := uint32(size / blocksize)
// add one for partial
if size%blocksize > 0 {
blocks++
}
return blocks
}
func calculateShortnameExtension(name string) (string, string) {
parts := strings.SplitN(name, ".", 2)
shortname := parts[0]
extension := ""
if len(parts) > 1 {
extension = parts[1]
}
// shortname and extension must be upper-case
shortname = strings.ToUpper(shortname)
extension = strings.ToUpper(extension)
// replace illegal characters in shortname and extension with _
re := regexp.MustCompile("[^A-Z0-9_]")
shortname = re.ReplaceAllString(shortname, "_")
extension = re.ReplaceAllString(extension, "_")
return shortname, extension
}
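// For example (illustrative): calculateShortnameExtension("cloud-init.yaml")
// returns ("CLOUD_INIT", "YAML") - upper-cased, with the illegal "-" replaced by "_".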

View File

@@ -0,0 +1,480 @@
package iso9660
import (
"encoding/binary"
"fmt"
"io/ioutil"
"os"
"path"
"github.com/diskfs/go-diskfs/filesystem"
"github.com/diskfs/go-diskfs/util"
)
const (
volumeDescriptorSize int64 = 2 * KB // each volume descriptor is 2KB
systemAreaSize int64 = 32 * KB // 32KB system area size
defaultSectorSize int64 = 2 * KB
// MaxBlocks maximum number of blocks allowed in an iso9660 filesystem
MaxBlocks int64 = 4294967296 // 2^32
)
// FileSystem implements the FileSystem interface
type FileSystem struct {
workspace string
size int64
start int64
file util.File
blocksize int64
volumes volumeDescriptors
pathTable *pathTable
rootDir *directoryEntry
suspEnabled bool // is the SUSP in use?
suspSkip uint8 // how many bytes to skip in each directory record
suspExtensions []suspExtension
}
// Equal compare if two filesystems are equal
func (fs *FileSystem) Equal(a *FileSystem) bool {
localMatch := fs.file == a.file && fs.size == a.size
vdMatch := fs.volumes.equal(&a.volumes)
return localMatch && vdMatch
}
// Workspace get the workspace path
func (fs *FileSystem) Workspace() string {
return fs.workspace
}
// Create creates an ISO9660 filesystem in a given directory
//
// requires the util.File where to create the filesystem, size is the size of the filesystem in bytes,
// start is how far in bytes from the beginning of the util.File to create the filesystem,
// and blocksize is the logical blocksize to use for creating the filesystem
//
// note that you are *not* required to create the filesystem on the entire disk. You could have a disk of size
// 20GB, and create a small filesystem of size 50MB that begins 2GB into the disk.
// This is extremely useful for creating filesystems on disk partitions.
//
// Note, however, that it is much easier to do this using the higher-level APIs at github.com/diskfs/go-diskfs
// which allow you to work directly with partitions, rather than having to calculate (and hopefully not make any errors)
// where a partition starts and ends.
//
// If the provided blocksize is 0, it will use the default of 2 KB.
func Create(f util.File, size int64, start int64, blocksize int64) (*FileSystem, error) {
if blocksize == 0 {
blocksize = defaultSectorSize
}
// make sure it is an allowed blocksize
if err := validateBlocksize(blocksize); err != nil {
return nil, err
}
// size of 0 means to use defaults
if size != 0 && size > MaxBlocks*blocksize {
return nil, fmt.Errorf("requested size is larger than maximum allowed ISO9660 size of %d blocks", MaxBlocks)
}
// at bare minimum, it must have enough space for the system area, one volume descriptor, one volume descriptor set terminator, and one block of data
if size != 0 && size < systemAreaSize+2*volumeDescriptorSize+blocksize {
return nil, fmt.Errorf("requested size is smaller than minimum allowed ISO9660 size: system area (%d), one volume descriptor (%d), one volume descriptor set terminator (%d), and one block (%d)", systemAreaSize, volumeDescriptorSize, volumeDescriptorSize, blocksize)
}
// create a temporary working area where we can create the filesystem.
// It is only on `Finalize()` that we write it out to the actual disk file
tmpdir, err := ioutil.TempDir("", "diskfs_iso")
if err != nil {
return nil, fmt.Errorf("Could not create working directory: %v", err)
}
// create root directory
// there is nothing in there
return &FileSystem{
workspace: tmpdir,
start: start,
size: size,
file: f,
volumes: volumeDescriptors{},
blocksize: blocksize,
}, nil
}
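// A minimal usage sketch (the output path here is hypothetical; an *os.File
// satisfies util.File):
//
//  f, err := os.Create("/tmp/test.iso")
//  if err != nil {
//    log.Fatal(err)
//  }
//  fs, err := iso9660.Create(f, 10*iso9660.MB, 0, 2048)
//  if err != nil {
//    log.Fatal(err)
//  }
//  // files are staged in fs.Workspace() until the filesystem is finalized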
// Read reads a filesystem from a given disk.
//
// requires the util.File where to read the filesystem, size is the size of the filesystem in bytes,
// start is how far in bytes from the beginning of the util.File the filesystem is expected to begin,
// and blocksize is the physical blocksize to use for reading the filesystem
//
// note that you are *not* required to read a filesystem on the entire disk. You could have a disk of size
// 20GB, and a small filesystem of size 50MB that begins 2GB into the disk.
// This is extremely useful for working with filesystems on disk partitions.
//
// Note, however, that it is much easier to do this using the higher-level APIs at github.com/diskfs/go-diskfs
// which allow you to work directly with partitions, rather than having to calculate (and hopefully not make any errors)
// where a partition starts and ends.
//
// If the provided blocksize is 0, it will use the default of 2 KB
func Read(file util.File, size int64, start int64, blocksize int64) (*FileSystem, error) {
var read int
if blocksize == 0 {
blocksize = defaultSectorSize
}
// make sure it is an allowed blocksize
if err := validateBlocksize(blocksize); err != nil {
return nil, err
}
// default size of 0 means use whatever size is available
if size != 0 && size > MaxBlocks*blocksize {
return nil, fmt.Errorf("requested size is larger than maximum allowed ISO9660 size of %d blocks", MaxBlocks)
}
// at bare minimum, it must have enough space for the system area, one volume descriptor, one volume descriptor set terminator, and one block of data
if size != 0 && size < systemAreaSize+2*volumeDescriptorSize+blocksize {
return nil, fmt.Errorf("requested size is too small to allow for system area (%d), one volume descriptor (%d), one volume descriptor set terminator (%d), and one block (%d)", systemAreaSize, volumeDescriptorSize, volumeDescriptorSize, blocksize)
}
// load the information from the disk
// read system area
systemArea := make([]byte, systemAreaSize, systemAreaSize)
n, err := file.ReadAt(systemArea, start)
if err != nil {
return nil, fmt.Errorf("Could not read bytes from file: %v", err)
}
if int64(n) < systemAreaSize {
return nil, fmt.Errorf("Could only read %d bytes from file", n)
}
// we do not do anything with the system area for now
// next read the volume descriptors, one at a time, until we hit the terminator
vds := make([]volumeDescriptor, 0, 2)
terminated := false
var (
pvd *primaryVolumeDescriptor
vd volumeDescriptor
)
for i := 0; !terminated; i++ {
vdBytes := make([]byte, volumeDescriptorSize, volumeDescriptorSize)
// read vdBytes
read, err = file.ReadAt(vdBytes, start+systemAreaSize+int64(i)*volumeDescriptorSize)
if err != nil {
return nil, fmt.Errorf("Unable to read bytes for volume descriptor %d: %v", i, err)
}
if int64(read) != volumeDescriptorSize {
return nil, fmt.Errorf("Read %d bytes instead of expected %d for volume descriptor %d", read, volumeDescriptorSize, i)
}
// convert to a vd structure
vd, err = volumeDescriptorFromBytes(vdBytes)
if err != nil {
return nil, fmt.Errorf("Error reading Volume Descriptor: %v", err)
}
// is this a terminator?
switch vd.Type() {
case volumeDescriptorTerminator:
terminated = true
case volumeDescriptorPrimary:
vds = append(vds, vd)
pvd = vd.(*primaryVolumeDescriptor)
default:
vds = append(vds, vd)
}
}
// load up our path table and root directory entry
var (
pt *pathTable
rootDirEntry *directoryEntry
)
if pvd != nil {
rootDirEntry = pvd.rootDirectoryEntry
pathTableBytes := make([]byte, pvd.pathTableSize, pvd.pathTableSize)
pathTableLocation := pvd.pathTableLLocation * uint32(pvd.blocksize)
read, err = file.ReadAt(pathTableBytes, int64(pathTableLocation))
if err != nil {
return nil, fmt.Errorf("Unable to read path table of size %d at location %d: %v", pvd.pathTableSize, pathTableLocation, err)
}
if read != len(pathTableBytes) {
return nil, fmt.Errorf("Read %d bytes of path table instead of expected %d at location %d", read, pvd.pathTableSize, pathTableLocation)
}
pt, err = parsePathTable(pathTableBytes)
if err != nil {
return nil, fmt.Errorf("Unable to parse path table of size %d at location %d: %v", pvd.pathTableSize, pathTableLocation, err)
}
}
if rootDirEntry == nil {
return nil, fmt.Errorf("Could not find primary volume descriptor with a root directory entry")
}
// read the root directory entry, and check whether system use (SUSP) is enabled
location := int64(rootDirEntry.location) * blocksize
// get the size of the directory entry
b := make([]byte, 1)
read, err = file.ReadAt(b, location)
if err != nil {
return nil, fmt.Errorf("Unable to read root directory size at location %d: %v", location, err)
}
if read != len(b) {
return nil, fmt.Errorf("Root directory entry size, read %d bytes instead of expected %d", read, len(b))
}
// now read the whole entry
b = make([]byte, b[0])
read, err = file.ReadAt(b, location)
if err != nil {
return nil, fmt.Errorf("Unable to read root directory entry at location %d: %v", location, err)
}
if read != len(b) {
return nil, fmt.Errorf("Root directory entry, read %d bytes instead of expected %d", read, len(b))
}
// parse it - we do not have any handlers yet
de, err := parseDirEntry(b, &FileSystem{
suspEnabled: true,
file: file,
blocksize: blocksize,
})
if err != nil {
return nil, fmt.Errorf("Error parsing root entry from bytes: %v", err)
}
// is the SUSP in use?
var (
suspEnabled bool
skipBytes uint8
suspHandlers []suspExtension
)
for _, ext := range de.extensions {
if s, ok := ext.(directoryEntrySystemUseExtensionSharingProtocolIndicator); ok {
suspEnabled = true
skipBytes = s.SkipBytes()
}
// register any extension handlers
if s, ok := ext.(directoryEntrySystemUseExtensionReference); suspEnabled && ok {
extHandler := getRockRidgeExtension(s.ExtensionID())
if extHandler != nil {
suspHandlers = append(suspHandlers, extHandler)
}
}
}
fs := &FileSystem{
workspace: "", // no workspace when we do nothing with it
start: start,
size: size,
file: file,
volumes: volumeDescriptors{
descriptors: vds,
primary: pvd,
},
blocksize: blocksize,
pathTable: pt,
rootDir: rootDirEntry,
suspEnabled: suspEnabled,
suspSkip: skipBytes,
suspExtensions: suspHandlers,
}
rootDirEntry.filesystem = fs
return fs, nil
}
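// A minimal usage sketch (the device path here is hypothetical):
//
//  f, err := os.Open("/dev/cdrom")
//  if err != nil {
//    log.Fatal(err)
//  }
//  fs, err := iso9660.Read(f, 0, 0, 2048)
//  if err != nil {
//    log.Fatal(err)
//  }
//  fmt.Println(fs.Label())
//
// A size of 0 means "use whatever size is available", per the validation above.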
// Type returns the type code for the filesystem. Always returns filesystem.TypeISO9660
func (fs *FileSystem) Type() filesystem.Type {
return filesystem.TypeISO9660
}
// Mkdir make a directory at the given path. It is equivalent to `mkdir -p`, i.e. idempotent, in that:
//
// * It will make the entire tree path if it does not exist
// * It will not return an error if the path already exists
//
// if readonly and not in workspace, will return an error
func (fs *FileSystem) Mkdir(p string) error {
if fs.workspace == "" {
return fmt.Errorf("Cannot write to read-only filesystem")
}
err := os.MkdirAll(path.Join(fs.workspace, p), 0755)
if err != nil {
return fmt.Errorf("Could not create directory %s: %v", p, err)
}
// we are not interested in returning the entries
return err
}
// ReadDir return the contents of a given directory in a given filesystem.
//
// Returns a slice of os.FileInfo with all of the entries in the directory.
//
// Will return an error if the directory does not exist or is a regular file and not a directory
func (fs *FileSystem) ReadDir(p string) ([]os.FileInfo, error) {
var fi []os.FileInfo
var err error
// non-workspace: read from iso9660
// workspace: read from regular filesystem
if fs.workspace != "" {
fullPath := path.Join(fs.workspace, p)
// read the entries
fi, err = ioutil.ReadDir(fullPath)
if err != nil {
return nil, fmt.Errorf("Could not read directory %s: %v", p, err)
}
} else {
dirEntries, err := fs.readDirectory(p)
if err != nil {
return nil, fmt.Errorf("Error reading directory %s: %v", p, err)
}
fi = make([]os.FileInfo, 0, len(dirEntries))
for _, entry := range dirEntries {
// ignore any entry that is current directory or parent
if entry.isSelf || entry.isParent {
continue
}
fi = append(fi, entry)
}
}
return fi, nil
}
// OpenFile returns an io.ReadWriter from which you can read the contents of a file
// or write contents to the file
//
// accepts normal os.OpenFile flags
//
// returns an error if the file does not exist
func (fs *FileSystem) OpenFile(p string, flag int) (filesystem.File, error) {
var f filesystem.File
var err error
// get the path and filename
dir := path.Dir(p)
filename := path.Base(p)
// if the dir == filename, then it is just /
if dir == filename {
return nil, fmt.Errorf("Cannot open directory %s as file", p)
}
// cannot open to write or append or create if we do not have a workspace
writeMode := flag&os.O_WRONLY != 0 || flag&os.O_RDWR != 0 || flag&os.O_APPEND != 0 || flag&os.O_CREATE != 0 || flag&os.O_TRUNC != 0 || flag&os.O_EXCL != 0
if fs.workspace == "" {
if writeMode {
return nil, fmt.Errorf("Cannot write to read-only filesystem")
}
// get the directory entries
var entries []*directoryEntry
entries, err = fs.readDirectory(dir)
if err != nil {
return nil, fmt.Errorf("Could not read directory entries for %s", dir)
}
// we now know that the directory exists, see if the file exists
var targetEntry *directoryEntry
for _, e := range entries {
eName := e.Name()
// cannot do anything with directories
if eName == filename && e.IsDir() {
return nil, fmt.Errorf("Cannot open directory %s as file", p)
}
if eName == filename {
// if we got this far, we have found the file
targetEntry = e
break
}
}
// see if the file exists
// if the file does not exist, and is not opened for os.O_CREATE, return an error
if targetEntry == nil {
return nil, fmt.Errorf("Target file %s does not exist", p)
}
// now open the file
f = &File{
directoryEntry: targetEntry,
isReadWrite: false,
isAppend: false,
offset: 0,
}
} else {
f, err = os.OpenFile(path.Join(fs.workspace, p), flag, 0644)
if err != nil {
return nil, fmt.Errorf("Target file %s does not exist: %v", p, err)
}
}
return f, nil
}
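// Reading a file from the ISO might look like this (a sketch; the filename
// is hypothetical):
//
//  file, err := fs.OpenFile("/config/user-data", os.O_RDONLY)
//  if err != nil {
//    log.Fatal(err)
//  }
//  io.Copy(os.Stdout, file)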
// readDirectory - read directory entry on iso only (not workspace)
func (fs *FileSystem) readDirectory(p string) ([]*directoryEntry, error) {
var (
location, size uint32
err error
n int
)
// try from path table, then walk the directory tree, unless we were told explicitly not to
usePathtable := true
for _, e := range fs.suspExtensions {
usePathtable = e.UsePathtable()
if !usePathtable {
break
}
}
if usePathtable {
location, err = fs.pathTable.getLocation(p)
if err != nil {
return nil, fmt.Errorf("Unable to read path %s from path table: %v", p, err)
}
}
// if we found it, read the first directory entry to get the size
if location != 0 {
// we need 4 bytes to read the size of the directory; it is at offset 10 from beginning
dirb := make([]byte, 4)
n, err = fs.file.ReadAt(dirb, int64(location)*fs.blocksize+10)
if err != nil {
return nil, fmt.Errorf("Could not read directory %s: %v", p, err)
}
if n != len(dirb) {
return nil, fmt.Errorf("Read %d bytes instead of expected %d", n, len(dirb))
}
// convert to uint32
size = binary.LittleEndian.Uint32(dirb)
} else {
// if we could not find the location in the path table, try reading directly from the disk
// it is slow, but this is how Unix does it, since many iso creators *do* create illegitimate disks
location, size, err = fs.rootDir.getLocation(p)
if err != nil {
return nil, fmt.Errorf("Unable to read directory tree for %s: %v", p, err)
}
}
// did we still not find it?
if location == 0 {
return nil, fmt.Errorf("Could not find directory %s", p)
}
// we have a location, let's read the directories from it
b := make([]byte, size, size)
n, err = fs.file.ReadAt(b, int64(location)*fs.blocksize)
if err != nil {
return nil, fmt.Errorf("Could not read directory entries for %s: %v", p, err)
}
if n != int(size) {
return nil, fmt.Errorf("Reading directory %s returned %d bytes read instead of expected %d", p, n, size)
}
// parse the entries
entries, err := parseDirEntries(b, fs)
if err != nil {
return nil, fmt.Errorf("Could not parse directory entries for %s: %v", p, err)
}
return entries, nil
}
func validateBlocksize(blocksize int64) error {
switch blocksize {
case 0, 2048, 4096, 8192:
return nil
default:
return fmt.Errorf("blocksize for ISO9660 must be one of 2048, 4096, 8192")
}
}
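// Label return the label of the filesystem, i.e. the volume identifier from the primary volume descriptor, if any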
func (fs *FileSystem) Label() string {
if fs.volumes.primary == nil {
return ""
}
return fs.volumes.primary.volumeIdentifier
}

View File

@@ -0,0 +1,159 @@
package iso9660
import (
"encoding/binary"
"fmt"
)
// pathTable represents an on-iso path table
type pathTable struct {
records []*pathTableEntry
}
type pathTableEntry struct {
nameSize uint8
size uint16
extAttrLength uint8
location uint32
parentIndex uint16
dirname string
}
func (pt *pathTable) equal(b *pathTable) bool {
switch {
case pt == nil && b == nil:
return true
case pt == nil || b == nil:
return false
case len(pt.records) != len(b.records):
return false
return false
default:
for i, e := range pt.records {
if *e != *b.records[i] {
return false
}
}
}
return true
}
func (pt *pathTable) names() []string {
ret := make([]string, len(pt.records))
for i, v := range pt.records {
ret[i] = v.dirname
}
return ret
}
func (pt *pathTable) toLBytes() []byte {
b := make([]byte, 0)
for _, e := range pt.records {
name := []byte(e.dirname)
nameSize := len(name)
size := 8 + uint16(nameSize)
if nameSize%2 != 0 {
size++
}
b2 := make([]byte, size, size)
b2[0] = uint8(nameSize)
b2[1] = e.extAttrLength
binary.LittleEndian.PutUint32(b2[2:6], e.location)
binary.LittleEndian.PutUint16(b2[6:8], e.parentIndex)
copy(b2[8:8+nameSize], name)
if nameSize%2 != 0 {
b2[8+nameSize] = 0
}
b = append(b, b2...)
}
return b
}
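// As an illustration, the little-endian path table entry for a directory "BOOT"
// at block 20 with parent index 1 encodes as:
//
//  04 00 14 00 00 00 01 00 'B' 'O' 'O' 'T'
//
// name length 4, ext attr length 0, location 20 (little-endian), parent 1,
// then the name, padded to an even length (no pad needed here).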
func (pt *pathTable) toMBytes() []byte {
b := make([]byte, 0)
for _, e := range pt.records {
name := []byte(e.dirname)
nameSize := len(name)
size := 8 + uint16(nameSize)
if nameSize%2 != 0 {
size++
}
b2 := make([]byte, size, size)
b2[0] = uint8(nameSize)
b2[1] = e.extAttrLength
binary.BigEndian.PutUint32(b2[2:6], e.location)
binary.BigEndian.PutUint16(b2[6:8], e.parentIndex)
copy(b2[8:8+nameSize], name)
if nameSize%2 != 0 {
b2[8+nameSize] = 0
}
b = append(b, b2...)
}
return b
}
// getLocation gets the location of the extent that contains this path
// we can get the size because the first record always points to the current directory
func (pt *pathTable) getLocation(p string) (uint32, error) {
// break path down into parts and levels
parts, err := splitPath(p)
if err != nil {
return 0, fmt.Errorf("Could not parse path: %v", err)
}
// level represents the level of the parent
var level uint16 = 1
var location uint32
if len(parts) == 0 {
location = pt.records[0].location
} else {
current := parts[0]
// loop through the path table until we find our entry
// we always can go forward because of the known depth ordering of path table
for i, entry := range pt.records {
// did we find a match for our current level?
if entry.parentIndex == level && entry.dirname == current {
// path table indexes are 1-based
level = uint16(i + 1)
if len(parts) > 1 {
parts = parts[1:]
current = parts[0]
} else {
// this is the final one, we found it, keep it
location = entry.location
break
}
}
}
}
return location, nil
}
// parsePathTable load pathtable bytes into structures
func parsePathTable(b []byte) (*pathTable, error) {
totalSize := len(b)
entries := make([]*pathTableEntry, 0, 20)
for i := 0; i < totalSize; {
nameSize := uint8(b[i])
// is it zeroes? If so, we are at the end
if nameSize == 0 {
break
}
size := 8 + uint16(nameSize)
if nameSize%2 != 0 {
size++
}
extAttrSize := uint8(b[i+1])
location := binary.LittleEndian.Uint32(b[i+2 : i+6])
parent := binary.LittleEndian.Uint16(b[i+6 : i+8])
name := string(b[i+8 : i+8+int(nameSize)])
entry := &pathTableEntry{
nameSize: nameSize,
size: size,
extAttrLength: extAttrSize,
location: location,
parentIndex: parent,
dirname: name,
}
entries = append(entries, entry)
i += int(size)
}
return &pathTable{
records: entries,
}, nil
}

File diff suppressed because it is too large

View File

@@ -0,0 +1,18 @@
// +build !windows
package iso9660
import (
"os"
"syscall"
)
func statt(fi os.FileInfo) (uint32, uint32, uint32) {
if sys := fi.Sys(); sys != nil {
if stat, ok := sys.(*syscall.Stat_t); ok {
return uint32(stat.Nlink), uint32(stat.Uid), uint32(stat.Gid)
}
}
return uint32(0), uint32(0), uint32(0)
}

View File

@@ -0,0 +1,7 @@
// +build windows
package iso9660
func statt(sys interface{}) (uint32, uint32, uint32) {
return uint32(0), uint32(0), uint32(0)
}

View File

@@ -0,0 +1,80 @@
package iso9660
import (
"strings"
)
const (
// KB represents one KB
KB int64 = 1024
// MB represents one MB
MB int64 = 1024 * KB
// GB represents one GB
GB int64 = 1024 * MB
// TB represents one TB
TB int64 = 1024 * GB
)
func universalizePath(p string) (string, error) {
// globalize the separator
ps := strings.Replace(p, `\`, "/", -1)
return ps, nil
}
func splitPath(p string) ([]string, error) {
ps, err := universalizePath(p)
if err != nil {
return nil, err
}
// split on the separator and drop any empty elements
parts := strings.Split(ps, "/")
// eliminate empty parts
ret := make([]string, 0)
for _, sub := range parts {
if sub != "" {
ret = append(ret, sub)
}
}
return ret, nil
}
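// For example (illustrative): splitPath(`/A\B/C`) returns ["A", "B", "C"],
// since backslashes are normalized to "/" and empty elements are dropped.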
func ucs2StringToBytes(s string) []byte {
rs := []rune(s)
l := len(rs)
b := make([]byte, 0, 2*l)
// big endian
for _, r := range rs {
tmpb := []byte{byte(r >> 8), byte(r & 0x00ff)}
b = append(b, tmpb...)
}
return b
}
// bytesToUCS2String converts UCS-2 encoded bytes to a string. We aren't 100% sure that this is right,
// as it is possible to pass it an odd number of bytes. But good enough for now.
func bytesToUCS2String(b []byte) string {
r := make([]rune, 0, 30)
// now we can iterate - be careful in case we were given an odd number of bytes
for i := 0; i < len(b); {
// big endian
var val uint16
if i >= len(b)-1 {
val = uint16(b[i])
} else {
val = uint16(b[i])<<8 + uint16(b[i+1])
}
r = append(r, rune(val))
i += 2
}
return string(r)
}
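// Round-trip illustration: ucs2StringToBytes("AB") returns {0x00, 0x41, 0x00, 0x42}
// (big-endian UCS-2), and bytesToUCS2String of those bytes returns "AB" again.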
// maxInt returns the larger of x or y.
func maxInt(x, y int) int {
if x < y {
return y
}
return x
}

View File

@@ -0,0 +1,502 @@
package iso9660
import (
"bytes"
"encoding/binary"
"fmt"
"strconv"
"time"
)
type volumeDescriptorType uint8
const (
volumeDescriptorBoot volumeDescriptorType = 0x00
volumeDescriptorPrimary volumeDescriptorType = 0x01
volumeDescriptorSupplementary volumeDescriptorType = 0x02
volumeDescriptorPartition volumeDescriptorType = 0x03
volumeDescriptorTerminator volumeDescriptorType = 0xff
)
const (
isoIdentifier uint64 = 0x4344303031 // string "CD001"
isoVersion uint8 = 0x01
bootSystemIdentifier = "EL TORITO SPECIFICATION"
)
// volumeDescriptor interface for any given type of volume descriptor
type volumeDescriptor interface {
Type() volumeDescriptorType
toBytes() []byte
equal(volumeDescriptor) bool
}
type primaryVolumeDescriptor struct {
systemIdentifier string // length 32 bytes
volumeIdentifier string // length 32 bytes
volumeSize uint32 // in blocks
setSize uint16
sequenceNumber uint16
blocksize uint16
pathTableSize uint32
pathTableLLocation uint32
pathTableLOptionalLocation uint32
pathTableMLocation uint32
pathTableMOptionalLocation uint32
rootDirectoryEntry *directoryEntry
volumeSetIdentifier string // 128 bytes
publisherIdentifier string // 128 bytes
preparerIdentifier string // 128 bytes
applicationIdentifier string // 128 bytes
copyrightFile string // 37 bytes
abstractFile string // 37 bytes
bibliographicFile string // 37 bytes
creation time.Time
modification time.Time
expiration time.Time
effective time.Time
}
type bootVolumeDescriptor struct {
location uint32 // sector location of the boot catalog
}
type terminatorVolumeDescriptor struct {
}
type supplementaryVolumeDescriptor struct {
volumeFlags uint8
systemIdentifier string // length 32 bytes
volumeIdentifier string // length 32 bytes
volumeSize uint64 // in bytes
escapeSequences []byte // 32 bytes
setSize uint16
sequenceNumber uint16
blocksize uint16
pathTableSize uint32
pathTableLLocation uint32
pathTableLOptionalLocation uint32
pathTableMLocation uint32
pathTableMOptionalLocation uint32
rootDirectoryEntry *directoryEntry
volumeSetIdentifier string // 128 bytes
publisherIdentifier string // 128 bytes
preparerIdentifier string // 128 bytes
applicationIdentifier string // 128 bytes
copyrightFile string // 37 bytes
abstractFile string // 37 bytes
bibliographicFile string // 37 bytes
creation time.Time
modification time.Time
expiration time.Time
effective time.Time
}
type partitionVolumeDescriptor struct {
data []byte // length 2048 bytes; trailing 0x00 are stripped off
}
type volumeDescriptors struct {
descriptors []volumeDescriptor
primary *primaryVolumeDescriptor
}
func (v *volumeDescriptors) equal(a *volumeDescriptors) bool {
if len(v.descriptors) != len(a.descriptors) {
return false
}
// just convert everything to bytes and compare
return bytes.Equal(v.toBytes(), a.toBytes())
}
func (v *volumeDescriptors) toBytes() []byte {
b := make([]byte, 0, 20)
for _, d := range v.descriptors {
b = append(b, d.toBytes()...)
}
return b
}
// primaryVolumeDescriptor
func (v *primaryVolumeDescriptor) Type() volumeDescriptorType {
return volumeDescriptorPrimary
}
func (v *primaryVolumeDescriptor) equal(a volumeDescriptor) bool {
return bytes.Equal(v.toBytes(), a.toBytes())
}
func (v *primaryVolumeDescriptor) toBytes() []byte {
b := volumeDescriptorFirstBytes(volumeDescriptorPrimary)
copy(b[8:40], []byte(v.systemIdentifier))
copy(b[40:72], []byte(v.volumeIdentifier))
binary.LittleEndian.PutUint32(b[80:84], v.volumeSize)
binary.BigEndian.PutUint32(b[84:88], v.volumeSize)
binary.LittleEndian.PutUint16(b[120:122], v.setSize)
binary.BigEndian.PutUint16(b[122:124], v.setSize)
binary.LittleEndian.PutUint16(b[124:126], v.sequenceNumber)
binary.BigEndian.PutUint16(b[126:128], v.sequenceNumber)
binary.LittleEndian.PutUint16(b[128:130], v.blocksize)
binary.BigEndian.PutUint16(b[130:132], v.blocksize)
binary.LittleEndian.PutUint32(b[132:136], v.pathTableSize)
binary.BigEndian.PutUint32(b[136:140], v.pathTableSize)
binary.LittleEndian.PutUint32(b[140:144], v.pathTableLLocation)
binary.LittleEndian.PutUint32(b[144:148], v.pathTableLOptionalLocation)
binary.BigEndian.PutUint32(b[148:152], v.pathTableMLocation)
binary.BigEndian.PutUint32(b[152:156], v.pathTableMOptionalLocation)
rootDirEntry := make([]byte, 34)
if v.rootDirectoryEntry != nil {
// we will skip the extensions anyways, so the CE blocks do not matter
rootDirEntrySlice, _ := v.rootDirectoryEntry.toBytes(true, []uint32{})
rootDirEntry = rootDirEntrySlice[0]
}
copy(b[156:156+34], rootDirEntry)
copy(b[190:190+128], []byte(v.volumeSetIdentifier))
copy(b[318:318+128], []byte(v.publisherIdentifier))
copy(b[446:446+128], []byte(v.preparerIdentifier))
copy(b[574:574+128], []byte(v.applicationIdentifier))
copy(b[702:702+37], []byte(v.copyrightFile))
copy(b[739:739+37], []byte(v.abstractFile))
copy(b[776:776+37], []byte(v.bibliographicFile))
copy(b[813:813+17], timeToDecBytes(v.creation))
copy(b[830:830+17], timeToDecBytes(v.modification))
copy(b[847:847+17], timeToDecBytes(v.expiration))
copy(b[864:864+17], timeToDecBytes(v.effective))
// these two are set by the standard
b[881] = 1
b[882] = 0
return b
}
// volumeDescriptorFromBytes create a volumeDescriptor struct from bytes
func volumeDescriptorFromBytes(b []byte) (volumeDescriptor, error) {
if len(b) != int(volumeDescriptorSize) {
return nil, fmt.Errorf("Cannot read volume descriptor from bytes of length %d, must be %d", len(b), volumeDescriptorSize)
}
// validate the signature
tmpb := make([]byte, 8, 8)
copy(tmpb[3:8], b[1:6])
signature := binary.BigEndian.Uint64(tmpb)
if signature != isoIdentifier {
return nil, fmt.Errorf("Mismatched ISO identifier in Volume Descriptor. Found %x expected %x", signature, isoIdentifier)
}
// validate the version
version := b[6]
if version != isoVersion {
return nil, fmt.Errorf("Mismatched ISO version in Volume Descriptor. Found %x expected %x", version, isoVersion)
}
// get the type and data - later we will be more intelligent about this and read actual primary volume info
vdType := volumeDescriptorType(b[0])
var vd volumeDescriptor
var err error
switch vdType {
case volumeDescriptorPrimary:
vd, err = parsePrimaryVolumeDescriptor(b)
if err != nil {
return nil, fmt.Errorf("Unable to parse primary volume descriptor bytes: %v", err)
}
case volumeDescriptorBoot:
vd, err = parseBootVolumeDescriptor(b)
if err != nil {
return nil, fmt.Errorf("Unable to parse primary volume descriptor bytes: %v", err)
}
case volumeDescriptorTerminator:
vd = &terminatorVolumeDescriptor{}
case volumeDescriptorPartition:
vd = &partitionVolumeDescriptor{
data: b[8:volumeDescriptorSize],
}
case volumeDescriptorSupplementary:
vd, err = parseSupplementaryVolumeDescriptor(b)
if err != nil {
return nil, fmt.Errorf("Unable to parse primary volume descriptor bytes: %v", err)
}
default:
return nil, fmt.Errorf("Unknown volume descriptor type %d", vdType)
}
return vd, nil
}
func parsePrimaryVolumeDescriptor(b []byte) (*primaryVolumeDescriptor, error) {
blocksize := binary.LittleEndian.Uint16(b[128:130])
creation, err := decBytesToTime(b[813 : 813+17])
if err != nil {
return nil, fmt.Errorf("Unable to convert creation date/time from bytes: %v", err)
}
modification, err := decBytesToTime(b[830 : 830+17])
if err != nil {
return nil, fmt.Errorf("Unable to convert modification date/time from bytes: %v", err)
}
// expiration can be never
nullBytes := []byte{48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 0}
var expiration, effective time.Time
expirationBytes := b[847 : 847+17]
effectiveBytes := b[864 : 864+17]
if !bytes.Equal(expirationBytes, nullBytes) {
expiration, err = decBytesToTime(expirationBytes)
if err != nil {
return nil, fmt.Errorf("Unable to convert expiration date/time from bytes: %v", err)
}
}
if !bytes.Equal(effectiveBytes, nullBytes) {
effective, err = decBytesToTime(effectiveBytes)
if err != nil {
return nil, fmt.Errorf("Unable to convert effective date/time from bytes: %v", err)
}
}
rootDirEntry, err := dirEntryFromBytes(b[156:156+34], nil)
if err != nil {
return nil, fmt.Errorf("Unable to read root directory entry: %v", err)
}
return &primaryVolumeDescriptor{
systemIdentifier: string(b[8:40]),
volumeIdentifier: string(b[40:72]),
volumeSize: binary.LittleEndian.Uint32(b[80:84]),
setSize: binary.LittleEndian.Uint16(b[120:122]),
sequenceNumber: binary.LittleEndian.Uint16(b[124:126]),
blocksize: blocksize,
pathTableSize: binary.LittleEndian.Uint32(b[132:136]),
pathTableLLocation: binary.LittleEndian.Uint32(b[140:144]),
pathTableLOptionalLocation: binary.LittleEndian.Uint32(b[144:148]),
pathTableMLocation: binary.BigEndian.Uint32(b[148:152]),
pathTableMOptionalLocation: binary.BigEndian.Uint32(b[152:156]),
volumeSetIdentifier: string(b[190 : 190+128]),
publisherIdentifier: string(b[318 : 318+128]),
preparerIdentifier: string(b[446 : 446+128]),
applicationIdentifier: string(b[574 : 574+128]),
copyrightFile: string(b[702 : 702+37]),
abstractFile: string(b[739 : 739+37]),
bibliographicFile: string(b[776 : 776+37]),
creation: creation,
modification: modification,
expiration: expiration,
effective: effective,
rootDirectoryEntry: rootDirEntry,
}, nil
}
// terminatorVolumeDescriptor
func (v *terminatorVolumeDescriptor) Type() volumeDescriptorType {
return volumeDescriptorTerminator
}
func (v *terminatorVolumeDescriptor) equal(a volumeDescriptor) bool {
return bytes.Equal(v.toBytes(), a.toBytes())
}
func (v *terminatorVolumeDescriptor) toBytes() []byte {
b := volumeDescriptorFirstBytes(volumeDescriptorTerminator)
return b
}
// bootVolumeDescriptor
func (v *bootVolumeDescriptor) Type() volumeDescriptorType {
return volumeDescriptorBoot
}
func (v *bootVolumeDescriptor) equal(a volumeDescriptor) bool {
return bytes.Equal(v.toBytes(), a.toBytes())
}
func (v *bootVolumeDescriptor) toBytes() []byte {
b := volumeDescriptorFirstBytes(volumeDescriptorBoot)
copy(b[7:39], []byte(bootSystemIdentifier))
binary.LittleEndian.PutUint32(b[0x47:0x4b], v.location)
return b
}
// parseBootVolumeDescriptor
func parseBootVolumeDescriptor(b []byte) (*bootVolumeDescriptor, error) {
systemIdentifier := string(b[0x7 : 0x7+len(bootSystemIdentifier)])
if systemIdentifier != bootSystemIdentifier {
return nil, fmt.Errorf("Incorrect specification, actual '%s' expected '%s'", systemIdentifier, bootSystemIdentifier)
}
location := binary.LittleEndian.Uint32(b[0x47:0x4b])
return &bootVolumeDescriptor{location: location}, nil
}
// supplementaryVolumeDescriptor
func parseSupplementaryVolumeDescriptor(b []byte) (*supplementaryVolumeDescriptor, error) {
blocksize := binary.LittleEndian.Uint16(b[128:130])
volumesize := binary.LittleEndian.Uint32(b[80:84])
volumesizeBytes := uint64(blocksize) * uint64(volumesize)
creation, err := decBytesToTime(b[813 : 813+17])
if err != nil {
return nil, fmt.Errorf("Unable to convert creation date/time from bytes: %v", err)
}
modification, err := decBytesToTime(b[830 : 830+17])
if err != nil {
return nil, fmt.Errorf("Unable to convert modification date/time from bytes: %v", err)
}
// expiration can be never
nullBytes := []byte{48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 0}
var expiration, effective time.Time
expirationBytes := b[847 : 847+17]
effectiveBytes := b[864 : 864+17]
if !bytes.Equal(expirationBytes, nullBytes) {
expiration, err = decBytesToTime(expirationBytes)
if err != nil {
return nil, fmt.Errorf("Unable to convert expiration date/time from bytes: %v", err)
}
}
if !bytes.Equal(effectiveBytes, nullBytes) {
effective, err = decBytesToTime(effectiveBytes)
if err != nil {
return nil, fmt.Errorf("Unable to convert effective date/time from bytes: %v", err)
}
}
// no susp extensions for the dir entry in the volume descriptor
rootDirEntry, err := dirEntryFromBytes(b[156:156+34], nil)
if err != nil {
return nil, fmt.Errorf("Unable to read root directory entry: %v", err)
}
return &supplementaryVolumeDescriptor{
systemIdentifier: string(b[8:40]),
volumeIdentifier: string(b[40:72]),
volumeSize: volumesizeBytes,
setSize: binary.LittleEndian.Uint16(b[120:122]),
sequenceNumber: binary.LittleEndian.Uint16(b[124:126]),
blocksize: blocksize,
pathTableSize: binary.LittleEndian.Uint32(b[132:136]),
pathTableLLocation: binary.LittleEndian.Uint32(b[140:144]),
pathTableLOptionalLocation: binary.LittleEndian.Uint32(b[144:148]),
pathTableMLocation: binary.BigEndian.Uint32(b[148:152]),
pathTableMOptionalLocation: binary.BigEndian.Uint32(b[152:156]),
volumeSetIdentifier: bytesToUCS2String(b[190 : 190+128]),
publisherIdentifier: bytesToUCS2String(b[318 : 318+128]),
preparerIdentifier: bytesToUCS2String(b[446 : 446+128]),
applicationIdentifier: bytesToUCS2String(b[574 : 574+128]),
copyrightFile: bytesToUCS2String(b[702 : 702+37]),
abstractFile: bytesToUCS2String(b[739 : 739+37]),
bibliographicFile: bytesToUCS2String(b[776 : 776+37]),
creation: creation,
modification: modification,
expiration: expiration,
effective: effective,
rootDirectoryEntry: rootDirEntry,
}, nil
}
func (v *supplementaryVolumeDescriptor) Type() volumeDescriptorType {
return volumeDescriptorSupplementary
}
func (v *supplementaryVolumeDescriptor) equal(a volumeDescriptor) bool {
return bytes.Equal(v.toBytes(), a.toBytes())
}
func (v *supplementaryVolumeDescriptor) toBytes() []byte {
b := volumeDescriptorFirstBytes(volumeDescriptorSupplementary)
copy(b[8:40], []byte(v.systemIdentifier))
copy(b[40:72], []byte(v.volumeIdentifier))
blockcount := uint32(v.volumeSize / uint64(v.blocksize))
binary.LittleEndian.PutUint32(b[80:84], blockcount)
binary.BigEndian.PutUint32(b[84:88], blockcount)
binary.LittleEndian.PutUint16(b[120:122], v.setSize)
binary.BigEndian.PutUint16(b[122:124], v.setSize)
binary.LittleEndian.PutUint16(b[124:126], v.sequenceNumber)
binary.BigEndian.PutUint16(b[126:128], v.sequenceNumber)
binary.LittleEndian.PutUint16(b[128:130], v.blocksize)
binary.BigEndian.PutUint16(b[130:132], v.blocksize)
binary.LittleEndian.PutUint32(b[132:136], v.pathTableSize)
binary.BigEndian.PutUint32(b[136:140], v.pathTableSize)
binary.LittleEndian.PutUint32(b[140:144], v.pathTableLLocation)
binary.LittleEndian.PutUint32(b[144:148], v.pathTableLOptionalLocation)
binary.BigEndian.PutUint32(b[148:152], v.pathTableMLocation)
binary.BigEndian.PutUint32(b[152:156], v.pathTableMOptionalLocation)
rootDirEntry := make([]byte, 34)
if v.rootDirectoryEntry != nil {
// we will skip the extensions anyways, so the CE blocks do not matter
rootDirEntrySlice, _ := v.rootDirectoryEntry.toBytes(true, []uint32{})
rootDirEntry = rootDirEntrySlice[0]
}
copy(b[156:156+34], rootDirEntry)
copy(b[190:190+128], ucs2StringToBytes(v.volumeSetIdentifier))
copy(b[318:318+128], ucs2StringToBytes(v.publisherIdentifier))
copy(b[446:446+128], ucs2StringToBytes(v.preparerIdentifier))
copy(b[574:574+128], ucs2StringToBytes(v.applicationIdentifier))
copy(b[702:702+37], ucs2StringToBytes(v.copyrightFile))
copy(b[739:739+37], ucs2StringToBytes(v.abstractFile))
copy(b[776:776+37], ucs2StringToBytes(v.bibliographicFile))
copy(b[813:813+17], timeToDecBytes(v.creation))
copy(b[830:830+17], timeToDecBytes(v.modification))
copy(b[847:847+17], timeToDecBytes(v.expiration))
copy(b[864:864+17], timeToDecBytes(v.effective))
return b
}
// partitionVolumeDescriptor
func (v *partitionVolumeDescriptor) Type() volumeDescriptorType {
return volumeDescriptorPartition
}
func (v *partitionVolumeDescriptor) equal(a volumeDescriptor) bool {
return bytes.Equal(v.toBytes(), a.toBytes())
}
func (v *partitionVolumeDescriptor) toBytes() []byte {
b := volumeDescriptorFirstBytes(volumeDescriptorPartition)
copy(b[7:], v.data)
return b
}
// utilities
func volumeDescriptorFirstBytes(t volumeDescriptorType) []byte {
b := make([]byte, volumeDescriptorSize, volumeDescriptorSize)
b[0] = byte(t)
tmpb := make([]byte, 8, 8)
binary.BigEndian.PutUint64(tmpb[:], isoIdentifier)
copy(b[1:6], tmpb[3:8])
b[6] = isoVersion
return b
}
func decBytesToTime(b []byte) (time.Time, error) {
year := string(b[0:4])
month := string(b[4:6])
date := string(b[6:8])
hour := string(b[8:10])
minute := string(b[10:12])
second := string(b[12:14])
csec := string(b[14:16])
offset := int(int8(b[16]))
// the offset from GMT is stored in 15-minute intervals
offsetMinutes := offset * 15
format := "2006-01-02T15:04:05-07:00"
offsetHr := offsetMinutes / 60
offsetMin := offsetMinutes % 60
offsetString := ""
// if negative offset, show it just on the hour part, not twice, so we end up with "-06:30" and not "-06:-30"
switch {
case offset == 0:
offsetString = "+00:00"
case offset < 0:
offsetString = fmt.Sprintf("-%02d:%02d", -offsetHr, -offsetMin)
case offset > 0:
offsetString = fmt.Sprintf("+%02d:%02d", offsetHr, offsetMin)
}
return time.Parse(format, fmt.Sprintf("%s-%s-%sT%s:%s:%s.%s%s", year, month, date, hour, minute, second, csec, offsetString))
}
func timeToDecBytes(t time.Time) []byte {
year := strconv.Itoa(t.Year())
month := strconv.Itoa(int(t.Month()))
date := strconv.Itoa(t.Day())
hour := strconv.Itoa(t.Hour())
minute := strconv.Itoa(t.Minute())
second := strconv.Itoa(t.Second())
csec := strconv.Itoa(t.Nanosecond() / 1e+7)
_, offset := t.Zone()
b := make([]byte, 17, 17)
copy(b[0:4], []byte(fmt.Sprintf("%04s", year)))
copy(b[4:6], []byte(fmt.Sprintf("%02s", month)))
copy(b[6:8], []byte(fmt.Sprintf("%02s", date)))
copy(b[8:10], []byte(fmt.Sprintf("%02s", hour)))
copy(b[10:12], []byte(fmt.Sprintf("%02s", minute)))
copy(b[12:14], []byte(fmt.Sprintf("%02s", second)))
copy(b[14:16], []byte(fmt.Sprintf("%02s", csec)))
b[16] = byte(offset / 60 / 15)
return b
}
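// As an illustration, 2020-04-24 11:23:51 at UTC+3 encodes as the ASCII digits
// "2020042411235100" followed by offset byte 12, since +3 hours is 12
// fifteen-minute intervals; decBytesToTime reverses the transformation.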

View File

@@ -0,0 +1,9 @@
module github.com/diskfs/go-diskfs
go 1.12
require (
github.com/google/uuid v1.1.1
golang.org/x/sys v0.0.0-20200420163511-1957bb5e6d1f
gopkg.in/djherbis/times.v1 v1.2.0
)

View File

@@ -0,0 +1,17 @@
package gpt
// bytesToUUIDBytes converts between the canonical UUID byte order and the mixed-endian order GPT uses to store UUIDs on disk
func bytesToUUIDBytes(in []byte) []byte {
// the first 3 sections (4 bytes, 2 bytes, 2 bytes) are little-endian, the last 2 sections are big-endian
b := make([]byte, 0, 16)
b = append(b, in[0:16]...)
tmpb := b[0:4]
reverseSlice(tmpb)
tmpb = b[4:6]
reverseSlice(tmpb)
tmpb = b[6:8]
reverseSlice(tmpb)
return b
}
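// For example (illustrative): the UUID 00112233-4455-6677-8899-AABBCCDDEEFF is
// stored on disk as 33 22 11 00 55 44 77 66 88 99 AA BB CC DD EE FF - the first
// three groups byte-swapped, the last two kept as-is.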

View File

@@ -0,0 +1,26 @@
// Package gpt provides an interface to GUID Partition Table (GPT) partitioned disks.
//
// You can use this package to manipulate existing GPT disks, read existing disks, or create entirely
// new partition tables on disks or disk files.
//
// gpt.Table implements the Table interface in github.com/diskfs/go-diskfs/partition
//
// Normally, the best way to interact with a disk is to use the github.com/diskfs/go-diskfs package,
// which, when necessary, will call this one. When creating a new disk or manipulating an existing one,
// you will, however, need to interact with gpt.Table and gpt.Partition structs.
//
// Here is a simple example of a GPT Table with a single 10MB Linux partition:
//
// table := &gpt.Table{
// LogicalSectorSize: 512,
// PhysicalSectorSize: 512,
// Partitions: []*mbr.Partition{
// {
// LogicalSectorSize: 512,
// PhysicalSectorSize: 512,
// ProtectiveMBR: true,
// GUID: "43E51892-3273-42F7-BCDA-B43B80CDFC48",
// },
// },
// }
package gpt

View File

@@ -0,0 +1,280 @@
package gpt
import (
"encoding/binary"
"fmt"
"io"
"reflect"
"strings"
"unicode/utf16"
"github.com/diskfs/go-diskfs/util"
uuid "github.com/google/uuid"
)
// PartitionEntrySize fixed size of a GPT partition entry
const PartitionEntrySize = 128
// Partition represents the structure of a single partition on the disk
type Partition struct {
Start uint64 // start sector for the partition
End uint64 // end sector for the partition
Size uint64 // size of the partition in bytes
Type Type // parttype for the partition
Name string // name for the partition
GUID string // partition GUID, can be left blank to auto-generate
Attributes uint64 // Attributes flags
logicalSectorSize int
physicalSectorSize int
}
func reverseSlice(s interface{}) {
size := reflect.ValueOf(s).Len()
swap := reflect.Swapper(s)
for i, j := 0, size-1; i < j; i, j = i+1, j-1 {
swap(i, j)
}
}
// toBytes return the 128 bytes for this partition
func (p *Partition) toBytes() ([]byte, error) {
b := make([]byte, PartitionEntrySize, PartitionEntrySize)
// if the Type is Unused, just return all zeroes
if p.Type == Unused {
return b, nil
}
// partition type GUID is first 16 bytes
typeGUID, err := uuid.Parse(string(p.Type))
if err != nil {
return nil, fmt.Errorf("Unable to parse partition type GUID: %v", err)
}
copy(b[0:16], bytesToUUIDBytes(typeGUID[0:16]))
// partition identifier GUID is next 16 bytes
idGUID, err := uuid.Parse(p.GUID)
if err != nil {
return nil, fmt.Errorf("Unable to parse partition identifier GUID: %v", err)
}
copy(b[16:32], bytesToUUIDBytes(idGUID[0:16]))
// next is first LBA and last LBA, uint64 = 8 bytes each
binary.LittleEndian.PutUint64(b[32:40], p.Start)
binary.LittleEndian.PutUint64(b[40:48], p.End)
binary.LittleEndian.PutUint64(b[48:56], p.Attributes)
// now the partition name - it is UTF16LE encoded, max 36 code units for 72 bytes
r := make([]rune, 0, len(p.Name))
// first convert to runes
for _, s := range p.Name {
r = append(r, rune(s))
}
if len(r) > 36 {
return nil, fmt.Errorf("Cannot use %s as partition name, has %d Unicode code units, maximum size is 36", p.Name, len(r))
}
// next convert the runes to uint16
nameb := utf16.Encode(r)
// and then convert to little-endian bytes
for i, u := range nameb {
pos := 56 + i*2
binary.LittleEndian.PutUint16(b[pos:pos+2], u)
}
return b, nil
}
// partitionFromBytes create a partition entry from bytes
func partitionFromBytes(b []byte, logicalSectorSize, physicalSectorSize int) (*Partition, error) {
if len(b) != PartitionEntrySize {
return nil, fmt.Errorf("Data for partition was %d bytes instead of expected %d", len(b), PartitionEntrySize)
}
// read the partition type GUID
typeGUID, err := uuid.FromBytes(bytesToUUIDBytes(b[0:16]))
if err != nil {
return nil, fmt.Errorf("unable to read partition type GUID: %v", err)
}
typeString := typeGUID.String()
// read the unique partition GUID; name it so it does not shadow the uuid package
id, err := uuid.FromBytes(bytesToUUIDBytes(b[16:32]))
if err != nil {
return nil, fmt.Errorf("unable to read partition identifier GUID: %v", err)
}
firstLBA := binary.LittleEndian.Uint64(b[32:40])
lastLBA := binary.LittleEndian.Uint64(b[40:48])
attribs := binary.LittleEndian.Uint64(b[48:56])
// get the partition name
nameb := b[56:]
u := make([]uint16, 0, 72)
for i := 0; i < len(nameb); i += 2 {
// strip any 0s off of the end
entry := binary.LittleEndian.Uint16(nameb[i : i+2])
if entry == 0 {
break
}
u = append(u, entry)
}
r := utf16.Decode(u)
name := string(r)
return &Partition{
Start: firstLBA,
End: lastLBA,
Name: name,
GUID: strings.ToUpper(id.String()),
Attributes: attribs,
Type: Type(strings.ToUpper(typeString)),
logicalSectorSize: logicalSectorSize,
physicalSectorSize: physicalSectorSize,
}, nil
}
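// GetSize return the size of the partition in bytes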
func (p *Partition) GetSize() int64 {
// size already is in Bytes
return int64(p.Size)
}
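// GetStart return the start position of the partition on the disk, in bytes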
func (p *Partition) GetStart() int64 {
_, lss := p.sectorSizes()
return int64(p.Start) * int64(lss)
}
// WriteContents fills the partition with the contents provided
// reads from beginning of reader to exactly size of partition in bytes
func (p *Partition) WriteContents(f util.File, contents io.Reader) (uint64, error) {
pss, lss := p.sectorSizes()
total := uint64(0)
// validate start/end/size
calculatedSize := (p.End - p.Start + 1) * uint64(lss)
switch {
case p.Size <= 0 && p.End > p.Start:
p.Size = calculatedSize
case p.Size > 0 && p.End <= p.Start:
p.End = p.Start + p.Size/uint64(lss)
case p.Size > 0 && p.Size == calculatedSize:
// all is good
default:
return total, fmt.Errorf("Cannot reconcile partition size %d with start %d / end %d", p.Size, p.Start, p.End)
}
// chunks of physical sector size for efficient writing
b := make([]byte, pss, pss)
// we start at the correct byte location
start := p.Start * uint64(lss)
// loop in physical sector sizes
for {
read, err := contents.Read(b)
if err != nil && err != io.EOF {
return total, fmt.Errorf("Could not read contents to pass to partition: %v", err)
}
tmpTotal := uint64(read) + total
if tmpTotal > p.Size {
return total, fmt.Errorf("Requested to write at least %d bytes to partition but maximum size is %d", tmpTotal, p.Size)
}
if read > 0 {
var written int
written, err = f.WriteAt(b[:read], int64(start+total))
if err != nil {
return total, fmt.Errorf("Error writing to file: %v", err)
}
total = total + uint64(written)
}
// is this the end of the data?
if err == io.EOF {
break
}
}
// did the total written equal the size of the partition?
if total != p.Size {
return total, fmt.Errorf("Wrote %d bytes to partition but actual size is %d", total, p.Size)
}
return total, nil
}
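// A usage sketch (the disk and image paths here are hypothetical):
//
//  disk, _ := os.OpenFile("/tmp/disk.img", os.O_RDWR, 0644)
//  contents, _ := os.Open("/tmp/rootfs.img")
//  written, err := part.WriteContents(disk, contents)
//
// The reader is streamed into the partition in physical-sector-sized chunks;
// supplying more bytes than the partition size is an error.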
// ReadContents reads the contents of the partition into a writer
// streams the entire partition to the writer
func (p *Partition) ReadContents(f util.File, out io.Writer) (int64, error) {
pss, lss := p.sectorSizes()
total := int64(0)
// chunks of physical sector size for efficient writing
b := make([]byte, pss, pss)
// we start at the correct byte location
start := p.Start * uint64(lss)
// p.Size already is in bytes, so no need to multiply by the sector size
size := p.Size
// loop in physical sector sizes
for {
read, err := f.ReadAt(b, int64(start)+total)
if err != nil && err != io.EOF {
return total, fmt.Errorf("Error reading from file: %v", err)
}
if read > 0 {
out.Write(b[:read])
}
// increment our total
total += int64(read)
// is this the end of the data?
if err == io.EOF || total >= int64(size) {
break
}
}
return total, nil
}
// initEntry adjust the Start/End/Size entries and ensure it has a GUID
func (p *Partition) initEntry(blocksize uint64, starting uint64) error {
part := p
if part.Type == Unused {
return nil
}
var guid uuid.UUID
if part.GUID == "" {
guid, _ = uuid.NewRandom()
} else {
var err error
guid, err = uuid.Parse(part.GUID)
if err != nil {
return fmt.Errorf("Invalid UUID: %s", part.GUID)
}
}
part.GUID = strings.ToUpper(guid.String())
// check size matches sectors
// valid possibilities:
// 1- size=0, start>=0, end>start - valid - begin at start, go until end
// 2- size>0, start>=0, end=0 - valid - begin at start for size bytes
// 3- size>0, start=0, end=0 - valid - begin at end of previous partition, go for size bytes
// anything else is an error
size, start, end := part.Size, part.Start, part.End
calculatedSize := (end - start + 1) * blocksize
switch {
case start >= 0 && end > start && size == calculatedSize:
case size == 0 && start >= 0 && end > start:
// provided specific start and end, so calculate size
part.Size = calculatedSize
case size > 0 && start > 0 && end == 0:
part.End = start + size/blocksize - 1
case size > 0 && start == 0 && end == 0:
// we start right after the end of the previous
start = starting
end = start + size/blocksize - 1
part.Start = start
part.End = end
default:
return fmt.Errorf("Invalid partition entry, size %d bytes does not match start sector %d and end sector %d", size, start, end)
}
return nil
}
func (p *Partition) sectorSizes() (physical, logical int) {
physical, logical = p.physicalSectorSize, p.logicalSectorSize
if physical == 0 {
physical = physicalSectorSize
}
if logical == 0 {
logical = logicalSectorSize
}
return physical, logical
}

View File

@@ -0,0 +1,527 @@
package gpt
import (
"bytes"
"encoding/binary"
"fmt"
"hash/crc32"
"strings"
"github.com/diskfs/go-diskfs/partition/part"
"github.com/diskfs/go-diskfs/util"
uuid "github.com/google/uuid"
)
// gptSize maximum size of the GPT partition array reserved on disk: 128 entries of 128 bytes each = 16384 bytes
const (
gptSize = 128 * 128
mbrPartitionEntriesStart = 446
mbrPartitionEntriesCount = 4
mbrPartitionEntrySize = 16
// just defaults
physicalSectorSize = 512
logicalSectorSize = 512
)
// Table represents a partition table to be applied to a disk or read from a disk
type Table struct {
Partitions []*Partition // slice of Partition
LogicalSectorSize int // logical size of a sector
PhysicalSectorSize int // physical size of the sector
GUID string // disk GUID, can be left blank to auto-generate
ProtectiveMBR bool // whether or not a protective MBR is in place
partitionArraySize int // how many entries are in the partition array
partitionEntrySize uint32 // size of the partition entry in the table, usually 128 bytes
primaryHeader uint64 // LBA of primary header, always 1
secondaryHeader uint64 // LBA of secondary header, always last sectors on disk
firstDataSector uint64 // LBA of first data sector
lastDataSector uint64 // LBA of last data sector
initialized bool
}
func getEfiSignature() []byte {
return []byte{0x45, 0x46, 0x49, 0x20, 0x50, 0x41, 0x52, 0x54}
}
func getEfiRevision() []byte {
return []byte{0x00, 0x00, 0x01, 0x00}
}
func getEfiHeaderSize() []byte {
return []byte{0x5c, 0x00, 0x00, 0x00}
}
func getEfiZeroes() []byte {
return []byte{0x00, 0x00, 0x00, 0x00}
}
func getMbrSignature() []byte {
return []byte{0x55, 0xaa}
}
// check if a byte slice is all zeroes
func zeroMatch(b []byte) bool {
if len(b) < 1 {
return true
}
for _, val := range b {
if val != 0 {
return false
}
}
return true
}
// ensure that a blank table is initialized
func (t *Table) initTable(size int64) {
// default settings
if t.LogicalSectorSize == 0 {
t.LogicalSectorSize = 512
}
if t.PhysicalSectorSize == 0 {
t.PhysicalSectorSize = 512
}
if t.primaryHeader == 0 {
t.primaryHeader = 1
}
if t.GUID == "" {
guid, _ := uuid.NewRandom()
t.GUID = guid.String()
}
if t.partitionArraySize == 0 {
t.partitionArraySize = 128
}
if t.partitionEntrySize == 0 {
t.partitionEntrySize = 128
}
// how many sectors on the disk?
diskSectors := uint64(size) / uint64(t.LogicalSectorSize)
// how many sectors used for partition entries?
partSectors := uint64(t.partitionArraySize) * uint64(t.partitionEntrySize) / uint64(t.LogicalSectorSize)
if t.firstDataSector == 0 {
t.firstDataSector = 2 + partSectors
}
if t.secondaryHeader == 0 {
t.secondaryHeader = diskSectors - 1
}
if t.lastDataSector == 0 {
t.lastDataSector = diskSectors - 1 - partSectors
}
t.initialized = true
}
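// For example (illustrative): with 512-byte sectors and the default 128 entries
// of 128 bytes, the partition array occupies 128*128/512 = 32 sectors, so the
// first usable data sector is 2 + 32 = 34 (LBA 0 holds the protective MBR and
// LBA 1 the GPT header), and the last data sector is set to diskSectors - 1 - 32.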
// Equal check if another table is functionally equal to this one
func (t *Table) Equal(t2 *Table) bool {
if t2 == nil {
return false
}
// neither is nil, so now we need to compare
basicMatch := t.LogicalSectorSize == t2.LogicalSectorSize &&
t.PhysicalSectorSize == t2.PhysicalSectorSize &&
t.partitionEntrySize == t2.partitionEntrySize &&
t.primaryHeader == t2.primaryHeader &&
t.secondaryHeader == t2.secondaryHeader &&
t.firstDataSector == t2.firstDataSector &&
t.lastDataSector == t2.lastDataSector &&
t.partitionArraySize == t2.partitionArraySize &&
t.ProtectiveMBR == t2.ProtectiveMBR &&
t.GUID == t2.GUID
partMatch := comparePartitionArray(t.Partitions, t2.Partitions)
return basicMatch && partMatch
}
func comparePartitionArray(p1, p2 []*Partition) bool {
if (p1 == nil && p2 != nil) || (p2 == nil && p1 != nil) {
return false
}
if p1 == nil && p2 == nil {
return true
}
// neither is nil, so now we need to compare
if len(p1) != len(p2) {
return false
}
matches := true
for i, p := range p1 {
if p.Type == Unused && p2[i].Type == Unused {
continue
}
if *p != *p2[i] {
matches = false
break
}
}
return matches
}
// readProtectiveMBR reports whether a protective MBR exists in a byte slice
func readProtectiveMBR(b []byte, sectors uint32) bool {
size := len(b)
if size < 512 {
return false
}
// check for MBR signature
if bytes.Compare(b[size-2:], getMbrSignature()) != 0 {
return false
}
// get the partitions
parts := b[mbrPartitionEntriesStart : mbrPartitionEntriesStart+mbrPartitionEntrySize*mbrPartitionEntriesCount]
// all entries except the first should be zeroes
for i := 1; i < mbrPartitionEntriesCount; i++ {
if !zeroMatch(parts[i*mbrPartitionEntrySize : (i+1)*mbrPartitionEntrySize]) {
return false
}
}
// finally, the first one should be a non-bootable partition of type 0xee that covers the whole disk
if parts[0] != 0x00 {
return false
}
// we ignore head/cylinder/sector
// partition type 0xee
if parts[4] != 0xee {
return false
}
if binary.LittleEndian.Uint32(parts[8:12]) != 1 {
return false
}
if binary.LittleEndian.Uint32(parts[12:16]) != sectors {
return false
}
return true
}
// partitionArraySector get the sector that holds the primary or secondary partition array
func (t *Table) partitionArraySector(primary bool) uint64 {
if primary {
return t.primaryHeader + 1
}
return t.secondaryHeader - uint64(t.partitionArraySize)*uint64(t.partitionEntrySize)/uint64(t.LogicalSectorSize)
}
func (t *Table) generateProtectiveMBR() []byte {
b := make([]byte, 512, 512)
// we don't do anything to the first 446 bytes
copy(b[510:], getMbrSignature())
// create the single all disk partition
parts := b[mbrPartitionEntriesStart : mbrPartitionEntriesStart+mbrpartitionEntrySize]
// non-bootable
parts[0] = 0x00
// ignore CHS entirely
// partition type 0xee
parts[4] = 0xee
// ignore CHS entirely
// start LBA 1
binary.LittleEndian.PutUint32(parts[8:12], 1)
// end LBA: the last one on disk
binary.LittleEndian.PutUint32(parts[12:16], uint32(t.secondaryHeader))
return b
}
// toPartitionArrayBytes writes the bytes for the partition array
func (t *Table) toPartitionArrayBytes() ([]byte, error) {
blocksize := uint64(t.LogicalSectorSize)
firstblock := t.LogicalSectorSize
nextstart := uint64(firstblock)
realParts := make([]*Partition, 0, len(t.Partitions))
// go through the partitions, make sure Start/End/Size are correct, and each has a GUID
for i, part := range t.Partitions {
err := part.initEntry(blocksize, nextstart)
if err != nil {
return nil, fmt.Errorf("Could not initialize partition %d correctly: %v", i, err)
}
realParts = append(realParts, part)
nextstart = part.End + 1
}
// generate the partition bytes
partSize := t.partitionEntrySize * uint32(t.partitionArraySize)
bpart := make([]byte, partSize, partSize)
for i, p := range t.Partitions {
// write the primary partition entry
b2, err := p.toBytes()
if err != nil {
return nil, fmt.Errorf("Error preparing partition entry %d for writing to disk: %v", i, err)
}
slotStart := i * int(t.partitionEntrySize)
slotEnd := slotStart + int(t.partitionEntrySize)
copy(bpart[slotStart:slotEnd], b2)
}
return bpart, nil
}
// toGPTBytes writes just the GPT header to bytes
func (t *Table) toGPTBytes(primary bool) ([]byte, error) {
b := make([]byte, t.LogicalSectorSize, t.LogicalSectorSize)
// 8 bytes "EFI PART" signature - endianness on this?
copy(b[0:8], getEfiSignature())
// 4 bytes revision 1.0
copy(b[8:12], getEfiRevision())
// 4 bytes header size
copy(b[12:16], getEfiHeaderSize())
// 4 bytes CRC32/zlib of header with this field zeroed out - must calculate then come back
copy(b[16:20], []byte{0x00, 0x00, 0x00, 0x00})
// 4 bytes zeroes reserved
copy(b[20:24], getEfiZeroes())
// which LBA are we?
if primary {
binary.LittleEndian.PutUint64(b[24:32], t.primaryHeader)
binary.LittleEndian.PutUint64(b[32:40], t.secondaryHeader)
} else {
binary.LittleEndian.PutUint64(b[24:32], t.secondaryHeader)
binary.LittleEndian.PutUint64(b[32:40], t.primaryHeader)
}
// usable LBAs for partitions
binary.LittleEndian.PutUint64(b[40:48], t.firstDataSector)
binary.LittleEndian.PutUint64(b[48:56], t.lastDataSector)
// 16 bytes disk GUID
var guid uuid.UUID
if t.GUID == "" {
guid, _ = uuid.NewRandom()
} else {
var err error
guid, err = uuid.Parse(t.GUID)
if err != nil {
return nil, fmt.Errorf("Invalid UUID: %s", t.GUID)
}
}
copy(b[56:72], bytesToUUIDBytes(guid[0:16]))
// starting LBA of array of partition entries
binary.LittleEndian.PutUint64(b[72:80], t.partitionArraySector(primary))
// how many entries?
binary.LittleEndian.PutUint32(b[80:84], uint32(t.partitionArraySize))
// how big is a single entry? use the table's entry size (default 128 = 0x80)
binary.LittleEndian.PutUint32(b[84:88], t.partitionEntrySize)
// we need a CRC/zlib of the partition entries, so we do those first, then append the bytes
bpart, err := t.toPartitionArrayBytes()
if err != nil {
return nil, fmt.Errorf("Error converting partition array to bytes: %v", err)
}
checksum := crc32.ChecksumIEEE(bpart)
binary.LittleEndian.PutUint32(b[88:92], checksum)
// calculate checksum of the entire header and place its 4 bytes at offset 16 = 0x10
checksum = crc32.ChecksumIEEE(b[0:92])
binary.LittleEndian.PutUint32(b[16:20], checksum)
// zeroes to the end of the sector
for i := 92; i < t.LogicalSectorSize; i++ {
b[i] = 0x00
}
return b, nil
}
// tableFromBytes read a partition table from a byte slice
func tableFromBytes(b []byte, logicalBlockSize, physicalBlockSize int) (*Table, error) {
// minimum size - gpt entries + header + LBA0 for (protective) MBR
minSize := gptSize + logicalBlockSize*2
if len(b) < minSize {
return nil, fmt.Errorf("Data for partition was %d bytes instead of expected minimum %d", len(b), minSize)
}
// GPT starts at LBA1
gpt := b[logicalBlockSize:]
// start with fixed headers
efiSignature := gpt[0:8]
efiRevision := gpt[8:12]
efiHeaderSize := gpt[12:16]
efiHeaderCrcBytes := append(make([]byte, 0, 4), gpt[16:20]...)
efiHeaderCrc := binary.LittleEndian.Uint32(efiHeaderCrcBytes)
efiZeroes := gpt[20:24]
primaryHeader := binary.LittleEndian.Uint64(gpt[24:32])
secondaryHeader := binary.LittleEndian.Uint64(gpt[32:40])
firstDataSector := binary.LittleEndian.Uint64(gpt[40:48])
lastDataSector := binary.LittleEndian.Uint64(gpt[48:56])
diskGUID, err := uuid.FromBytes(bytesToUUIDBytes(gpt[56:72]))
if err != nil {
return nil, fmt.Errorf("Unable to read guid from disk: %v", err)
}
partitionEntryFirstLBA := binary.LittleEndian.Uint64(gpt[72:80])
partitionEntryCount := binary.LittleEndian.Uint32(gpt[80:84])
partitionEntrySize := binary.LittleEndian.Uint32(gpt[84:88])
partitionEntryChecksum := binary.LittleEndian.Uint32(gpt[88:92])
// once we have the header CRC, zero it out
copy(gpt[16:20], []byte{0x00, 0x00, 0x00, 0x00})
if !bytes.Equal(efiSignature, getEfiSignature()) {
return nil, fmt.Errorf("Invalid EFI Signature %v", efiSignature)
}
if !bytes.Equal(efiRevision, getEfiRevision()) {
return nil, fmt.Errorf("Invalid EFI Revision %v", efiRevision)
}
if !bytes.Equal(efiHeaderSize, getEfiHeaderSize()) {
return nil, fmt.Errorf("Invalid EFI Header size %v", efiHeaderSize)
}
if !bytes.Equal(efiZeroes, getEfiZeroes()) {
return nil, fmt.Errorf("Invalid EFI Header, expected zeroes, got %v", efiZeroes)
}
// get the checksum
checksum := crc32.ChecksumIEEE(gpt[0:92])
if efiHeaderCrc != checksum {
return nil, fmt.Errorf("Invalid EFI Header Checksum, expected %v, got %v", checksum, efiHeaderCrc)
}
// now for partitions
partArrayStart := partitionEntryFirstLBA * uint64(logicalBlockSize)
// convert each factor separately to avoid 32-bit overflow in the multiplication
partArrayEnd := partArrayStart + uint64(partitionEntryCount)*uint64(partitionEntrySize)
bpart := b[partArrayStart:partArrayEnd]
// validate the CRC32 checksum of the partition entry array
checksum = crc32.ChecksumIEEE(bpart)
if partitionEntryChecksum != checksum {
return nil, fmt.Errorf("Invalid EFI Partition Entry Checksum, expected %v, got %v", checksum, partitionEntryChecksum)
}
// potential protective MBR is at LBA0
hasProtectiveMBR := readProtectiveMBR(b[:logicalBlockSize], uint32(secondaryHeader))
table := Table{
LogicalSectorSize: logicalBlockSize,
PhysicalSectorSize: physicalBlockSize,
partitionEntrySize: partitionEntrySize,
primaryHeader: primaryHeader,
secondaryHeader: secondaryHeader,
firstDataSector: firstDataSector,
lastDataSector: lastDataSector,
partitionArraySize: int(partitionEntryCount),
ProtectiveMBR: hasProtectiveMBR,
GUID: strings.ToUpper(diskGUID.String()),
initialized: true,
}
parts := make([]*Partition, 0, partitionEntryCount)
count := int(partitionEntryCount)
for i := 0; i < count; i++ {
// read each partition entry; all-zero entries come back as Unused partitions
start := i * int(partitionEntrySize)
end := start + int(partitionEntrySize)
p, err := partitionFromBytes(bpart[start:end], table.LogicalSectorSize, table.PhysicalSectorSize)
if err != nil {
return nil, fmt.Errorf("Error reading partition entry %d: %v", i, err)
}
// augment partition information
p.Size = (p.End - p.Start + 1) * uint64(logicalBlockSize)
parts = append(parts, p)
}
table.Partitions = parts
return &table, nil
}
// Type report the type of table, always "gpt"
func (t *Table) Type() string {
return "gpt"
}
// Write writes a GPT to disk
// Must be passed the util.File to which to write and the size of the disk
func (t *Table) Write(f util.File, size int64) error {
// it is possible that we are given a basic new table that we need to initialize
if !t.initialized {
t.initTable(size)
}
// write the protectiveMBR if any
// write the primary GPT header
// write the primary partition array
// write the secondary partition array
// write the secondary GPT header
var written int
var err error
if t.ProtectiveMBR {
fullMBR := t.generateProtectiveMBR()
protectiveMBR := fullMBR[mbrPartitionEntriesStart:]
written, err = f.WriteAt(protectiveMBR, mbrPartitionEntriesStart)
if err != nil {
return fmt.Errorf("Error writing protective MBR to disk: %v", err)
}
if written != len(protectiveMBR) {
return fmt.Errorf("Wrote %d bytes of protective MBR instead of %d", written, len(protectiveMBR))
}
}
primaryHeader, err := t.toGPTBytes(true)
if err != nil {
return fmt.Errorf("Error converting primary GPT header to byte array: %v", err)
}
written, err = f.WriteAt(primaryHeader, int64(t.LogicalSectorSize))
if err != nil {
return fmt.Errorf("Error writing primary GPT to disk: %v", err)
}
if written != len(primaryHeader) {
return fmt.Errorf("Wrote %d bytes of primary GPT header instead of %d", written, len(primaryHeader))
}
partitionArray, err := t.toPartitionArrayBytes()
if err != nil {
return fmt.Errorf("Error converting primary GPT partitions to byte array: %v", err)
}
written, err = f.WriteAt(partitionArray, int64(t.LogicalSectorSize*int(t.partitionArraySector(true))))
if err != nil {
return fmt.Errorf("Error writing primary partition arrayto disk: %v", err)
}
if written != len(partitionArray) {
return fmt.Errorf("Wrote %d bytes of primary partition array instead of %d", written, len(primaryHeader))
}
written, err = f.WriteAt(partitionArray, int64(t.LogicalSectorSize*int(t.partitionArraySector(false))))
if err != nil {
return fmt.Errorf("Error writing secondary partition array to disk: %v", err)
}
if written != len(partitionArray) {
return fmt.Errorf("Wrote %d bytes of secondary partition array instead of %d", written, len(primaryHeader))
}
secondaryHeader, err := t.toGPTBytes(false)
if err != nil {
return fmt.Errorf("Error converting secondary GPT header to byte array: %v", err)
}
written, err = f.WriteAt(secondaryHeader, int64(t.secondaryHeader)*int64(t.LogicalSectorSize))
if err != nil {
return fmt.Errorf("Error writing secondary GPT to disk: %v", err)
}
if written != len(secondaryHeader) {
return fmt.Errorf("Wrote %d bytes of secondary GPT header instead of %d", written, len(secondaryHeader))
}
return nil
}
// Read reads a partition table from a disk
// must be passed the util.File from which to read, and the logical and physical block sizes
//
// if successful, returns a gpt.Table struct
// returns an error if it fails at any stage reading the disk or processing the bytes on disk as a GPT
func Read(f util.File, logicalBlockSize, physicalBlockSize int) (*Table, error) {
// read the data off of the disk
b := make([]byte, gptSize+logicalBlockSize*2, gptSize+logicalBlockSize*2)
read, err := f.ReadAt(b, 0)
if err != nil {
return nil, fmt.Errorf("Error reading GPT from file: %v", err)
}
if read != len(b) {
return nil, fmt.Errorf("Read only %d bytes of GPT from file instead of expected %d", read, len(b))
}
return tableFromBytes(b, logicalBlockSize, physicalBlockSize)
}
// GetPartitions gets the partitions
func (t *Table) GetPartitions() []part.Partition {
// each Partition matches the part.Partition interface, but golang does not accept passing them in a slice
parts := make([]part.Partition, len(t.Partitions), len(t.Partitions))
for i, p := range t.Partitions {
parts[i] = p
}
return parts
}


@@ -0,0 +1,28 @@
package gpt
// Type constants for the GUID identifying the type of partition, see https://en.wikipedia.org/wiki/GUID_Partition_Table#Partition_entries
type Type string
// List of GUID partition types
const (
Unused Type = "00000000-0000-0000-0000-000000000000"
MbrBoot Type = "024DEE41-33E7-11D3-9D69-0008C781F39F"
EFISystemPartition Type = "C12A7328-F81F-11D2-BA4B-00A0C93EC93B"
BiosBoot Type = "21686148-6449-6E6F-744E-656564454649"
MicrosoftReserved Type = "E3C9E316-0B5C-4DB8-817D-F92DF00215AE"
MicrosoftBasicData Type = "EBD0A0A2-B9E5-4433-87C0-68B6B72699C7"
MicrosoftLDMMetadata Type = "5808C8AA-7E8F-42E0-85D2-E1E90434CFB3"
MicrosoftLDMData Type = "AF9B60A0-1431-4F62-BC68-3311714A69AD"
MicrosoftWindowsRecovery Type = "DE94BBA4-06D1-4D40-A16A-BFD50179D6AC"
LinuxFilesystem Type = "0FC63DAF-8483-4772-8E79-3D69D8477DE4"
LinuxRaid Type = "A19D880F-05FC-4D3B-A006-743F0F84911E"
LinuxRootX86 Type = "44479540-F297-41B2-9AF7-D131D5F0458A"
LinuxRootX86_64 Type = "4F68BCE3-E8CD-4DB1-96E7-FBCAF984B709"
LinuxRootArm32 Type = "69DAD710-2CE4-4E3C-B16C-21A1D49ABED3"
LinuxRootArm64 Type = "B921B045-1DF0-41C3-AF44-4C6F280D3FAE"
LinuxSwap Type = "0657FD6D-A4AB-43C4-84E5-0933C84B4F4F"
LinuxLVM Type = "E6D6D379-F507-44C2-A23C-238F2A3DF928"
LinuxDMCrypt Type = "7FFEC5C9-2D00-49B7-8941-3EA10A5586B7"
LinuxLUKS Type = "CA7D7CCB-63ED-4C53-861C-1742536059CC"
VMWareFilesystem Type = "AA31E02A-400F-11DB-9590-000C2911D1B8"
)
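
Putting the pieces above together, here is a minimal sketch of creating a GPT table with a Linux data partition and flashing it to a disk image. It assumes gpt.Partition exposes Start, End, and Type fields (its definition is not part of this diff hunk), so treat the field names as illustrative rather than definitive:

```go
package main

import (
	"log"
	"os"

	"github.com/diskfs/go-diskfs/partition/gpt"
)

func main() {
	f, err := os.OpenFile("disk.img", os.O_RDWR|os.O_CREATE, 0o644)
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	size := int64(10 * 1024 * 1024) // 10MB image
	if err := f.Truncate(size); err != nil {
		log.Fatal(err)
	}

	table := &gpt.Table{
		LogicalSectorSize:  512,
		PhysicalSectorSize: 512,
		ProtectiveMBR:      true,
		Partitions: []*gpt.Partition{
			// sectors 2048-18431 = an 8MB Linux filesystem partition
			{Start: 2048, End: 18431, Type: gpt.LinuxFilesystem},
		},
	}
	// Write calls initTable internally to fill in defaults (GUID,
	// header locations, usable sector range) before flashing.
	if err := table.Write(f, size); err != nil {
		log.Fatal(err)
	}
}
```

*os.File provides ReadAt, WriteAt, and Seek, so it can be passed directly as the util.File argument.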


@@ -0,0 +1,26 @@
// Package mbr provides an interface to Master Boot Record (MBR) partitioned disks.
//
// You can use this package to manipulate existing MBR disks, read existing disks, or create entirely
// new partition tables on disks or disk files.
//
// mbr.Table implements the Table interface in github.com/diskfs/go-diskfs/partition
//
// Normally, the best way to interact with a disk is to use the github.com/diskfs/go-diskfs package,
// which, when necessary, will call this one. When creating a new disk or manipulating an existing one,
// you will, however, need to interact with the mbr.Table and mbr.Partition structs.
//
// Here is a simple example of an MBR Table with a single 10MB Linux partition:
//
// table := &mbr.Table{
// LogicalSectorSize: 512,
// PhysicalSectorSize: 512,
// Partitions: []*mbr.Partition{
// {
// Bootable: false,
// Type: Linux,
// Start: 2048,
// Size: 20480,
// },
// },
// }
package mbr


@@ -0,0 +1,203 @@
package mbr
import (
"bytes"
"encoding/binary"
"fmt"
"io"
"github.com/diskfs/go-diskfs/util"
)
// Partition represents the structure of a single partition on the disk
// note that start and end cylinder, head, sector (CHS) are ignored, for the most part.
// godiskfs works with disks that support [Logical Block Addressing (LBA)](https://en.wikipedia.org/wiki/Logical_block_addressing)
type Partition struct {
Bootable bool
Type Type // Type of the partition, a single MBR type byte
Start uint32 // Start first absolute LBA sector for partition
Size uint32 // Size number of sectors in partition
StartCylinder byte
StartHead byte
StartSector byte
EndCylinder byte
EndHead byte
EndSector byte
// we need this for calculations
logicalSectorSize int
physicalSectorSize int
}
// PartitionEqualBytes compares if the bytes for 2 partitions are equal, ignoring CHS start and end
func PartitionEqualBytes(b1, b2 []byte) bool {
if (b1 == nil && b2 != nil) || (b2 == nil && b1 != nil) {
return false
}
if b1 == nil && b2 == nil {
return true
}
if len(b1) != len(b2) {
return false
}
return b1[0] == b2[0] &&
b1[4] == b2[4] &&
bytes.Equal(b1[8:12], b2[8:12]) &&
bytes.Equal(b1[12:16], b2[12:16])
}
// Equal compares if another partition is equal to this one, ignoring CHS start and end
func (p *Partition) Equal(p2 *Partition) bool {
if p2 == nil {
return false
}
return p.Bootable == p2.Bootable &&
p.Type == p2.Type &&
p.Start == p2.Start &&
p.Size == p2.Size
}
func (p *Partition) GetSize() int64 {
_, lss := p.sectorSizes()
return int64(p.Size) * int64(lss)
}
func (p *Partition) GetStart() int64 {
_, lss := p.sectorSizes()
return int64(p.Start) * int64(lss)
}
// toBytes return the 16 bytes for this partition
func (p *Partition) toBytes() ([]byte, error) {
b := make([]byte, partitionEntrySize, partitionEntrySize)
if p.Bootable {
b[0] = 0x80
} else {
b[0] = 0x00
}
b[1] = p.StartHead
b[2] = p.StartSector
b[3] = p.StartCylinder
b[4] = byte(p.Type)
b[5] = p.EndHead
b[6] = p.EndSector
b[7] = p.EndCylinder
binary.LittleEndian.PutUint32(b[8:12], p.Start)
binary.LittleEndian.PutUint32(b[12:16], p.Size)
return b, nil
}
// partitionFromBytes create a partition entry from 16 bytes
func partitionFromBytes(b []byte, logicalSectorSize, physicalSectorSize int) (*Partition, error) {
if len(b) != partitionEntrySize {
return nil, fmt.Errorf("Data for partition was %d bytes instead of expected %d", len(b), partitionEntrySize)
}
var bootable bool
switch b[0] {
case 0x00:
bootable = false
case 0x80:
bootable = true
default:
return nil, fmt.Errorf("Invalid partition")
}
return &Partition{
Bootable: bootable,
StartHead: b[1],
StartSector: b[2],
StartCylinder: b[3],
Type: Type(b[4]),
EndHead: b[5],
EndSector: b[6],
EndCylinder: b[7],
Start: binary.LittleEndian.Uint32(b[8:12]),
Size: binary.LittleEndian.Uint32(b[12:16]),
logicalSectorSize: logicalSectorSize,
physicalSectorSize: physicalSectorSize,
}, nil
}
// WriteContents fills the partition with the contents provided
// reads from beginning of reader to exactly size of partition in bytes
func (p *Partition) WriteContents(f util.File, contents io.Reader) (uint64, error) {
pss, lss := p.sectorSizes()
total := uint64(0)
// chunks of physical sector size for efficient writing
b := make([]byte, pss, pss)
// we start at the correct byte location
start := p.Start * uint32(lss)
size := p.Size * uint32(lss)
// loop in physical sector sizes
for {
read, err := contents.Read(b)
if err != nil && err != io.EOF {
return total, fmt.Errorf("Could not read contents to pass to partition: %v", err)
}
tmpTotal := uint64(read) + total
if tmpTotal > uint64(size) {
return total, fmt.Errorf("Requested to write at least %d bytes to partition but maximum size is %d", tmpTotal, size)
}
var written int
if read > 0 {
written, err = f.WriteAt(b[:read], int64(start)+int64(total))
if err != nil {
return total, fmt.Errorf("Error writing to file: %v", err)
}
// increment our total
total = total + uint64(written)
}
// is this the end of the data?
if err == io.EOF {
break
}
}
// did the total written equal the size of the partition?
if total != uint64(size) {
return total, fmt.Errorf("Write %d bytes to partition but actual size is %d", total, size)
}
return total, nil
}
// ReadContents reads the contents of the partition into a writer
// streams the entire partition to the writer
func (p *Partition) ReadContents(f util.File, out io.Writer) (int64, error) {
pss, lss := p.sectorSizes()
total := int64(0)
// chunks of physical sector size for efficient reading
b := make([]byte, pss)
// we start at the correct byte location
start := p.Start * uint32(lss)
size := p.Size * uint32(lss)
// loop in physical sector sizes
for {
read, err := f.ReadAt(b, int64(start)+total)
if err != nil && err != io.EOF {
return total, fmt.Errorf("Error reading from file: %v", err)
}
if read > 0 {
out.Write(b[:read])
}
// increment our total
total += int64(read)
// is this the end of the data?
if err == io.EOF || total >= int64(size) {
break
}
}
return total, nil
}
// sectorSizes get the sector sizes for this partition, falling back to the defaults if 0
func (p *Partition) sectorSizes() (physical, logical int) {
physical = p.physicalSectorSize
if physical == 0 {
physical = physicalSectorSize
}
logical = p.logicalSectorSize
if logical == 0 {
logical = logicalSectorSize
}
return physical, logical
}
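
A short usage sketch of the two streaming methods above, assuming disk.img already holds an MBR with this partition laid out (the file name and geometry are illustrative, not taken from this diff). Note that WriteContents insists the reader supply exactly the partition's size in bytes:

```go
package main

import (
	"bytes"
	"log"
	"os"

	"github.com/diskfs/go-diskfs/partition/mbr"
)

func main() {
	f, err := os.OpenFile("disk.img", os.O_RDWR, 0o644)
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	// a 1MB partition starting 1MB into the disk (2048 sectors of 512 bytes)
	p := &mbr.Partition{Type: mbr.Linux, Start: 2048, Size: 2048}

	// the reader must yield exactly Size * sectorSize bytes
	contents := bytes.NewReader(make([]byte, 2048*512))
	if _, err := p.WriteContents(f, contents); err != nil {
		log.Fatal(err)
	}

	// stream the same bytes back out
	var out bytes.Buffer
	if _, err := p.ReadContents(f, &out); err != nil {
		log.Fatal(err)
	}
	log.Printf("read back %d bytes", out.Len())
}
```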


@@ -0,0 +1,184 @@
package mbr
import (
"bytes"
"fmt"
"github.com/diskfs/go-diskfs/partition/part"
"github.com/diskfs/go-diskfs/util"
)
// Table represents an MBR partition table to be applied to a disk or read from a disk
type Table struct {
Partitions []*Partition
LogicalSectorSize int // logical size of a sector
PhysicalSectorSize int // physical size of the sector
initialized bool
}
const (
mbrSize = 512
logicalSectorSize = 512
physicalSectorSize = 512
partitionEntriesStart = 446
partitionEntriesCount = 4
signatureStart = 510
)
// partitionEntrySize standard size of an MBR partition entry
const partitionEntrySize = 16
func getMbrSignature() []byte {
return []byte{0x55, 0xaa}
}
// compare 2 partition arrays
func comparePartitionArray(p1, p2 []*Partition) bool {
if (p1 == nil && p2 != nil) || (p2 == nil && p1 != nil) {
return false
}
if p1 == nil && p2 == nil {
return true
}
// neither is nil, so now we need to compare
if len(p1) != len(p2) {
return false
}
matches := true
for i, p := range p1 {
// guard against nil entries before calling Equal on them
if (p == nil) != (p2[i] == nil) || (p != nil && !p.Equal(p2[i])) {
matches = false
break
}
}
return matches
}
// ensure that a blank table is initialized
func (t *Table) initTable(size int64) {
// default settings
if t.LogicalSectorSize == 0 {
t.LogicalSectorSize = 512
}
if t.PhysicalSectorSize == 0 {
t.PhysicalSectorSize = 512
}
t.initialized = true
}
// Equal checks if another table is equal to this one, ignoring CHS start and end for the partitions
func (t *Table) Equal(t2 *Table) bool {
if t2 == nil {
return false
}
// neither is nil, so now we need to compare
basicMatch := t.LogicalSectorSize == t2.LogicalSectorSize &&
t.PhysicalSectorSize == t2.PhysicalSectorSize
partMatch := comparePartitionArray(t.Partitions, t2.Partitions)
return basicMatch && partMatch
}
// tableFromBytes reads a partition table from a byte slice
func tableFromBytes(b []byte, logicalBlockSize, physicalBlockSize int) (*Table, error) {
// check length
if len(b) != mbrSize {
return nil, fmt.Errorf("Data for partition was %d bytes instead of expected %d", len(b), mbrSize)
}
mbrSignature := b[signatureStart:]
// validate signature
if !bytes.Equal(mbrSignature, getMbrSignature()) {
return nil, fmt.Errorf("Invalid MBR Signature %v", mbrSignature)
}
parts := make([]*Partition, 0, partitionEntriesCount)
count := int(partitionEntriesCount)
for i := 0; i < count; i++ {
// read each of the four partition entries
start := partitionEntriesStart + i*partitionEntrySize
end := start + partitionEntrySize
p, err := partitionFromBytes(b[start:end], logicalSectorSize, physicalSectorSize)
if err != nil {
return nil, fmt.Errorf("Error reading partition entry %d: %v", i, err)
}
parts = append(parts, p)
}
table := &Table{
Partitions: parts,
LogicalSectorSize: logicalSectorSize,
PhysicalSectorSize: physicalSectorSize,
}
return table, nil
}
// Type reports the type of table, always the string "mbr"
func (t *Table) Type() string {
return "mbr"
}
// Read reads a partition table from a disk, given the logical block size and physical block size
func Read(f util.File, logicalBlockSize, physicalBlockSize int) (*Table, error) {
// read the data off of the disk
b := make([]byte, mbrSize, mbrSize)
read, err := f.ReadAt(b, 0)
if err != nil {
return nil, fmt.Errorf("Error reading MBR from file: %v", err)
}
if read != len(b) {
return nil, fmt.Errorf("Read only %d bytes of MBR from file instead of expected %d", read, len(b))
}
return tableFromBytes(b, logicalBlockSize, physicalBlockSize)
}
// toBytes converts the partition entries and signature to a byte slice suitable to be flashed to a disk
// If successful, always will return a byte slice of exactly 66 bytes: four 16-byte entries plus the 2-byte signature
func (t *Table) toBytes() ([]byte, error) {
b := make([]byte, 0, mbrSize-partitionEntriesStart)
// write the partitions
for i := 0; i < partitionEntriesCount; i++ {
if i < len(t.Partitions) {
btmp, err := t.Partitions[i].toBytes()
if err != nil {
return nil, fmt.Errorf("Could not prepare partition %d to write on disk: %v", i, err)
}
b = append(b, btmp...)
} else {
b = append(b, make([]byte, partitionEntrySize)...)
}
}
// signature
b = append(b, getMbrSignature()...)
return b, nil
}
// Write writes a given MBR Table to disk.
// Must be passed the util.File to write to and the size of the disk
func (t *Table) Write(f util.File, size int64) error {
b, err := t.toBytes()
if err != nil {
return fmt.Errorf("Error preparing partition table for writing to disk: %v", err)
}
written, err := f.WriteAt(b, partitionEntriesStart)
if err != nil {
return fmt.Errorf("Error writing partition table to disk: %v", err)
}
if written != len(b) {
return fmt.Errorf("Partition table wrote %d bytes to disk instead of the expected %d", written, len(b))
}
return nil
}
func (t *Table) GetPartitions() []part.Partition {
// each Partition matches the part.Partition interface, but golang does not accept passing them in a slice
parts := make([]part.Partition, len(t.Partitions), len(t.Partitions))
for i, p := range t.Partitions {
parts[i] = p
}
return parts
}
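
A brief sketch of reading an existing MBR back off a disk image (the file name is illustrative):

```go
package main

import (
	"fmt"
	"log"
	"os"

	"github.com/diskfs/go-diskfs/partition/mbr"
)

func main() {
	f, err := os.Open("disk.img")
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	table, err := mbr.Read(f, 512, 512)
	if err != nil {
		log.Fatal(err)
	}
	for i, p := range table.Partitions {
		fmt.Printf("partition %d: type=0x%02x start=%d size=%d sectors\n",
			i, byte(p.Type), p.Start, p.Size)
	}
}
```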


@@ -0,0 +1,33 @@
package mbr
// Type constants for the one-byte type field in an MBR partition entry, see https://en.wikipedia.org/wiki/Partition_type
type Type byte
// List of MBR partition types
const (
Empty Type = 0x00
Fat12 Type = 0x01
XenixRoot Type = 0x02
XenixUsr Type = 0x03
Fat16 Type = 0x04
ExtendedCHS Type = 0x05
Fat16b Type = 0x06
NTFS Type = 0x07
CommodoreFAT Type = 0x08
Fat32CHS Type = 0x0b
Fat32LBA Type = 0x0c
Fat16bLBA Type = 0x0e
ExtendedLBA Type = 0x0f
Linux Type = 0x83
LinuxExtended Type = 0x85
LinuxLVM Type = 0x8e
Iso9660 Type = 0x96
MacOSXUFS Type = 0xa8
MacOSXBoot Type = 0xab
HFS Type = 0xaf
Solaris8Boot Type = 0xbe
GPTProtective Type = 0xee
EFISystem Type = 0xef
VMWareFS Type = 0xfb
VMWareSwap Type = 0xfc
)


@@ -0,0 +1,15 @@
package part
import (
"io"
"github.com/diskfs/go-diskfs/util"
)
// Partition is a reference to an individual partition on disk
type Partition interface {
GetSize() int64
GetStart() int64
ReadContents(util.File, io.Writer) (int64, error)
WriteContents(util.File, io.Reader) (uint64, error)
}
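
Because both gpt.Partition and mbr.Partition satisfy this interface, table-agnostic helpers are easy to write. A small sketch (the package and function names are hypothetical):

```go
package diskutil

import (
	"bytes"

	"github.com/diskfs/go-diskfs/partition/part"
	"github.com/diskfs/go-diskfs/util"
)

// dumpPartition copies a partition's raw contents into memory without
// caring whether the partition came from a GPT or an MBR table.
func dumpPartition(f util.File, p part.Partition) ([]byte, error) {
	var buf bytes.Buffer
	if _, err := p.ReadContents(f, &buf); err != nil {
		return nil, err
	}
	return buf.Bytes(), nil
}
```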


@@ -0,0 +1,26 @@
// Package partition provides the ability to work with individual partitions.
// All useful implementations are subpackages of this package, e.g. github.com/diskfs/go-diskfs/partition/gpt
package partition
import (
"fmt"
"github.com/diskfs/go-diskfs/partition/gpt"
"github.com/diskfs/go-diskfs/partition/mbr"
"github.com/diskfs/go-diskfs/util"
)
// Read reads a partition table from a disk
func Read(f util.File, logicalBlocksize, physicalBlocksize int) (Table, error) {
// just try each type
gptTable, err := gpt.Read(f, logicalBlocksize, physicalBlocksize)
if err == nil {
return gptTable, nil
}
mbrTable, err := mbr.Read(f, logicalBlocksize, physicalBlocksize)
if err == nil {
return mbrTable, nil
}
// we are out of known partition table types
return nil, fmt.Errorf("Unknown disk partition type")
}
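
This try-each-type probing means callers never have to know the partitioning scheme up front; a usage sketch (file name illustrative):

```go
package main

import (
	"fmt"
	"log"
	"os"

	"github.com/diskfs/go-diskfs/partition"
)

func main() {
	f, err := os.Open("disk.img")
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	table, err := partition.Read(f, 512, 512)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("detected table type:", table.Type()) // "gpt" or "mbr"
	fmt.Println("partitions:", len(table.GetPartitions()))
}
```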


@@ -0,0 +1,13 @@
package partition
import (
"github.com/diskfs/go-diskfs/partition/part"
"github.com/diskfs/go-diskfs/util"
)
// Table reference to a partitioning table on disk
type Table interface {
Type() string
Write(util.File, int64) error
GetPartitions() []part.Partition
}


@@ -0,0 +1,13 @@
// Package util provides common utilities and other elements shared across github.com/diskfs/go-diskfs packages
package util
import "io"
// File interface that can be read from and written to.
// Normally implemented as an actual os.File, but kept as a separate interface so that
// alternate implementations can easily be used.
type File interface {
io.ReaderAt
io.WriterAt
io.Seeker
}
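
Since *os.File already provides ReadAt, WriteAt, and Seek, it satisfies this interface; a compile-time assertion makes that explicit and would catch any future drift (a sketch, placed in a hypothetical test file):

```go
package util_test

import (
	"os"

	"github.com/diskfs/go-diskfs/util"
)

// the compiler rejects this assignment if *os.File ever stops
// satisfying util.File
var _ util.File = (*os.File)(nil)
```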


@@ -0,0 +1,5 @@
package util
const (
AppNameVersion = "https://github.com/diskfs/go-diskfs"
)

pkg/metadata/vendor/github.com/google/uuid/LICENSE generated vendored Normal file

@@ -0,0 +1,27 @@
Copyright (c) 2009,2014 Google Inc. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

pkg/metadata/vendor/github.com/google/uuid/README.md generated vendored Normal file

@@ -0,0 +1,19 @@
# uuid ![build status](https://travis-ci.org/google/uuid.svg?branch=master)
The uuid package generates and inspects UUIDs based on
[RFC 4122](http://tools.ietf.org/html/rfc4122)
and DCE 1.1: Authentication and Security Services.
This package is based on the github.com/pborman/uuid package (previously named
code.google.com/p/go-uuid). It differs from these earlier packages in that
a UUID is a 16 byte array rather than a byte slice. One loss due to this
change is the ability to represent an invalid UUID (vs a NIL UUID).
###### Install
`go get github.com/google/uuid`
###### Documentation
[![GoDoc](https://godoc.org/github.com/google/uuid?status.svg)](http://godoc.org/github.com/google/uuid)
Full `go doc` style documentation for the package can be viewed online without
installing this package by using the GoDoc site here:
http://godoc.org/github.com/google/uuid

pkg/metadata/vendor/github.com/google/uuid/dce.go generated vendored Normal file

@@ -0,0 +1,80 @@
// Copyright 2016 Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package uuid
import (
"encoding/binary"
"fmt"
"os"
)
// A Domain represents a Version 2 domain
type Domain byte
// Domain constants for DCE Security (Version 2) UUIDs.
const (
Person = Domain(0)
Group = Domain(1)
Org = Domain(2)
)
// NewDCESecurity returns a DCE Security (Version 2) UUID.
//
// The domain should be one of Person, Group or Org.
// On a POSIX system the id should be the user's UID for the Person
// domain and the user's GID for the Group. The meaning of id for
// the domain Org or on non-POSIX systems is site defined.
//
// For a given domain/id pair the same token may be returned for up to
// 7 minutes and 10 seconds.
func NewDCESecurity(domain Domain, id uint32) (UUID, error) {
uuid, err := NewUUID()
if err == nil {
uuid[6] = (uuid[6] & 0x0f) | 0x20 // Version 2
uuid[9] = byte(domain)
binary.BigEndian.PutUint32(uuid[0:], id)
}
return uuid, err
}
// NewDCEPerson returns a DCE Security (Version 2) UUID in the person
// domain with the id returned by os.Getuid.
//
// NewDCESecurity(Person, uint32(os.Getuid()))
func NewDCEPerson() (UUID, error) {
return NewDCESecurity(Person, uint32(os.Getuid()))
}
// NewDCEGroup returns a DCE Security (Version 2) UUID in the group
// domain with the id returned by os.Getgid.
//
// NewDCESecurity(Group, uint32(os.Getgid()))
func NewDCEGroup() (UUID, error) {
return NewDCESecurity(Group, uint32(os.Getgid()))
}
// Domain returns the domain for a Version 2 UUID. Domains are only defined
// for Version 2 UUIDs.
func (uuid UUID) Domain() Domain {
return Domain(uuid[9])
}
// ID returns the id for a Version 2 UUID. IDs are only defined for Version 2
// UUIDs.
func (uuid UUID) ID() uint32 {
return binary.BigEndian.Uint32(uuid[0:4])
}
func (d Domain) String() string {
switch d {
case Person:
return "Person"
case Group:
return "Group"
case Org:
return "Org"
}
return fmt.Sprintf("Domain%d", int(d))
}

pkg/metadata/vendor/github.com/google/uuid/doc.go generated vendored Normal file

@@ -0,0 +1,12 @@
// Copyright 2016 Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package uuid generates and inspects UUIDs.
//
// UUIDs are based on RFC 4122 and DCE 1.1: Authentication and Security
// Services.
//
// A UUID is a 16 byte (128 bit) array. UUIDs may be used as keys to
// maps or compared directly.
package uuid

pkg/metadata/vendor/github.com/google/uuid/go.mod generated vendored Normal file

@@ -0,0 +1 @@
module github.com/google/uuid

pkg/metadata/vendor/github.com/google/uuid/hash.go generated vendored Normal file

@@ -0,0 +1,53 @@
// Copyright 2016 Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package uuid
import (
"crypto/md5"
"crypto/sha1"
"hash"
)
// Well known namespace IDs and UUIDs
var (
NameSpaceDNS = Must(Parse("6ba7b810-9dad-11d1-80b4-00c04fd430c8"))
NameSpaceURL = Must(Parse("6ba7b811-9dad-11d1-80b4-00c04fd430c8"))
NameSpaceOID = Must(Parse("6ba7b812-9dad-11d1-80b4-00c04fd430c8"))
NameSpaceX500 = Must(Parse("6ba7b814-9dad-11d1-80b4-00c04fd430c8"))
Nil UUID // empty UUID, all zeros
)
// NewHash returns a new UUID derived from the hash of space concatenated with
// data generated by h. The hash should be at least 16 bytes in length. The
// first 16 bytes of the hash are used to form the UUID. The version of the
// UUID will be the lower 4 bits of version. NewHash is used to implement
// NewMD5 and NewSHA1.
func NewHash(h hash.Hash, space UUID, data []byte, version int) UUID {
h.Reset()
h.Write(space[:])
h.Write(data)
s := h.Sum(nil)
var uuid UUID
copy(uuid[:], s)
uuid[6] = (uuid[6] & 0x0f) | uint8((version&0xf)<<4)
uuid[8] = (uuid[8] & 0x3f) | 0x80 // RFC 4122 variant
return uuid
}
// NewMD5 returns a new MD5 (Version 3) UUID based on the
// supplied name space and data. It is the same as calling:
//
// NewHash(md5.New(), space, data, 3)
func NewMD5(space UUID, data []byte) UUID {
return NewHash(md5.New(), space, data, 3)
}
// NewSHA1 returns a new SHA1 (Version 5) UUID based on the
// supplied name space and data. It is the same as calling:
//
// NewHash(sha1.New(), space, data, 5)
func NewSHA1(space UUID, data []byte) UUID {
return NewHash(sha1.New(), space, data, 5)
}
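
Name-based UUIDs are deterministic: hashing the same namespace and name always yields the same UUID, which is handy for stable identifiers. For example:

```go
package main

import (
	"fmt"

	"github.com/google/uuid"
)

func main() {
	// the same namespace + name always produces the same Version 5 UUID
	u1 := uuid.NewSHA1(uuid.NameSpaceDNS, []byte("example.com"))
	u2 := uuid.NewSHA1(uuid.NameSpaceDNS, []byte("example.com"))
	fmt.Println(u1 == u2)     // true
	fmt.Println(u1.Version()) // VERSION_5
}
```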

pkg/metadata/vendor/github.com/google/uuid/marshal.go generated vendored Normal file

@@ -0,0 +1,37 @@
// Copyright 2016 Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package uuid
import "fmt"
// MarshalText implements encoding.TextMarshaler.
func (uuid UUID) MarshalText() ([]byte, error) {
var js [36]byte
encodeHex(js[:], uuid)
return js[:], nil
}
// UnmarshalText implements encoding.TextUnmarshaler.
func (uuid *UUID) UnmarshalText(data []byte) error {
id, err := ParseBytes(data)
if err == nil {
*uuid = id
}
return err
}
// MarshalBinary implements encoding.BinaryMarshaler.
func (uuid UUID) MarshalBinary() ([]byte, error) {
return uuid[:], nil
}
// UnmarshalBinary implements encoding.BinaryUnmarshaler.
func (uuid *UUID) UnmarshalBinary(data []byte) error {
if len(data) != 16 {
return fmt.Errorf("invalid UUID (got %d bytes)", len(data))
}
copy(uuid[:], data)
return nil
}

pkg/metadata/vendor/github.com/google/uuid/node.go generated vendored Normal file

@@ -0,0 +1,90 @@
// Copyright 2016 Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package uuid
import (
"sync"
)
var (
nodeMu sync.Mutex
ifname string // name of interface being used
nodeID [6]byte // hardware for version 1 UUIDs
zeroID [6]byte // nodeID with only 0's
)
// NodeInterface returns the name of the interface from which the NodeID was
// derived. The interface "user" is returned if the NodeID was set by
// SetNodeID.
func NodeInterface() string {
defer nodeMu.Unlock()
nodeMu.Lock()
return ifname
}
// SetNodeInterface selects the hardware address to be used for Version 1 UUIDs.
// If name is "" then the first usable interface found will be used or a random
// Node ID will be generated. If a named interface cannot be found then false
// is returned.
//
// SetNodeInterface never fails when name is "".
func SetNodeInterface(name string) bool {
defer nodeMu.Unlock()
nodeMu.Lock()
return setNodeInterface(name)
}
func setNodeInterface(name string) bool {
iname, addr := getHardwareInterface(name) // null implementation for js
if iname != "" && addr != nil {
ifname = iname
copy(nodeID[:], addr)
return true
}
// We found no interfaces with a valid hardware address. If name
// does not specify a specific interface, generate a random Node ID
// (section 4.1.6)
if name == "" {
ifname = "random"
randomBits(nodeID[:])
return true
}
return false
}
// NodeID returns a slice of a copy of the current Node ID, setting the Node ID
// if not already set.
func NodeID() []byte {
defer nodeMu.Unlock()
nodeMu.Lock()
if nodeID == zeroID {
setNodeInterface("")
}
nid := nodeID
return nid[:]
}
// SetNodeID sets the Node ID to be used for Version 1 UUIDs. The first 6 bytes
// of id are used. If id is less than 6 bytes then false is returned and the
// Node ID is not set.
func SetNodeID(id []byte) bool {
if len(id) < 6 {
return false
}
defer nodeMu.Unlock()
nodeMu.Lock()
copy(nodeID[:], id)
ifname = "user"
return true
}
// NodeID returns the 6 byte node id encoded in uuid. It returns nil if uuid is
// not valid. The NodeID is only well defined for version 1 and 2 UUIDs.
func (uuid UUID) NodeID() []byte {
var node [6]byte
copy(node[:], uuid[10:])
return node[:]
}

pkg/metadata/vendor/github.com/google/uuid/node_js.go generated vendored Normal file

@@ -0,0 +1,12 @@
// Copyright 2017 Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build js
package uuid
// getHardwareInterface returns nil values for the JS version of the code.
// This removes the "net" dependency, because it is not used in the browser.
// Using the "net" library inflates the size of the transpiled JS code by 673k bytes.
func getHardwareInterface(name string) (string, []byte) { return "", nil }

pkg/metadata/vendor/github.com/google/uuid/node_net.go generated vendored Normal file

@@ -0,0 +1,33 @@
// Copyright 2017 Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !js
package uuid
import "net"
var interfaces []net.Interface // cached list of interfaces
// getHardwareInterface returns the name and hardware address of interface name.
// If name is "" then the name and hardware address of one of the system's
// interfaces is returned. If no interfaces are found (name does not exist or
// there are no interfaces) then "", nil is returned.
//
// Only addresses of at least 6 bytes are returned.
func getHardwareInterface(name string) (string, []byte) {
if interfaces == nil {
var err error
interfaces, err = net.Interfaces()
if err != nil {
return "", nil
}
}
for _, ifs := range interfaces {
if len(ifs.HardwareAddr) >= 6 && (name == "" || name == ifs.Name) {
return ifs.Name, ifs.HardwareAddr
}
}
return "", nil
}

pkg/metadata/vendor/github.com/google/uuid/sql.go generated vendored Normal file

@@ -0,0 +1,59 @@
// Copyright 2016 Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package uuid
import (
"database/sql/driver"
"fmt"
)
// Scan implements sql.Scanner so UUIDs can be read from databases transparently.
// Currently, database types that map to string and []byte are supported. Please
// consult database-specific driver documentation for matching types.
func (uuid *UUID) Scan(src interface{}) error {
switch src := src.(type) {
case nil:
return nil
case string:
// if an empty UUID comes from a table, we return a null UUID
if src == "" {
return nil
}
// see Parse for required string format
u, err := Parse(src)
if err != nil {
return fmt.Errorf("Scan: %v", err)
}
*uuid = u
case []byte:
// if an empty UUID comes from a table, we return a null UUID
if len(src) == 0 {
return nil
}
// a 16-byte slice is assumed to be the raw UUID bytes;
// anything else is parsed as a string
if len(src) != 16 {
return uuid.Scan(string(src))
}
copy((*uuid)[:], src)
default:
return fmt.Errorf("Scan: unable to scan type %T into UUID", src)
}
return nil
}
// Value implements sql.Valuer so that UUIDs can be written to databases
// transparently. Currently, UUIDs map to strings. Please consult
// database-specific driver documentation for matching types.
func (uuid UUID) Value() (driver.Value, error) {
return uuid.String(), nil
}
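
In practice database/sql invokes these methods for you; called directly, the round trip looks like this (the literal UUID is just an example value):

```go
package main

import (
	"fmt"
	"log"

	"github.com/google/uuid"
)

func main() {
	var u uuid.UUID
	// a database driver would hand Scan the column value as string or []byte
	if err := u.Scan("f47ac10b-58cc-4372-a567-0e02b2c3d479"); err != nil {
		log.Fatal(err)
	}
	v, err := u.Value() // converts back to a string for writing
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(v)
}
```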

pkg/metadata/vendor/github.com/google/uuid/time.go generated vendored Normal file

@@ -0,0 +1,123 @@
// Copyright 2016 Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package uuid
import (
"encoding/binary"
"sync"
"time"
)
// A Time represents a time as the number of 100's of nanoseconds since 15 Oct
// 1582.
type Time int64
const (
lillian = 2299160 // Julian day of 15 Oct 1582
unix = 2440587 // Julian day of 1 Jan 1970
epoch = unix - lillian // Days between epochs
g1582 = epoch * 86400 // seconds between epochs
g1582ns100 = g1582 * 10000000 // 100s of nanoseconds between epochs
)
var (
timeMu sync.Mutex
lasttime uint64 // last time we returned
clockSeq uint16 // clock sequence for this run
timeNow = time.Now // for testing
)
// UnixTime converts t to the number of seconds and nanoseconds since the Unix
// epoch of 1 Jan 1970.
func (t Time) UnixTime() (sec, nsec int64) {
sec = int64(t - g1582ns100)
nsec = (sec % 10000000) * 100
sec /= 10000000
return sec, nsec
}
// GetTime returns the current Time (100s of nanoseconds since 15 Oct 1582) and
// clock sequence as well as adjusting the clock sequence as needed. An error
// is returned if the current time cannot be determined.
func GetTime() (Time, uint16, error) {
defer timeMu.Unlock()
timeMu.Lock()
return getTime()
}
func getTime() (Time, uint16, error) {
t := timeNow()
// If we don't have a clock sequence already, set one.
if clockSeq == 0 {
setClockSequence(-1)
}
now := uint64(t.UnixNano()/100) + g1582ns100
// If time has gone backwards with this clock sequence then we
// increment the clock sequence
if now <= lasttime {
clockSeq = ((clockSeq + 1) & 0x3fff) | 0x8000
}
lasttime = now
return Time(now), clockSeq, nil
}
// ClockSequence returns the current clock sequence, generating one if not
// already set. The clock sequence is only used for Version 1 UUIDs.
//
// The uuid package does not use global static storage for the clock sequence or
// the last time a UUID was generated. Unless SetClockSequence is used, a new
// random clock sequence is generated the first time a clock sequence is
// requested by ClockSequence, GetTime, or NewUUID. (section 4.2.1.1)
func ClockSequence() int {
defer timeMu.Unlock()
timeMu.Lock()
return clockSequence()
}
func clockSequence() int {
if clockSeq == 0 {
setClockSequence(-1)
}
return int(clockSeq & 0x3fff)
}
// SetClockSequence sets the clock sequence to the lower 14 bits of seq. Setting to
// -1 causes a new sequence to be generated.
func SetClockSequence(seq int) {
defer timeMu.Unlock()
timeMu.Lock()
setClockSequence(seq)
}
func setClockSequence(seq int) {
if seq == -1 {
var b [2]byte
randomBits(b[:]) // clock sequence
seq = int(b[0])<<8 | int(b[1])
}
oldSeq := clockSeq
clockSeq = uint16(seq&0x3fff) | 0x8000 // Set our variant
if oldSeq != clockSeq {
lasttime = 0
}
}
// Time returns the time in 100s of nanoseconds since 15 Oct 1582 encoded in
// uuid. The time is only defined for version 1 and 2 UUIDs.
func (uuid UUID) Time() Time {
time := int64(binary.BigEndian.Uint32(uuid[0:4]))
time |= int64(binary.BigEndian.Uint16(uuid[4:6])) << 32
time |= int64(binary.BigEndian.Uint16(uuid[6:8])&0xfff) << 48
return Time(time)
}
// ClockSequence returns the clock sequence encoded in uuid.
// The clock sequence is only well defined for version 1 and 2 UUIDs.
func (uuid UUID) ClockSequence() int {
return int(binary.BigEndian.Uint16(uuid[8:10])) & 0x3fff
}
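
For Version 1 UUIDs, the embedded timestamp can be converted back to a time.Time via UnixTime; a short sketch:

```go
package main

import (
	"fmt"
	"log"
	"time"

	"github.com/google/uuid"
)

func main() {
	u, err := uuid.NewUUID() // Version 1, time-based
	if err != nil {
		log.Fatal(err)
	}
	sec, nsec := u.Time().UnixTime()
	fmt.Println(time.Unix(sec, nsec)) // approximately time.Now()
	fmt.Println(u.ClockSequence())
}
```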

pkg/metadata/vendor/github.com/google/uuid/util.go generated vendored Normal file

@@ -0,0 +1,43 @@
// Copyright 2016 Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package uuid
import (
"io"
)
// randomBits completely fills slice b with random data.
func randomBits(b []byte) {
if _, err := io.ReadFull(rander, b); err != nil {
panic(err.Error()) // rand should never fail
}
}
// xvalues returns the value of a byte as a hexadecimal digit or 255.
var xvalues = [256]byte{
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 255, 255, 255, 255, 255, 255,
255, 10, 11, 12, 13, 14, 15, 255, 255, 255, 255, 255, 255, 255, 255, 255,
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
255, 10, 11, 12, 13, 14, 15, 255, 255, 255, 255, 255, 255, 255, 255, 255,
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
}
// xtob converts hex characters x1 and x2 into a byte.
func xtob(x1, x2 byte) (byte, bool) {
b1 := xvalues[x1]
b2 := xvalues[x2]
return (b1 << 4) | b2, b1 != 255 && b2 != 255
}

pkg/metadata/vendor/github.com/google/uuid/uuid.go generated vendored Normal file

@@ -0,0 +1,245 @@
// Copyright 2018 Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package uuid
import (
"bytes"
"crypto/rand"
"encoding/hex"
"errors"
"fmt"
"io"
"strings"
)
// A UUID is a 128 bit (16 byte) Universal Unique IDentifier as defined in RFC
// 4122.
type UUID [16]byte
// A Version represents a UUID's version.
type Version byte
// A Variant represents a UUID's variant.
type Variant byte
// Constants returned by Variant.
const (
Invalid = Variant(iota) // Invalid UUID
RFC4122 // The variant specified in RFC4122
Reserved // Reserved, NCS backward compatibility.
Microsoft // Reserved, Microsoft Corporation backward compatibility.
Future // Reserved for future definition.
)
var rander = rand.Reader // random function
// Parse decodes s into a UUID or returns an error. Both the standard UUID
// forms of xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx and
// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx are decoded as well as the
// Microsoft encoding {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx} and the raw hex
// encoding: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx.
func Parse(s string) (UUID, error) {
var uuid UUID
switch len(s) {
// xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
case 36:
// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
case 36 + 9:
if strings.ToLower(s[:9]) != "urn:uuid:" {
return uuid, fmt.Errorf("invalid urn prefix: %q", s[:9])
}
s = s[9:]
// {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx}
case 36 + 2:
s = s[1:]
// xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
case 32:
var ok bool
for i := range uuid {
uuid[i], ok = xtob(s[i*2], s[i*2+1])
if !ok {
return uuid, errors.New("invalid UUID format")
}
}
return uuid, nil
default:
return uuid, fmt.Errorf("invalid UUID length: %d", len(s))
}
// s is now at least 36 bytes long
// it must be of the form xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
if s[8] != '-' || s[13] != '-' || s[18] != '-' || s[23] != '-' {
return uuid, errors.New("invalid UUID format")
}
for i, x := range [16]int{
0, 2, 4, 6,
9, 11,
14, 16,
19, 21,
24, 26, 28, 30, 32, 34} {
v, ok := xtob(s[x], s[x+1])
if !ok {
return uuid, errors.New("invalid UUID format")
}
uuid[i] = v
}
return uuid, nil
}
// ParseBytes is like Parse, except it parses a byte slice instead of a string.
func ParseBytes(b []byte) (UUID, error) {
var uuid UUID
switch len(b) {
case 36: // xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
case 36 + 9: // urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
if !bytes.Equal(bytes.ToLower(b[:9]), []byte("urn:uuid:")) {
return uuid, fmt.Errorf("invalid urn prefix: %q", b[:9])
}
b = b[9:]
case 36 + 2: // {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx}
b = b[1:]
case 32: // xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
var ok bool
for i := 0; i < 32; i += 2 {
uuid[i/2], ok = xtob(b[i], b[i+1])
if !ok {
return uuid, errors.New("invalid UUID format")
}
}
return uuid, nil
default:
return uuid, fmt.Errorf("invalid UUID length: %d", len(b))
}
// b is now at least 36 bytes long
// it must be of the form xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
if b[8] != '-' || b[13] != '-' || b[18] != '-' || b[23] != '-' {
return uuid, errors.New("invalid UUID format")
}
for i, x := range [16]int{
0, 2, 4, 6,
9, 11,
14, 16,
19, 21,
24, 26, 28, 30, 32, 34} {
v, ok := xtob(b[x], b[x+1])
if !ok {
return uuid, errors.New("invalid UUID format")
}
uuid[i] = v
}
return uuid, nil
}
// MustParse is like Parse but panics if the string cannot be parsed.
// It simplifies safe initialization of global variables holding compiled UUIDs.
func MustParse(s string) UUID {
uuid, err := Parse(s)
if err != nil {
panic(`uuid: Parse(` + s + `): ` + err.Error())
}
return uuid
}
// FromBytes creates a new UUID from a byte slice. Returns an error if the slice
// does not have a length of 16. The bytes are copied from the slice.
func FromBytes(b []byte) (uuid UUID, err error) {
err = uuid.UnmarshalBinary(b)
return uuid, err
}
// Must returns uuid if err is nil and panics otherwise.
func Must(uuid UUID, err error) UUID {
if err != nil {
panic(err)
}
return uuid
}
// String returns the string form of uuid, xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
// , or "" if uuid is invalid.
func (uuid UUID) String() string {
var buf [36]byte
encodeHex(buf[:], uuid)
return string(buf[:])
}
// URN returns the RFC 2141 URN form of uuid,
// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx, or "" if uuid is invalid.
func (uuid UUID) URN() string {
var buf [36 + 9]byte
copy(buf[:], "urn:uuid:")
encodeHex(buf[9:], uuid)
return string(buf[:])
}
func encodeHex(dst []byte, uuid UUID) {
hex.Encode(dst, uuid[:4])
dst[8] = '-'
hex.Encode(dst[9:13], uuid[4:6])
dst[13] = '-'
hex.Encode(dst[14:18], uuid[6:8])
dst[18] = '-'
hex.Encode(dst[19:23], uuid[8:10])
dst[23] = '-'
hex.Encode(dst[24:], uuid[10:])
}
// Variant returns the variant encoded in uuid.
func (uuid UUID) Variant() Variant {
switch {
case (uuid[8] & 0xc0) == 0x80:
return RFC4122
case (uuid[8] & 0xe0) == 0xc0:
return Microsoft
case (uuid[8] & 0xe0) == 0xe0:
return Future
default:
return Reserved
}
}
// Version returns the version of uuid.
func (uuid UUID) Version() Version {
return Version(uuid[6] >> 4)
}
func (v Version) String() string {
if v > 15 {
return fmt.Sprintf("BAD_VERSION_%d", v)
}
return fmt.Sprintf("VERSION_%d", v)
}
func (v Variant) String() string {
switch v {
case RFC4122:
return "RFC4122"
case Reserved:
return "Reserved"
case Microsoft:
return "Microsoft"
case Future:
return "Future"
case Invalid:
return "Invalid"
}
return fmt.Sprintf("BadVariant%d", int(v))
}
// SetRand sets the random number generator to r, which implements io.Reader.
// If r.Read returns an error when the package requests random data then
// a panic will be issued.
//
// Calling SetRand with nil sets the random number generator to the default
// generator.
func SetRand(r io.Reader) {
if r == nil {
rander = rand.Reader
return
}
rander = r
}
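
A quick round trip through the accepted encodings (the UUID literal is arbitrary):

```go
package main

import (
	"fmt"
	"log"

	"github.com/google/uuid"
)

func main() {
	// Parse accepts the plain, urn:uuid:, braced, and raw-hex forms
	u, err := uuid.Parse("urn:uuid:f47ac10b-58cc-4372-a567-0e02b2c3d479")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(u.String())  // f47ac10b-58cc-4372-a567-0e02b2c3d479
	fmt.Println(u.URN())     // urn:uuid:f47ac10b-58cc-4372-a567-0e02b2c3d479
	fmt.Println(u.Version()) // VERSION_4
	fmt.Println(u.Variant()) // RFC4122
}
```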

pkg/metadata/vendor/github.com/google/uuid/version1.go generated vendored Normal file

@@ -0,0 +1,44 @@
// Copyright 2016 Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package uuid
import (
"encoding/binary"
)
// NewUUID returns a Version 1 UUID based on the current NodeID and clock
// sequence, and the current time. If the NodeID has not been set by SetNodeID
// or SetNodeInterface then it will be set automatically. If the NodeID cannot
// be set NewUUID returns nil. If clock sequence has not been set by
// SetClockSequence then it will be set automatically. If GetTime fails to
// return the current time, NewUUID returns nil and an error.
//
// In most cases, New should be used.
func NewUUID() (UUID, error) {
nodeMu.Lock()
if nodeID == zeroID {
setNodeInterface("")
}
nodeMu.Unlock()
var uuid UUID
now, seq, err := GetTime()
if err != nil {
return uuid, err
}
timeLow := uint32(now & 0xffffffff)
timeMid := uint16((now >> 32) & 0xffff)
timeHi := uint16((now >> 48) & 0x0fff)
timeHi |= 0x1000 // Version 1
binary.BigEndian.PutUint32(uuid[0:], timeLow)
binary.BigEndian.PutUint16(uuid[4:], timeMid)
binary.BigEndian.PutUint16(uuid[6:], timeHi)
binary.BigEndian.PutUint16(uuid[8:], seq)
copy(uuid[10:], nodeID[:])
return uuid, nil
}

38
pkg/metadata/vendor/github.com/google/uuid/version4.go generated vendored Normal file
View File

@@ -0,0 +1,38 @@
// Copyright 2016 Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package uuid
import "io"
// New creates a new random UUID or panics. New is equivalent to
// the expression
//
// uuid.Must(uuid.NewRandom())
func New() UUID {
return Must(NewRandom())
}
// NewRandom returns a Random (Version 4) UUID.
//
// The strength of the UUIDs is based on the strength of the crypto/rand
// package.
//
// A note about uniqueness derived from the UUID Wikipedia entry:
//
// Randomly generated UUIDs have 122 random bits. One's annual risk of being
// hit by a meteorite is estimated to be one chance in 17 billion, that
// means the probability is about 0.00000000006 (6 × 10^-11),
// equivalent to the odds of creating a few tens of trillions of UUIDs in a
// year and having one duplicate.
func NewRandom() (UUID, error) {
var uuid UUID
_, err := io.ReadFull(rander, uuid[:])
if err != nil {
return Nil, err
}
uuid[6] = (uuid[6] & 0x0f) | 0x40 // Version 4
uuid[8] = (uuid[8] & 0x3f) | 0x80 // Variant is 10
return uuid, nil
}
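
Generating a random UUID is a one-liner with either entry point:

```go
package main

import (
	"fmt"
	"log"

	"github.com/google/uuid"
)

func main() {
	u := uuid.New() // panics only if the system RNG fails
	fmt.Println(u)

	u2, err := uuid.NewRandom() // the error-returning variant
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(u2.Version(), u2.Variant()) // VERSION_4 RFC4122
}
```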

pkg/metadata/vendor/github.com/sirupsen/logrus/LICENSE generated vendored Normal file

@@ -0,0 +1,21 @@
The MIT License (MIT)
Copyright (c) 2014 Simon Eskildsen
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.


@@ -0,0 +1,505 @@
# Logrus <img src="http://i.imgur.com/hTeVwmJ.png" width="40" height="40" alt=":walrus:" class="emoji" title=":walrus:"/>&nbsp;[![Build Status](https://travis-ci.org/sirupsen/logrus.svg?branch=master)](https://travis-ci.org/sirupsen/logrus)&nbsp;[![GoDoc](https://godoc.org/github.com/sirupsen/logrus?status.svg)](https://godoc.org/github.com/sirupsen/logrus)
Logrus is a structured logger for Go (golang), completely API compatible with
the standard library logger.
**Seeing weird case-sensitive problems?** In the past it was possible to
import Logrus as both upper- and lower-case. Due to the Go package environment,
this caused issues in the community and we needed a standard. Some environments
experienced problems with the upper-case variant, so the lower-case form was chosen.
Everything using `logrus` will need to use the lower-case:
`github.com/sirupsen/logrus`. Any package that isn't should be changed.
To fix Glide, see [these
comments](https://github.com/sirupsen/logrus/issues/553#issuecomment-306591437).
For an in-depth explanation of the casing issue, see [this
comment](https://github.com/sirupsen/logrus/issues/570#issuecomment-313933276).
**Are you interested in assisting in maintaining Logrus?** Currently I have a
lot of obligations, and I am unable to provide Logrus with the maintainership it
needs. If you'd like to help, please reach out to me at `simon at author's
username dot com`.
Nicely color-coded in development (when a TTY is attached, otherwise just
plain text):
![Colored](http://i.imgur.com/PY7qMwd.png)
With `log.SetFormatter(&log.JSONFormatter{})`, for easy parsing by logstash
or Splunk:
```json
{"animal":"walrus","level":"info","msg":"A group of walrus emerges from the
ocean","size":10,"time":"2014-03-10 19:57:38.562264131 -0400 EDT"}
{"level":"warning","msg":"The group's number increased tremendously!",
"number":122,"omg":true,"time":"2014-03-10 19:57:38.562471297 -0400 EDT"}
{"animal":"walrus","level":"info","msg":"A giant walrus appears!",
"size":10,"time":"2014-03-10 19:57:38.562500591 -0400 EDT"}
{"animal":"walrus","level":"info","msg":"Tremendously sized cow enters the ocean.",
"size":9,"time":"2014-03-10 19:57:38.562527896 -0400 EDT"}
{"level":"fatal","msg":"The ice breaks!","number":100,"omg":true,
"time":"2014-03-10 19:57:38.562543128 -0400 EDT"}
```
With the default `log.SetFormatter(&log.TextFormatter{})` when a TTY is not
attached, the output is compatible with the
[logfmt](http://godoc.org/github.com/kr/logfmt) format:
```text
time="2015-03-26T01:27:38-04:00" level=debug msg="Started observing beach" animal=walrus number=8
time="2015-03-26T01:27:38-04:00" level=info msg="A group of walrus emerges from the ocean" animal=walrus size=10
time="2015-03-26T01:27:38-04:00" level=warning msg="The group's number increased tremendously!" number=122 omg=true
time="2015-03-26T01:27:38-04:00" level=debug msg="Temperature changes" temperature=-4
time="2015-03-26T01:27:38-04:00" level=panic msg="It's over 9000!" animal=orca size=9009
time="2015-03-26T01:27:38-04:00" level=fatal msg="The ice breaks!" err=&{0x2082280c0 map[animal:orca size:9009] 2015-03-26 01:27:38.441574009 -0400 EDT panic It's over 9000!} number=100 omg=true
exit status 1
```
#### Case-sensitivity
The organization's name was changed to lower-case, and this will not be changed
back. If you are getting import conflicts due to case sensitivity, please use
the lower-case import: `github.com/sirupsen/logrus`.
#### Example
The simplest way to use Logrus is the package-level exported logger:
```go
package main
import (
log "github.com/sirupsen/logrus"
)
func main() {
log.WithFields(log.Fields{
"animal": "walrus",
}).Info("A walrus appears")
}
```
Note that it's completely API-compatible with the stdlib logger, so you can
replace your `log` imports everywhere with `log "github.com/sirupsen/logrus"`
and you'll now have the flexibility of Logrus. You can customize it all you
want:
```go
package main
import (
"os"
log "github.com/sirupsen/logrus"
)
func init() {
// Log as JSON instead of the default ASCII formatter.
log.SetFormatter(&log.JSONFormatter{})
// Output to stdout instead of the default stderr
// Can be any io.Writer, see below for File example
log.SetOutput(os.Stdout)
// Only log the warning severity or above.
log.SetLevel(log.WarnLevel)
}
func main() {
log.WithFields(log.Fields{
"animal": "walrus",
"size": 10,
}).Info("A group of walrus emerges from the ocean")
log.WithFields(log.Fields{
"omg": true,
"number": 122,
}).Warn("The group's number increased tremendously!")
log.WithFields(log.Fields{
"omg": true,
"number": 100,
}).Fatal("The ice breaks!")
// A common pattern is to re-use fields between logging statements by re-using
// the logrus.Entry returned from WithFields()
contextLogger := log.WithFields(log.Fields{
"common": "this is a common field",
"other": "I also should be logged always",
})
contextLogger.Info("I'll be logged with common and other field")
contextLogger.Info("Me too")
}
```
For more advanced usage such as logging to multiple locations from the same
application, you can also create an instance of the `logrus` Logger:
```go
package main
import (
"os"
"github.com/sirupsen/logrus"
)
// Create a new instance of the logger. You can have any number of instances.
var log = logrus.New()
func main() {
// The API for setting attributes is a little different than the package level
// exported logger. See Godoc.
log.Out = os.Stdout
// You could set this to any `io.Writer` such as a file
// file, err := os.OpenFile("logrus.log", os.O_CREATE|os.O_WRONLY, 0666)
// if err == nil {
// log.Out = file
// } else {
// log.Info("Failed to log to file, using default stderr")
// }
log.WithFields(logrus.Fields{
"animal": "walrus",
"size": 10,
}).Info("A group of walrus emerges from the ocean")
}
```
#### Fields
Logrus encourages careful, structured logging through logging fields instead of
long, unparseable error messages. For example, instead of: `log.Fatalf("Failed
to send event %s to topic %s with key %d")`, you should log the much more
discoverable:
```go
log.WithFields(log.Fields{
"event": event,
"topic": topic,
"key": key,
}).Fatal("Failed to send event")
```
We've found this API forces you to think about logging in a way that produces
much more useful logging messages. We've been in countless situations where just
a single added field to a log statement that was already there would've saved us
hours. The `WithFields` call is optional.
In general, with Logrus, using any of the `printf`-family functions should be
seen as a hint that you should add a field; however, you can still use the
`printf`-family functions with Logrus.
#### Default Fields
Often it's helpful to have fields _always_ attached to log statements in an
application or parts of one. For example, you may want to always log the
`request_id` and `user_ip` in the context of a request. Instead of writing
`log.WithFields(log.Fields{"request_id": request_id, "user_ip": user_ip})` on
every line, you can create a `logrus.Entry` to pass around instead:
```go
requestLogger := log.WithFields(log.Fields{"request_id": request_id, "user_ip": user_ip})
requestLogger.Info("something happened on that request") // will log request_id and user_ip
requestLogger.Warn("something not great happened")
```
#### Hooks
You can add hooks for logging levels. For example, to send errors to an exception
tracking service on `Error`, `Fatal` and `Panic`, send info to StatsD, or log to
multiple places simultaneously, e.g. syslog.
Logrus comes with [built-in hooks](hooks/). Add those, or your custom hook, in
`init`:
```go
import (
log "github.com/sirupsen/logrus"
"gopkg.in/gemnasium/logrus-airbrake-hook.v2" // the package is named "aibrake"
logrus_syslog "github.com/sirupsen/logrus/hooks/syslog"
"log/syslog"
)
func init() {
// Use the Airbrake hook to report errors that have Error severity or above to
// an exception tracker. You can create custom hooks, see the Hooks section.
log.AddHook(airbrake.NewHook(123, "xyz", "production"))
hook, err := logrus_syslog.NewSyslogHook("udp", "localhost:514", syslog.LOG_INFO, "")
if err != nil {
log.Error("Unable to connect to local syslog daemon")
} else {
log.AddHook(hook)
}
}
```
Note: the syslog hook also supports connecting to local syslog (e.g. "/dev/log", "/var/run/syslog", or "/var/run/log"). For details, please check the [syslog hook README](hooks/syslog/README.md).
| Hook | Description |
| ----- | ----------- |
| [Airbrake "legacy"](https://github.com/gemnasium/logrus-airbrake-legacy-hook) | Send errors to an exception tracking service compatible with the Airbrake API V2. Uses [`airbrake-go`](https://github.com/tobi/airbrake-go) behind the scenes. |
| [Airbrake](https://github.com/gemnasium/logrus-airbrake-hook) | Send errors to the Airbrake API V3. Uses the official [`gobrake`](https://github.com/airbrake/gobrake) behind the scenes. |
| [Amazon Kinesis](https://github.com/evalphobia/logrus_kinesis) | Hook for logging to [Amazon Kinesis](https://aws.amazon.com/kinesis/) |
| [Amqp-Hook](https://github.com/vladoatanasov/logrus_amqp) | Hook for logging to Amqp broker (Like RabbitMQ) |
| [Bugsnag](https://github.com/Shopify/logrus-bugsnag/blob/master/bugsnag.go) | Send errors to the Bugsnag exception tracking service. |
| [DeferPanic](https://github.com/deferpanic/dp-logrus) | Hook for logging to DeferPanic |
| [Discordrus](https://github.com/kz/discordrus) | Hook for logging to [Discord](https://discordapp.com/) |
| [ElasticSearch](https://github.com/sohlich/elogrus) | Hook for logging to ElasticSearch|
| [Firehose](https://github.com/beaubrewer/logrus_firehose) | Hook for logging to [Amazon Firehose](https://aws.amazon.com/kinesis/firehose/)
| [Fluentd](https://github.com/evalphobia/logrus_fluent) | Hook for logging to fluentd |
| [Go-Slack](https://github.com/multiplay/go-slack) | Hook for logging to [Slack](https://slack.com) |
| [Graylog](https://github.com/gemnasium/logrus-graylog-hook) | Hook for logging to [Graylog](http://graylog2.org/) |
| [Hiprus](https://github.com/nubo/hiprus) | Send errors to a channel in hipchat. |
| [Honeybadger](https://github.com/agonzalezro/logrus_honeybadger) | Hook for sending exceptions to Honeybadger |
| [InfluxDB](https://github.com/Abramovic/logrus_influxdb) | Hook for logging to influxdb |
| [Influxus](http://github.com/vlad-doru/influxus) | Hook for concurrently logging to [InfluxDB](http://influxdata.com/) |
| [Journalhook](https://github.com/wercker/journalhook) | Hook for logging to `systemd-journald` |
| [KafkaLogrus](https://github.com/goibibo/KafkaLogrus) | Hook for logging to kafka |
| [LFShook](https://github.com/rifflock/lfshook) | Hook for logging to the local filesystem |
| [Logentries](https://github.com/jcftang/logentriesrus) | Hook for logging to [Logentries](https://logentries.com/) |
| [Logentrus](https://github.com/puddingfactory/logentrus) | Hook for logging to [Logentries](https://logentries.com/) |
| [Logmatic.io](https://github.com/logmatic/logmatic-go) | Hook for logging to [Logmatic.io](http://logmatic.io/) |
| [Logrusly](https://github.com/sebest/logrusly) | Send logs to [Loggly](https://www.loggly.com/) |
| [Logstash](https://github.com/bshuster-repo/logrus-logstash-hook) | Hook for logging to [Logstash](https://www.elastic.co/products/logstash) |
| [Mail](https://github.com/zbindenren/logrus_mail) | Hook for sending exceptions via mail |
| [Mattermost](https://github.com/shuLhan/mattermost-integration/tree/master/hooks/logrus) | Hook for logging to [Mattermost](https://mattermost.com/) |
| [Mongodb](https://github.com/weekface/mgorus) | Hook for logging to mongodb |
| [NATS-Hook](https://github.com/rybit/nats_logrus_hook) | Hook for logging to [NATS](https://nats.io) |
| [Octokit](https://github.com/dorajistyle/logrus-octokit-hook) | Hook for logging to github via octokit |
| [Papertrail](https://github.com/polds/logrus-papertrail-hook) | Send errors to the [Papertrail](https://papertrailapp.com) hosted logging service via UDP. |
| [PostgreSQL](https://github.com/gemnasium/logrus-postgresql-hook) | Send logs to [PostgreSQL](http://postgresql.org) |
| [Pushover](https://github.com/toorop/logrus_pushover) | Send error via [Pushover](https://pushover.net) |
| [Raygun](https://github.com/squirkle/logrus-raygun-hook) | Hook for logging to [Raygun.io](http://raygun.io/) |
| [Redis-Hook](https://github.com/rogierlommers/logrus-redis-hook) | Hook for logging to a ELK stack (through Redis) |
| [Rollrus](https://github.com/heroku/rollrus) | Hook for sending errors to rollbar |
| [Scribe](https://github.com/sagar8192/logrus-scribe-hook) | Hook for logging to [Scribe](https://github.com/facebookarchive/scribe)|
| [Sentry](https://github.com/evalphobia/logrus_sentry) | Send errors to the Sentry error logging and aggregation service. |
| [Slackrus](https://github.com/johntdyer/slackrus) | Hook for Slack chat. |
| [Stackdriver](https://github.com/knq/sdhook) | Hook for logging to [Google Stackdriver](https://cloud.google.com/logging/) |
| [Sumorus](https://github.com/doublefree/sumorus) | Hook for logging to [SumoLogic](https://www.sumologic.com/)|
| [Syslog](https://github.com/sirupsen/logrus/blob/master/hooks/syslog/syslog.go) | Send errors to remote syslog server. Uses standard library `log/syslog` behind the scenes. |
| [Syslog TLS](https://github.com/shinji62/logrus-syslog-ng) | Send errors to remote syslog server with TLS support. |
| [TraceView](https://github.com/evalphobia/logrus_appneta) | Hook for logging to [AppNeta TraceView](https://www.appneta.com/products/traceview/) |
| [Typetalk](https://github.com/dragon3/logrus-typetalk-hook) | Hook for logging to [Typetalk](https://www.typetalk.in/) |
| [logz.io](https://github.com/ripcurld00d/logrus-logzio-hook) | Hook for logging to [logz.io](https://logz.io), a Log as a Service using Logstash |
| [SQS-Hook](https://github.com/tsarpaul/logrus_sqs) | Hook for logging to [Amazon Simple Queue Service (SQS)](https://aws.amazon.com/sqs/) |
#### Level logging
Logrus has six logging levels: Debug, Info, Warning, Error, Fatal and Panic.
```go
log.Debug("Useful debugging information.")
log.Info("Something noteworthy happened!")
log.Warn("You should probably take a look at this.")
log.Error("Something failed but I'm not quitting.")
// Calls os.Exit(1) after logging
log.Fatal("Bye.")
// Calls panic() after logging
log.Panic("I'm bailing.")
```
If you set the logging level on a `Logger`, it will only log entries with
that severity or anything above it:
```go
// Will log anything that is info or above (warn, error, fatal, panic). Default.
log.SetLevel(log.InfoLevel)
```
It may be useful to set `log.Level = logrus.DebugLevel` in a debug or verbose
environment if your application has that.
#### Entries
Besides the fields added with `WithField` or `WithFields` some fields are
automatically added to all logging events:
1. `time`. The timestamp when the entry was created.
2. `msg`. The logging message passed to `{Info,Warn,Error,Fatal,Panic}` after
the `WithFields` call. E.g. `Failed to send event.`
3. `level`. The logging level. E.g. `info`.
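For example, even a field-less call such as `log.Info("Failed to send event.")` (an illustrative call, not taken from this document) still carries all three default fields in the logfmt output:
```text
time="2015-03-26T01:27:38-04:00" level=info msg="Failed to send event."
```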
#### Environments
Logrus has no notion of environment.
If you wish for hooks and formatters to only be used in specific environments,
you should handle that yourself. For example, if your application has a global
variable `Environment`, which is a string representation of the environment you
could do:
```go
import (
log "github.com/sirupsen/logrus"
)
init() {
// do something here to set environment depending on an environment variable
// or command-line flag
if Environment == "production" {
log.SetFormatter(&log.JSONFormatter{})
} else {
// The TextFormatter is default, you don't actually have to do this.
log.SetFormatter(&log.TextFormatter{})
}
}
```
This configuration is how `logrus` was intended to be used, but JSON in
production is mostly only useful if you do log aggregation with tools like
Splunk or Logstash.
#### Formatters
The built-in logging formatters are:
* `logrus.TextFormatter`. Logs the event in colors if stdout is a tty, otherwise
without colors.
* *Note:* to force colored output when there is no TTY, set the `ForceColors`
field to `true`. To force no colored output even if there is a TTY, set the
`DisableColors` field to `true`. For Windows, see
[github.com/mattn/go-colorable](https://github.com/mattn/go-colorable).
* All options are listed in the [generated docs](https://godoc.org/github.com/sirupsen/logrus#TextFormatter).
* `logrus.JSONFormatter`. Logs fields as JSON.
* All options are listed in the [generated docs](https://godoc.org/github.com/sirupsen/logrus#JSONFormatter).
Third party logging formatters:
* [`FluentdFormatter`](https://github.com/joonix/log). Formats entries that can be parsed by Kubernetes and Google Container Engine.
* [`logstash`](https://github.com/bshuster-repo/logrus-logstash-hook). Logs fields as [Logstash](http://logstash.net) Events.
* [`prefixed`](https://github.com/x-cray/logrus-prefixed-formatter). Displays log entry source along with alternative layout.
* [`zalgo`](https://github.com/aybabtme/logzalgo). Invoking the P͉̫o̳̼̊w̖͈̰͎e̬͔̭͂r͚̼̹̲ ̫͓͉̳͈ō̠͕͖̚f̝͍̠ ͕̲̞͖͑Z̖̫̤̫ͪa͉̬͈̗l͖͎g̳̥o̰̥̅!̣͔̲̻͊̄ ̙̘̦̹̦.
You can define your formatter by implementing the `Formatter` interface,
requiring a `Format` method. `Format` takes an `*Entry`. `entry.Data` is a
`Fields` type (`map[string]interface{}`) with all your fields as well as the
default ones (see Entries section above):
```go
type MyJSONFormatter struct {
}

func (f *MyJSONFormatter) Format(entry *Entry) ([]byte, error) {
  // Note this doesn't include Time, Level and Message which are available on
  // the Entry. Consult `godoc` on information about those fields or read the
  // source of the official loggers.
  serialized, err := json.Marshal(entry.Data)
  if err != nil {
    return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err)
  }
  return append(serialized, '\n'), nil
}

log.SetFormatter(new(MyJSONFormatter))
```
#### Logger as an `io.Writer`
Logrus can be transformed into an `io.Writer`. That writer is the end of an `io.Pipe` and it is your responsibility to close it.
```go
w := logger.Writer()
defer w.Close()
srv := http.Server{
// create a stdlib log.Logger that writes to
// logrus.Logger.
ErrorLog: log.New(w, "", 0),
}
```
Each line written to that writer will be printed the usual way, using formatters
and hooks. The level for those entries is `info`.
This means that we can override the standard library logger easily:
```go
logger := logrus.New()
logger.Formatter = &logrus.JSONFormatter{}
// Use logrus for standard log output
// Note that `log` here references stdlib's log
// Not logrus imported under the name `log`.
log.SetOutput(logger.Writer())
```
#### Rotation
Log rotation is not provided with Logrus. Log rotation should be done by an
external program (like `logrotate(8)`) that can compress and delete old log
entries. It should not be a feature of the application-level logger.
#### Tools
| Tool | Description |
| ---- | ----------- |
|[Logrus Mate](https://github.com/gogap/logrus_mate)|Logrus Mate is a tool for Logrus that manages loggers: you can initialize a logger's level, hook, and formatter from a config file, so the logger is generated with a different config in each environment.|
|[Logrus Viper Helper](https://github.com/heirko/go-contrib/tree/master/logrusHelper)|A helper around Logrus that wraps spf13/Viper to load configuration with fangs! It also simplifies Logrus configuration using some behavior of [Logrus Mate](https://github.com/gogap/logrus_mate). [sample](https://github.com/heirko/iris-contrib/blob/master/middleware/logrus-logger/example) |
#### Testing
Logrus has a built-in facility for asserting the presence of log messages. This is implemented through the `test` hook and provides:
* decorators for an existing logger (`test.NewLocal` and `test.NewGlobal`) which basically just add the `test` hook
* a test logger (`test.NewNullLogger`) that just records log messages (and does not output any):
```go
import(
"github.com/sirupsen/logrus"
"github.com/sirupsen/logrus/hooks/test"
"github.com/stretchr/testify/assert"
"testing"
)
func TestSomething(t *testing.T) {
logger, hook := test.NewNullLogger()
logger.Error("Helloerror")
assert.Equal(t, 1, len(hook.Entries))
assert.Equal(t, logrus.ErrorLevel, hook.LastEntry().Level)
assert.Equal(t, "Helloerror", hook.LastEntry().Message)
hook.Reset()
assert.Nil(t, hook.LastEntry())
}
```
#### Fatal handlers
Logrus can register one or more functions that will be called when any `fatal`
level message is logged. The registered handlers will be executed before
logrus performs an `os.Exit(1)`. This behavior may be helpful if callers need
to shut down gracefully. Unlike a `panic("Something went wrong...")` call, which can be intercepted with a deferred `recover`, a call to `os.Exit(1)` cannot be intercepted.
```go
...
handler := func() {
// gracefully shutdown something...
}
logrus.RegisterExitHandler(handler)
...
```
#### Thread safety
By default, the Logger is protected by a mutex for concurrent writes; the mutex is held when calling hooks and writing logs.
If you are sure such locking is not needed, you can call logger.SetNoLock() to disable it.
Situations where locking is not needed include:
* You have no hooks registered, or calling hooks is already thread-safe.
* Writing to logger.Out is already thread-safe, for example:
1) logger.Out is protected by locks.
2) logger.Out is an os.File handle opened with the `O_APPEND` flag, and every write is smaller than 4k. (This allows multi-thread/multi-process writing.)
(Refer to http://www.notthewizard.com/2014/06/17/are-files-appends-really-atomic/)
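A minimal sketch of the append-mode case above (assuming Linux append semantics for small writes and an `os` import; the file name is illustrative):
```go
logger := logrus.New()
// O_APPEND writes under 4k are effectively atomic on Linux, so the
// logger's per-write locking can be skipped.
f, err := os.OpenFile("app.log", os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0644)
if err == nil {
	logger.Out = f
	logger.SetNoLock()
}
```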

pkg/metadata/vendor/github.com/sirupsen/logrus/alt_exit.go generated vendored Normal file

@@ -0,0 +1,64 @@
package logrus
// The following code was sourced and modified from the
// https://github.com/tebeka/atexit package governed by the following license:
//
// Copyright (c) 2012 Miki Tebeka <miki.tebeka@gmail.com>.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy of
// this software and associated documentation files (the "Software"), to deal in
// the Software without restriction, including without limitation the rights to
// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
// the Software, and to permit persons to whom the Software is furnished to do so,
// subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
// FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
// COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
// IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
// CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import (
"fmt"
"os"
)
var handlers = []func(){}
func runHandler(handler func()) {
defer func() {
if err := recover(); err != nil {
fmt.Fprintln(os.Stderr, "Error: Logrus exit handler error:", err)
}
}()
handler()
}
func runHandlers() {
for _, handler := range handlers {
runHandler(handler)
}
}
// Exit runs all the Logrus atexit handlers and then terminates the program using os.Exit(code)
func Exit(code int) {
runHandlers()
os.Exit(code)
}
// RegisterExitHandler adds a Logrus Exit handler, call logrus.Exit to invoke
// all handlers. The handlers will also be invoked when any Fatal log entry is
// made.
//
// This method is useful when a caller wishes to use logrus to log a fatal
// message but also needs to shut down gracefully. An example use case could be
// closing database connections, or sending an alert that the application is
// closing.
func RegisterExitHandler(handler func()) {
handlers = append(handlers, handler)
}

pkg/metadata/vendor/github.com/sirupsen/logrus/doc.go generated vendored Normal file

@@ -0,0 +1,26 @@
/*
Package logrus is a structured logger for Go, completely API compatible with the standard library logger.
The simplest way to use Logrus is the package-level exported logger:
package main
import (
log "github.com/sirupsen/logrus"
)
func main() {
log.WithFields(log.Fields{
"animal": "walrus",
"number": 1,
"size": 10,
}).Info("A walrus appears")
}
Output:
time="2015-09-07T08:48:33Z" level=info msg="A walrus appears" animal=walrus number=1 size=10
For a full guide visit https://github.com/sirupsen/logrus
*/
package logrus

pkg/metadata/vendor/github.com/sirupsen/logrus/entry.go generated vendored Normal file

@@ -0,0 +1,276 @@
package logrus
import (
"bytes"
"fmt"
"os"
"sync"
"time"
)
var bufferPool *sync.Pool
func init() {
bufferPool = &sync.Pool{
New: func() interface{} {
return new(bytes.Buffer)
},
}
}
// Defines the key when adding errors using WithError.
var ErrorKey = "error"
// An entry is the final or intermediate Logrus logging entry. It contains all
// the fields passed with WithField{,s}. It's finally logged when Debug, Info,
// Warn, Error, Fatal or Panic is called on it. These objects can be reused and
// passed around as much as you wish to avoid field duplication.
type Entry struct {
Logger *Logger
// Contains all the fields set by the user.
Data Fields
// Time at which the log entry was created
Time time.Time
// Level the log entry was logged at: Debug, Info, Warn, Error, Fatal or Panic
// This field will be set on entry firing and the value will be equal to the one in Logger struct field.
Level Level
// Message passed to Debug, Info, Warn, Error, Fatal or Panic
Message string
// When the formatter is called in entry.log(), a Buffer may be set on the entry
Buffer *bytes.Buffer
}
func NewEntry(logger *Logger) *Entry {
return &Entry{
Logger: logger,
// Default is three fields, give a little extra room
Data: make(Fields, 5),
}
}
// Returns the string representation of the entry, as produced by the
// formatter.
func (entry *Entry) String() (string, error) {
serialized, err := entry.Logger.Formatter.Format(entry)
if err != nil {
return "", err
}
str := string(serialized)
return str, nil
}
// Add an error as single field (using the key defined in ErrorKey) to the Entry.
func (entry *Entry) WithError(err error) *Entry {
return entry.WithField(ErrorKey, err)
}
// Add a single field to the Entry.
func (entry *Entry) WithField(key string, value interface{}) *Entry {
return entry.WithFields(Fields{key: value})
}
// Add a map of fields to the Entry.
func (entry *Entry) WithFields(fields Fields) *Entry {
data := make(Fields, len(entry.Data)+len(fields))
for k, v := range entry.Data {
data[k] = v
}
for k, v := range fields {
data[k] = v
}
return &Entry{Logger: entry.Logger, Data: data}
}
// This function is not declared with a pointer value because otherwise
// race conditions will occur when using multiple goroutines
func (entry Entry) log(level Level, msg string) {
var buffer *bytes.Buffer
entry.Time = time.Now()
entry.Level = level
entry.Message = msg
if err := entry.Logger.Hooks.Fire(level, &entry); err != nil {
entry.Logger.mu.Lock()
fmt.Fprintf(os.Stderr, "Failed to fire hook: %v\n", err)
entry.Logger.mu.Unlock()
}
buffer = bufferPool.Get().(*bytes.Buffer)
buffer.Reset()
defer bufferPool.Put(buffer)
entry.Buffer = buffer
serialized, err := entry.Logger.Formatter.Format(&entry)
entry.Buffer = nil
if err != nil {
entry.Logger.mu.Lock()
fmt.Fprintf(os.Stderr, "Failed to obtain reader, %v\n", err)
entry.Logger.mu.Unlock()
} else {
entry.Logger.mu.Lock()
_, err = entry.Logger.Out.Write(serialized)
if err != nil {
fmt.Fprintf(os.Stderr, "Failed to write to log, %v\n", err)
}
entry.Logger.mu.Unlock()
}
// To avoid Entry#log() returning a value that only would make sense for
// panic() to use in Entry#Panic(), we avoid the allocation by checking
// directly here.
if level <= PanicLevel {
panic(&entry)
}
}
func (entry *Entry) Debug(args ...interface{}) {
if entry.Logger.level() >= DebugLevel {
entry.log(DebugLevel, fmt.Sprint(args...))
}
}
func (entry *Entry) Print(args ...interface{}) {
entry.Info(args...)
}
func (entry *Entry) Info(args ...interface{}) {
if entry.Logger.level() >= InfoLevel {
entry.log(InfoLevel, fmt.Sprint(args...))
}
}
func (entry *Entry) Warn(args ...interface{}) {
if entry.Logger.level() >= WarnLevel {
entry.log(WarnLevel, fmt.Sprint(args...))
}
}
func (entry *Entry) Warning(args ...interface{}) {
entry.Warn(args...)
}
func (entry *Entry) Error(args ...interface{}) {
if entry.Logger.level() >= ErrorLevel {
entry.log(ErrorLevel, fmt.Sprint(args...))
}
}
func (entry *Entry) Fatal(args ...interface{}) {
if entry.Logger.level() >= FatalLevel {
entry.log(FatalLevel, fmt.Sprint(args...))
}
Exit(1)
}
func (entry *Entry) Panic(args ...interface{}) {
if entry.Logger.level() >= PanicLevel {
entry.log(PanicLevel, fmt.Sprint(args...))
}
panic(fmt.Sprint(args...))
}
// Entry Printf family functions
func (entry *Entry) Debugf(format string, args ...interface{}) {
if entry.Logger.level() >= DebugLevel {
entry.Debug(fmt.Sprintf(format, args...))
}
}
func (entry *Entry) Infof(format string, args ...interface{}) {
if entry.Logger.level() >= InfoLevel {
entry.Info(fmt.Sprintf(format, args...))
}
}
func (entry *Entry) Printf(format string, args ...interface{}) {
entry.Infof(format, args...)
}
func (entry *Entry) Warnf(format string, args ...interface{}) {
if entry.Logger.level() >= WarnLevel {
entry.Warn(fmt.Sprintf(format, args...))
}
}
func (entry *Entry) Warningf(format string, args ...interface{}) {
entry.Warnf(format, args...)
}
func (entry *Entry) Errorf(format string, args ...interface{}) {
if entry.Logger.level() >= ErrorLevel {
entry.Error(fmt.Sprintf(format, args...))
}
}
func (entry *Entry) Fatalf(format string, args ...interface{}) {
if entry.Logger.level() >= FatalLevel {
entry.Fatal(fmt.Sprintf(format, args...))
}
Exit(1)
}
func (entry *Entry) Panicf(format string, args ...interface{}) {
if entry.Logger.level() >= PanicLevel {
entry.Panic(fmt.Sprintf(format, args...))
}
}
// Entry Println family functions
func (entry *Entry) Debugln(args ...interface{}) {
if entry.Logger.level() >= DebugLevel {
entry.Debug(entry.sprintlnn(args...))
}
}
func (entry *Entry) Infoln(args ...interface{}) {
if entry.Logger.level() >= InfoLevel {
entry.Info(entry.sprintlnn(args...))
}
}
func (entry *Entry) Println(args ...interface{}) {
entry.Infoln(args...)
}
func (entry *Entry) Warnln(args ...interface{}) {
if entry.Logger.level() >= WarnLevel {
entry.Warn(entry.sprintlnn(args...))
}
}
func (entry *Entry) Warningln(args ...interface{}) {
entry.Warnln(args...)
}
func (entry *Entry) Errorln(args ...interface{}) {
if entry.Logger.level() >= ErrorLevel {
entry.Error(entry.sprintlnn(args...))
}
}
func (entry *Entry) Fatalln(args ...interface{}) {
if entry.Logger.level() >= FatalLevel {
entry.Fatal(entry.sprintlnn(args...))
}
Exit(1)
}
func (entry *Entry) Panicln(args ...interface{}) {
if entry.Logger.level() >= PanicLevel {
entry.Panic(entry.sprintlnn(args...))
}
}
// Sprintlnn => Sprint no newline. This is to get the behavior of
// fmt.Sprintln, where spaces are always added between operands regardless of
// their type. Instead of vendoring the Sprintln implementation to spare a
// string allocation, we do the simplest thing.
func (entry *Entry) sprintlnn(args ...interface{}) string {
msg := fmt.Sprintln(args...)
return msg[:len(msg)-1]
}

pkg/metadata/vendor/github.com/sirupsen/logrus/exported.go generated vendored Normal file

@@ -0,0 +1,193 @@
package logrus
import (
"io"
)
var (
// std is the name of the standard logger in stdlib `log`
std = New()
)
func StandardLogger() *Logger {
return std
}
// SetOutput sets the standard logger output.
func SetOutput(out io.Writer) {
std.mu.Lock()
defer std.mu.Unlock()
std.Out = out
}
// SetFormatter sets the standard logger formatter.
func SetFormatter(formatter Formatter) {
std.mu.Lock()
defer std.mu.Unlock()
std.Formatter = formatter
}
// SetLevel sets the standard logger level.
func SetLevel(level Level) {
std.mu.Lock()
defer std.mu.Unlock()
std.SetLevel(level)
}
// GetLevel returns the standard logger level.
func GetLevel() Level {
std.mu.Lock()
defer std.mu.Unlock()
return std.level()
}
// AddHook adds a hook to the standard logger hooks.
func AddHook(hook Hook) {
std.mu.Lock()
defer std.mu.Unlock()
std.Hooks.Add(hook)
}
// WithError creates an entry from the standard logger and adds an error to it, using the value defined in ErrorKey as key.
func WithError(err error) *Entry {
return std.WithField(ErrorKey, err)
}
// WithField creates an entry from the standard logger and adds a field to
// it. If you want multiple fields, use `WithFields`.
//
// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal
// or Panic on the Entry it returns.
func WithField(key string, value interface{}) *Entry {
return std.WithField(key, value)
}
// WithFields creates an entry from the standard logger and adds multiple
// fields to it. This is simply a helper for `WithField`, invoking it
// once for each field.
//
// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal
// or Panic on the Entry it returns.
func WithFields(fields Fields) *Entry {
return std.WithFields(fields)
}
// Debug logs a message at level Debug on the standard logger.
func Debug(args ...interface{}) {
std.Debug(args...)
}
// Print logs a message at level Info on the standard logger.
func Print(args ...interface{}) {
std.Print(args...)
}
// Info logs a message at level Info on the standard logger.
func Info(args ...interface{}) {
std.Info(args...)
}
// Warn logs a message at level Warn on the standard logger.
func Warn(args ...interface{}) {
std.Warn(args...)
}
// Warning logs a message at level Warn on the standard logger.
func Warning(args ...interface{}) {
std.Warning(args...)
}
// Error logs a message at level Error on the standard logger.
func Error(args ...interface{}) {
std.Error(args...)
}
// Panic logs a message at level Panic on the standard logger.
func Panic(args ...interface{}) {
std.Panic(args...)
}
// Fatal logs a message at level Fatal on the standard logger.
func Fatal(args ...interface{}) {
std.Fatal(args...)
}
// Debugf logs a message at level Debug on the standard logger.
func Debugf(format string, args ...interface{}) {
std.Debugf(format, args...)
}
// Printf logs a message at level Info on the standard logger.
func Printf(format string, args ...interface{}) {
std.Printf(format, args...)
}
// Infof logs a message at level Info on the standard logger.
func Infof(format string, args ...interface{}) {
std.Infof(format, args...)
}
// Warnf logs a message at level Warn on the standard logger.
func Warnf(format string, args ...interface{}) {
std.Warnf(format, args...)
}
// Warningf logs a message at level Warn on the standard logger.
func Warningf(format string, args ...interface{}) {
std.Warningf(format, args...)
}
// Errorf logs a message at level Error on the standard logger.
func Errorf(format string, args ...interface{}) {
std.Errorf(format, args...)
}
// Panicf logs a message at level Panic on the standard logger.
func Panicf(format string, args ...interface{}) {
std.Panicf(format, args...)
}
// Fatalf logs a message at level Fatal on the standard logger.
func Fatalf(format string, args ...interface{}) {
std.Fatalf(format, args...)
}
// Debugln logs a message at level Debug on the standard logger.
func Debugln(args ...interface{}) {
std.Debugln(args...)
}
// Println logs a message at level Info on the standard logger.
func Println(args ...interface{}) {
std.Println(args...)
}
// Infoln logs a message at level Info on the standard logger.
func Infoln(args ...interface{}) {
std.Infoln(args...)
}
// Warnln logs a message at level Warn on the standard logger.
func Warnln(args ...interface{}) {
std.Warnln(args...)
}
// Warningln logs a message at level Warn on the standard logger.
func Warningln(args ...interface{}) {
std.Warningln(args...)
}
// Errorln logs a message at level Error on the standard logger.
func Errorln(args ...interface{}) {
std.Errorln(args...)
}
// Panicln logs a message at level Panic on the standard logger.
func Panicln(args ...interface{}) {
std.Panicln(args...)
}
// Fatalln logs a message at level Fatal on the standard logger.
func Fatalln(args ...interface{}) {
std.Fatalln(args...)
}

pkg/metadata/vendor/github.com/sirupsen/logrus/formatter.go generated vendored Normal file

@@ -0,0 +1,45 @@
package logrus
import "time"
const defaultTimestampFormat = time.RFC3339
// The Formatter interface is used to implement a custom Formatter. It takes an
// `Entry`. It exposes all the fields, including the default ones:
//
// * `entry.Data["msg"]`. The message passed from Info, Warn, Error ..
// * `entry.Data["time"]`. The timestamp.
// * `entry.Data["level"]. The level the entry was logged at.
//
// Any additional fields added with `WithField` or `WithFields` are also in
// `entry.Data`. Format is expected to return an array of bytes which are then
// logged to `logger.Out`.
type Formatter interface {
Format(*Entry) ([]byte, error)
}
// This is to not silently overwrite `time`, `msg` and `level` fields when
// dumping them. If this code weren't there, doing:
//
// logrus.WithField("level", 1).Info("hello")
//
// would just silently drop the user-provided level. Instead, with this code
// it'll be logged as:
//
// {"level": "info", "fields.level": 1, "msg": "hello", "time": "..."}
//
// It's not exported because it's still using Data in an opinionated way. It's to
// avoid code duplication between the two default formatters.
func prefixFieldClashes(data Fields) {
if t, ok := data["time"]; ok {
data["fields.time"] = t
}
if m, ok := data["msg"]; ok {
data["fields.msg"] = m
}
if l, ok := data["level"]; ok {
data["fields.level"] = l
}
}

pkg/metadata/vendor/github.com/sirupsen/logrus/hooks.go generated vendored Normal file

@@ -0,0 +1,34 @@
package logrus
// A hook to be fired when logging on the logging levels returned from
// `Levels()` on your implementation of the interface. Note that this is not
// fired in a goroutine or a channel with workers, you should handle such
// functionality yourself if your call is non-blocking and you don't wish for
// the logging calls for levels returned from `Levels()` to block.
type Hook interface {
Levels() []Level
Fire(*Entry) error
}
// Internal type for storing the hooks on a logger instance.
type LevelHooks map[Level][]Hook
// Add a hook to an instance of logger. This is called with
// `log.Hooks.Add(new(MyHook))` where `MyHook` implements the `Hook` interface.
func (hooks LevelHooks) Add(hook Hook) {
for _, level := range hook.Levels() {
hooks[level] = append(hooks[level], hook)
}
}
// Fire all the hooks for the passed level. Used by `entry.log` to fire
// appropriate hooks for a log entry.
func (hooks LevelHooks) Fire(level Level, entry *Entry) error {
for _, hook := range hooks[level] {
if err := hook.Fire(entry); err != nil {
return err
}
}
return nil
}
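As a sketch of what an implementation of this interface might look like (hypothetical, not part of the vendored package), here is a hook that copies error-and-above entries to stderr; it assumes `"os"` is imported:
```go
type stderrHook struct{}

func (h *stderrHook) Levels() []Level {
	// Fire only for error-and-above severities.
	return []Level{ErrorLevel, FatalLevel, PanicLevel}
}

func (h *stderrHook) Fire(entry *Entry) error {
	// entry.String() runs the logger's formatter over the entry.
	line, err := entry.String()
	if err != nil {
		return err
	}
	_, err = os.Stderr.WriteString(line)
	return err
}
```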

pkg/metadata/vendor/github.com/sirupsen/logrus/json_formatter.go generated vendored Normal file

@@ -0,0 +1,79 @@
package logrus
import (
"encoding/json"
"fmt"
)
type fieldKey string
// FieldMap allows customization of the key names for default fields.
type FieldMap map[fieldKey]string
// Default key names for the default fields
const (
FieldKeyMsg = "msg"
FieldKeyLevel = "level"
FieldKeyTime = "time"
)
func (f FieldMap) resolve(key fieldKey) string {
if k, ok := f[key]; ok {
return k
}
return string(key)
}
// JSONFormatter formats logs into parsable json
type JSONFormatter struct {
// TimestampFormat sets the format used for marshaling timestamps.
TimestampFormat string
// DisableTimestamp allows disabling automatic timestamps in output
DisableTimestamp bool
// FieldMap allows users to customize the names of keys for default fields.
// As an example:
// formatter := &JSONFormatter{
// FieldMap: FieldMap{
// FieldKeyTime: "@timestamp",
// FieldKeyLevel: "@level",
// FieldKeyMsg: "@message",
// },
// }
FieldMap FieldMap
}
// Format renders a single log entry
func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) {
data := make(Fields, len(entry.Data)+3)
for k, v := range entry.Data {
switch v := v.(type) {
case error:
// Otherwise errors are ignored by `encoding/json`
// https://github.com/sirupsen/logrus/issues/137
data[k] = v.Error()
default:
data[k] = v
}
}
prefixFieldClashes(data)
timestampFormat := f.TimestampFormat
if timestampFormat == "" {
timestampFormat = defaultTimestampFormat
}
if !f.DisableTimestamp {
data[f.FieldMap.resolve(FieldKeyTime)] = entry.Time.Format(timestampFormat)
}
data[f.FieldMap.resolve(FieldKeyMsg)] = entry.Message
data[f.FieldMap.resolve(FieldKeyLevel)] = entry.Level.String()
serialized, err := json.Marshal(data)
if err != nil {
return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err)
}
return append(serialized, '\n'), nil
}

pkg/metadata/vendor/github.com/sirupsen/logrus/logger.go generated vendored Normal file

@@ -0,0 +1,317 @@
package logrus
import (
"io"
"os"
"sync"
"sync/atomic"
)
type Logger struct {
// The logs are `io.Copy`'d to this in a mutex. It's common to set this to a
// file, or leave it default which is `os.Stderr`. You can also set this to
// something more adventurous, such as logging to Kafka.
Out io.Writer
// Hooks for the logger instance. These allow firing events based on logging
// levels and log entries. For example, to send errors to an error tracking
// service, log to StatsD or dump the core on fatal errors.
Hooks LevelHooks
// All log entries pass through the formatter before being logged to Out. The
// included formatters are `TextFormatter` and `JSONFormatter` for which
// TextFormatter is the default. In development (when a TTY is attached) it
// logs with colors, but to a file it wouldn't. You can easily provide your
// own formatter by implementing the `Formatter` interface; see the `README` or
// included formatters for examples.
Formatter Formatter
// The logging level the logger should log at. This is typically (and defaults
// to) `logrus.Info`, which allows Info(), Warn(), Error() and Fatal() to be
// logged.
Level Level
// Used to sync writing to the log. Locking is enabled by default.
mu MutexWrap
// Reusable empty entry
entryPool sync.Pool
}
type MutexWrap struct {
lock sync.Mutex
disabled bool
}
func (mw *MutexWrap) Lock() {
if !mw.disabled {
mw.lock.Lock()
}
}
func (mw *MutexWrap) Unlock() {
if !mw.disabled {
mw.lock.Unlock()
}
}
func (mw *MutexWrap) Disable() {
mw.disabled = true
}
// Creates a new logger. Configuration should be set by changing `Formatter`,
// `Out` and `Hooks` directly on the default logger instance. You can also just
// instantiate your own:
//
// var log = &Logger{
// Out: os.Stderr,
// Formatter: new(JSONFormatter),
// Hooks: make(LevelHooks),
// Level: logrus.DebugLevel,
// }
//
// It's recommended to make this a global instance called `log`.
func New() *Logger {
return &Logger{
Out: os.Stderr,
Formatter: new(TextFormatter),
Hooks: make(LevelHooks),
Level: InfoLevel,
}
}
func (logger *Logger) newEntry() *Entry {
entry, ok := logger.entryPool.Get().(*Entry)
if ok {
return entry
}
return NewEntry(logger)
}
func (logger *Logger) releaseEntry(entry *Entry) {
logger.entryPool.Put(entry)
}
// Adds a field to the log entry; note that it doesn't log until you call
// Debug, Print, Info, Warn, Fatal or Panic. It only creates a log entry.
// If you want multiple fields, use `WithFields`.
func (logger *Logger) WithField(key string, value interface{}) *Entry {
entry := logger.newEntry()
defer logger.releaseEntry(entry)
return entry.WithField(key, value)
}
// Adds a struct of fields to the log entry. All it does is call `WithField` for
// each `Field`.
func (logger *Logger) WithFields(fields Fields) *Entry {
entry := logger.newEntry()
defer logger.releaseEntry(entry)
return entry.WithFields(fields)
}
// Add an error as single field to the log entry. All it does is call
// `WithError` for the given `error`.
func (logger *Logger) WithError(err error) *Entry {
entry := logger.newEntry()
defer logger.releaseEntry(entry)
return entry.WithError(err)
}
func (logger *Logger) Debugf(format string, args ...interface{}) {
if logger.level() >= DebugLevel {
entry := logger.newEntry()
entry.Debugf(format, args...)
logger.releaseEntry(entry)
}
}
func (logger *Logger) Infof(format string, args ...interface{}) {
if logger.level() >= InfoLevel {
entry := logger.newEntry()
entry.Infof(format, args...)
logger.releaseEntry(entry)
}
}
func (logger *Logger) Printf(format string, args ...interface{}) {
entry := logger.newEntry()
entry.Printf(format, args...)
logger.releaseEntry(entry)
}
func (logger *Logger) Warnf(format string, args ...interface{}) {
if logger.level() >= WarnLevel {
entry := logger.newEntry()
entry.Warnf(format, args...)
logger.releaseEntry(entry)
}
}
func (logger *Logger) Warningf(format string, args ...interface{}) {
if logger.level() >= WarnLevel {
entry := logger.newEntry()
entry.Warnf(format, args...)
logger.releaseEntry(entry)
}
}
func (logger *Logger) Errorf(format string, args ...interface{}) {
if logger.level() >= ErrorLevel {
entry := logger.newEntry()
entry.Errorf(format, args...)
logger.releaseEntry(entry)
}
}
func (logger *Logger) Fatalf(format string, args ...interface{}) {
if logger.level() >= FatalLevel {
entry := logger.newEntry()
entry.Fatalf(format, args...)
logger.releaseEntry(entry)
}
Exit(1)
}
func (logger *Logger) Panicf(format string, args ...interface{}) {
if logger.level() >= PanicLevel {
entry := logger.newEntry()
entry.Panicf(format, args...)
logger.releaseEntry(entry)
}
}
func (logger *Logger) Debug(args ...interface{}) {
if logger.level() >= DebugLevel {
entry := logger.newEntry()
entry.Debug(args...)
logger.releaseEntry(entry)
}
}
func (logger *Logger) Info(args ...interface{}) {
if logger.level() >= InfoLevel {
entry := logger.newEntry()
entry.Info(args...)
logger.releaseEntry(entry)
}
}
func (logger *Logger) Print(args ...interface{}) {
entry := logger.newEntry()
entry.Info(args...)
logger.releaseEntry(entry)
}
func (logger *Logger) Warn(args ...interface{}) {
if logger.level() >= WarnLevel {
entry := logger.newEntry()
entry.Warn(args...)
logger.releaseEntry(entry)
}
}
func (logger *Logger) Warning(args ...interface{}) {
if logger.level() >= WarnLevel {
entry := logger.newEntry()
entry.Warn(args...)
logger.releaseEntry(entry)
}
}
func (logger *Logger) Error(args ...interface{}) {
if logger.level() >= ErrorLevel {
entry := logger.newEntry()
entry.Error(args...)
logger.releaseEntry(entry)
}
}
func (logger *Logger) Fatal(args ...interface{}) {
if logger.level() >= FatalLevel {
entry := logger.newEntry()
entry.Fatal(args...)
logger.releaseEntry(entry)
}
Exit(1)
}
func (logger *Logger) Panic(args ...interface{}) {
if logger.level() >= PanicLevel {
entry := logger.newEntry()
entry.Panic(args...)
logger.releaseEntry(entry)
}
}
func (logger *Logger) Debugln(args ...interface{}) {
if logger.level() >= DebugLevel {
entry := logger.newEntry()
entry.Debugln(args...)
logger.releaseEntry(entry)
}
}
func (logger *Logger) Infoln(args ...interface{}) {
if logger.level() >= InfoLevel {
entry := logger.newEntry()
entry.Infoln(args...)
logger.releaseEntry(entry)
}
}
func (logger *Logger) Println(args ...interface{}) {
entry := logger.newEntry()
entry.Println(args...)
logger.releaseEntry(entry)
}
func (logger *Logger) Warnln(args ...interface{}) {
if logger.level() >= WarnLevel {
entry := logger.newEntry()
entry.Warnln(args...)
logger.releaseEntry(entry)
}
}
func (logger *Logger) Warningln(args ...interface{}) {
if logger.level() >= WarnLevel {
entry := logger.newEntry()
entry.Warnln(args...)
logger.releaseEntry(entry)
}
}
func (logger *Logger) Errorln(args ...interface{}) {
if logger.level() >= ErrorLevel {
entry := logger.newEntry()
entry.Errorln(args...)
logger.releaseEntry(entry)
}
}
func (logger *Logger) Fatalln(args ...interface{}) {
if logger.level() >= FatalLevel {
entry := logger.newEntry()
entry.Fatalln(args...)
logger.releaseEntry(entry)
}
Exit(1)
}
func (logger *Logger) Panicln(args ...interface{}) {
if logger.level() >= PanicLevel {
entry := logger.newEntry()
entry.Panicln(args...)
logger.releaseEntry(entry)
}
}
// When a file is opened in append mode, it's safe to write to it
// concurrently (for messages under 4k on Linux).
// In these cases the user can choose to disable the lock.
func (logger *Logger) SetNoLock() {
logger.mu.Disable()
}
func (logger *Logger) level() Level {
return Level(atomic.LoadUint32((*uint32)(&logger.Level)))
}
func (logger *Logger) SetLevel(level Level) {
atomic.StoreUint32((*uint32)(&logger.Level), uint32(level))
}

pkg/metadata/vendor/github.com/sirupsen/logrus/logrus.go generated vendored Normal file

@@ -0,0 +1,143 @@
package logrus
import (
"fmt"
"log"
"strings"
)
// Fields type, used to pass to `WithFields`.
type Fields map[string]interface{}
// Level type
type Level uint32
// Convert the Level to a string. E.g. PanicLevel becomes "panic".
func (level Level) String() string {
switch level {
case DebugLevel:
return "debug"
case InfoLevel:
return "info"
case WarnLevel:
return "warning"
case ErrorLevel:
return "error"
case FatalLevel:
return "fatal"
case PanicLevel:
return "panic"
}
return "unknown"
}
// ParseLevel takes a string level and returns the Logrus log level constant.
func ParseLevel(lvl string) (Level, error) {
switch strings.ToLower(lvl) {
case "panic":
return PanicLevel, nil
case "fatal":
return FatalLevel, nil
case "error":
return ErrorLevel, nil
case "warn", "warning":
return WarnLevel, nil
case "info":
return InfoLevel, nil
case "debug":
return DebugLevel, nil
}
var l Level
return l, fmt.Errorf("not a valid logrus Level: %q", lvl)
}
// A constant exposing all logging levels
var AllLevels = []Level{
PanicLevel,
FatalLevel,
ErrorLevel,
WarnLevel,
InfoLevel,
DebugLevel,
}
// These are the different logging levels. You can set the logging level to log
// on your instance of logger, obtained with `logrus.New()`.
const (
// PanicLevel level, highest level of severity. Logs and then calls panic with the
// message passed to Debug, Info, ...
PanicLevel Level = iota
// FatalLevel level. Logs and then calls `os.Exit(1)`. It will exit even if the
// logging level is set to Panic.
FatalLevel
// ErrorLevel level. Logs. Used for errors that should definitely be noted.
// Commonly used for hooks to send errors to an error tracking service.
ErrorLevel
// WarnLevel level. Non-critical entries that deserve eyes.
WarnLevel
// InfoLevel level. General operational entries about what's going on inside the
// application.
InfoLevel
// DebugLevel level. Usually only enabled when debugging. Very verbose logging.
DebugLevel
)
// Won't compile if StdLogger can't be realized by a log.Logger
var (
_ StdLogger = &log.Logger{}
_ StdLogger = &Entry{}
_ StdLogger = &Logger{}
)
// StdLogger is what your logrus-enabled library should take, so that it
// accepts both a stdlib logger and a logrus logger. There's no standard
// interface; this is the closest we get, unfortunately.
type StdLogger interface {
Print(...interface{})
Printf(string, ...interface{})
Println(...interface{})
Fatal(...interface{})
Fatalf(string, ...interface{})
Fatalln(...interface{})
Panic(...interface{})
Panicf(string, ...interface{})
Panicln(...interface{})
}
// The FieldLogger interface generalizes the Entry and Logger types
type FieldLogger interface {
WithField(key string, value interface{}) *Entry
WithFields(fields Fields) *Entry
WithError(err error) *Entry
Debugf(format string, args ...interface{})
Infof(format string, args ...interface{})
Printf(format string, args ...interface{})
Warnf(format string, args ...interface{})
Warningf(format string, args ...interface{})
Errorf(format string, args ...interface{})
Fatalf(format string, args ...interface{})
Panicf(format string, args ...interface{})
Debug(args ...interface{})
Info(args ...interface{})
Print(args ...interface{})
Warn(args ...interface{})
Warning(args ...interface{})
Error(args ...interface{})
Fatal(args ...interface{})
Panic(args ...interface{})
Debugln(args ...interface{})
Infoln(args ...interface{})
Println(args ...interface{})
Warnln(args ...interface{})
Warningln(args ...interface{})
Errorln(args ...interface{})
Fatalln(args ...interface{})
Panicln(args ...interface{})
}
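A hedged sketch of how `ParseLevel` and `SetLevel` might be wired together, e.g. from an environment variable (the variable name is hypothetical, and an `os` import is assumed):
```go
lvl, err := logrus.ParseLevel(os.Getenv("LOG_LEVEL"))
if err != nil {
	lvl = logrus.InfoLevel // fall back to the default on unknown input
}
logrus.SetLevel(lvl)
```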

pkg/metadata/vendor/github.com/sirupsen/logrus/terminal_bsd.go generated vendored Normal file

@@ -0,0 +1,10 @@
// +build darwin freebsd openbsd netbsd dragonfly
// +build !appengine
package logrus
import "golang.org/x/sys/unix"
const ioctlReadTermios = unix.TIOCGETA
type Termios unix.Termios

pkg/metadata/vendor/github.com/sirupsen/logrus/terminal_linux.go generated vendored Normal file

@@ -0,0 +1,14 @@
// Based on ssh/terminal:
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !appengine
package logrus
import "golang.org/x/sys/unix"
const ioctlReadTermios = unix.TCGETS
type Termios unix.Termios

pkg/metadata/vendor/github.com/sirupsen/logrus/text_formatter.go generated vendored Normal file

@@ -0,0 +1,191 @@
package logrus
import (
"bytes"
"fmt"
"io"
"os"
"sort"
"strings"
"sync"
"time"
"golang.org/x/crypto/ssh/terminal"
)
const (
nocolor = 0
red = 31
green = 32
yellow = 33
blue = 36
gray = 37
)
var (
baseTimestamp time.Time
)
func init() {
baseTimestamp = time.Now()
}
// TextFormatter formats logs into text
type TextFormatter struct {
// Set to true to bypass checking for a TTY before outputting colors.
ForceColors bool
// Force disabling colors.
DisableColors bool
// Disable timestamp logging. Useful when output is redirected to a logging
// system that already adds timestamps.
DisableTimestamp bool
// Enable logging the full timestamp when a TTY is attached instead of just
// the time passed since beginning of execution.
FullTimestamp bool
// TimestampFormat to use for display when a full timestamp is printed
TimestampFormat string
// The fields are sorted by default for a consistent output. For applications
// that log extremely frequently and don't use the JSON formatter this may not
// be desired.
DisableSorting bool
// QuoteEmptyFields will wrap empty fields in quotes if true
QuoteEmptyFields bool
// Whether the logger's out is to a terminal
isTerminal bool
sync.Once
}
func (f *TextFormatter) init(entry *Entry) {
if entry.Logger != nil {
f.isTerminal = f.checkIfTerminal(entry.Logger.Out)
}
}
func (f *TextFormatter) checkIfTerminal(w io.Writer) bool {
switch v := w.(type) {
case *os.File:
return terminal.IsTerminal(int(v.Fd()))
default:
return false
}
}
// Format renders a single log entry
func (f *TextFormatter) Format(entry *Entry) ([]byte, error) {
var b *bytes.Buffer
keys := make([]string, 0, len(entry.Data))
for k := range entry.Data {
keys = append(keys, k)
}
if !f.DisableSorting {
sort.Strings(keys)
}
if entry.Buffer != nil {
b = entry.Buffer
} else {
b = &bytes.Buffer{}
}
prefixFieldClashes(entry.Data)
f.Do(func() { f.init(entry) })
isColored := (f.ForceColors || f.isTerminal) && !f.DisableColors
timestampFormat := f.TimestampFormat
if timestampFormat == "" {
timestampFormat = defaultTimestampFormat
}
if isColored {
f.printColored(b, entry, keys, timestampFormat)
} else {
if !f.DisableTimestamp {
f.appendKeyValue(b, "time", entry.Time.Format(timestampFormat))
}
f.appendKeyValue(b, "level", entry.Level.String())
if entry.Message != "" {
f.appendKeyValue(b, "msg", entry.Message)
}
for _, key := range keys {
f.appendKeyValue(b, key, entry.Data[key])
}
}
b.WriteByte('\n')
return b.Bytes(), nil
}
func (f *TextFormatter) printColored(b *bytes.Buffer, entry *Entry, keys []string, timestampFormat string) {
var levelColor int
switch entry.Level {
case DebugLevel:
levelColor = gray
case WarnLevel:
levelColor = yellow
case ErrorLevel, FatalLevel, PanicLevel:
levelColor = red
default:
levelColor = blue
}
levelText := strings.ToUpper(entry.Level.String())[0:4]
if f.DisableTimestamp {
fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m %-44s ", levelColor, levelText, entry.Message)
} else if !f.FullTimestamp {
fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%04d] %-44s ", levelColor, levelText, int(entry.Time.Sub(baseTimestamp)/time.Second), entry.Message)
} else {
fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%s] %-44s ", levelColor, levelText, entry.Time.Format(timestampFormat), entry.Message)
}
for _, k := range keys {
v := entry.Data[k]
fmt.Fprintf(b, " \x1b[%dm%s\x1b[0m=", levelColor, k)
f.appendValue(b, v)
}
}
func (f *TextFormatter) needsQuoting(text string) bool {
if f.QuoteEmptyFields && len(text) == 0 {
return true
}
for _, ch := range text {
if !((ch >= 'a' && ch <= 'z') ||
(ch >= 'A' && ch <= 'Z') ||
(ch >= '0' && ch <= '9') ||
ch == '-' || ch == '.' || ch == '_' || ch == '/' || ch == '@' || ch == '^' || ch == '+') {
return true
}
}
return false
}
func (f *TextFormatter) appendKeyValue(b *bytes.Buffer, key string, value interface{}) {
if b.Len() > 0 {
b.WriteByte(' ')
}
b.WriteString(key)
b.WriteByte('=')
f.appendValue(b, value)
}
func (f *TextFormatter) appendValue(b *bytes.Buffer, value interface{}) {
stringVal, ok := value.(string)
if !ok {
stringVal = fmt.Sprint(value)
}
if !f.needsQuoting(stringVal) {
b.WriteString(stringVal)
} else {
b.WriteString(fmt.Sprintf("%q", stringVal))
}
}
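A brief configuration sketch for this formatter (the field values are illustrative, and `logrus` plus `time` imports are assumed):
```go
logger := logrus.New()
logger.Formatter = &logrus.TextFormatter{
	FullTimestamp:   true,         // print the full timestamp, not seconds since start
	TimestampFormat: time.RFC3339, // format used when FullTimestamp is on
}
```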

pkg/metadata/vendor/github.com/sirupsen/logrus/writer.go generated vendored Normal file

@@ -0,0 +1,62 @@
package logrus
import (
"bufio"
"io"
"runtime"
)
func (logger *Logger) Writer() *io.PipeWriter {
return logger.WriterLevel(InfoLevel)
}
func (logger *Logger) WriterLevel(level Level) *io.PipeWriter {
return NewEntry(logger).WriterLevel(level)
}
func (entry *Entry) Writer() *io.PipeWriter {
return entry.WriterLevel(InfoLevel)
}
func (entry *Entry) WriterLevel(level Level) *io.PipeWriter {
reader, writer := io.Pipe()
var printFunc func(args ...interface{})
switch level {
case DebugLevel:
printFunc = entry.Debug
case InfoLevel:
printFunc = entry.Info
case WarnLevel:
printFunc = entry.Warn
case ErrorLevel:
printFunc = entry.Error
case FatalLevel:
printFunc = entry.Fatal
case PanicLevel:
printFunc = entry.Panic
default:
printFunc = entry.Print
}
go entry.writerScanner(reader, printFunc)
runtime.SetFinalizer(writer, writerFinalizer)
return writer
}
func (entry *Entry) writerScanner(reader *io.PipeReader, printFunc func(args ...interface{})) {
scanner := bufio.NewScanner(reader)
for scanner.Scan() {
printFunc(scanner.Text())
}
if err := scanner.Err(); err != nil {
entry.Errorf("Error while reading from Writer: %s", err)
}
reader.Close()
}
func writerFinalizer(writer *io.PipeWriter) {
writer.Close()
}
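One way `WriterLevel` might be used (a sketch; the command name is hypothetical and an `os/exec` import is assumed): routing a subprocess's stderr through the logger at warn level.
```go
w := logger.WriterLevel(logrus.WarnLevel)
defer w.Close()

cmd := exec.Command("some-command")
cmd.Stderr = w // each stderr line becomes a warn-level log entry
if err := cmd.Run(); err != nil {
	logger.WithError(err).Error("command failed")
}
```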

pkg/metadata/vendor/golang.org/x/crypto/LICENSE generated vendored Normal file

@@ -0,0 +1,27 @@
Copyright (c) 2009 The Go Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

pkg/metadata/vendor/golang.org/x/crypto/PATENTS generated vendored Normal file

@@ -0,0 +1,22 @@
Additional IP Rights Grant (Patents)
"This implementation" means the copyrightable works distributed by
Google as part of the Go project.
Google hereby grants to You a perpetual, worldwide, non-exclusive,
no-charge, royalty-free, irrevocable (except as stated in this section)
patent license to make, have made, use, offer to sell, sell, import,
transfer and otherwise run, modify and propagate the contents of this
implementation of Go, where such license applies only to those patent
claims, both currently owned or controlled by Google and acquired in
the future, licensable by Google that are necessarily infringed by this
implementation of Go. This grant does not include claims that would be
infringed only as a consequence of further modification of this
implementation. If you or your agent or exclusive licensee institute or
order or agree to the institution of patent litigation against any
entity (including a cross-claim or counterclaim in a lawsuit) alleging
that this implementation of Go or any code incorporated within this
implementation of Go constitutes direct or contributory patent
infringement, or inducement of patent infringement, then any patent
rights granted to you under this License for this implementation of Go
shall terminate as of the date such litigation is filed.

pkg/metadata/vendor/golang.org/x/crypto/README.md generated vendored Normal file

@@ -0,0 +1,21 @@
# Go Cryptography
This repository holds supplementary Go cryptography libraries.
## Download/Install
The easiest way to install is to run `go get -u golang.org/x/crypto/...`. You
can also manually git clone the repository to `$GOPATH/src/golang.org/x/crypto`.
## Report Issues / Send Patches
This repository uses Gerrit for code changes. To learn how to submit changes to
this repository, see https://golang.org/doc/contribute.html.
The main issue tracker for the crypto repository is located at
https://github.com/golang/go/issues. Prefix your issue with "x/crypto:" in the
subject line, so it is easy to find.
Note that contributions to the cryptography package receive additional scrutiny
due to their sensitive nature. Patches may take longer than normal to receive
feedback.
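
For context, the vendored code below is normally consumed through the package's public API. A minimal X25519 sketch against this vintage of golang.org/x/crypto/curve25519, which used *[32]byte arguments (later releases added slice-based helpers); the key material here is generated on the fly purely for illustration:

package main

import (
	"crypto/rand"
	"fmt"

	"golang.org/x/crypto/curve25519"
)

func main() {
	var priv, pub [32]byte
	if _, err := rand.Read(priv[:]); err != nil {
		panic(err)
	}
	// pub = priv * basepoint; scalar clamping happens inside the package.
	curve25519.ScalarBaseMult(&pub, &priv)
	fmt.Printf("public key: %x\n", pub[:])
}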

@@ -0,0 +1,8 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// This code was translated into a form compatible with 6a from the public
// domain sources in SUPERCOP: https://bench.cr.yp.to/supercop.html
#define REDMASK51 0x0007FFFFFFFFFFFF

@@ -0,0 +1,20 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// This code was translated into a form compatible with 6a from the public
// domain sources in SUPERCOP: https://bench.cr.yp.to/supercop.html
// +build amd64,!gccgo,!appengine
// These constants cannot be encoded in non-MOVQ immediates.
// We access them directly from memory instead.
DATA ·_121666_213(SB)/8, $996687872
GLOBL ·_121666_213(SB), 8, $8
DATA ·_2P0(SB)/8, $0xFFFFFFFFFFFDA
GLOBL ·_2P0(SB), 8, $8
DATA ·_2P1234(SB)/8, $0xFFFFFFFFFFFFE
GLOBL ·_2P1234(SB), 8, $8

@@ -0,0 +1,65 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build amd64,!gccgo,!appengine
// func cswap(inout *[4][5]uint64, v uint64)
TEXT ·cswap(SB),7,$0
MOVQ inout+0(FP),DI
MOVQ v+8(FP),SI
SUBQ $1, SI
NOTQ SI
MOVQ SI, X15
PSHUFD $0x44, X15, X15
MOVOU 0(DI), X0
MOVOU 16(DI), X2
MOVOU 32(DI), X4
MOVOU 48(DI), X6
MOVOU 64(DI), X8
MOVOU 80(DI), X1
MOVOU 96(DI), X3
MOVOU 112(DI), X5
MOVOU 128(DI), X7
MOVOU 144(DI), X9
MOVO X1, X10
MOVO X3, X11
MOVO X5, X12
MOVO X7, X13
MOVO X9, X14
PXOR X0, X10
PXOR X2, X11
PXOR X4, X12
PXOR X6, X13
PXOR X8, X14
PAND X15, X10
PAND X15, X11
PAND X15, X12
PAND X15, X13
PAND X15, X14
PXOR X10, X0
PXOR X10, X1
PXOR X11, X2
PXOR X11, X3
PXOR X12, X4
PXOR X12, X5
PXOR X13, X6
PXOR X13, X7
PXOR X14, X8
PXOR X14, X9
MOVOU X0, 0(DI)
MOVOU X2, 16(DI)
MOVOU X4, 32(DI)
MOVOU X6, 48(DI)
MOVOU X8, 64(DI)
MOVOU X1, 80(DI)
MOVOU X3, 96(DI)
MOVOU X5, 112(DI)
MOVOU X7, 128(DI)
MOVOU X9, 144(DI)
RET

@@ -0,0 +1,834 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// We have an implementation in amd64 assembly so this code is only run on
// non-amd64 platforms. The amd64 assembly does not support gccgo.
// +build !amd64 gccgo appengine
package curve25519
import (
"encoding/binary"
)
// This code is a port of the public domain, "ref10" implementation of
// curve25519 from SUPERCOP 20130419 by D. J. Bernstein.
// fieldElement represents an element of the field GF(2^255 - 19). An element
// t, entries t[0]...t[9], represents the integer t[0]+2^26 t[1]+2^51 t[2]+2^77
// t[3]+2^102 t[4]+...+2^230 t[9]. Bounds on each t[i] vary depending on
// context.
type fieldElement [10]int32
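// For example, t = [1,0,0,0,0,0,0,0,0,0] represents 1 and
// t = [0,1,0,0,0,0,0,0,0,0] represents 2^26; the limb exponents step by
// 26,25,26,25,..., so limbs alternately carry 26 and 25 bits of the value.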
func feZero(fe *fieldElement) {
for i := range fe {
fe[i] = 0
}
}
func feOne(fe *fieldElement) {
feZero(fe)
fe[0] = 1
}
func feAdd(dst, a, b *fieldElement) {
for i := range dst {
dst[i] = a[i] + b[i]
}
}
func feSub(dst, a, b *fieldElement) {
for i := range dst {
dst[i] = a[i] - b[i]
}
}
func feCopy(dst, src *fieldElement) {
for i := range dst {
dst[i] = src[i]
}
}
// feCSwap replaces (f,g) with (g,f) if b == 1; replaces (f,g) with (f,g) if b == 0.
//
// Preconditions: b in {0,1}.
func feCSwap(f, g *fieldElement, b int32) {
b = -b
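// b was 0 or 1; negating it yields 0x00000000 or 0xffffffff, a
// branch-free mask that selects whether the limbs are exchanged.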
for i := range f {
t := b & (f[i] ^ g[i])
f[i] ^= t
g[i] ^= t
}
}
// load3 reads a 24-bit, little-endian value from in.
func load3(in []byte) int64 {
var r int64
r = int64(in[0])
r |= int64(in[1]) << 8
r |= int64(in[2]) << 16
return r
}
// load4 reads a 32-bit, little-endian value from in.
func load4(in []byte) int64 {
return int64(binary.LittleEndian.Uint32(in))
}
func feFromBytes(dst *fieldElement, src *[32]byte) {
h0 := load4(src[:])
h1 := load3(src[4:]) << 6
h2 := load3(src[7:]) << 5
h3 := load3(src[10:]) << 3
h4 := load3(src[13:]) << 2
h5 := load4(src[16:])
h6 := load3(src[20:]) << 7
h7 := load3(src[23:]) << 5
h8 := load3(src[26:]) << 4
h9 := load3(src[29:]) << 2
var carry [10]int64
carry[9] = (h9 + 1<<24) >> 25
h0 += carry[9] * 19
h9 -= carry[9] << 25
carry[1] = (h1 + 1<<24) >> 25
h2 += carry[1]
h1 -= carry[1] << 25
carry[3] = (h3 + 1<<24) >> 25
h4 += carry[3]
h3 -= carry[3] << 25
carry[5] = (h5 + 1<<24) >> 25
h6 += carry[5]
h5 -= carry[5] << 25
carry[7] = (h7 + 1<<24) >> 25
h8 += carry[7]
h7 -= carry[7] << 25
carry[0] = (h0 + 1<<25) >> 26
h1 += carry[0]
h0 -= carry[0] << 26
carry[2] = (h2 + 1<<25) >> 26
h3 += carry[2]
h2 -= carry[2] << 26
carry[4] = (h4 + 1<<25) >> 26
h5 += carry[4]
h4 -= carry[4] << 26
carry[6] = (h6 + 1<<25) >> 26
h7 += carry[6]
h6 -= carry[6] << 26
carry[8] = (h8 + 1<<25) >> 26
h9 += carry[8]
h8 -= carry[8] << 26
dst[0] = int32(h0)
dst[1] = int32(h1)
dst[2] = int32(h2)
dst[3] = int32(h3)
dst[4] = int32(h4)
dst[5] = int32(h5)
dst[6] = int32(h6)
dst[7] = int32(h7)
dst[8] = int32(h8)
dst[9] = int32(h9)
}
// feToBytes marshals h to s.
// Preconditions:
// |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc.
//
// Write p=2^255-19; q=floor(h/p).
// Basic claim: q = floor(2^(-255)(h + 19 2^(-25)h9 + 2^(-1))).
//
// Proof:
// Have |h|<=p so |q|<=1 so |19^2 2^(-255) q|<1/4.
// Also have |h-2^230 h9|<2^230 so |19 2^(-255)(h-2^230 h9)|<1/4.
//
// Write y=2^(-1)-19^2 2^(-255)q-19 2^(-255)(h-2^230 h9).
// Then 0<y<1.
//
// Write r=h-pq.
// Have 0<=r<=p-1=2^255-20.
// Thus 0<=r+19(2^-255)r<r+19(2^-255)2^255<=2^255-1.
//
// Write x=r+19(2^-255)r+y.
// Then 0<x<2^255 so floor(2^(-255)x) = 0 so floor(q+2^(-255)x) = q.
//
// Have q+2^(-255)x = 2^(-255)(h + 19 2^(-25) h9 + 2^(-1))
// so floor(2^(-255)(h + 19 2^(-25) h9 + 2^(-1))) = q.
func feToBytes(s *[32]byte, h *fieldElement) {
var carry [10]int32
q := (19*h[9] + (1 << 24)) >> 25
q = (h[0] + q) >> 26
q = (h[1] + q) >> 25
q = (h[2] + q) >> 26
q = (h[3] + q) >> 25
q = (h[4] + q) >> 26
q = (h[5] + q) >> 25
q = (h[6] + q) >> 26
q = (h[7] + q) >> 25
q = (h[8] + q) >> 26
q = (h[9] + q) >> 25
// Goal: Output h-(2^255-19)q, which is between 0 and 2^255-20.
h[0] += 19 * q
// Goal: Output h-2^255 q, which is between 0 and 2^255-20.
carry[0] = h[0] >> 26
h[1] += carry[0]
h[0] -= carry[0] << 26
carry[1] = h[1] >> 25
h[2] += carry[1]
h[1] -= carry[1] << 25
carry[2] = h[2] >> 26
h[3] += carry[2]
h[2] -= carry[2] << 26
carry[3] = h[3] >> 25
h[4] += carry[3]
h[3] -= carry[3] << 25
carry[4] = h[4] >> 26
h[5] += carry[4]
h[4] -= carry[4] << 26
carry[5] = h[5] >> 25
h[6] += carry[5]
h[5] -= carry[5] << 25
carry[6] = h[6] >> 26
h[7] += carry[6]
h[6] -= carry[6] << 26
carry[7] = h[7] >> 25
h[8] += carry[7]
h[7] -= carry[7] << 25
carry[8] = h[8] >> 26
h[9] += carry[8]
h[8] -= carry[8] << 26
carry[9] = h[9] >> 25
h[9] -= carry[9] << 25
// h10 = carry9
// Goal: Output h[0]+...+2^255 h10-2^255 q, which is between 0 and 2^255-20.
// Have h[0]+...+2^230 h[9] between 0 and 2^255-1;
// evidently 2^255 h10-2^255 q = 0.
// Goal: Output h[0]+...+2^230 h[9].
s[0] = byte(h[0] >> 0)
s[1] = byte(h[0] >> 8)
s[2] = byte(h[0] >> 16)
s[3] = byte((h[0] >> 24) | (h[1] << 2))
s[4] = byte(h[1] >> 6)
s[5] = byte(h[1] >> 14)
s[6] = byte((h[1] >> 22) | (h[2] << 3))
s[7] = byte(h[2] >> 5)
s[8] = byte(h[2] >> 13)
s[9] = byte((h[2] >> 21) | (h[3] << 5))
s[10] = byte(h[3] >> 3)
s[11] = byte(h[3] >> 11)
s[12] = byte((h[3] >> 19) | (h[4] << 6))
s[13] = byte(h[4] >> 2)
s[14] = byte(h[4] >> 10)
s[15] = byte(h[4] >> 18)
s[16] = byte(h[5] >> 0)
s[17] = byte(h[5] >> 8)
s[18] = byte(h[5] >> 16)
s[19] = byte((h[5] >> 24) | (h[6] << 1))
s[20] = byte(h[6] >> 7)
s[21] = byte(h[6] >> 15)
s[22] = byte((h[6] >> 23) | (h[7] << 3))
s[23] = byte(h[7] >> 5)
s[24] = byte(h[7] >> 13)
s[25] = byte((h[7] >> 21) | (h[8] << 4))
s[26] = byte(h[8] >> 4)
s[27] = byte(h[8] >> 12)
s[28] = byte((h[8] >> 20) | (h[9] << 6))
s[29] = byte(h[9] >> 2)
s[30] = byte(h[9] >> 10)
s[31] = byte(h[9] >> 18)
}
// feMul calculates h = f * g
// Can overlap h with f or g.
//
// Preconditions:
// |f| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc.
// |g| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc.
//
// Postconditions:
// |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc.
//
// Notes on implementation strategy:
//
// Using schoolbook multiplication.
// Karatsuba would save a little in some cost models.
//
// Most multiplications by 2 and 19 are 32-bit precomputations;
// cheaper than 64-bit postcomputations.
//
// There is one remaining multiplication by 19 in the carry chain;
// one *19 precomputation can be merged into this,
// but the resulting data flow is considerably less clean.
//
// There are 12 carries below.
// 10 of them are 2-way parallelizable and vectorizable.
// Can get away with 11 carries, but then data flow is much deeper.
//
// With tighter constraints on inputs can squeeze carries into int32.
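// Concretely, the reduction uses 2^255 ≡ 19 (mod 2^255-19): partial
// products whose weight reaches 2^255 are folded back into the low limbs
// multiplied by 19, which is where the g*_19 precomputations below come
// from; the f*_2 doublings compensate for the alternating 26/25-bit limbs.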
func feMul(h, f, g *fieldElement) {
f0 := f[0]
f1 := f[1]
f2 := f[2]
f3 := f[3]
f4 := f[4]
f5 := f[5]
f6 := f[6]
f7 := f[7]
f8 := f[8]
f9 := f[9]
g0 := g[0]
g1 := g[1]
g2 := g[2]
g3 := g[3]
g4 := g[4]
g5 := g[5]
g6 := g[6]
g7 := g[7]
g8 := g[8]
g9 := g[9]
g1_19 := 19 * g1 // 1.4*2^29
g2_19 := 19 * g2 // 1.4*2^30; still ok
g3_19 := 19 * g3
g4_19 := 19 * g4
g5_19 := 19 * g5
g6_19 := 19 * g6
g7_19 := 19 * g7
g8_19 := 19 * g8
g9_19 := 19 * g9
f1_2 := 2 * f1
f3_2 := 2 * f3
f5_2 := 2 * f5
f7_2 := 2 * f7
f9_2 := 2 * f9
f0g0 := int64(f0) * int64(g0)
f0g1 := int64(f0) * int64(g1)
f0g2 := int64(f0) * int64(g2)
f0g3 := int64(f0) * int64(g3)
f0g4 := int64(f0) * int64(g4)
f0g5 := int64(f0) * int64(g5)
f0g6 := int64(f0) * int64(g6)
f0g7 := int64(f0) * int64(g7)
f0g8 := int64(f0) * int64(g8)
f0g9 := int64(f0) * int64(g9)
f1g0 := int64(f1) * int64(g0)
f1g1_2 := int64(f1_2) * int64(g1)
f1g2 := int64(f1) * int64(g2)
f1g3_2 := int64(f1_2) * int64(g3)
f1g4 := int64(f1) * int64(g4)
f1g5_2 := int64(f1_2) * int64(g5)
f1g6 := int64(f1) * int64(g6)
f1g7_2 := int64(f1_2) * int64(g7)
f1g8 := int64(f1) * int64(g8)
f1g9_38 := int64(f1_2) * int64(g9_19)
f2g0 := int64(f2) * int64(g0)
f2g1 := int64(f2) * int64(g1)
f2g2 := int64(f2) * int64(g2)
f2g3 := int64(f2) * int64(g3)
f2g4 := int64(f2) * int64(g4)
f2g5 := int64(f2) * int64(g5)
f2g6 := int64(f2) * int64(g6)
f2g7 := int64(f2) * int64(g7)
f2g8_19 := int64(f2) * int64(g8_19)
f2g9_19 := int64(f2) * int64(g9_19)
f3g0 := int64(f3) * int64(g0)
f3g1_2 := int64(f3_2) * int64(g1)
f3g2 := int64(f3) * int64(g2)
f3g3_2 := int64(f3_2) * int64(g3)
f3g4 := int64(f3) * int64(g4)
f3g5_2 := int64(f3_2) * int64(g5)
f3g6 := int64(f3) * int64(g6)
f3g7_38 := int64(f3_2) * int64(g7_19)
f3g8_19 := int64(f3) * int64(g8_19)
f3g9_38 := int64(f3_2) * int64(g9_19)
f4g0 := int64(f4) * int64(g0)
f4g1 := int64(f4) * int64(g1)
f4g2 := int64(f4) * int64(g2)
f4g3 := int64(f4) * int64(g3)
f4g4 := int64(f4) * int64(g4)
f4g5 := int64(f4) * int64(g5)
f4g6_19 := int64(f4) * int64(g6_19)
f4g7_19 := int64(f4) * int64(g7_19)
f4g8_19 := int64(f4) * int64(g8_19)
f4g9_19 := int64(f4) * int64(g9_19)
f5g0 := int64(f5) * int64(g0)
f5g1_2 := int64(f5_2) * int64(g1)
f5g2 := int64(f5) * int64(g2)
f5g3_2 := int64(f5_2) * int64(g3)
f5g4 := int64(f5) * int64(g4)
f5g5_38 := int64(f5_2) * int64(g5_19)
f5g6_19 := int64(f5) * int64(g6_19)
f5g7_38 := int64(f5_2) * int64(g7_19)
f5g8_19 := int64(f5) * int64(g8_19)
f5g9_38 := int64(f5_2) * int64(g9_19)
f6g0 := int64(f6) * int64(g0)
f6g1 := int64(f6) * int64(g1)
f6g2 := int64(f6) * int64(g2)
f6g3 := int64(f6) * int64(g3)
f6g4_19 := int64(f6) * int64(g4_19)
f6g5_19 := int64(f6) * int64(g5_19)
f6g6_19 := int64(f6) * int64(g6_19)
f6g7_19 := int64(f6) * int64(g7_19)
f6g8_19 := int64(f6) * int64(g8_19)
f6g9_19 := int64(f6) * int64(g9_19)
f7g0 := int64(f7) * int64(g0)
f7g1_2 := int64(f7_2) * int64(g1)
f7g2 := int64(f7) * int64(g2)
f7g3_38 := int64(f7_2) * int64(g3_19)
f7g4_19 := int64(f7) * int64(g4_19)
f7g5_38 := int64(f7_2) * int64(g5_19)
f7g6_19 := int64(f7) * int64(g6_19)
f7g7_38 := int64(f7_2) * int64(g7_19)
f7g8_19 := int64(f7) * int64(g8_19)
f7g9_38 := int64(f7_2) * int64(g9_19)
f8g0 := int64(f8) * int64(g0)
f8g1 := int64(f8) * int64(g1)
f8g2_19 := int64(f8) * int64(g2_19)
f8g3_19 := int64(f8) * int64(g3_19)
f8g4_19 := int64(f8) * int64(g4_19)
f8g5_19 := int64(f8) * int64(g5_19)
f8g6_19 := int64(f8) * int64(g6_19)
f8g7_19 := int64(f8) * int64(g7_19)
f8g8_19 := int64(f8) * int64(g8_19)
f8g9_19 := int64(f8) * int64(g9_19)
f9g0 := int64(f9) * int64(g0)
f9g1_38 := int64(f9_2) * int64(g1_19)
f9g2_19 := int64(f9) * int64(g2_19)
f9g3_38 := int64(f9_2) * int64(g3_19)
f9g4_19 := int64(f9) * int64(g4_19)
f9g5_38 := int64(f9_2) * int64(g5_19)
f9g6_19 := int64(f9) * int64(g6_19)
f9g7_38 := int64(f9_2) * int64(g7_19)
f9g8_19 := int64(f9) * int64(g8_19)
f9g9_38 := int64(f9_2) * int64(g9_19)
h0 := f0g0 + f1g9_38 + f2g8_19 + f3g7_38 + f4g6_19 + f5g5_38 + f6g4_19 + f7g3_38 + f8g2_19 + f9g1_38
h1 := f0g1 + f1g0 + f2g9_19 + f3g8_19 + f4g7_19 + f5g6_19 + f6g5_19 + f7g4_19 + f8g3_19 + f9g2_19
h2 := f0g2 + f1g1_2 + f2g0 + f3g9_38 + f4g8_19 + f5g7_38 + f6g6_19 + f7g5_38 + f8g4_19 + f9g3_38
h3 := f0g3 + f1g2 + f2g1 + f3g0 + f4g9_19 + f5g8_19 + f6g7_19 + f7g6_19 + f8g5_19 + f9g4_19
h4 := f0g4 + f1g3_2 + f2g2 + f3g1_2 + f4g0 + f5g9_38 + f6g8_19 + f7g7_38 + f8g6_19 + f9g5_38
h5 := f0g5 + f1g4 + f2g3 + f3g2 + f4g1 + f5g0 + f6g9_19 + f7g8_19 + f8g7_19 + f9g6_19
h6 := f0g6 + f1g5_2 + f2g4 + f3g3_2 + f4g2 + f5g1_2 + f6g0 + f7g9_38 + f8g8_19 + f9g7_38
h7 := f0g7 + f1g6 + f2g5 + f3g4 + f4g3 + f5g2 + f6g1 + f7g0 + f8g9_19 + f9g8_19
h8 := f0g8 + f1g7_2 + f2g6 + f3g5_2 + f4g4 + f5g3_2 + f6g2 + f7g1_2 + f8g0 + f9g9_38
h9 := f0g9 + f1g8 + f2g7 + f3g6 + f4g5 + f5g4 + f6g3 + f7g2 + f8g1 + f9g0
var carry [10]int64
// |h0| <= (1.1*1.1*2^52*(1+19+19+19+19)+1.1*1.1*2^50*(38+38+38+38+38))
// i.e. |h0| <= 1.2*2^59; narrower ranges for h2, h4, h6, h8
// |h1| <= (1.1*1.1*2^51*(1+1+19+19+19+19+19+19+19+19))
// i.e. |h1| <= 1.5*2^58; narrower ranges for h3, h5, h7, h9
carry[0] = (h0 + (1 << 25)) >> 26
h1 += carry[0]
h0 -= carry[0] << 26
carry[4] = (h4 + (1 << 25)) >> 26
h5 += carry[4]
h4 -= carry[4] << 26
// |h0| <= 2^25
// |h4| <= 2^25
// |h1| <= 1.51*2^58
// |h5| <= 1.51*2^58
carry[1] = (h1 + (1 << 24)) >> 25
h2 += carry[1]
h1 -= carry[1] << 25
carry[5] = (h5 + (1 << 24)) >> 25
h6 += carry[5]
h5 -= carry[5] << 25
// |h1| <= 2^24; from now on fits into int32
// |h5| <= 2^24; from now on fits into int32
// |h2| <= 1.21*2^59
// |h6| <= 1.21*2^59
carry[2] = (h2 + (1 << 25)) >> 26
h3 += carry[2]
h2 -= carry[2] << 26
carry[6] = (h6 + (1 << 25)) >> 26
h7 += carry[6]
h6 -= carry[6] << 26
// |h2| <= 2^25; from now on fits into int32 unchanged
// |h6| <= 2^25; from now on fits into int32 unchanged
// |h3| <= 1.51*2^58
// |h7| <= 1.51*2^58
carry[3] = (h3 + (1 << 24)) >> 25
h4 += carry[3]
h3 -= carry[3] << 25
carry[7] = (h7 + (1 << 24)) >> 25
h8 += carry[7]
h7 -= carry[7] << 25
// |h3| <= 2^24; from now on fits into int32 unchanged
// |h7| <= 2^24; from now on fits into int32 unchanged
// |h4| <= 1.52*2^33
// |h8| <= 1.52*2^33
carry[4] = (h4 + (1 << 25)) >> 26
h5 += carry[4]
h4 -= carry[4] << 26
carry[8] = (h8 + (1 << 25)) >> 26
h9 += carry[8]
h8 -= carry[8] << 26
// |h4| <= 2^25; from now on fits into int32 unchanged
// |h8| <= 2^25; from now on fits into int32 unchanged
// |h5| <= 1.01*2^24
// |h9| <= 1.51*2^58
carry[9] = (h9 + (1 << 24)) >> 25
h0 += carry[9] * 19
h9 -= carry[9] << 25
// |h9| <= 2^24; from now on fits into int32 unchanged
// |h0| <= 1.8*2^37
carry[0] = (h0 + (1 << 25)) >> 26
h1 += carry[0]
h0 -= carry[0] << 26
// |h0| <= 2^25; from now on fits into int32 unchanged
// |h1| <= 1.01*2^24
h[0] = int32(h0)
h[1] = int32(h1)
h[2] = int32(h2)
h[3] = int32(h3)
h[4] = int32(h4)
h[5] = int32(h5)
h[6] = int32(h6)
h[7] = int32(h7)
h[8] = int32(h8)
h[9] = int32(h9)
}
// feSquare calculates h = f*f. Can overlap h with f.
//
// Preconditions:
// |f| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc.
//
// Postconditions:
// |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc.
func feSquare(h, f *fieldElement) {
f0 := f[0]
f1 := f[1]
f2 := f[2]
f3 := f[3]
f4 := f[4]
f5 := f[5]
f6 := f[6]
f7 := f[7]
f8 := f[8]
f9 := f[9]
f0_2 := 2 * f0
f1_2 := 2 * f1
f2_2 := 2 * f2
f3_2 := 2 * f3
f4_2 := 2 * f4
f5_2 := 2 * f5
f6_2 := 2 * f6
f7_2 := 2 * f7
f5_38 := 38 * f5 // 1.31*2^30
f6_19 := 19 * f6 // 1.31*2^30
f7_38 := 38 * f7 // 1.31*2^30
f8_19 := 19 * f8 // 1.31*2^30
f9_38 := 38 * f9 // 1.31*2^30
f0f0 := int64(f0) * int64(f0)
f0f1_2 := int64(f0_2) * int64(f1)
f0f2_2 := int64(f0_2) * int64(f2)
f0f3_2 := int64(f0_2) * int64(f3)
f0f4_2 := int64(f0_2) * int64(f4)
f0f5_2 := int64(f0_2) * int64(f5)
f0f6_2 := int64(f0_2) * int64(f6)
f0f7_2 := int64(f0_2) * int64(f7)
f0f8_2 := int64(f0_2) * int64(f8)
f0f9_2 := int64(f0_2) * int64(f9)
f1f1_2 := int64(f1_2) * int64(f1)
f1f2_2 := int64(f1_2) * int64(f2)
f1f3_4 := int64(f1_2) * int64(f3_2)
f1f4_2 := int64(f1_2) * int64(f4)
f1f5_4 := int64(f1_2) * int64(f5_2)
f1f6_2 := int64(f1_2) * int64(f6)
f1f7_4 := int64(f1_2) * int64(f7_2)
f1f8_2 := int64(f1_2) * int64(f8)
f1f9_76 := int64(f1_2) * int64(f9_38)
f2f2 := int64(f2) * int64(f2)
f2f3_2 := int64(f2_2) * int64(f3)
f2f4_2 := int64(f2_2) * int64(f4)
f2f5_2 := int64(f2_2) * int64(f5)
f2f6_2 := int64(f2_2) * int64(f6)
f2f7_2 := int64(f2_2) * int64(f7)
f2f8_38 := int64(f2_2) * int64(f8_19)
f2f9_38 := int64(f2) * int64(f9_38)
f3f3_2 := int64(f3_2) * int64(f3)
f3f4_2 := int64(f3_2) * int64(f4)
f3f5_4 := int64(f3_2) * int64(f5_2)
f3f6_2 := int64(f3_2) * int64(f6)
f3f7_76 := int64(f3_2) * int64(f7_38)
f3f8_38 := int64(f3_2) * int64(f8_19)
f3f9_76 := int64(f3_2) * int64(f9_38)
f4f4 := int64(f4) * int64(f4)
f4f5_2 := int64(f4_2) * int64(f5)
f4f6_38 := int64(f4_2) * int64(f6_19)
f4f7_38 := int64(f4) * int64(f7_38)
f4f8_38 := int64(f4_2) * int64(f8_19)
f4f9_38 := int64(f4) * int64(f9_38)
f5f5_38 := int64(f5) * int64(f5_38)
f5f6_38 := int64(f5_2) * int64(f6_19)
f5f7_76 := int64(f5_2) * int64(f7_38)
f5f8_38 := int64(f5_2) * int64(f8_19)
f5f9_76 := int64(f5_2) * int64(f9_38)
f6f6_19 := int64(f6) * int64(f6_19)
f6f7_38 := int64(f6) * int64(f7_38)
f6f8_38 := int64(f6_2) * int64(f8_19)
f6f9_38 := int64(f6) * int64(f9_38)
f7f7_38 := int64(f7) * int64(f7_38)
f7f8_38 := int64(f7_2) * int64(f8_19)
f7f9_76 := int64(f7_2) * int64(f9_38)
f8f8_19 := int64(f8) * int64(f8_19)
f8f9_38 := int64(f8) * int64(f9_38)
f9f9_38 := int64(f9) * int64(f9_38)
h0 := f0f0 + f1f9_76 + f2f8_38 + f3f7_76 + f4f6_38 + f5f5_38
h1 := f0f1_2 + f2f9_38 + f3f8_38 + f4f7_38 + f5f6_38
h2 := f0f2_2 + f1f1_2 + f3f9_76 + f4f8_38 + f5f7_76 + f6f6_19
h3 := f0f3_2 + f1f2_2 + f4f9_38 + f5f8_38 + f6f7_38
h4 := f0f4_2 + f1f3_4 + f2f2 + f5f9_76 + f6f8_38 + f7f7_38
h5 := f0f5_2 + f1f4_2 + f2f3_2 + f6f9_38 + f7f8_38
h6 := f0f6_2 + f1f5_4 + f2f4_2 + f3f3_2 + f7f9_76 + f8f8_19
h7 := f0f7_2 + f1f6_2 + f2f5_2 + f3f4_2 + f8f9_38
h8 := f0f8_2 + f1f7_4 + f2f6_2 + f3f5_4 + f4f4 + f9f9_38
h9 := f0f9_2 + f1f8_2 + f2f7_2 + f3f6_2 + f4f5_2
var carry [10]int64
carry[0] = (h0 + (1 << 25)) >> 26
h1 += carry[0]
h0 -= carry[0] << 26
carry[4] = (h4 + (1 << 25)) >> 26
h5 += carry[4]
h4 -= carry[4] << 26
carry[1] = (h1 + (1 << 24)) >> 25
h2 += carry[1]
h1 -= carry[1] << 25
carry[5] = (h5 + (1 << 24)) >> 25
h6 += carry[5]
h5 -= carry[5] << 25
carry[2] = (h2 + (1 << 25)) >> 26
h3 += carry[2]
h2 -= carry[2] << 26
carry[6] = (h6 + (1 << 25)) >> 26
h7 += carry[6]
h6 -= carry[6] << 26
carry[3] = (h3 + (1 << 24)) >> 25
h4 += carry[3]
h3 -= carry[3] << 25
carry[7] = (h7 + (1 << 24)) >> 25
h8 += carry[7]
h7 -= carry[7] << 25
carry[4] = (h4 + (1 << 25)) >> 26
h5 += carry[4]
h4 -= carry[4] << 26
carry[8] = (h8 + (1 << 25)) >> 26
h9 += carry[8]
h8 -= carry[8] << 26
carry[9] = (h9 + (1 << 24)) >> 25
h0 += carry[9] * 19
h9 -= carry[9] << 25
carry[0] = (h0 + (1 << 25)) >> 26
h1 += carry[0]
h0 -= carry[0] << 26
h[0] = int32(h0)
h[1] = int32(h1)
h[2] = int32(h2)
h[3] = int32(h3)
h[4] = int32(h4)
h[5] = int32(h5)
h[6] = int32(h6)
h[7] = int32(h7)
h[8] = int32(h8)
h[9] = int32(h9)
}
// feMul121666 calculates h = f * 121666. Can overlap h with f.
//
// Preconditions:
// |f| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc.
//
// Postconditions:
// |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc.
func feMul121666(h, f *fieldElement) {
h0 := int64(f[0]) * 121666
h1 := int64(f[1]) * 121666
h2 := int64(f[2]) * 121666
h3 := int64(f[3]) * 121666
h4 := int64(f[4]) * 121666
h5 := int64(f[5]) * 121666
h6 := int64(f[6]) * 121666
h7 := int64(f[7]) * 121666
h8 := int64(f[8]) * 121666
h9 := int64(f[9]) * 121666
var carry [10]int64
carry[9] = (h9 + (1 << 24)) >> 25
h0 += carry[9] * 19
h9 -= carry[9] << 25
carry[1] = (h1 + (1 << 24)) >> 25
h2 += carry[1]
h1 -= carry[1] << 25
carry[3] = (h3 + (1 << 24)) >> 25
h4 += carry[3]
h3 -= carry[3] << 25
carry[5] = (h5 + (1 << 24)) >> 25
h6 += carry[5]
h5 -= carry[5] << 25
carry[7] = (h7 + (1 << 24)) >> 25
h8 += carry[7]
h7 -= carry[7] << 25
carry[0] = (h0 + (1 << 25)) >> 26
h1 += carry[0]
h0 -= carry[0] << 26
carry[2] = (h2 + (1 << 25)) >> 26
h3 += carry[2]
h2 -= carry[2] << 26
carry[4] = (h4 + (1 << 25)) >> 26
h5 += carry[4]
h4 -= carry[4] << 26
carry[6] = (h6 + (1 << 25)) >> 26
h7 += carry[6]
h6 -= carry[6] << 26
carry[8] = (h8 + (1 << 25)) >> 26
h9 += carry[8]
h8 -= carry[8] << 26
h[0] = int32(h0)
h[1] = int32(h1)
h[2] = int32(h2)
h[3] = int32(h3)
h[4] = int32(h4)
h[5] = int32(h5)
h[6] = int32(h6)
h[7] = int32(h7)
h[8] = int32(h8)
h[9] = int32(h9)
}
// feInvert sets out = z^-1, computed as z^(p-2) for p = 2^255-19 via a
// fixed addition chain; the degenerate `for i = 1; i < 1` loops never
// execute and are kept only to mirror the structure of the ref10 source.
func feInvert(out, z *fieldElement) {
var t0, t1, t2, t3 fieldElement
var i int
feSquare(&t0, z)
for i = 1; i < 1; i++ {
feSquare(&t0, &t0)
}
feSquare(&t1, &t0)
for i = 1; i < 2; i++ {
feSquare(&t1, &t1)
}
feMul(&t1, z, &t1)
feMul(&t0, &t0, &t1)
feSquare(&t2, &t0)
for i = 1; i < 1; i++ {
feSquare(&t2, &t2)
}
feMul(&t1, &t1, &t2)
feSquare(&t2, &t1)
for i = 1; i < 5; i++ {
feSquare(&t2, &t2)
}
feMul(&t1, &t2, &t1)
feSquare(&t2, &t1)
for i = 1; i < 10; i++ {
feSquare(&t2, &t2)
}
feMul(&t2, &t2, &t1)
feSquare(&t3, &t2)
for i = 1; i < 20; i++ {
feSquare(&t3, &t3)
}
feMul(&t2, &t3, &t2)
feSquare(&t2, &t2)
for i = 1; i < 10; i++ {
feSquare(&t2, &t2)
}
feMul(&t1, &t2, &t1)
feSquare(&t2, &t1)
for i = 1; i < 50; i++ {
feSquare(&t2, &t2)
}
feMul(&t2, &t2, &t1)
feSquare(&t3, &t2)
for i = 1; i < 100; i++ {
feSquare(&t3, &t3)
}
feMul(&t2, &t3, &t2)
feSquare(&t2, &t2)
for i = 1; i < 50; i++ {
feSquare(&t2, &t2)
}
feMul(&t1, &t2, &t1)
feSquare(&t1, &t1)
for i = 1; i < 5; i++ {
feSquare(&t1, &t1)
}
feMul(out, &t1, &t0)
}
func scalarMult(out, in, base *[32]byte) {
var e [32]byte
copy(e[:], in[:])
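// Clamp the scalar: clear the low 3 bits (forcing a multiple of the
// cofactor 8), clear bit 255, and set bit 254, as curve25519 specifies.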
e[0] &= 248
e[31] &= 127
e[31] |= 64
var x1, x2, z2, x3, z3, tmp0, tmp1 fieldElement
feFromBytes(&x1, base)
feOne(&x2)
feCopy(&x3, &x1)
feOne(&z3)
swap := int32(0)
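// Montgomery ladder over the bits of the clamped scalar, from bit 254
// down to bit 0. feCSwap keeps the ladder constant-time: the pair being
// operated on is selected by masking rather than by branching.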
for pos := 254; pos >= 0; pos-- {
b := e[pos/8] >> uint(pos&7)
b &= 1
swap ^= int32(b)
feCSwap(&x2, &x3, swap)
feCSwap(&z2, &z3, swap)
swap = int32(b)
feSub(&tmp0, &x3, &z3)
feSub(&tmp1, &x2, &z2)
feAdd(&x2, &x2, &z2)
feAdd(&z2, &x3, &z3)
feMul(&z3, &tmp0, &x2)
feMul(&z2, &z2, &tmp1)
feSquare(&tmp0, &tmp1)
feSquare(&tmp1, &x2)
feAdd(&x3, &z3, &z2)
feSub(&z2, &z3, &z2)
feMul(&x2, &tmp1, &tmp0)
feSub(&tmp1, &tmp1, &tmp0)
feSquare(&z2, &z2)
feMul121666(&z3, &tmp1)
feSquare(&x3, &x3)
feAdd(&tmp0, &tmp0, &z3)
feMul(&z3, &x1, &z2)
feMul(&z2, &tmp1, &tmp0)
}
feCSwap(&x2, &x3, swap)
feCSwap(&z2, &z3, swap)
feInvert(&z2, &z2)
feMul(&x2, &x2, &z2)
feToBytes(out, &x2)
}
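
Since scalarMult and the fe* helpers above are unexported, any check has to live in the same package. A minimal property-test sketch (a hypothetical _test.go file beside the vendored sources, not part of this diff; it needs the same build tag as the generic implementation) exercising feFromBytes/feToBytes and the commutativity of feMul:

// +build !amd64 gccgo appengine

package curve25519

import (
	"bytes"
	"testing"
)

func TestFeMulCommutes(t *testing.T) {
	var a, b [32]byte
	a[0], b[1] = 9, 7 // small, clearly in-range field elements
	var fa, fb, ab, ba fieldElement
	feFromBytes(&fa, &a)
	feFromBytes(&fb, &b)
	feMul(&ab, &fa, &fb)
	feMul(&ba, &fb, &fa)
	var sab, sba [32]byte
	feToBytes(&sab, &ab)
	feToBytes(&sba, &ba)
	if !bytes.Equal(sab[:], sba[:]) {
		t.Fatalf("feMul not commutative: %x != %x", sab, sba)
	}
}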

Some files were not shown because too many files have changed in this diff.