Compare commits


43 Commits

Author SHA1 Message Date
bootandy
2fe3943ed5 Update readme 2018-04-06 21:07:13 +01:00
bootandy
b8ad44b7f0 increment version 2018-04-06 20:49:22 +01:00
bootandy
1a34bc3198 Integration tests
Several tests for recursive dirs, hard & soft links.
Tests use the tempfile project.

Personally I find the tests hard to read. Am considering adding a
'--no-color' output option as this will make the tests much more
readable. (Currently we have to call format_string to get the matching
colors and if a test fails the diff is very hard to read).
2018-04-06 20:44:02 +01:00
bootandy
385ddb75e1 Minor code neatening 2018-04-06 20:35:00 +01:00
bootandy
5c6165da8a First integration test
This test needs neatening but it is the first example of a working
integration test
2018-04-05 14:45:04 +01:00
bootandy
f39b09e79c increment version (0.2.1 already tagged) 2018-04-05 14:45:04 +01:00
bootandy
778dbb44b3 Remove graying background
Remove background that gets grayer.
1) This looks funny on terminals that aren't black
2) Makes testing easier
2018-04-04 23:12:00 +01:00
bootandy
69c79d5f95 Update readme 2018-04-04 20:21:59 +01:00
bootandy
61ab0e8f96 Simplify build.
Just build mac and linux.
Remove appveyor file which was designed for windows.
2018-04-03 20:54:53 +01:00
bootandy
285fd62850 increment version 2018-04-03 19:53:37 +01:00
bootandy
6a63cbe1bc fix: Dust was supposed to take multiple dir args
Dust will now work when given multiple dirs. Before this fix it would
only show the largest dir.
2018-04-03 17:54:45 +01:00
bootandy
8d98171b82 Update README 2018-04-03 17:17:04 +01:00
bootandy
120b4e16e7 Squash Node and DirEnt objects into single object 2018-04-03 17:05:28 +01:00
bootandy
6198e3183f pull units variable out as constant 2018-04-03 16:37:29 +01:00
bootandy
ecf6c8f0e5 Refactor: Pull display code out to different file 2018-03-22 17:21:51 -04:00
andy boot
f14a9789f4 Merge pull request #4 from bootandy/blocks
Blocks
2018-03-22 16:37:05 -04:00
bootandy
4944d517f4 Apparent size mode: handle hard links.
If we are viewing apparent size then each hard linked file should be
counted. Not just the first one.
2018-03-22 14:36:17 -04:00
bootandy
cce656ab4c rm dead code 2018-03-22 14:30:30 -04:00
bootandy
03a517a310 block size is always 512 on rust 2018-03-22 14:29:59 -04:00
andy boot
3796c39ac9 Merge pull request #3 from nebkor/inodes_refactor
Broken into files, inode/dev changes from inodes branch taken
2018-03-22 12:34:12 -04:00
andy boot
0357fc5e71 Merge branch 'master' into inodes_refactor 2018-03-22 12:30:03 -04:00
Joe Ardent
aa3f411974 fixes blocksize error in get_blocksize().
Under Linux, MetadataExt::st_blocks() returns the number of 512B
blocks.

https://doc.rust-lang.org/1.23.0/std/os/linux/fs/trait.MetadataExt.html#tymethod.st_blocks
2018-03-22 00:34:14 -07:00
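
The point of this fix, as the message says, is that st_blocks is defined in 512-byte units rather than the filesystem's own block size. A minimal sketch of the corrected calculation, assuming a Linux target (not the exact dust code):

```rust
// Sketch only: disk usage from st_blocks, which is always in 512-byte units.
use std::fs;
use std::os::linux::fs::MetadataExt;

fn disk_usage(path: &str) -> std::io::Result<u64> {
    let md = fs::metadata(path)?;
    // st_blocks is a count of 512B blocks, independent of the fs block size.
    Ok(md.st_blocks() * 512)
}
```
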
Joe Ardent
eb69ad19a0 break platform cfg code into utils submodule 2018-03-22 00:00:21 -07:00
Joe Ardent
c127580057 Bring over dev/inode pair changes from bootandy's repo 2018-03-21 23:47:37 -07:00
andy boot
c978c6cf93 Merge pull request #2 from bootandy/inodes
Inodes
2018-03-22 00:25:17 -04:00
bootandy
f72a67132c Remove commented out code 2018-03-21 19:50:13 -04:00
bootandy
4d1f881c17 fix: inodes are only unique with dev
First iteration adding dev to inodes to form a tuple to go into our
'have we seen this before' hashset.
inodes are not unique across partitions
2018-03-21 19:50:08 -04:00
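
The commit message describes using a (device, inode) pair as the dedup key; a minimal sketch of that idea with a hypothetical helper (not dust's actual API):

```rust
// Sketch only: count a hard-linked file once per (device, inode) pair,
// since inode numbers alone can collide across partitions.
use std::collections::HashSet;

fn size_if_unseen(seen: &mut HashSet<(u64, u64)>, dev: u64, inode: u64, size: u64) -> u64 {
    // insert() returns false if the pair was already present.
    if seen.insert((dev, inode)) { size } else { 0 }
}
```
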
Joe Ardent
e75a666a4c better display-children-finding code 2018-03-21 13:43:52 -07:00
Joe Ardent
617b0d2971 Merge branch 'nebkor' 2018-03-21 10:56:15 -07:00
Joe Ardent
8fa83e3836 First step of refactor done. 2018-03-21 10:55:03 -07:00
Joe Ardent
7fa2ce3434 start of refactor 2018-03-21 10:55:03 -07:00
bootandy
b0e971891e Fix basic message 2018-03-21 12:14:17 -04:00
andy boot
cef95fa415 Merge pull request #1 from nebkor/master
Minor cleanup, prettier output
2018-03-21 12:12:19 -04:00
Joe Ardent
381d286847 use accessor and creator methods for new types in lib 2018-03-21 00:09:23 -07:00
Joe Ardent
aa963defda update sample output with new box-drawing characters 2018-03-20 22:50:54 -07:00
Joe Ardent
0faf795284 First step of refactor done. 2018-03-20 22:40:53 -07:00
Joe Ardent
b7271b1da2 start of refactor 2018-03-20 22:09:29 -07:00
Joe Ardent
99f462f023 s/sibblings/siblings/g 2018-03-20 20:40:36 -07:00
Joe Ardent
1c80cbf28b Make printing slightly prettier 2018-03-20 19:23:36 -07:00
Joe Ardent
f802d7a6b4 quiet some clippy warnings 2018-03-20 19:23:36 -07:00
Joe Ardent
b4c6c68527 minor whitespace changes from rustfmt 2018-03-20 19:10:24 -07:00
bootandy
2c495364c7 None core OS: fix code bug 2018-03-19 16:11:55 -04:00
bootandy
32b653fe41 travis build file
no idea if this will work
2018-03-19 16:11:55 -04:00
15 changed files with 744 additions and 382 deletions

View File

@@ -1,42 +1,76 @@
# Based on the "trust" template v0.1.2
# https://github.com/japaric/trust/tree/v0.1.2
dist: trusty
language: rust
services: docker
sudo: required
# TODO Rust builds on stable by default, this can be
# overridden on a case by case basis down below.
env:
global:
- PROJECT_NAME=dust
- CHANNEL=stable
global:
# TODO Update this to match the name of your project.
- CRATE_NAME=dust
language: rust
rust:
- stable
matrix:
fast_finish: true
- os: osx
env: TARGET=i686-apple-darwin
- os: linux
env: TARGET=i686-unknown-linux-gnu
- os: osx
env: TARGET=x86_64-apple-darwin
- os: linux
env: TARGET=x86_64-unknown-linux-gnu
cache: cargo
# TODO These are all the build jobs. Adjust as necessary. Comment out what you
# don't need
include:
# Linux
- env: TARGET=x86_64-unknown-linux-gnu
# OSX
- env: TARGET=x86_64-apple-darwin
os: osx
before_install:
- export PATH="$PATH:$HOME/.cargo/bin"
- set -e
- rustup self update
install:
- sh ci/install.sh
- source ~/.cargo/env || true
script:
- cargo build --release --target $TARGET
- cargo test --target $TARGET
- bash ci/script.sh
after_script: set +e
before_deploy:
- sh ci/before_deploy.sh
deploy:
provider: releases
# TODO update `api_key.secure`
# - Create a `public_repo` GitHub token. Go to: https://github.com/settings/tokens/new
# - Encrypt it: `travis encrypt 0123456789012345678901234567890123456789
# - Paste the output down here
api_key:
secure: "osa6xr6VGDSKUFGPVSElFl2ItJQ0vzTuc+xcBYtEO663sFf5bsCXnBmzdNB4dSfiV1KdVXZQaN7S2rIO9GoWVY02hz8OLYWh5wDXkdQQ8hSEl9FXnSB8I1zWTB4dYsnAZPkjbkH4p+HmmlH6COH1FRKWcEtJMWbZBr0kVc3hxWQue2U5Lo0FhWgNo9oOfs8b6lvF9N/bSQaaeE0OEBVyc3iOBG+03UrcPwhuO3vUNYPkiqxdwGctYyjBEbc4OTKLPazvgFaDmiACtIM1iPIly5WXR+A/3hMxgDKlpqi9F8bO/I8C+kXpP7ZBlqkejo8MwjZ8UHXzX7uXsq/5Upp8tPDTr0ko4dDvDrQMu+mnGwsEU3T8HPGE2c1DZl858tMt/NhUjtEXJDY5Fadia3I7Th5dUGR8O3ylnXf8rkSxFcqDAJ6u9KFH2SUAR5nEU4hRSSx8KjSTadQsQGQS6Iyvf8HV0GwPPkJStEZq/ZHE1hEQHXZBREBHnjgCkjj/LItyFFMs0qZbfTUBurKokQnQElkOddpzLsfb6G6+8rV6+WNmsuqUVx04YKTfjb5X/tV0Bjgx1YOMMBhWXrRi+XO04l9fsoU8zYfb29bqEzC7Rx38/A4Bncp/eI1JqUxv7EIbpEadIVq1LrVJg54O7Kx0LA6AsCRwTdmFGdSFbz3oujI="
file: ${PROJECT_NAME}-${TRAVIS_TAG}-${TARGET}.tar.gz
skip_cleanup: true
secure: UlU73Td7Bkb2N88ws4YGLWR+4U0IMgiou9QQtMnmpouJFjeUNxtLSPMPODVXP7zq4sKt5HR5B3fX9MW4mKm351fvnQEoihETn06pKiXGnY//SlTPTt67MX9ZOYmd9ohJReMDOZDgqhnGLxfymycGtsLAmdjDZnAl+IMqgg0FMyVFj9Cl9aKxnn12lxQyX4zabHKk8TUKD3By8ZoEUnJMHt3gEtOmbDgS4brcTPeHCzqnYFw73LEnkqvz+JP0XwauJY7Cf8lminKm/klmjCkQji8T9SHI52v1g0Fxpx0ucp2o3vulQrLHXaHvZ6Fr7J0cSXXzaFF3rrGLt4t4jU/+9TZm1+n5k5XuPW4x4NTCC9NmIj/z0/z41t82E9qZhzhtm2Jdsg6H2tNk+C774TYqcmR6GCvfRadfjRp3cA5dh0UwDVjH2MJFxlHDVkl6la0mVVRsCGF3oBKZVk0BDl1womfnmI46o/uU+gLknHN6Ed6PHHPPYDViWd3VKdmHKT7XrkMMUF6HjZUtla689DWIOWZSiV++1dVPcl/1TV+6tTmN4bBtPcLuX7SHRuLp2PI2kATvRMECsa7gZRypW4jKpVn7b2yetX9TVI3i1zR5zkQJ3dPg8sATvYPL53aKH/WsqUg4rzoAlbk9so+++R4bQY69LhV3B511B7EAynoZFdM
file_glob: true
file: $CRATE_NAME-$TRAVIS_TAG-$TARGET.*
on:
repo: bootandy/dust
# TODO Here you can pick which targets will generate binary releases
# In this example, there are some targets that are tested using the stable
# and nightly channels. This condition makes sure there is only one release
# for such targets and that's generated using the stable channel
condition: $TRAVIS_RUST_VERSION = stable
tags: true
provider: releases
skip_cleanup: true
cache: cargo
before_cache:
# Travis can't cache files that are not readable by "others"
- chmod -R a+r $HOME/.cargo
branches:
only:
# release tags
- /^v\d+\.\d+\.\d+.*$/
- master
notifications:
email:
on_success: never

View File

@@ -1,8 +1,11 @@
[package]
name = "dust"
version = "0.1.0"
authors = ["bootandy <bootandy@gmail.com>"]
version = "0.2.3"
authors = ["bootandy <bootandy@gmail.com>", "nebkor <code@ardent.nebcorp.com>"]
[dependencies]
ansi_term = "0.11"
clap = "2.31"
assert_cli = "0.5"
tempfile = "3"

View File

@@ -4,6 +4,12 @@
# Dust
du + rust = dust. A rust alternative to du
[Releases](https://github.com/bootandy/dust/releases)
To install:
* Download linux / mac binary from [Releases](https://github.com/bootandy/dust/releases)
* extract the file: tar -xvf <file>
* copy the binary onto your search path: sudo mv dust /usr/local/bin/
Unlike du, dust is meant to give you an instant overview of which directories are using disk space without requiring sort or head. Dust does not count file system blocks; it uses file sizes instead. Dust will print at most one 'Did not have permissions' message.
@@ -11,32 +17,35 @@ Dust will list the 15 biggest sub directories and will smartly recurse down the
```
Usage: dust <dir>
Usage: dust <dir> <another_dir> <and_more>
Usage: dust -s <dir> (apparent-size - shows the length of the file as opposed to the amount of disk space it uses)
Usage: dust -n 30 <dir> (Shows 30 directories not 15)
```
```
dust .
161M .
160M └─ ./target
123M ├─ ./target/debug
83M ├─ ./target/debug/deps
16M ── ./target/debug/deps/libclap-82e6176feef5d4b7.rlib
8.6M │── ./target/debug/deps/dust-993f7d919d92f0f8.dSYM
8.6M └─ ./target/debug/deps/dust-993f7d919d92f0f8.dSYM/Contents
8.6M │ └── ./target/debug/deps/dust-993f7d919d92f0f8.dSYM/Contents/Resources
27M ├── ./target/debug/incremental
12M │ └── ./target/debug/build
20M ├── ./target/x86_64-apple-darwin
20M │ └── ./target/x86_64-apple-darwin/debug
20M │ └─ ./target/x86_64-apple-darwin/debug/deps
16M │ └── ./target/x86_64-apple-darwin/debug/deps/libclap-7e3f8513c52cd558.rlib
16M └── ./target/release
13M └── ./target/release/deps
djin:git/dust> dust
65M .
65M └─ ./target
49M ├─ ./target/debug
26M ├─ ./target/debug/deps
21M ── ./target/debug/deps/libclap-9e6625ac8ff074ad.rlib
13M ── ./target/debug/dust
8.9M │ └─ ./target/debug/incremental
6.7M ├─┬ ./target/debug/incremental/dust-2748eiei2tcnp
6.7M │ │ └─┬ ./target/debug/incremental/dust-2748eiei2tcnp/s-ezd6jnik5u-163pyem-1aab9ncf5glum
3.0M │ │ └── ./target/debug/incremental/dust-2748eiei2tcnp/s-ezd6jnik5u-163pyem-1aab9ncf5glum/dep-graph.bin
2.2M │ └─┬ ./target/debug/incremental/dust-1dlon65p8m3vl
2.2M │ └── ./target/debug/incremental/dust-1dlon65p8m3vl/s-ezd6jncecv-1xsnfd0-4dw9l1r2th2t
15M └─ ./target/release
9.2M ├─┬ ./target/release/deps
6.7M └── ./target/release/deps/libclap-87bc2534ea57f044.rlib
5.9M └── ./target/release/dust
```
Performance: dust is currently about 4 times slower than du.
Alternatives:
* [NCDU](https://dev.yorhel.nl/ncdu)
* du -d 1 -h | sort -h
Note: Apparent-size is calculated slightly differently in dust than in gdu. In dust each hard link is counted as using file_length space. In gdu only the first entry is counted.
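
To make the note above concrete, here is a rough sketch of an apparent-size total in which every directory entry contributes its file length, so a second hard link to an already-counted file is counted again; the helper is hypothetical and non-recursive, not dust's actual code:

```rust
// Sketch only: apparent size as described above - each entry adds its length,
// with no deduplication of hard links.
use std::fs;

fn apparent_size_of_dir(dir: &str) -> std::io::Result<u64> {
    let mut total = 0;
    for entry in fs::read_dir(dir)? {
        let md = entry?.metadata()?;
        if md.is_file() {
            total += md.len(); // hard links are not deduplicated here
        }
    }
    Ok(total)
}
```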

23
ci/before_deploy.ps1 Normal file
View File

@@ -0,0 +1,23 @@
# This script takes care of packaging the build artifacts that will go in the
# release zipfile
$SRC_DIR = $PWD.Path
$STAGE = [System.Guid]::NewGuid().ToString()
Set-Location $ENV:Temp
New-Item -Type Directory -Name $STAGE
Set-Location $STAGE
$ZIP = "$SRC_DIR\$($Env:CRATE_NAME)-$($Env:APPVEYOR_REPO_TAG_NAME)-$($Env:TARGET).zip"
# TODO Update this to package the right artifacts
Copy-Item "$SRC_DIR\target\$($Env:TARGET)\release\dust" '.\'
7z a "$ZIP" *
Push-AppveyorArtifact "$ZIP"
Remove-Item *.* -Force
Set-Location ..
Remove-Item $STAGE
Set-Location $SRC_DIR

33
ci/before_deploy.sh Normal file
View File

@@ -0,0 +1,33 @@
# This script takes care of building your crate and packaging it for release
set -ex
main() {
local src=$(pwd) \
stage=
case $TRAVIS_OS_NAME in
linux)
stage=$(mktemp -d)
;;
osx)
stage=$(mktemp -d -t tmp)
;;
esac
test -f Cargo.lock || cargo generate-lockfile
# TODO Update this to build the artifacts that matter to you
cross rustc --bin dust --target $TARGET --release -- -C lto
# TODO Update this to package the right artifacts
cp target/$TARGET/release/dust $stage/
cd $stage
tar czf $src/$CRATE_NAME-$TRAVIS_TAG-$TARGET.tar.gz *
cd $src
rm -rf $stage
}
main

27
ci/install.sh Normal file
View File

@@ -0,0 +1,27 @@
set -ex
main() {
local target=
if [ $TRAVIS_OS_NAME = linux ]; then
target=x86_64-unknown-linux-musl
sort=sort
else
target=x86_64-apple-darwin
sort=gsort # for `sort --version-sort`, from brew's coreutils.
fi
# This fetches latest stable release
local tag=$(git ls-remote --tags --refs --exit-code https://github.com/japaric/cross \
| cut -d/ -f3 \
| grep -E '^v[0.1.0-9.]+$' \
| $sort --version-sort \
| tail -n1)
curl -LSfs https://japaric.github.io/trust/install.sh | \
sh -s -- \
--force \
--git japaric/cross \
--tag $tag \
--target $target
}
main

24
ci/script.sh Normal file
View File

@@ -0,0 +1,24 @@
# This script takes care of testing your crate
set -ex
# TODO This is the "test phase", tweak it as you see fit
main() {
cross build --target $TARGET
cross build --target $TARGET --release
if [ ! -z $DISABLE_TESTS ]; then
return
fi
cross test --target $TARGET
cross test --target $TARGET --release
cross run --target $TARGET
cross run --target $TARGET --release
}
# we don't run the "test phase" when doing deploys
if [ -z $TRAVIS_TAG ]; then
main
fi

131
src/display.rs Normal file
View File

@@ -0,0 +1,131 @@
extern crate ansi_term;
use self::ansi_term::Colour::Fixed;
use dust::Node;
static UNITS: [char; 4] = ['T', 'G', 'M', 'K'];
pub fn draw_it(permissions: bool, heads: &Vec<Node>, to_display: &Vec<&Node>) -> () {
if !permissions {
eprintln!("Did not have permissions for all directories");
}
for d in to_display {
if heads.contains(d) {
display_node(d, &to_display, true, "")
}
}
}
fn display_node<S: Into<String>>(
node_to_print: &Node,
to_display: &Vec<&Node>,
is_first: bool,
indentation_str: S,
) {
let mut is = indentation_str.into();
print_this_node(node_to_print, is_first, is.as_ref());
is = is.replace("└─┬", " ");
is = is.replace("└──", " ");
is = is.replace("├──", "");
is = is.replace("├─┬", "");
let printable_node_slashes = node_to_print.name().matches('/').count();
let mut num_siblings = to_display.iter().fold(0, |a, b| {
if node_to_print.children().contains(b)
&& b.name().matches('/').count() == printable_node_slashes + 1
{
a + 1
} else {
a
}
});
let mut is_biggest = true;
for node in to_display {
if node_to_print.children().contains(node) {
let has_display_children = node.children()
.iter()
.fold(false, |has_kids, n| has_kids || to_display.contains(&n));
let has_children = node.children().len() > 0 && has_display_children;
if node.name().matches('/').count() == printable_node_slashes + 1 {
num_siblings -= 1;
let tree_chars = {
if num_siblings == 0 {
if has_children {
"└─┬"
} else {
"└──"
}
} else {
if has_children {
"├─┬"
} else {
"├──"
}
}
};
display_node(&node, to_display, is_biggest, is.to_string() + tree_chars);
is_biggest = false;
}
}
}
}
fn print_this_node(node: &Node, is_biggest: bool, indentation: &str) {
let pretty_size = format!("{:>5}", human_readable_number(node.size()),);
println!(
"{}",
format_string(node.name(), is_biggest, pretty_size.as_ref(), indentation)
)
}
pub fn format_string(dir_name: &str, is_biggest: bool, size: &str, indentation: &str) -> String {
format!(
"{} {} {}",
if is_biggest {
Fixed(196).paint(size)
} else {
Fixed(7).paint(size)
},
indentation,
dir_name,
)
}
fn human_readable_number(size: u64) -> String {
for (i, u) in UNITS.iter().enumerate() {
let marker = 1024u64.pow((UNITS.len() - i) as u32);
if size >= marker {
if size / marker < 10 {
return format!("{:.1}{}", (size as f32 / marker as f32), u);
} else {
return format!("{}{}", (size / marker), u);
}
}
}
return format!("{}B", size);
}
mod tests {
#[allow(unused_imports)]
use super::*;
#[test]
fn test_human_readable_number() {
assert_eq!(human_readable_number(1), "1B");
assert_eq!(human_readable_number(956), "956B");
assert_eq!(human_readable_number(1004), "1004B");
assert_eq!(human_readable_number(1024), "1.0K");
assert_eq!(human_readable_number(1536), "1.5K");
assert_eq!(human_readable_number(1024 * 512), "512K");
assert_eq!(human_readable_number(1024 * 1024), "1.0M");
assert_eq!(human_readable_number(1024 * 1024 * 1024 - 1), "1023M");
assert_eq!(human_readable_number(1024 * 1024 * 1024 * 20), "20G");
assert_eq!(human_readable_number(1024 * 1024 * 1024 * 1024), "1.0T");
}
}

68
src/lib.rs Normal file
View File

@@ -0,0 +1,68 @@
use std::cmp::{Eq, Ord, Ordering, PartialEq, PartialOrd};
#[derive(Clone, Debug)]
pub struct Node {
name: String,
size: u64,
children: Vec<Node>,
}
impl Node {
pub fn new<S: Into<String>>(name: S, size: u64, children: Vec<Node>) -> Self {
Node {
children: children,
name: name.into(),
size: size,
}
}
pub fn children(&self) -> &Vec<Node> {
&self.children
}
pub fn name(&self) -> &String {
&self.name
}
pub fn size(&self) -> u64 {
self.size
}
}
impl Ord for Node {
fn cmp(&self, other: &Self) -> Ordering {
if self.size > other.size {
Ordering::Less
} else if self.size < other.size {
Ordering::Greater
} else {
let my_slashes = self.name.matches('/').count();
let other_slashes = other.name.matches('/').count();
if my_slashes > other_slashes {
Ordering::Greater
} else if my_slashes < other_slashes {
Ordering::Less
} else {
if self.name < other.name {
Ordering::Less
} else if self.name > other.name {
Ordering::Greater
} else {
Ordering::Equal
}
}
}
}
}
impl PartialOrd for Node {
fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
Some(self.cmp(other))
}
}
impl PartialEq for Node {
fn eq(&self, other: &Self) -> bool {
(&self.name, self.size) == (&other.name, other.size)
}
}
impl Eq for Node {}

View File

@@ -1,77 +1,19 @@
// test:
// recursive dirs that link to each other.
// Pass in bad dir name
// num to search for is less than num available
// admin files.
//
extern crate ansi_term;
#[macro_use]
extern crate clap;
extern crate assert_cli;
extern crate dust;
use std::collections::HashSet;
use ansi_term::Colour::Fixed;
use self::display::draw_it;
use clap::{App, AppSettings, Arg};
use utils::{find_big_ones, get_dir_tree};
use std::cmp;
use std::cmp::Ordering;
use std::fs;
use std::fs::ReadDir;
use std::io;
mod display;
mod utils;
#[derive(Clone, Debug)]
struct Node {
dir: Dir,
children: Vec<Node>,
}
impl Ord for Node {
fn cmp(&self, other: &Self) -> Ordering {
if self.dir.size > other.dir.size {
Ordering::Less
} else if self.dir.size < other.dir.size {
Ordering::Greater
} else {
let my_slashes = self.dir.name.matches("/").count();
let other_slashes = other.dir.name.matches("/").count();
if my_slashes > other_slashes {
Ordering::Greater
} else if my_slashes < other_slashes {
Ordering::Less
} else {
if self.dir.name < other.dir.name {
Ordering::Less
} else if self.dir.name > other.dir.name {
Ordering::Greater
} else {
Ordering::Equal
}
}
}
}
}
impl PartialOrd for Node {
fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
Some(self.cmp(other))
}
}
impl PartialEq for Node {
fn eq(&self, other: &Self) -> bool {
(&self.dir.name, self.dir.size) == (&other.dir.name, other.dir.size)
}
}
impl Eq for Node {}
#[derive(Clone, Debug)]
struct Dir {
name: String,
size: u64,
}
static DEFAULT_NUMBER_OF_LINES: &'static str = &"15";
static DEFAULT_NUMBER_OF_LINES: &'static str = "15";
fn main() {
let options = App::new("Trailing args example")
let options = App::new("Dust")
.setting(AppSettings::TrailingVarArg)
.arg(
Arg::with_name("number_of_lines")
@@ -80,6 +22,11 @@ fn main() {
.takes_value(true)
.default_value(DEFAULT_NUMBER_OF_LINES),
)
.arg(
Arg::with_name("use_apparent_size")
.short("s")
.help("If set will use file length. Otherwise we use blocks"),
)
.arg(Arg::with_name("inputs").multiple(true))
.get_matches();
@@ -90,275 +37,12 @@ fn main() {
}
};
let number_of_lines = value_t!(options.value_of("number_of_lines"), usize).unwrap();
let use_apparent_size = options.is_present("use_apparent_size");
let (permissions, results) = get_dir_tree(filenames);
let slice_it = find_big_ones(&results, number_of_lines);
display(permissions, slice_it);
let (permissions, node_per_top_level_dir) = get_dir_tree(&filenames, use_apparent_size);
let slice_it = find_big_ones(&node_per_top_level_dir, number_of_lines);
draw_it(permissions, &node_per_top_level_dir, &slice_it);
}
fn get_dir_tree(filenames: Vec<&str>) -> (bool, Vec<Node>) {
let mut permissions = true;
let mut results = vec![];
for b in filenames {
let mut new_name = String::from(b);
while new_name.chars().last() == Some('/') && new_name.len() != 1 {
new_name.pop();
}
let (hp, data) = examine_dir_str(new_name);
permissions = permissions && hp;
results.push(data);
}
(permissions, results)
}
fn examine_dir_str(loc: String) -> (bool, Node) {
let mut inodes: HashSet<u64> = HashSet::new();
let (hp, result) = examine_dir(fs::read_dir(&loc), &mut inodes);
// This needs to be folded into the below recursive call somehow
let new_size = result.iter().fold(0, |a, b| a + b.dir.size);
(
hp,
Node {
dir: Dir {
name: loc,
size: new_size,
},
children: result,
},
)
}
#[cfg(target_os = "linux")]
fn get_metadata_blocks_and_inode(d: &std::fs::DirEntry) -> Option<(u64, u64)> {
use std::os::linux::fs::MetadataExt;
match d.metadata().ok() {
Some(md) => Some((md.len(), md.st_ino())),
None => None,
}
}
#[cfg(target_os = "unix")]
fn get_metadata_blocks_and_inode(d: &std::fs::DirEntry) -> Option<(u64, u64)> {
use std::os::unix::fs::MetadataExt;
match d.metadata().ok() {
Some(md) => Some((md.len(), md.ino())),
None => None,
}
}
#[cfg(target_os = "macos")]
fn get_metadata_blocks_and_inode(d: &std::fs::DirEntry) -> Option<(u64, u64)> {
use std::os::macos::fs::MetadataExt;
match d.metadata().ok() {
Some(md) => Some((md.len(), md.st_ino())),
None => None,
}
}
#[cfg(not(any(target_os = "linux", target_os = "unix", target_os = "macos")))]
fn get_metadata_blocks_and_inode(_d: &std::fs::DirEntry) -> Option<(u64, u64)> {
match d.metadata().ok() {
Some(md) => Some((md.len(), 0)), //move to option not 0
None => None,
}
}
fn examine_dir(a_dir: io::Result<ReadDir>, inodes: &mut HashSet<u64>) -> (bool, Vec<Node>) {
let mut result = vec![];
let mut have_permission = true;
if a_dir.is_ok() {
let paths = a_dir.unwrap();
for dd in paths {
match dd {
Ok(d) => {
let file_type = d.file_type().ok();
let maybe_size_and_inode = get_metadata_blocks_and_inode(&d);
match (file_type, maybe_size_and_inode) {
(Some(file_type), Some((size, inode))) => {
let s = d.path().to_string_lossy().to_string();
if inodes.contains(&inode) {
continue;
}
inodes.insert(inode);
if d.path().is_dir() && !file_type.is_symlink() {
let (hp, recursive) = examine_dir(fs::read_dir(d.path()), inodes);
have_permission = have_permission && hp;
let new_size = recursive.iter().fold(size, |a, b| a + b.dir.size);
result.push(Node {
dir: Dir {
name: s,
size: new_size,
},
children: recursive,
})
} else {
result.push(Node {
dir: Dir {
name: s,
size: size,
},
children: vec![],
})
}
}
(_, None) => have_permission = false,
(_, _) => (),
}
}
Err(_) => (),
}
}
} else {
have_permission = false;
}
(have_permission, result)
}
// We start with a list of root directories - these must be the biggest folders
// We then repeatedly merge in the children of the biggest directory - each iteration
// the next biggest directory's children are merged in.
fn find_big_ones<'a>(l: &'a Vec<Node>, max_to_show: usize) -> Vec<&Node> {
let mut new_l: Vec<&Node> = l.iter().map(|a| a).collect();
new_l.sort();
for processed_pointer in 0..max_to_show {
if new_l.len() == processed_pointer {
break;
}
// Must be a list of pointers into new_l otherwise b_list will go out of scope
// when it is deallocated
let mut b_list: Vec<&Node> = new_l[processed_pointer]
.children
.iter()
.map(|a| a)
.collect();
new_l.extend(b_list);
new_l.sort();
/*println!(
"{:?} -------------------",
new_l
.iter()
.map(|a| a.dir.size.to_string() + ": " + &a.dir.name)
.collect::<Vec<String>>()
);*/
}
if new_l.len() > max_to_show {
new_l[0..max_to_show + 1].to_vec()
} else {
new_l
}
}
fn display(permissions: bool, to_display: Vec<&Node>) -> () {
if !permissions {
eprintln!("Did not have permissions for all directories");
}
display_node(to_display[0], &to_display, true, 1, "")
}
fn display_node<S: Into<String>>(
node_to_print: &Node,
to_display: &Vec<&Node>,
is_first: bool,
depth: u8,
indentation_str: S,
) {
let mut is = indentation_str.into();
print_this_node(node_to_print, is_first, depth, is.as_ref());
is = is.replace("└──", " ");
is = is.replace("├──", "");
let printable_node_slashes = node_to_print.dir.name.matches("/").count();
let mut num_sibblings = to_display.iter().fold(0, |a, b| {
if node_to_print.children.contains(b)
&& b.dir.name.matches("/").count() == printable_node_slashes + 1
{
a + 1
} else {
a
}
});
let mut is_biggest = true;
for node in to_display {
if node_to_print.children.contains(node) {
if node.dir.name.matches("/").count() == printable_node_slashes + 1 {
num_sibblings -= 1;
let tree_chars = {
if num_sibblings == 0 {
"└──"
} else {
"├──"
}
};
display_node(
&node,
to_display,
is_biggest,
depth + 1,
is.to_string() + tree_chars,
);
is_biggest = false;
}
}
}
}
fn print_this_node(node_to_print: &Node, is_biggest: bool, depth: u8, indentation_str: &str) {
let padded_size = format!("{:>5}", human_readable_number(node_to_print.dir.size),);
println!(
"{} {} {}",
if is_biggest {
Fixed(196).paint(padded_size)
} else {
Fixed(7).paint(padded_size)
},
indentation_str,
Fixed(7)
.on(Fixed(cmp::min(8, (depth) as u8) + 231))
.paint(node_to_print.dir.name.to_string())
);
}
fn human_readable_number(size: u64) -> (String) {
let units = vec!["T", "G", "M", "K"]; //make static
//return format!("{}B", size);
for (i, u) in units.iter().enumerate() {
let marker = 1024u64.pow((units.len() - i) as u32);
if size >= marker {
if size / marker < 10 {
return format!("{:.1}{}", (size as f32 / marker as f32), u);
} else {
return format!("{}{}", (size / marker), u);
}
}
}
return format!("{}B", size);
}
mod tests {
use super::*;
#[test]
fn test_human_readable_number() {
assert_eq!(human_readable_number(1), "1B");
assert_eq!(human_readable_number(956), "956B");
assert_eq!(human_readable_number(1004), "1004B");
assert_eq!(human_readable_number(1024), "1.0K");
assert_eq!(human_readable_number(1536), "1.5K");
assert_eq!(human_readable_number(1024 * 512), "512K");
assert_eq!(human_readable_number(1024 * 1024), "1.0M");
assert_eq!(human_readable_number(1024 * 1024 * 1024 - 1), "1023M");
assert_eq!(human_readable_number(1024 * 1024 * 1024 * 20), "20G");
assert_eq!(human_readable_number(1024 * 1024 * 1024 * 1024), "1.0T");
}
}
#[cfg(test)]
mod tests;

0
src/test_dir/many/a_file Normal file
View File

1
src/test_dir/many/hello_file Normal file
View File

@@ -0,0 +1 @@
hello

140
src/tests.rs Normal file
View File

@@ -0,0 +1,140 @@
extern crate ansi_term;
extern crate tempfile;
use self::tempfile::Builder;
use self::tempfile::TempDir;
use super::*;
use display::format_string;
use std::fs::File;
use std::io::Write;
use std::path::PathBuf;
use std::process::Command;
#[test]
pub fn test_main() {
let r = format!(
"{}
{}
{}
{}",
format_string("src/test_dir", true, " 4.0K", ""),
format_string("src/test_dir/many", true, " 4.0K", "└─┬",),
format_string("src/test_dir/many/hello_file", true, " 4.0K", " ├──",),
format_string("src/test_dir/many/a_file", false, " 0B", " └──",),
);
assert_cli::Assert::main_binary()
.with_args(&["src/test_dir"])
.stdout()
.is(r)
.unwrap();
}
#[test]
pub fn test_apparent_size() {
let r = format!(
"{}",
format_string("src/test_dir/many/hello_file", true, " 6B", " ├──",),
);
assert_cli::Assert::main_binary()
.with_args(&["-s", "src/test_dir"])
.stdout()
.contains(r)
.unwrap();
}
fn build_temp_file(dir: &TempDir) -> (PathBuf) {
let file_path = dir.path().join("notes.txt");
let mut file = File::create(&file_path).unwrap();
writeln!(file, "I am a temp file").unwrap();
file_path
}
#[test]
pub fn test_soft_sym_link() {
let dir = Builder::new().tempdir().unwrap();
let file = build_temp_file(&dir);
let dir_s = dir.path().to_str().unwrap();
let file_path_s = file.to_str().unwrap();
let link_name = dir.path().join("the_link");
let link_name_s = link_name.to_str().unwrap();
let c = Command::new("ln")
.arg("-s")
.arg(file_path_s)
.arg(link_name_s)
.output();
assert!(c.is_ok());
let r = format!(
"{}
{}
{}",
format_string(dir_s, true, " 8.0K", ""),
format_string(file_path_s, true, " 4.0K", "├──",),
format_string(link_name_s, false, " 4.0K", "└──",),
);
assert_cli::Assert::main_binary()
.with_args(&[dir_s])
.stdout()
.contains(r)
.unwrap();
}
// Hard links are ignored as the inode is the same as the file
#[test]
pub fn test_hard_sym_link() {
let dir = Builder::new().tempdir().unwrap();
let file = build_temp_file(&dir);
let dir_s = dir.path().to_str().unwrap();
let file_path_s = file.to_str().unwrap();
let link_name = dir.path().join("the_link");
let link_name_s = link_name.to_str().unwrap();
let c = Command::new("ln")
.arg(file_path_s)
.arg(link_name_s)
.output();
assert!(c.is_ok());
let r = format!(
"{}
{}",
format_string(dir_s, true, " 4.0K", ""),
format_string(file_path_s, true, " 4.0K", "└──")
);
assert_cli::Assert::main_binary()
.with_args(&[dir_s])
.stdout()
.contains(r)
.unwrap();
}
//Check we don't recurse down an infinite symlink tree
#[test]
pub fn test_recursive_sym_link() {
let dir = Builder::new().tempdir().unwrap();
let dir_s = dir.path().to_str().unwrap();
let link_name = dir.path().join("the_link");
let link_name_s = link_name.to_str().unwrap();
let c = Command::new("cd").arg(dir_s).output();
assert!(c.is_ok());
let c = Command::new("ln")
.arg("-s")
.arg(".")
.arg(link_name_s)
.output();
assert!(c.is_ok());
let r = format!("{}", format_string(dir_s, true, " 4.0K", ""));
assert_cli::Assert::main_binary()
.with_args(&[dir_s])
.stdout()
.contains(r)
.unwrap();
}

113
src/utils/mod.rs Normal file
View File

@@ -0,0 +1,113 @@
use std::collections::HashSet;
use std::fs::{self, ReadDir};
use std::io;
use dust::Node;
mod platform;
use self::platform::*;
pub fn get_dir_tree(filenames: &Vec<&str>, apparent_size: bool) -> (bool, Vec<Node>) {
let mut permissions = true;
let mut results = vec![];
for &b in filenames {
let mut new_name = String::from(b);
while new_name.chars().last() == Some('/') && new_name.len() != 1 {
new_name.pop();
}
let (hp, data) = examine_dir_str(&new_name, apparent_size);
permissions = permissions && hp;
results.push(data);
}
(permissions, results)
}
fn examine_dir_str(loc: &str, apparent_size: bool) -> (bool, Node) {
let mut inodes: HashSet<(u64, u64)> = HashSet::new();
let (hp, result) = examine_dir(fs::read_dir(loc), apparent_size, &mut inodes);
// This needs to be folded into the below recursive call somehow
let new_size = result.iter().fold(0, |a, b| a + b.size());
(hp, Node::new(loc, new_size, result))
}
fn examine_dir(
a_dir: io::Result<ReadDir>,
apparent_size: bool,
inodes: &mut HashSet<(u64, u64)>,
) -> (bool, Vec<Node>) {
let mut result = vec![];
let mut have_permission = true;
if a_dir.is_ok() {
let paths = a_dir.unwrap();
for dd in paths {
match dd {
Ok(d) => {
let file_type = d.file_type().ok();
let maybe_size_and_inode = get_metadata(&d, apparent_size);
match (file_type, maybe_size_and_inode) {
(Some(file_type), Some((size, inode))) => {
let s = d.path().to_string_lossy().to_string();
if !apparent_size {
if let Some(inode_dev_pair) = inode {
if inodes.contains(&inode_dev_pair) {
continue;
}
inodes.insert(inode_dev_pair);
}
}
if d.path().is_dir() && !file_type.is_symlink() {
let (hp, recursive) =
examine_dir(fs::read_dir(d.path()), apparent_size, inodes);
have_permission = have_permission && hp;
let new_size = recursive.iter().fold(size, |a, b| a + b.size());
result.push(Node::new(s, new_size, recursive))
} else {
result.push(Node::new(s, size, vec![]))
}
}
(_, None) => have_permission = false,
(_, _) => (),
}
}
Err(_) => (),
}
}
} else {
have_permission = false;
}
(have_permission, result)
}
// We start with a list of root directories - these must be the biggest folders
// We then repeatedly merge in the children of the biggest directory - each iteration
// the next biggest directory's children are merged in.
pub fn find_big_ones<'a>(l: &'a Vec<Node>, max_to_show: usize) -> Vec<&Node> {
let mut new_l: Vec<&Node> = l.iter().map(|a| a).collect();
new_l.sort();
for processed_pointer in 0..max_to_show {
if new_l.len() == processed_pointer {
break;
}
// Must be a list of pointers into new_l otherwise b_list will go out of scope
// when it is deallocated
let mut b_list: Vec<&Node> = new_l[processed_pointer]
.children()
.iter()
.map(|a| a)
.collect();
new_l.extend(b_list);
new_l.sort();
}
if new_l.len() > max_to_show {
new_l[0..max_to_show + 1].to_vec()
} else {
new_l
}
}
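
The selection strategy described in the comment above find_big_ones can be restated with plain numbers; a toy sketch using (size, child sizes) tuples rather than dust's Node type, for illustration only:

```rust
// Toy restatement of the strategy: keep a biggest-first list, and on each
// pass splice in the children of the entry at the cursor, then re-sort.
fn toy_find_big_ones(roots: Vec<(u64, Vec<u64>)>, max_to_show: usize) -> Vec<u64> {
    let mut list: Vec<(u64, Vec<u64>)> = roots;
    list.sort_by(|a, b| b.0.cmp(&a.0));
    for cursor in 0..max_to_show {
        if cursor >= list.len() {
            break;
        }
        let children: Vec<(u64, Vec<u64>)> =
            list[cursor].1.iter().map(|&size| (size, Vec::new())).collect();
        list.extend(children);
        list.sort_by(|a, b| b.0.cmp(&a.0));
    }
    list.into_iter().take(max_to_show).map(|(size, _)| size).collect()
}
```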

72
src/utils/platform.rs Normal file
View File

@@ -0,0 +1,72 @@
use std;
fn get_block_size() -> u64 {
// All os specific implementations of MetadataExt seem to define a block as 512 bytes
// https://doc.rust-lang.org/std/os/linux/fs/trait.MetadataExt.html#tymethod.st_blocks
512
}
#[cfg(target_os = "linux")]
pub fn get_metadata(
d: &std::fs::DirEntry,
use_apparent_size: bool,
) -> Option<(u64, Option<(u64, u64)>)> {
use std::os::linux::fs::MetadataExt;
match d.metadata().ok() {
Some(md) => {
let inode = Some((md.st_ino(), md.st_dev()));
if use_apparent_size {
Some((md.len(), inode))
} else {
Some((md.st_blocks() * get_block_size(), inode))
}
}
None => None,
}
}
#[cfg(target_os = "unix")]
pub fn get_metadata(
d: &std::fs::DirEntry,
use_apparent_size: bool,
) -> Option<(u64, Option<(u64, u64)>)> {
use std::os::unix::fs::MetadataExt;
match d.metadata().ok() {
Some(md) => {
let inode = Some((md.ino(), md.dev()));
if use_apparent_size {
Some((md.len(), inode))
} else {
Some((md.blocks() * get_block_size(), inode))
}
}
None => None,
}
}
#[cfg(target_os = "macos")]
pub fn get_metadata(
d: &std::fs::DirEntry,
use_apparent_size: bool,
) -> Option<(u64, Option<(u64, u64)>)> {
use std::os::macos::fs::MetadataExt;
match d.metadata().ok() {
Some(md) => {
let inode = Some((md.st_ino(), md.st_dev()));
if use_apparent_size {
Some((md.len(), inode))
} else {
Some((md.st_blocks() * get_block_size(), inode))
}
}
None => None,
}
}
#[cfg(not(any(target_os = "linux", target_os = "unix", target_os = "macos")))]
pub fn get_metadata(d: &std::fs::DirEntry, _apparent: bool) -> Option<(u64, Option<(u64, u64)>)> {
match d.metadata().ok() {
Some(md) => Some((md.len(), None)),
None => None,
}
}