#!/bin/bash

# Step-By-Step
for file in *; do
  if [ -d "$file" ]; then
    echo -e "------------------------------------------------------------------------------------------\nCollection: $file"
    cd "$file"

    echo "0 - Merging files"
    rm -vf CREADME CHANGELOG* readme* README* stage*
    echo "Number of files:" `find . -type f | wc -l`
    cat * > /tmp/aio-"${PWD##*/}".lst && rm * && mv /tmp/aio-"${PWD##*/}".lst ./ && wc -l aio-"${PWD##*/}".lst
    filetype=`file -k aio-"${PWD##*/}".lst`
    echo "FileType: $filetype"

    echo "1 - Uniq Lines"
    cat aio-"${PWD##*/}".lst | sort -b -f -i -T "$(pwd)/" | uniq > stage1 && wc -l stage1

    echo "2 - Cleaning Lines"
    tr '\r' '\n' < stage1 > stage2-tmp && rm stage1 && tr '\0' ' ' < stage2-tmp > stage2-tmp1 && rm stage2-tmp && tr -cd '\11\12\15\40-\176' < stage2-tmp1 > stage2-tmp && rm stage2-tmp1
    filetype=`file stage2-tmp`
    echo "FileType: $filetype"
    if [[ "$filetype" == *ASCII* ]] ; then
      cat stage2-tmp | sed "s/  */ /g;s/^[ \t]*//;s/[ \t]*$//" | sort -b -f -i -T "$(pwd)/" | uniq > stage2 && rm stage2-* && wc -l stage2

      echo "3 - Removing HTML Tags"
      htmlTags="a|b|big|blockquote|body|br|center|code|del|div|em|font|h[1-9]|head|hr|html|i|img|ins|item|li|ol|option|p|pre|s|small|span|strong|sub|sup|table|td|th|title|tr|tt|u|ul"
      cat stage2 | sed -r "s/<[^>]*>//g;s/^\w.*=\"\w.*\">//;s/^($htmlTags)>//I;s/<\/*($htmlTags)$//I;s/&amp;/\&/gI;s/&quot;/\"/gI;s/&#039;/'/gI;s/&apos;/'/gI;s/&lt;/</gI;s/&gt;/>/gI" | sort -b -f -i -T "$(pwd)/" | uniq > stage3 && wc -l stage3 && rm stage2

      echo "4 - Removing Email Addresses"
      cat stage3 | sed -r "s/\w.*\@.*\.(ac|ag|as|at|au|be|bg|bill|bm|bs|c|ca|cc|ch|cm|co|com|cs|de|dk|edu|es|fi|fm|fr|gov|gr|hr|hu|ic|ie|il|info|it|jo|jp|kr|lk|lu|lv|me|mil|mu|net|nil|nl|no|nt|org|pk|pl|pt|ru|se|si|tc|tk|to|tv|tw|uk|us|ws|yu):*//gI" | sort -b -f -i -T "$(pwd)/" | uniq > stage4 && wc -l stage4 && rm stage3

      echo "5 - WPA'ified"
      #pw-inspector -i aio-"${PWD##*/}".lst -o aio-"${PWD##*/}"-wpa.lst -m 8 -M 63 ; wc -l aio-"${PWD##*/}"-wpa.lst && rm aio-"${PWD##*/}"-wpa.lst
      pw-inspector -i stage4 -o stage5 -m 8 -M 63 ; wc -l stage5 && rm stage5

      echo "6 - Compressing"
      #7za a -t7z -mx9 -v200m stage4.7z stage4

      echo "7 - File Sizes"
      du -sh *
    fi
    cd ..
  fi
done
echo "============================================================================================================================================"
exit 1
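# A minimal, self-contained sketch (not part of the script above) of what the "2 - Cleaning Lines"
# tr chain does; the sample input is made up. \11=tab, \12=newline, \15=carriage return,
# \40-\176 = printable ASCII, so anything outside that range (e.g. UTF-8 accents) is dropped,
# which is why the script checks `file` for ASCII afterwards.
printf 'pass\r\nword\0one\tcaf\xc3\xa9\n' \
  | tr '\r' '\n' \
  | tr '\0' ' ' \
  | tr -cd '\11\12\15\40-\176'
# Prints "pass", a blank line, then "word one<TAB>caf" -- the bytes of the accented character are gone.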
# AIO + Sort
for file in *; do
  if [ -d "$file" ]; then
    echo -e "------------------------------------------------------------------------------------------\nCollection: $file"
    cd "$file"
    rm -f stage*
    echo "Number of files:" `find . -type f | wc -l`
    cat * > /tmp/aio-"${PWD##*/}".lst && rm * && mv /tmp/aio-"${PWD##*/}".lst ./
    tr '\r' '\n' < aio-"${PWD##*/}".lst > stage1-tmp && tr '\0' ' ' < stage1-tmp > stage1-tmp1 && tr -cd '\11\12\15\40-\176' < stage1-tmp1 > stage1-tmp && mv stage1-tmp stage1 && rm stage1-*   # End Of Line/New Line & "printable"
    filetype=`file stage1`
    if [[ "$filetype" == *ASCII* ]] ; then
      htmlTags="a|b|big|blockquote|body|br|center|code|del|div|em|font|h[1-9]|head|hr|html|i|img|ins|item|li|ol|option|p|pre|s|small|span|strong|sub|sup|table|td|th|title|tr|tt|u|ul"
      cat stage1 | sed -r "s/  */ /g;s/^[ \t]*//;s/[ \t]*$//;s/<[^>]*>//g;s/^\w.*=\"\w.*\">//;s/^($htmlTags)>//I;s/<\/*($htmlTags)$//I;s/&amp;/\&/gI;s/&quot;/\"/gI;s/&#039;/'/gI;s/&apos;/'/gI;s/&lt;/</gI;s/&gt;/>/gI" > stage2 && rm stage1
      sort -b -f -i -T "$(pwd)/" stage2 > stage3 && rm stage2
      grep -v " * .* " stage3 > stage3.1
      grep " * .* " stage3 > stage3.4
      #grep -v " * .* \| " stage3 > stage3.1   # All one or two words
      #grep " * .* " stage3 | grep -v " " > stage3.2   # All 3+ words
      #grep " * .* " stage3 | grep " " > stage3.3   # All multiple spacing words
      rm stage3
      for fileIn in stage3.*; do   # Place one or two words at the start
        cat "$fileIn" | uniq -c -d > stage3.0   # Find dups; the list is already sorted (else uniq could miss a few values if the list wasn't in order, e.g. test1 test2 test3, test2, test4)
        sort -b -f -i -T "$(pwd)/" -k1,1r -k2 stage3.0 > stage3 && rm stage3.0   # Sort by number of dups (9-0) then by the value (A-Z)
        sed 's/^ *//;s/^[0-9]* //' stage3 >> "${PWD##*/}"-clean.lst && rm stage3   # Remove the "formatting" that uniq adds (lots of spaces at the start)
        cat "$fileIn" | uniq -u >> "${PWD##*/}"-clean.lst   # Add the unique values at the end (A-Z)
        rm "$fileIn"
      done
      rm -f stage* #aio-"${PWD##*/}".lst
      #7za a -t7z -mx9 -v200m "${PWD##*/}".7z "${PWD##*/}".lst
      wc -l "${PWD##*/}"-clean.lst
      md5sum "${PWD##*/}"-clean.lst
    else
      echo "Wrong filetype: $filetype"
    fi
    cd ..
  fi
done
echo "============================================================================================================================================"
exit 1
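# A self-contained sketch of the frequency-ordering trick used in the loop above; the sample
# words are made up. `uniq -c -d` counts only the duplicated lines, `sort -k1,1r -k2` puts the
# most frequent first, the count column is stripped, and the one-off lines are appended after.
printf '%s\n' password letmein password admin password letmein | sort > sample.lst
uniq -c -d sample.lst | sort -b -k1,1r -k2 | sed 's/^ *//;s/^[0-9]* //' > ordered.lst
uniq -u sample.lst >> ordered.lst
cat ordered.lst   # password (3 hits), letmein (2 hits), then the unique entry: admin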
# Misc commands
top
watch -d -n 1 "ls -l"
#ls -m // ls -lt // ls -ltc // ls -ltu // ls -lth
sdiff -s stage3.2 stage4 | less
bash go.sh | tee output
du -sh * # ls -lh
start=`date +%s` && blah && echo "$((`date +%s`-$start))"
cat stage3 | sed -r "s/\w.*\@.*\://" | sort -b -f -i -T "$(pwd)/" | uniq > stage4 && wc -l stage4   # Split Accounts

# Move non-text files out of the way
mkdir bad/
for file in *; do
  bad="$(file -k "$file" | grep -v ASCII | grep -v text | awk '{split($1,a,":"); print a[1]}')"
  if [ "$bad" ] ; then mv "$bad" bad/ ; fi
done

awk '!x[$0]++' FILE   # Remove duplicate entries in a file W/O sorting (sort | uniq)
awk 'x[$0]++' FILE    # Just the entries that are dups (sort | uniq -d)
#dos2unix
#tr -dc "[:alnum:][:space:][:punct:]"

# Clean every file in the directory, then merge them into one
for files in *; do
  md5sum "$files"
  tr '\r' '\n' < "$files" > /tmp/"$files" && tr '\0' ' ' < /tmp/"$files" > "$files" && tr -cd '\11\12\15\40-\176' < "$files" > /tmp/"$files" && mv /tmp/"$files" "$files"
  cat "$files" >> aio-"${PWD##*/}".lst
  rm "$files"
done
for files in *; do md5sum "$files" ; done

# Mount network share
mkdir "${PWD}/wordlist/"
smbmount //[ip]/[sharename] "${PWD}/wordlist/" -o username=[username],password=[password],uid=1000,mask=000
cd "${PWD}/wordlist/" && ls

# Renames all files to .lst
for f in *; do echo "$f" && mv "$f" "$f".lst ; done

# Lowercases all filenames
for f in *.lst ; do new=$(echo "$f" | tr '[:upper:]' '[:lower:]') ; echo "$f" && mv "$f" "$new"_ && mv "$new"_ "$new" ; done

# Convert End Of Line formats to Linux
tr -d '\r' < wordlist.lst | tr '\0' ' ' > wordlist_clean.lst

# Trim leading and trailing white spaces
for f in *.lst ; do echo "$f" && sed -i 's/^[ \t]*//;s/[ \t]*$//' "$f" ; done

# Remove HTML tag codes
sed -e :a -e 's/<[^>]*>//g;/</N;//ba' wordlist.lst > wordlist_clean.lst

# Split username:password (see the awk/cut sketch further down)
#tr '[:alnum:]:[:alnum:]' '[:alnum:]\n[:alnum:]' < wordlist.lst > wordlist_clean.lst
sed -r 's/([a-zA-Z0-9]):([a-zA-Z0-9])/\1 \2/g'

# Remove email addresses
#sed '/./s/@//g'

# Remove all non-printable characters from wordlist.lst
tr -cd "[:print:]" < wordlist.lst > wordlist_clean.lst

# Combines 1, 2, and 3 into one file.
cat 1.lst 2.lst 3.lst > wordlist.lst

# Combines all files in the current directory into one file
cat -v *.lst > wordlist.lst

# Updates a file with a new one
cat new_wordlist.lst >> wordlist.lst

# Combines everything, sorts (alphabetizes) and removes the duplicates
sort -i -b -f *.lst | uniq > wordlist.lst

# Combines everything, sorts (alphabetizes), removes the duplicates, moves the result back a folder, and removes the source files
sort -i -b -f * | uniq > ../"${PWD##*/}".lst && rm * && cd ..
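# A hedged sketch for the "Split username:password" notes above; the awk/cut lines and the
# dump.lst / users.lst / passwords.lst names are illustrative, not part of the original notes.
# awk keeps everything after the first colon, so passwords containing ':' survive intact.
awk -F: 'NF > 1 { sub(/^[^:]*:/, ""); print }' dump.lst > passwords.lst
# Or keep both halves in separate files:
cut -d: -f1 dump.lst > users.lst
cut -d: -f2- dump.lst > passwords.lst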
# Combines everything and sorts (duplicates at the top, ordered by how often they are duplicated, then alphabetized)
rm -rf tmp/ && mkdir tmp/   # Create tmp folder
sort -i -b -f *.lst | uniq -d -c | sed -e 's/^ *//' > tmp/dups   # Sort (regardless of blanks, caps and printables), keep only dup words with their counts, remove the added spaces
sort -i -b -f -k1,1r -k2 tmp/dups -o tmp/temp   # Sort by number of dups, then alphabetize
sed -r -i 's/^[0-9]+ //' tmp/temp   # Remove the dup count
sort -i -b -f *.lst | uniq -u >> tmp/temp   # Sort again, keep only the unique words, append them at the end
mv tmp/temp wordlist.lst && rm -rf tmp/   # Move the result and delete the tmp folder

# Makes WPA/WPA2 compatible by deleting words less than 8 chars or more than 63
cat wordlist.lst | pw-inspector -m 8 -M 63 > wordlist_wpa.lst
pw-inspector -i wordlist.lst -m 8 -M 63 -o wordlist_wpa.lst

# Count the number of lines
wc -l wordlist.lst

# Compress each file into 200MB 7z volumes
for f in *.lst; do
  file=$(echo "$f" | cut -d'.' -f1)
  7za a -t7z -mx9 -v200m "$file".7z "$f" #&& rm "$f"
done

# File size
du -sh *
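# A hedged fallback in case pw-inspector isn't installed: the same 8-63 character length
# filter as the WPA/WPA2 step above, done in awk. File names follow the example above.
awk 'length($0) >= 8 && length($0) <= 63' wordlist.lst > wordlist_wpa.lst
wc -l wordlist_wpa.lst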