Linux web-conference.aiou.edu.pk 5.4.0-204-generic #224-Ubuntu SMP Thu Dec 5 13:38:28 UTC 2024 x86_64
Apache/2.4.41 (Ubuntu)
: 172.16.50.247 | : 3.146.107.49
Can't Read [ /etc/named.conf ]
7.4.3-4ubuntu2.28
appadmin
www.github.com/MadExploits
Terminal
AUTO ROOT
Adminer
Backdoor Destroyer
Linux Exploit
Lock Shell
Lock File
Create User
CREATE RDP
PHP Mailer
BACKCONNECT
UNLOCK SHELL
HASH IDENTIFIER
CPANEL RESET
CREATE WP USER
BLACK DEFEND!
README
+ Create Folder
+ Create File
/
snap /
lxd /
24061 /
lib /
python3 /
dist-packages /
chardet /
[ HOME SHELL ]
Name
Size
Permission
Action
cli
[ DIR ]
drwxr-xr-x
__init__.py
1.52
KB
-rw-r--r--
big5freq.py
30.52
KB
-rw-r--r--
big5prober.py
1.72
KB
-rw-r--r--
chardistribution.py
9.19
KB
-rw-r--r--
charsetgroupprober.py
3.7
KB
-rw-r--r--
charsetprober.py
4.99
KB
-rw-r--r--
codingstatemachine.py
3.51
KB
-rw-r--r--
compat.py
1.11
KB
-rw-r--r--
cp949prober.py
1.81
KB
-rw-r--r--
enums.py
1.62
KB
-rw-r--r--
escprober.py
3.86
KB
-rw-r--r--
escsm.py
10.26
KB
-rw-r--r--
eucjpprober.py
3.66
KB
-rw-r--r--
euckrfreq.py
13.23
KB
-rw-r--r--
euckrprober.py
1.71
KB
-rw-r--r--
euctwfreq.py
30.88
KB
-rw-r--r--
euctwprober.py
1.71
KB
-rw-r--r--
gb2312freq.py
20.23
KB
-rw-r--r--
gb2312prober.py
1.71
KB
-rw-r--r--
hebrewprober.py
13.51
KB
-rw-r--r--
jisfreq.py
25.17
KB
-rw-r--r--
jpcntx.py
19.18
KB
-rw-r--r--
langbulgarianmodel.py
12.54
KB
-rw-r--r--
langcyrillicmodel.py
17.53
KB
-rw-r--r--
langgreekmodel.py
12.39
KB
-rw-r--r--
langhebrewmodel.py
11.08
KB
-rw-r--r--
langhungarianmodel.py
12.3
KB
-rw-r--r--
langthaimodel.py
11.03
KB
-rw-r--r--
langturkishmodel.py
10.84
KB
-rw-r--r--
latin1prober.py
5.24
KB
-rw-r--r--
mbcharsetprober.py
3.33
KB
-rw-r--r--
mbcsgroupprober.py
1.96
KB
-rw-r--r--
mbcssm.py
24.88
KB
-rw-r--r--
sbcharsetprober.py
5.52
KB
-rw-r--r--
sbcsgroupprober.py
3.46
KB
-rw-r--r--
sjisprober.py
3.69
KB
-rw-r--r--
universaldetector.py
12.19
KB
-rw-r--r--
utf8prober.py
2.7
KB
-rw-r--r--
version.py
242
B
-rw-r--r--
Delete
Unzip
Zip
${this.title}
Close
Code Editor : charsetprober.py
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 2001
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
#   Mark Pilgrim - port to Python
#   Shy Shalom - original C code
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301  USA
######################### END LICENSE BLOCK #########################

import logging
import re

from .enums import ProbingState


class CharSetProber(object):
    """Common base class for every charset prober.

    Concrete probers override :meth:`feed`, :meth:`get_confidence` and
    :attr:`charset_name`; this base class also supplies the shared
    byte-filtering helpers used by several of them.
    """

    # A prober whose confidence exceeds this value may stop probing early.
    SHORTCUT_THRESHOLD = 0.95

    def __init__(self, lang_filter=None):
        # _state is set lazily by reset(); lang_filter restricts which
        # language models a prober considers (semantics defined by callers).
        self._state = None
        self.lang_filter = lang_filter
        self.logger = logging.getLogger(__name__)

    def reset(self):
        """Return the prober to its initial DETECTING state."""
        self._state = ProbingState.DETECTING

    @property
    def charset_name(self):
        # The base class names no charset; subclasses report their own.
        return None

    def feed(self, buf):
        # Intentionally a no-op; subclasses consume ``buf`` to refine
        # their detection state.
        pass

    @property
    def state(self):
        return self._state

    def get_confidence(self):
        # The base class never claims a match.
        return 0.0

    @staticmethod
    def filter_high_byte_only(buf):
        """Collapse every run of ASCII bytes in ``buf`` into one space."""
        return re.sub(b'([\x00-\x7F])+', b' ', buf)

    @staticmethod
    def filter_international_words(buf):
        """Keep only the words of ``buf`` that contain a high byte.

        The buffer is viewed as words delimited by "marker" bytes
        (anything that is neither an English letter nor a byte in the
        \\x80-\\xFF range).  Every word containing at least one
        international byte is retained; when the regex also captures the
        single marker byte that follows a word, that marker is replaced
        by a space.  Intended for scripts that do not use English
        letters.
        """
        result = bytearray()
        # Each match: a word with at least one international byte,
        # optionally followed by exactly one trailing marker byte.
        matches = re.findall(b'[a-zA-Z]*[\x80-\xFF]+[a-zA-Z]*[^a-zA-Z\x80-\xFF]?',
                             buf)
        for match in matches:
            result.extend(match[:-1])
            # If the captured final byte is a marker, normalise it to a
            # space: markers are used similarly across languages and
            # should not bias frequency analysis.
            tail = match[-1:]
            if not tail.isalpha() and tail < b'\x80':
                tail = b' '
            result.extend(tail)
        return result

    @staticmethod
    def filter_with_english_letters(buf):
        """Strip HTML-tag interiors, keeping letter/high-byte stretches.

        Returns a copy of ``buf`` containing only the sequences of
        English letters and extended-ASCII bytes that do not sit between
        ``<`` and ``>`` characters (bytes immediately before a ``>`` are
        kept).  Applicable to scripts mixing English and extended ASCII,
        though currently only ``Latin1Prober`` uses it.
        """
        result = bytearray()
        inside_markup = False
        segment_start = 0
        for position in range(len(buf)):
            # Slice (not index) so we get a bytes object on Python 3.
            octet = buf[position:position + 1]
            # Track whether we are inside an HTML tag.
            if octet == b'>':
                inside_markup = False
            elif octet == b'<':
                inside_markup = True
            # A byte that is neither extended ASCII nor alphabetic ends
            # the current stretch.
            if octet < b'\x80' and not octet.isalpha():
                if position > segment_start and not inside_markup:
                    # Keep the stretch and delimit it with a space.
                    result.extend(buf[segment_start:position])
                    result.extend(b' ')
                segment_start = position + 1
        # Flush the trailing stretch unless it is inside an open tag.
        if not inside_markup:
            result.extend(buf[segment_start:])
        return result
Close