Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Use l2informer as source #1

Open
wants to merge 13 commits into
base: master
Choose a base branch
from
23 changes: 23 additions & 0 deletions cleanup.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,23 @@
import os
from bs4 import BeautifulSoup

def cleanup(old_xml_dir, new_xml_dir):
    """Strip empty <drops> blocks from NPC XML files.

    Reads every ``.xml`` file in *old_xml_dir*, removes any <drops>
    element that contains no <drop> children, and writes the prettified
    result to a file of the same name in *new_xml_dir* (every file is
    rewritten, changed or not — same as the original behaviour).
    """
    for file_name in os.listdir(old_xml_dir):
        # Guard clause: skip non-XML files instead of nesting the body.
        if not file_name.endswith(".xml"):
            continue
        with open(os.path.join(old_xml_dir, file_name), "r") as f:
            soup = BeautifulSoup(f.read(), "xml")

        for npc in soup.find_all("npc"):
            dropblock = npc.find("drops")
            # A <drops> element with no <drop> children is dead weight.
            if dropblock and not npc.find_all("drop"):
                # int() instead of eval(): the id comes from file data,
                # and eval() on external input is a code-execution risk.
                npc_id = int(npc["id"])
                print(npc_id)
                dropblock.decompose()

        with open(os.path.join(new_xml_dir, file_name), "w") as f:
            f.write(soup.prettify())
cleanup('npcs_new', 'npcs_final')
52 changes: 52 additions & 0 deletions compare_xml_l2informer.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,52 @@
import json
import numpy as np

drop_file_xml = "drop_data_xml.json"
drop_file_l2portal = "drop_data_l2informer.json"

# JSON object keys are always strings; convert numeric-looking keys
# (including negative numbers) back to int so npc-id lookups work.
def key_str2int(d):
    return {int(k) if k.lstrip("-").isdigit() else k: v for k, v in d.items()}

# Close the files deterministically instead of leaking the handles.
with open(drop_file_xml, "r") as f:
    drop_data_xml = json.load(f, object_hook=key_str2int)
with open(drop_file_l2portal, "r") as f:
    drop_data_l2portal = json.load(f, object_hook=key_str2int)

# Each row: [npc_id, has_diff_flag, num_drop_categories].
has_additional = []
for npc_id in drop_data_xml.keys():
    d1 = np.array(drop_data_xml[npc_id]["drop"])
    if npc_id not in drop_data_l2portal:
        print("Mob %s has no drops in l2informer" % npc_id)
        continue
    d2 = np.array(drop_data_l2portal[npc_id]["drop"])

    if d1.shape[0] == 0 and d2.shape[0] != 0:
        # BUG FIX: the original appended a 4-element row here while every
        # other branch appends 3 elements, producing a ragged array that
        # breaks the np.array / column-indexing below.  No xml drops
        # means zero categories.
        has_additional.append([npc_id, 1, 0])
        continue

    if d2.shape[0] == 0 and d1.shape[0] != 0:
        num_cats = len(np.unique(d1[:, -1]))
        has_additional.append([npc_id, 1, num_cats])
        continue

    num_cats = len(np.unique(d1[:, -1]))
    # Compare the sets of item ids (column 0) between the two sources.
    d1s = set(d1[:, 0])
    d2s = set(d2[:, 0])
    if d1s != d2s:
        has_additional.append([npc_id, 1, num_cats])
        continue

    has_additional.append([npc_id, 0, num_cats])

has_additional = np.array(has_additional)
no_diff = np.where(has_additional[:, 1] == 0)[0]
has_diff = np.where(has_additional[:, 1] == 1)[0]
print("# mobs with no diff: " + str(no_diff.shape[0]) + ", with diff:" + str(has_diff.shape[0]))

# Frequency count of the category values (last element of each row).
cats = {}
for npc_id in drop_data_xml.keys():
    for drop_type in ["drop", "spoil"]:
        for drop in drop_data_xml[npc_id][drop_type]:
            if drop[-1] not in cats:
                cats[drop[-1]] = 0
            cats[drop[-1]] += 1

# BUG FIX: `print cats` was Python-2 statement syntax; the rest of the
# script already uses the print() function.
print(cats)
69 changes: 69 additions & 0 deletions create_json_informer.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,69 @@
import pandas_access as mdb
import json

db_filename = 'l2.mdb'

# Build {npc_id: {"drop": [...], "spoil": [...]}} from the Access DB's
# "drops" table.  Rows are sorted by (npc_id, item_id) so consecutive
# rows with the same npc_id can be accumulated and flushed on change.
df = mdb.read_table(db_filename, "drops").sort_values(by=['npc_id', 'item_id']).to_dict()

result = {}
current = None
drops = []
spoil = []
for k, v in df['npc_id'].items():
    if current != v:
        # New npc id: flush the rows accumulated for the previous one.
        # `is not None` (not truthiness) so an id of 0 is not dropped.
        if current is not None:
            result[current] = {"drop": drops, "spoil": spoil}
        print("New mob: %s " % v)
        current = v
        drops = []
        spoil = []
    print(v)
    # Row layout: [item_id, min_count, max_count, chance]; the source
    # stores chance as a percentage, converted here to a fraction.
    data = [df['item_id'][k], df['min'][k], df['max'][k], df['percentage'][k] / 100.0]
    if df['sweep'][k] == "1":
        spoil.append(data)
    else:
        drops.append(data)

# BUG FIX: the loop above only flushes when the npc id changes, so the
# LAST npc's rows were never written to `result` (the fill-in pass below
# then re-added that npc with empty drop/spoil lists).  Flush explicitly.
if current is not None:
    result[current] = {"drop": drops, "spoil": spoil}

# Add npcs that have no drop/spoil rows at all.
df = mdb.read_table(db_filename, "npcnames", dtype={'level': str, 'sp': str, 'attack_range': str, 'run_speed': str})
for k, v in df['id'].items():
    if v not in result:
        result[v] = {"drop": [], "spoil": []}

# BUG FIX: the output file handle was never closed; `with` guarantees
# the JSON is flushed to disk.
with open('drop_data_l2informer.json', 'w') as file_object:
    json.dump(result, file_object)

"""
{"18001": {"drop": [[57, 765, 1528, 0.7], [2397, 1, 1, 1.2000048000192e-05], [2402, 1, 1, 1.899984800121599e-05], [2406, 1, 1, 8e-06], [4069, 1, 1, 0.0021008403361344537], [4070, 1, 1, 0.003194888178913738], [4071, 1, 1, 0.0016155088852988692], [1419, 1, 1, 0.2], [1864, 1, 1, 0.178349], [1866, 1, 1, 0.05945], [1878, 1, 1, 0.03567], [1885, 1, 1, 0.007407407407407408], [1889, 1, 1, 0.005952380952380952], [4197, 1, 1, 6.799945600435197e-05]], "spoil": [[1806, 1, 1, 0.010868]]},
"""
#
#drops = df[df['npc_id']==18001]
#
#
#print drops[drops['sweep']==0]#.loc[:, "item_id"]
#print drops[drops['sweep']==1]#.loc[:, "item_id"]
##print drops.loc[:, "id"]
#
#adena = drops[drops['item_id']==57]
#print adena['min']
#print adena['max']
#
#drops_list = drops.to_dict()
#print drops_list
#for k, v in drops_list['item_id'].iteritems():
# print "Item id: {} percentage: {} is_sweep: {} min: {} max: {}".format(v, drops_list['percentage'][k], drops_list['sweep'][k], drops_list['min'][k], drops_list['max'][k])

Loading