This changes the indexing syntax from .() to [], the vector syntax from ~[] to [], and the extension syntax from #fmt() to #fmt[].
Brian Anderson 2011-08-19 15:16:48 -07:00
parent 4aa165553b
commit 518dc52f85
642 changed files with 6755 additions and 7354 deletions
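
For reference, a minimal before/after sketch of the three syntax changes. It is not taken from any single file in this commit; the identifiers and values are made up for illustration.

    // Old syntax (before this commit):
    let v: [int] = ~[1, 2, 3];
    let first = v.(0);
    log #fmt("first is %d", first);

    // New syntax (after this commit):
    let v: [int] = [1, 2, 3];
    let first = v[0];
    log #fmt["first is %d", first];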

@ -137,10 +137,9 @@ mod write {
False);
if threshold != 0u {
llvm::LLVMPassManagerBuilderUseInlinerWithThreshold(MPMB,
threshold);
llvm::LLVMPassManagerBuilderUseInlinerWithThreshold(
MPMB, threshold);
}
llvm::LLVMPassManagerBuilderPopulateModulePassManager(MPMB,
pm.llpm);
@ -293,21 +292,21 @@ fn build_link_meta(sess: &session::session, c: &ast::crate, output: &str,
provided_metas {
let name: option::t<str> = none;
let vers: option::t<str> = none;
let cmh_items: [@ast::meta_item] = ~[];
let cmh_items: [@ast::meta_item] = [];
let linkage_metas = attr::find_linkage_metas(c.node.attrs);
attr::require_unique_names(sess, linkage_metas);
for meta: @ast::meta_item in linkage_metas {
if attr::get_meta_item_name(meta) == "name" {
alt attr::get_meta_item_value_str(meta) {
some(v) { name = some(v); }
none. { cmh_items += ~[meta]; }
none. { cmh_items += [meta]; }
}
} else if (attr::get_meta_item_name(meta) == "vers") {
} else if attr::get_meta_item_name(meta) == "vers" {
alt attr::get_meta_item_value_str(meta) {
some(v) { vers = some(v); }
none. { cmh_items += ~[meta]; }
none. { cmh_items += [meta]; }
}
} else { cmh_items += ~[meta]; }
} else { cmh_items += [meta]; }
}
ret {name: name, vers: vers, cmh_items: cmh_items};
}
@ -316,7 +315,7 @@ fn build_link_meta(sess: &session::session, c: &ast::crate, output: &str,
fn crate_meta_extras_hash(sha: sha1, _crate: &ast::crate,
metas: &provided_metas) -> str {
fn len_and_str(s: &str) -> str {
ret #fmt("%u_%s", str::byte_len(s), s);
ret #fmt["%u_%s", str::byte_len(s), s];
}
fn len_and_str_lit(l: &ast::lit) -> str {
@ -345,8 +344,8 @@ fn build_link_meta(sess: &session::session, c: &ast::crate, output: &str,
fn warn_missing(sess: &session::session, name: str, default: str) {
if !sess.get_opts().library { ret; }
sess.warn(#fmt("missing crate link meta '%s', using '%s' as default",
name, default));
sess.warn(#fmt["missing crate link meta '%s', using '%s' as default",
name, default]);
}
fn crate_meta_name(sess: &session::session, _crate: &ast::crate,
@ -356,8 +355,7 @@ fn build_link_meta(sess: &session::session, c: &ast::crate, output: &str,
none. {
let name =
{
let os =
str::split(fs::basename(output), '.' as u8);
let os = str::split(fs::basename(output), '.' as u8);
assert (vec::len(os) >= 2u);
vec::pop(os);
str::connect(os, ".")
@ -429,7 +427,7 @@ fn mangle(ss: &[str]) -> str {
let n = "_ZN"; // Begin name-sequence.
for s: str in ss { n += #fmt("%u%s", str::byte_len(s), s); }
for s: str in ss { n += #fmt["%u%s", str::byte_len(s), s]; }
n += "E"; // End name-sequence.
ret n;
@ -438,7 +436,7 @@ fn mangle(ss: &[str]) -> str {
fn exported_name(path: &[str], hash: &str, _vers: &str) -> str {
// FIXME: versioning isn't working yet
ret mangle(path + ~[hash]); // + "@" + vers;
ret mangle(path + [hash]); // + "@" + vers;
}
@ -451,12 +449,12 @@ fn mangle_internal_name_by_type_only(ccx: &@crate_ctxt, t: &ty::t, name: &str)
-> str {
let s = util::ppaux::ty_to_short_str(ccx.tcx, t);
let hash = get_symbol_hash(ccx, t);
ret mangle(~[name, s, hash]);
ret mangle([name, s, hash]);
}
fn mangle_internal_name_by_path_and_seq(ccx: &@crate_ctxt, path: &[str],
flav: &str) -> str {
ret mangle(path + ~[ccx.names.next(flav)]);
ret mangle(path + [ccx.names.next(flav)]);
}
fn mangle_internal_name_by_path(_ccx: &@crate_ctxt, path: &[str]) -> str {

@ -51,72 +51,68 @@ type upcalls =
fn declare_upcalls(_tn: type_names, tydesc_type: TypeRef,
taskptr_type: TypeRef, llmod: ModuleRef) -> @upcalls {
fn decl(llmod: ModuleRef, name: str, tys: [TypeRef],
rv: TypeRef) -> ValueRef {
let arg_tys: [TypeRef] = ~[];
for t: TypeRef in tys { arg_tys += ~[t]; }
fn decl(llmod: ModuleRef, name: str, tys: [TypeRef], rv: TypeRef) ->
ValueRef {
let arg_tys: [TypeRef] = [];
for t: TypeRef in tys { arg_tys += [t]; }
let fn_ty = T_fn(arg_tys, rv);
ret trans::decl_cdecl_fn(llmod, "upcall_" + name, fn_ty);
}
fn decl_with_taskptr(taskptr_type: TypeRef,
llmod: ModuleRef, name: str, tys: [TypeRef],
rv: TypeRef) -> ValueRef {
ret decl(llmod, name, ~[taskptr_type] + tys, rv);
fn decl_with_taskptr(taskptr_type: TypeRef, llmod: ModuleRef, name: str,
tys: [TypeRef], rv: TypeRef) -> ValueRef {
ret decl(llmod, name, [taskptr_type] + tys, rv);
}
let dv = bind decl_with_taskptr(taskptr_type, llmod, _, _, T_void());
let d = bind decl_with_taskptr(taskptr_type, llmod, _, _, _);
let dr = bind decl(llmod, _, _, _);
let empty_vec: [TypeRef] = ~[];
ret @{grow_task: dv("grow_task", ~[T_size_t()]),
let empty_vec: [TypeRef] = [];
ret @{grow_task: dv("grow_task", [T_size_t()]),
_yield: dv("yield", empty_vec),
sleep: dv("sleep", ~[T_size_t()]),
_fail: dv("fail", ~[T_ptr(T_i8()), T_ptr(T_i8()), T_size_t()]),
kill: dv("kill", ~[taskptr_type]),
sleep: dv("sleep", [T_size_t()]),
_fail: dv("fail", [T_ptr(T_i8()), T_ptr(T_i8()), T_size_t()]),
kill: dv("kill", [taskptr_type]),
exit: dv("exit", empty_vec),
malloc:
d("malloc", ~[T_size_t(), T_ptr(tydesc_type)], T_ptr(T_i8())),
free: dv("free", ~[T_ptr(T_i8()), T_int()]),
d("malloc", [T_size_t(), T_ptr(tydesc_type)], T_ptr(T_i8())),
free: dv("free", [T_ptr(T_i8()), T_int()]),
shared_malloc:
d("shared_malloc", ~[T_size_t(), T_ptr(tydesc_type)],
d("shared_malloc", [T_size_t(), T_ptr(tydesc_type)],
T_ptr(T_i8())),
shared_free: dv("shared_free", ~[T_ptr(T_i8())]),
mark: d("mark", ~[T_ptr(T_i8())], T_int()),
new_str: d("new_str", ~[T_ptr(T_i8()), T_size_t()], T_ptr(T_str())),
shared_free: dv("shared_free", [T_ptr(T_i8())]),
mark: d("mark", [T_ptr(T_i8())], T_int()),
new_str: d("new_str", [T_ptr(T_i8()), T_size_t()], T_ptr(T_str())),
evec_append:
d("evec_append",
~[T_ptr(tydesc_type), T_ptr(tydesc_type),
T_ptr(T_opaque_vec_ptr()), T_opaque_vec_ptr(), T_bool()],
[T_ptr(tydesc_type), T_ptr(tydesc_type),
T_ptr(T_opaque_vec_ptr()), T_opaque_vec_ptr(), T_bool()],
T_void()),
get_type_desc:
d("get_type_desc",
~[T_ptr(T_nil()), T_size_t(), T_size_t(), T_size_t(),
T_ptr(T_ptr(tydesc_type))], T_ptr(tydesc_type)),
[T_ptr(T_nil()), T_size_t(), T_size_t(), T_size_t(),
T_ptr(T_ptr(tydesc_type))], T_ptr(tydesc_type)),
ivec_resize:
d("ivec_resize", ~[T_ptr(T_opaque_ivec()), T_int()], T_void()),
d("ivec_resize", [T_ptr(T_opaque_ivec()), T_int()], T_void()),
ivec_spill:
d("ivec_spill", ~[T_ptr(T_opaque_ivec()), T_int()], T_void()),
d("ivec_spill", [T_ptr(T_opaque_ivec()), T_int()], T_void()),
ivec_resize_shared:
d("ivec_resize_shared", ~[T_ptr(T_opaque_ivec()), T_int()],
d("ivec_resize_shared", [T_ptr(T_opaque_ivec()), T_int()],
T_void()),
ivec_spill_shared:
d("ivec_spill_shared", ~[T_ptr(T_opaque_ivec()), T_int()],
d("ivec_spill_shared", [T_ptr(T_opaque_ivec()), T_int()],
T_void()),
cmp_type:
dr("cmp_type", ~[T_ptr(T_i1()), taskptr_type,
T_ptr(tydesc_type), T_ptr(T_ptr(tydesc_type)),
T_ptr(T_i8()), T_ptr(T_i8()), T_i8()],
T_void()),
dr("cmp_type",
[T_ptr(T_i1()), taskptr_type, T_ptr(tydesc_type),
T_ptr(T_ptr(tydesc_type)), T_ptr(T_i8()), T_ptr(T_i8()),
T_i8()], T_void()),
log_type:
dr("log_type", ~[taskptr_type, T_ptr(tydesc_type),
T_ptr(T_i8()), T_i32()],
dr("log_type",
[taskptr_type, T_ptr(tydesc_type), T_ptr(T_i8()), T_i32()],
T_void()),
dynastack_mark:
d("dynastack_mark", ~[], T_ptr(T_i8())),
dynastack_alloc:
d("dynastack_alloc", ~[T_size_t()], T_ptr(T_i8())),
dynastack_free:
d("dynastack_free", ~[T_ptr(T_i8())], T_void())};
dynastack_mark: d("dynastack_mark", [], T_ptr(T_i8())),
dynastack_alloc: d("dynastack_alloc", [T_size_t()], T_ptr(T_i8())),
dynastack_free: d("dynastack_free", [T_ptr(T_i8())], T_void())};
}
//
// Local Variables:

@ -53,11 +53,11 @@ fn default_configuration(sess: session::session, argv0: str, input: str) ->
let mk = attr::mk_name_value_item_str;
ret ~[ // Target bindings.
mk("target_os", std::os::target_os()), mk("target_arch", "x86"),
mk("target_libc", libc),
// Build bindings.
mk("build_compiler", argv0), mk("build_input", input)];
ret [ // Target bindings.
mk("target_os", std::os::target_os()), mk("target_arch", "x86"),
mk("target_libc", libc),
// Build bindings.
mk("build_compiler", argv0), mk("build_input", input)];
}
fn build_configuration(sess: session::session, argv0: str, input: str) ->
@ -71,8 +71,8 @@ fn build_configuration(sess: session::session, argv0: str, input: str) ->
{
if sess.get_opts().test && !attr::contains_name(user_cfg, "test")
{
~[attr::mk_word_item("test")]
} else { ~[] }
[attr::mk_word_item("test")]
} else { [] }
};
ret user_cfg + gen_cfg + default_cfg;
}
@ -81,8 +81,8 @@ fn build_configuration(sess: session::session, argv0: str, input: str) ->
fn parse_cfgspecs(cfgspecs: &[str]) -> ast::crate_cfg {
// FIXME: It would be nice to use the parser to parse all varieties of
// meta_item here. At the moment we just support the meta_word variant.
let words = ~[];
for s: str in cfgspecs { words += ~[attr::mk_word_item(s)]; }
let words = [];
for s: str in cfgspecs { words += [attr::mk_word_item(s)]; }
ret words;
}
@ -92,31 +92,29 @@ fn parse_input(sess: session::session, cfg: &ast::crate_cfg, input: str) ->
@ast::crate {
if !input_is_stdin(input) {
parser::parse_crate_from_file(input, cfg, sess.get_parse_sess())
} else {
parse_input_src(sess, cfg, input).crate
}
} else { parse_input_src(sess, cfg, input).crate }
}
fn parse_input_src(sess: session::session, cfg: &ast::crate_cfg,
infile: str) -> {crate: @ast::crate, src: str} {
let srcbytes = if infile != "-" {
io::file_reader(infile)
} else {
io::stdin()
}.read_whole_stream();
fn parse_input_src(sess: session::session, cfg: &ast::crate_cfg, infile: str)
-> {crate: @ast::crate, src: str} {
let srcbytes =
if infile != "-" {
io::file_reader(infile)
} else { io::stdin() }.read_whole_stream();
let src = str::unsafe_from_bytes(srcbytes);
let crate = parser::parse_crate_from_source_str(infile, src, cfg,
sess.get_parse_sess());
let crate =
parser::parse_crate_from_source_str(infile, src, cfg,
sess.get_parse_sess());
ret {crate: crate, src: src};
}
fn time<T>(do_it: bool, what: str, thunk: fn() -> T ) -> T {
fn time<T>(do_it: bool, what: str, thunk: fn() -> T) -> T {
if !do_it { ret thunk(); }
let start = std::time::precise_time_s();
let rv = thunk();
let end = std::time::precise_time_s();
log_err #fmt("time: %s took %s s", what,
common::float_to_str(end - start, 3u));
log_err #fmt["time: %s took %s s", what,
common::float_to_str(end - start, 3u)];
ret rv;
}
@ -143,7 +141,7 @@ fn compile_input(sess: session::session, cfg: ast::crate_cfg, input: str,
bind middle::ast_map::map_crate(*crate));
time(time_passes, "external crate/lib resolution",
bind creader::read_crates(sess, *crate));
let {def_map, ext_map} =
let {def_map: def_map, ext_map: ext_map} =
time(time_passes, "resolution",
bind resolve::resolve_crate(sess, ast_map, crate));
let freevars =
@ -151,9 +149,9 @@ fn compile_input(sess: session::session, cfg: ast::crate_cfg, input: str,
bind freevars::annotate_freevars(sess, def_map, crate));
let ty_cx = ty::mk_ctxt(sess, def_map, ext_map, ast_map, freevars);
time::<()>(time_passes, "typechecking",
bind typeck::check_crate(ty_cx, crate));
bind typeck::check_crate(ty_cx, crate));
time::<()>(time_passes, "alt checking",
bind middle::check_alt::check_crate(ty_cx, crate));
bind middle::check_alt::check_crate(ty_cx, crate));
if sess.get_opts().run_typestate {
time(time_passes, "typestate checking",
bind middle::tstate::ck::check_crate(ty_cx, crate));
@ -161,15 +159,15 @@ fn compile_input(sess: session::session, cfg: ast::crate_cfg, input: str,
time(time_passes, "alias checking",
bind middle::alias::check_crate(ty_cx, crate));
time::<()>(time_passes, "kind checking",
bind kind::check_crate(ty_cx, crate));
bind kind::check_crate(ty_cx, crate));
if sess.get_opts().no_trans { ret; }
let llmod =
time::<llvm::llvm::ModuleRef>(time_passes, "translation",
bind trans::trans_crate(sess, crate,
ty_cx, output,
ast_map));
bind trans::trans_crate(sess, crate,
ty_cx, output,
ast_map));
time::<()>(time_passes, "LLVM passes",
bind link::write::run_passes(sess, llmod, output));
bind link::write::run_passes(sess, llmod, output));
}
fn pretty_print_input(sess: session::session, cfg: ast::crate_cfg, input: str,
@ -222,7 +220,8 @@ fn pretty_print_input(sess: session::session, cfg: ast::crate_cfg, input: str,
alt ppm {
ppm_typed. {
let amap = middle::ast_map::map_crate(*crate);
let {def_map, ext_map} = resolve::resolve_crate(sess, amap, crate);
let {def_map: def_map, ext_map: ext_map} =
resolve::resolve_crate(sess, amap, crate);
let freevars = freevars::annotate_freevars(sess, def_map, crate);
let ty_cx = ty::mk_ctxt(sess, def_map, ext_map, amap, freevars);
typeck::check_crate(ty_cx, crate);
@ -239,14 +238,14 @@ fn pretty_print_input(sess: session::session, cfg: ast::crate_cfg, input: str,
fn version(argv0: str) {
let vers = "unknown version";
let env_vers = #env("CFG_VERSION");
let env_vers = #env["CFG_VERSION"];
if str::byte_len(env_vers) != 0u { vers = env_vers; }
io::stdout().write_str(#fmt("%s %s\n", argv0, vers));
io::stdout().write_str(#fmt["%s %s\n", argv0, vers]);
}
fn usage(argv0: str) {
io::stdout().write_str(#fmt("usage: %s [options] <input>\n", argv0) +
"
io::stdout().write_str(#fmt["usage: %s [options] <input>\n", argv0] +
"
options:
-h --help display this message
@ -287,9 +286,9 @@ fn get_os(triple: str) -> session::os {
ret if str::find(triple, "win32") >= 0 ||
str::find(triple, "mingw32") >= 0 {
session::os_win32
} else if (str::find(triple, "darwin") >= 0) {
} else if str::find(triple, "darwin") >= 0 {
session::os_macos
} else if (str::find(triple, "linux") >= 0) {
} else if str::find(triple, "linux") >= 0 {
session::os_linux
} else { log_err "Unknown operating system!"; fail };
}
@ -300,10 +299,10 @@ fn get_arch(triple: str) -> session::arch {
str::find(triple, "i686") >= 0 ||
str::find(triple, "i786") >= 0 {
session::arch_x86
} else if (str::find(triple, "x86_64") >= 0) {
} else if str::find(triple, "x86_64") >= 0 {
session::arch_x64
} else if (str::find(triple, "arm") >= 0 ||
str::find(triple, "xscale") >= 0) {
} else if str::find(triple, "arm") >= 0 ||
str::find(triple, "xscale") >= 0 {
session::arch_arm
} else { log_err "Unknown architecture! " + triple; fail };
}
@ -331,9 +330,9 @@ fn build_session_options(binary: str, match: getopts::match, binary_dir: str)
let library = opt_present(match, "lib");
let static = opt_present(match, "static");
let library_search_paths = ~[binary_dir + "/lib"];
let library_search_paths = [binary_dir + "/lib"];
let lsp_vec = getopts::opt_strs(match, "L");
for lsp: str in lsp_vec { library_search_paths += ~[lsp]; }
for lsp: str in lsp_vec { library_search_paths += [lsp]; }
let parse_only = opt_present(match, "parse-only");
let no_trans = opt_present(match, "no-trans");
@ -341,11 +340,11 @@ fn build_session_options(binary: str, match: getopts::match, binary_dir: str)
let output_type =
if parse_only || no_trans {
link::output_type_none
} else if (opt_present(match, "S")) {
} else if opt_present(match, "S") {
link::output_type_assembly
} else if (opt_present(match, "c")) {
} else if opt_present(match, "c") {
link::output_type_object
} else if (opt_present(match, "emit-llvm")) {
} else if opt_present(match, "emit-llvm") {
link::output_type_bitcode
} else { link::output_type_exe };
let verify = !opt_present(match, "noverify");
@ -363,7 +362,7 @@ fn build_session_options(binary: str, match: getopts::match, binary_dir: str)
fail;
}
2u
} else if (opt_present(match, "OptLevel")) {
} else if opt_present(match, "OptLevel") {
alt getopts::opt_str(match, "OptLevel") {
"0" { 0u }
"1" { 1u }
@ -417,24 +416,23 @@ fn build_session(sopts: @session::options) -> session::session {
fn parse_pretty(sess: session::session, name: &str) -> pp_mode {
if str::eq(name, "normal") {
ret ppm_normal;
} else if (str::eq(name, "typed")) {
} else if str::eq(name, "typed") {
ret ppm_typed;
} else if (str::eq(name, "identified")) { ret ppm_identified; }
} else if str::eq(name, "identified") { ret ppm_identified; }
sess.fatal("argument to `pretty` or `expand` must be one of `normal`, " +
"`typed`, or `identified`");
}
fn opts() -> [getopts::opt] {
ret ~[optflag("h"), optflag("help"), optflag("v"), optflag("version"),
optflag("glue"), optflag("emit-llvm"), optflagopt("pretty"),
optflagopt("expand"), optflag("ls"), optflag("parse-only"),
optflag("no-trans"),
optflag("O"), optopt("OptLevel"), optmulti("L"), optflag("S"),
optflag("c"), optopt("o"), optflag("g"), optflag("save-temps"),
optopt("sysroot"), optflag("stats"), optflag("time-passes"),
optflag("time-llvm-passes"), optflag("no-typestate"),
optflag("noverify"), optmulti("cfg"), optflag("test"),
optflag("lib"), optflag("static"), optflag("gc")];
ret [optflag("h"), optflag("help"), optflag("v"), optflag("version"),
optflag("glue"), optflag("emit-llvm"), optflagopt("pretty"),
optflagopt("expand"), optflag("ls"), optflag("parse-only"),
optflag("no-trans"), optflag("O"), optopt("OptLevel"), optmulti("L"),
optflag("S"), optflag("c"), optopt("o"), optflag("g"),
optflag("save-temps"), optopt("sysroot"), optflag("stats"),
optflag("time-passes"), optflag("time-llvm-passes"),
optflag("no-typestate"), optflag("noverify"), optmulti("cfg"),
optflag("test"), optflag("lib"), optflag("static"), optflag("gc")];
}
fn main(args: [str]) {
@ -444,7 +442,7 @@ fn main(args: [str]) {
alt getopts::getopts(args, opts()) {
getopts::success(m) { m }
getopts::failure(f) {
log_err #fmt("error: %s", getopts::fail_str(f));
log_err #fmt["error: %s", getopts::fail_str(f)];
fail
}
};
@ -471,10 +469,10 @@ fn main(args: [str]) {
}
if n_inputs == 0u {
sess.fatal("No input filename given.");
} else if (n_inputs > 1u) {
} else if n_inputs > 1u {
sess.fatal("Multiple input filenames provided.");
}
let ifile = match.free.(0);
let ifile = match.free[0];
let saved_out_filename: str = "";
let cfg = build_configuration(sess, binary, ifile);
let expand =
@ -502,10 +500,7 @@ fn main(args: [str]) {
none::<pp_mode>. {/* continue */ }
}
let ls = opt_present(match, "ls");
if ls {
metadata::creader::list_file_metadata(ifile, io::stdout());
ret;
}
if ls { metadata::creader::list_file_metadata(ifile, io::stdout()); ret; }
let stop_after_codegen =
sopts.output_type != link::output_type_exe ||
@ -516,29 +511,31 @@ fn main(args: [str]) {
// "-" as input file will cause the parser to read from stdin so we
// have to make up a name
// We want to toss everything after the final '.'
let parts = if !input_is_stdin(ifile) {
str::split(ifile, '.' as u8)
} else {
~["default", "rs"]
};
let parts =
if !input_is_stdin(ifile) {
str::split(ifile, '.' as u8)
} else { ["default", "rs"] };
vec::pop(parts);
saved_out_filename = str::connect(parts, ".");
let suffix = alt sopts.output_type {
link::output_type_none. { "none" }
link::output_type_bitcode. { "bc" }
link::output_type_assembly. { "s" }
// Object and exe output both use the '.o' extension here
link::output_type_object. | link::output_type_exe. { "o" }
};
let suffix =
alt sopts.output_type {
link::output_type_none. { "none" }
link::output_type_bitcode. { "bc" }
link::output_type_assembly. { "s" }
// Object and exe output both use the '.o' extension here
link::output_type_object. | link::output_type_exe. {
"o"
}
};
let ofile = saved_out_filename + "." + suffix;
compile_input(sess, cfg, ifile, ofile);
}
some(ofile) {
// FIXME: what about windows? This will create a foo.exe.o.
saved_out_filename = ofile;
let temp_filename = if !stop_after_codegen {
ofile + ".o"
} else { ofile };
let temp_filename =
if !stop_after_codegen { ofile + ".o" } else { ofile };
compile_input(sess, cfg, ifile, temp_filename);
}
}
@ -556,8 +553,8 @@ fn main(args: [str]) {
// The invocations of gcc share some flags across platforms
let gcc_args =
~[stage, "-Lrt", "-lrustrt", glu, "-m32", "-o", saved_out_filename,
saved_out_filename + ".o"];
[stage, "-Lrt", "-lrustrt", glu, "-m32", "-o", saved_out_filename,
saved_out_filename + ".o"];
let lib_cmd;
let os = sess.get_targ_cfg().os;
@ -591,46 +588,45 @@ fn main(args: [str]) {
let cstore = sess.get_cstore();
for cratepath: str in cstore::get_used_crate_files(cstore) {
if str::ends_with(cratepath, ".rlib") {
gcc_args += ~[cratepath];
gcc_args += [cratepath];
cont;
}
let dir = fs::dirname(cratepath);
if dir != "" { gcc_args += ~["-L" + dir]; }
if dir != "" { gcc_args += ["-L" + dir]; }
let libarg = unlib(sess.get_targ_cfg(), fs::basename(cratepath));
gcc_args += ~["-l" + libarg];
gcc_args += ["-l" + libarg];
}
let ula = cstore::get_used_link_args(cstore);
for arg: str in ula { gcc_args += ~[arg]; }
for arg: str in ula { gcc_args += [arg]; }
let used_libs = cstore::get_used_libraries(cstore);
for l: str in used_libs { gcc_args += ~["-l" + l]; }
for l: str in used_libs { gcc_args += ["-l" + l]; }
if sopts.library {
gcc_args += ~[lib_cmd];
gcc_args += [lib_cmd];
} else {
// FIXME: why do we hardcode -lm?
gcc_args += ~["-lm", main];
gcc_args += ["-lm", main];
}
// We run 'gcc' here
let err_code = run::run_program(prog, gcc_args);
if 0 != err_code {
sess.err(#fmt("linking with gcc failed with code %d", err_code));
sess.note(#fmt("gcc arguments: %s",
str::connect(gcc_args, " ")));
sess.err(#fmt["linking with gcc failed with code %d", err_code]);
sess.note(#fmt["gcc arguments: %s", str::connect(gcc_args, " ")]);
sess.abort_if_errors();
}
// Clean up on Darwin
if sess.get_targ_cfg().os == session::os_macos {
run::run_program("dsymutil", ~[saved_out_filename]);
run::run_program("dsymutil", [saved_out_filename]);
}
// Remove the temporary object file if we aren't saving temps
if !sopts.save_temps {
run::run_program("rm", ~[saved_out_filename + ".o"]);
run::run_program("rm", [saved_out_filename + ".o"]);
}
}
@ -641,7 +637,7 @@ mod test {
#[test]
fn test_switch_implies_cfg_test() {
let match =
alt getopts::getopts(~["--test"], opts()) {
alt getopts::getopts(["--test"], opts()) {
getopts::success(m) { m }
};
let sessopts = build_session_options("whatever", match, "whatever");
@ -655,7 +651,7 @@ mod test {
#[test]
fn test_switch_implies_cfg_test_unless_cfg_test() {
let match =
alt getopts::getopts(~["--test", "--cfg=test"], opts()) {
alt getopts::getopts(["--test", "--cfg=test"], opts()) {
getopts::success(m) { m }
};
let sessopts = build_session_options("whatever", match, "whatever");

@ -43,8 +43,7 @@ type options =
test: bool,
parse_only: bool,
no_trans: bool,
do_gc: bool
};
do_gc: bool};
type crate_metadata = {name: str, data: [u8]};
@ -90,10 +89,10 @@ obj session(targ_cfg: @config,
}
fn note(msg: str) { codemap::emit_note(none, msg, parse_sess.cm); }
fn span_bug(sp: span, msg: str) -> ! {
self.span_fatal(sp, #fmt("internal compiler error %s", msg));
self.span_fatal(sp, #fmt["internal compiler error %s", msg]);
}
fn bug(msg: str) -> ! {
self.fatal(#fmt("internal compiler error %s", msg));
self.fatal(#fmt["internal compiler error %s", msg]);
}
fn span_unimpl(sp: span, msg: str) -> ! {
self.span_bug(sp, "unimplemented " + msg);

@ -30,7 +30,7 @@ export mk_attr;
// From a list of crate attributes get only the meta_items that impact crate
// linkage
fn find_linkage_metas(attrs: &[ast::attribute]) -> [@ast::meta_item] {
let metas: [@ast::meta_item] = ~[];
let metas: [@ast::meta_item] = [];
for attr: ast::attribute in find_attrs_by_name(attrs, "link") {
alt attr.node.value.node {
ast::meta_list(_, items) { metas += items; }
@ -95,8 +95,8 @@ fn attr_meta(attr: &ast::attribute) -> @ast::meta_item { @attr.node.value }
// Get the meta_items from inside a vector of attributes
fn attr_metas(attrs: &[ast::attribute]) -> [@ast::meta_item] {
let mitems = ~[];
for a: ast::attribute in attrs { mitems += ~[attr_meta(a)]; }
let mitems = [];
for a: ast::attribute in attrs { mitems += [attr_meta(a)]; }
ret mitems;
}
@ -122,11 +122,11 @@ fn eq(a: @ast::meta_item, b: @ast::meta_item) -> bool {
}
fn contains(haystack: &[@ast::meta_item], needle: @ast::meta_item) -> bool {
log #fmt("looking for %s",
syntax::print::pprust::meta_item_to_str(*needle));
log #fmt["looking for %s",
syntax::print::pprust::meta_item_to_str(*needle)];
for item: @ast::meta_item in haystack {
log #fmt("looking in %s",
syntax::print::pprust::meta_item_to_str(*item));
log #fmt["looking in %s",
syntax::print::pprust::meta_item_to_str(*item)];
if eq(item, needle) { log "found it!"; ret true; }
}
log "found it not :(";
@ -152,13 +152,13 @@ fn sort_meta_items(items: &[@ast::meta_item]) -> [@ast::meta_item] {
}
// This is sort of stupid here, converting to a vec of mutables and back
let v: [mutable @ast::meta_item] = ~[mutable];
for mi: @ast::meta_item in items { v += ~[mutable mi]; }
let v: [mutable @ast::meta_item] = [mutable];
for mi: @ast::meta_item in items { v += [mutable mi]; }
std::sort::quick_sort(lteq, v);
let v2: [@ast::meta_item] = ~[];
for mi: @ast::meta_item in v { v2 += ~[mi]; }
let v2: [@ast::meta_item] = [];
for mi: @ast::meta_item in v { v2 += [mi]; }
ret v2;
}
@ -176,14 +176,13 @@ fn remove_meta_items_by_name(items: &[@ast::meta_item], name: str) ->
ret vec::filter_map(filter, items);
}
fn require_unique_names(sess: &session::session,
metas: &[@ast::meta_item]) {
fn require_unique_names(sess: &session::session, metas: &[@ast::meta_item]) {
let map = map::mk_hashmap::<str, ()>(str::hash, str::eq);
for meta: @ast::meta_item in metas {
let name = get_meta_item_name(meta);
if map.contains_key(name) {
sess.span_fatal(meta.span,
#fmt("duplicate meta item `%s`", name));
#fmt["duplicate meta item `%s`", name]);
}
map.insert(name, ());
}

@ -115,7 +115,7 @@ fn in_cfg(cfg: &ast::crate_cfg, attrs: &[ast::attribute]) -> bool {
}
}
let cfg_metas = attr::attr_metas(item_cfg_attrs);
vec::foldl(extract_metas, ~[], cfg_metas)
vec::foldl(extract_metas, [], cfg_metas)
};
for cfg_mi: @ast::meta_item in item_cfg_metas {

@ -9,7 +9,7 @@ import front::attr;
export modify_for_testing;
type node_id_gen = @fn() -> ast::node_id ;
type node_id_gen = @fn() -> ast::node_id;
type test = {path: [ast::ident], ignore: bool};
@ -36,8 +36,8 @@ fn modify_for_testing(crate: @ast::crate) -> @ast::crate {
let cx: test_ctxt =
@{next_node_id: next_node_id_fn,
mutable path: ~[],
mutable testfns: ~[]};
mutable path: [],
mutable testfns: []};
let precursor =
{fold_crate: bind fold_crate(cx, _, _),
@ -51,8 +51,8 @@ fn modify_for_testing(crate: @ast::crate) -> @ast::crate {
ret res;
}
fn fold_mod(_cx: &test_ctxt, m: &ast::_mod, fld: fold::ast_fold)
-> ast::_mod {
fn fold_mod(_cx: &test_ctxt, m: &ast::_mod, fld: fold::ast_fold) ->
ast::_mod {
// Remove any defined main function from the AST so it doesn't clash with
// the one we're going to add. FIXME: This is sloppy. Instead we should
@ -87,14 +87,14 @@ fn fold_crate(cx: &test_ctxt, c: &ast::crate_, fld: fold::ast_fold) ->
fn fold_item(cx: &test_ctxt, i: &@ast::item, fld: fold::ast_fold) ->
@ast::item {
cx.path += ~[i.ident];
log #fmt("current path: %s", ast::path_name_i(cx.path));
cx.path += [i.ident];
log #fmt["current path: %s", ast::path_name_i(cx.path)];
if is_test_fn(i) {
log "this is a test function";
let test = {path: cx.path, ignore: is_ignored(i)};
cx.testfns += ~[test];
log #fmt("have %u test functions", vec::len(cx.testfns));
cx.testfns += [test];
log #fmt["have %u test functions", vec::len(cx.testfns)];
}
let res = fold::noop_fold_item(i, fld);
@ -127,7 +127,7 @@ fn is_ignored(i: &@ast::item) -> bool {
fn add_test_module(cx: &test_ctxt, m: &ast::_mod) -> ast::_mod {
let testmod = mk_test_module(cx);
ret {items: m.items + ~[testmod] with m};
ret {items: m.items + [testmod] with m};
}
/*
@ -154,16 +154,16 @@ fn mk_test_module(cx: &test_ctxt) -> @ast::item {
// The synthesized main function which will call the console test runner
// with our list of tests
let mainfn = mk_main(cx);
let testmod: ast::_mod = {view_items: ~[], items: ~[mainfn, testsfn]};
let testmod: ast::_mod = {view_items: [], items: [mainfn, testsfn]};
let item_ = ast::item_mod(testmod);
let item: ast::item =
{ident: "__test",
attrs: ~[],
attrs: [],
id: cx.next_node_id(),
node: item_,
span: ast::dummy_sp()};
log #fmt("Synthetic test module:\n%s\n", pprust::item_to_str(@item));
log #fmt["Synthetic test module:\n%s\n", pprust::item_to_str(@item)];
ret @item;
}
@ -176,27 +176,27 @@ fn mk_tests(cx: &test_ctxt) -> @ast::item {
let ret_ty = mk_test_desc_vec_ty(cx);
let decl: ast::fn_decl =
{inputs: ~[],
{inputs: [],
output: ret_ty,
purity: ast::impure_fn,
il: ast::il_normal,
cf: ast::return,
constraints: ~[]};
constraints: []};
let proto = ast::proto_fn;
// The vector of test_descs for this crate
let test_descs = mk_test_desc_vec(cx);
let body_: ast::blk_ =
{stmts: ~[], expr: option::some(test_descs), id: cx.next_node_id()};
{stmts: [], expr: option::some(test_descs), id: cx.next_node_id()};
let body = nospan(body_);
let fn_ = {decl: decl, proto: proto, body: body};
let item_ = ast::item_fn(fn_, ~[]);
let item_ = ast::item_fn(fn_, []);
let item: ast::item =
{ident: "tests",
attrs: ~[],
attrs: [],
id: cx.next_node_id(),
node: item_,
span: ast::dummy_sp()};
@ -205,10 +205,10 @@ fn mk_tests(cx: &test_ctxt) -> @ast::item {
fn empty_fn_ty() -> ast::ty {
let proto = ast::proto_fn;
let input_ty = ~[];
let input_ty = [];
let ret_ty = @nospan(ast::ty_nil);
let cf = ast::return;
let constrs = ~[];
let constrs = [];
ret nospan(ast::ty_fn(proto, input_ty, ret_ty, cf, constrs));
}
@ -216,8 +216,8 @@ fn empty_fn_ty() -> ast::ty {
fn mk_test_desc_vec_ty(cx: &test_ctxt) -> @ast::ty {
let test_desc_ty_path: ast::path =
nospan({global: false,
idents: ~["std", "test", "test_desc"],
types: ~[]});
idents: ["std", "test", "test_desc"],
types: []});
let test_desc_ty: ast::ty =
nospan(ast::ty_path(test_desc_ty_path, cx.next_node_id()));
@ -228,11 +228,11 @@ fn mk_test_desc_vec_ty(cx: &test_ctxt) -> @ast::ty {
}
fn mk_test_desc_vec(cx: &test_ctxt) -> @ast::expr {
log #fmt("building test vector from %u tests", vec::len(cx.testfns));
let descs = ~[];
log #fmt["building test vector from %u tests", vec::len(cx.testfns)];
let descs = [];
for test: test in cx.testfns {
let test_ = test; // Satisfy alias analysis
descs += ~[mk_test_desc_rec(cx, test_)];
descs += [mk_test_desc_rec(cx, test_)];
}
ret @{id: cx.next_node_id(),
@ -243,7 +243,7 @@ fn mk_test_desc_vec(cx: &test_ctxt) -> @ast::expr {
fn mk_test_desc_rec(cx: &test_ctxt, test: test) -> @ast::expr {
let path = test.path;
log #fmt("encoding %s", ast::path_name_i(path));
log #fmt["encoding %s", ast::path_name_i(path)];
let name_lit: ast::lit =
nospan(ast::lit_str(ast::path_name_i(path), ast::sk_rc));
@ -255,8 +255,7 @@ fn mk_test_desc_rec(cx: &test_ctxt, test: test) -> @ast::expr {
let name_field: ast::field =
nospan({mut: ast::imm, ident: "name", expr: @name_expr});
let fn_path: ast::path =
nospan({global: false, idents: path, types: ~[]});
let fn_path: ast::path = nospan({global: false, idents: path, types: []});
let fn_expr: ast::expr =
{id: cx.next_node_id(),
@ -277,7 +276,7 @@ fn mk_test_desc_rec(cx: &test_ctxt, test: test) -> @ast::expr {
nospan({mut: ast::imm, ident: "ignore", expr: @ignore_expr});
let desc_rec_: ast::expr_ =
ast::expr_rec(~[name_field, fn_field, ignore_field], option::none);
ast::expr_rec([name_field, fn_field, ignore_field], option::none);
let desc_rec: ast::expr =
{id: cx.next_node_id(), node: desc_rec_, span: ast::dummy_sp()};
ret @desc_rec;
@ -294,28 +293,28 @@ fn mk_main(cx: &test_ctxt) -> @ast::item {
let ret_ty = nospan(ast::ty_nil);
let decl: ast::fn_decl =
{inputs: ~[args_arg],
{inputs: [args_arg],
output: @ret_ty,
purity: ast::impure_fn,
il: ast::il_normal,
cf: ast::return,
constraints: ~[]};
constraints: []};
let proto = ast::proto_fn;
let test_main_call_expr = mk_test_main_call(cx);
let body_: ast::blk_ =
{stmts: ~[],
{stmts: [],
expr: option::some(test_main_call_expr),
id: cx.next_node_id()};
let body = {node: body_, span: ast::dummy_sp()};
let fn_ = {decl: decl, proto: proto, body: body};
let item_ = ast::item_fn(fn_, ~[]);
let item_ = ast::item_fn(fn_, []);
let item: ast::item =
{ident: "main",
attrs: ~[],
attrs: [],
id: cx.next_node_id(),
node: item_,
span: ast::dummy_sp()};
@ -326,38 +325,32 @@ fn mk_test_main_call(cx: &test_ctxt) -> @ast::expr {
// Get the args passed to main so we can pass the to test_main
let args_path: ast::path =
nospan({global: false, idents: ~["args"], types: ~[]});
nospan({global: false, idents: ["args"], types: []});
let args_path_expr_: ast::expr_ = ast::expr_path(args_path);
let args_path_expr: ast::expr =
{id: cx.next_node_id(),
node: args_path_expr_,
span: ast::dummy_sp()};
{id: cx.next_node_id(), node: args_path_expr_, span: ast::dummy_sp()};
// Call __test::test to generate the vector of test_descs
let test_path: ast::path =
nospan({global: false, idents: ~["tests"], types: ~[]});
nospan({global: false, idents: ["tests"], types: []});
let test_path_expr_: ast::expr_ = ast::expr_path(test_path);
let test_path_expr: ast::expr =
{id: cx.next_node_id(),
node: test_path_expr_,
span: ast::dummy_sp()};
{id: cx.next_node_id(), node: test_path_expr_, span: ast::dummy_sp()};
let test_call_expr_: ast::expr_ = ast::expr_call(@test_path_expr, ~[]);
let test_call_expr_: ast::expr_ = ast::expr_call(@test_path_expr, []);
let test_call_expr: ast::expr =
{id: cx.next_node_id(),
node: test_call_expr_,
span: ast::dummy_sp()};
{id: cx.next_node_id(), node: test_call_expr_, span: ast::dummy_sp()};
// Call std::test::test_main
let test_main_path: ast::path =
nospan({global: false,
idents: ~["std", "test", "test_main"],
types: ~[]});
idents: ["std", "test", "test_main"],
types: []});
let test_main_path_expr_: ast::expr_ = ast::expr_path(test_main_path);
@ -368,7 +361,7 @@ fn mk_test_main_call(cx: &test_ctxt) -> @ast::expr {
let test_main_call_expr_: ast::expr_ =
ast::expr_call(@test_main_path_expr,
~[@args_path_expr, @test_call_expr]);
[@args_path_expr, @test_call_expr]);
let test_main_call_expr: ast::expr =
{id: cx.next_node_id(),

@ -81,20 +81,20 @@ const LLVMOptimizeForSizeAttribute: uint = 8192u;
const LLVMStackProtectAttribute: uint = 16384u;
const LLVMStackProtectReqAttribute: uint = 32768u;
const LLVMAlignmentAttribute: uint = 2031616u;
// 31 << 16
// 31 << 16
const LLVMNoCaptureAttribute: uint = 2097152u;
const LLVMNoRedZoneAttribute: uint = 4194304u;
const LLVMNoImplicitFloatAttribute: uint = 8388608u;
const LLVMNakedAttribute: uint = 16777216u;
const LLVMInlineHintAttribute: uint = 33554432u;
const LLVMStackAttribute: uint = 469762048u;
// 7 << 26
// 7 << 26
const LLVMUWTableAttribute: uint = 1073741824u;
// 1 << 30
// 1 << 30
// Consts for the LLVM IntPredicate type, pre-cast to uint.
// FIXME: as above.
// Consts for the LLVM IntPredicate type, pre-cast to uint.
// FIXME: as above.
const LLVMIntEQ: uint = 32u;
@ -276,9 +276,9 @@ native "cdecl" mod llvm = "rustllvm" {
/* Operations on constants of any type */
fn LLVMConstNull(Ty: TypeRef) -> ValueRef;
/* all zeroes */
/* all zeroes */
fn LLVMConstAllOnes(Ty: TypeRef) -> ValueRef;
/* only for int/vector */
/* only for int/vector */
fn LLVMGetUndef(Ty: TypeRef) -> ValueRef;
fn LLVMIsConstant(Val: ValueRef) -> Bool;
fn LLVMIsNull(Val: ValueRef) -> Bool;
@ -809,19 +809,19 @@ native "cdecl" mod llvm = "rustllvm" {
Value: Bool);
fn LLVMPassManagerBuilderSetDisableUnrollLoops(PMB: PassManagerBuilderRef,
Value: Bool);
fn LLVMPassManagerBuilderSetDisableSimplifyLibCalls(PMB:
PassManagerBuilderRef,
Value: Bool);
fn LLVMPassManagerBuilderUseInlinerWithThreshold(PMB:
PassManagerBuilderRef,
threshold: uint);
fn LLVMPassManagerBuilderPopulateModulePassManager(PMB:
PassManagerBuilderRef,
PM: PassManagerRef);
fn LLVMPassManagerBuilderSetDisableSimplifyLibCalls(
PMB: PassManagerBuilderRef,
Value: Bool);
fn LLVMPassManagerBuilderUseInlinerWithThreshold(
PMB: PassManagerBuilderRef,
threshold: uint);
fn LLVMPassManagerBuilderPopulateModulePassManager(
PMB: PassManagerBuilderRef,
PM: PassManagerRef);
fn LLVMPassManagerBuilderPopulateFunctionPassManager(PMB:
PassManagerBuilderRef,
PM: PassManagerRef);
fn LLVMPassManagerBuilderPopulateFunctionPassManager(
PMB: PassManagerBuilderRef,
PM: PassManagerRef);
/** Destroys a memory buffer. */
fn LLVMDisposeMemoryBuffer(MemBuf: MemoryBufferRef);
@ -905,68 +905,68 @@ native "cdecl" mod llvm = "rustllvm" {
* it's attached to.
*/
resource BuilderRef_res(B: BuilderRef) {
llvm::LLVMDisposeBuilder(B);
}
resource BuilderRef_res(B: BuilderRef) { llvm::LLVMDisposeBuilder(B); }
obj builder(B: BuilderRef,
terminated: @mutable bool,
obj builder(B: BuilderRef, terminated: @mutable bool,
// Stored twice so that we don't have to constantly deref
res: @BuilderRef_res) {
/* Terminators */
fn RetVoid() -> ValueRef {
assert (!*terminated);
assert (!*terminated);;
*terminated = true;
ret llvm::LLVMBuildRetVoid(B);
}
fn Ret(V: ValueRef) -> ValueRef {
assert (!*terminated);
assert (!*terminated);;
*terminated = true;
ret llvm::LLVMBuildRet(B, V);
}
fn AggregateRet(RetVals: &[ValueRef]) -> ValueRef {
assert (!*terminated);
assert (!*terminated);;
*terminated = true;
ret llvm::LLVMBuildAggregateRet(B, vec::to_ptr(RetVals),
vec::len(RetVals));
}
fn Br(Dest: BasicBlockRef) -> ValueRef {
assert (!*terminated);
assert (!*terminated);;
*terminated = true;
ret llvm::LLVMBuildBr(B, Dest);
}
fn CondBr(If: ValueRef, Then: BasicBlockRef, Else: BasicBlockRef) ->
ValueRef {
assert (!*terminated);
assert (!*terminated);;
*terminated = true;
ret llvm::LLVMBuildCondBr(B, If, Then, Else);
}
fn Switch(V: ValueRef, Else: BasicBlockRef, NumCases: uint) -> ValueRef {
assert (!*terminated);
assert (!*terminated);;
*terminated = true;
ret llvm::LLVMBuildSwitch(B, V, Else, NumCases);
}
fn IndirectBr(Addr: ValueRef, NumDests: uint) -> ValueRef {
assert (!*terminated);
assert (!*terminated);;
*terminated = true;
ret llvm::LLVMBuildIndirectBr(B, Addr, NumDests);
}
fn Invoke(Fn: ValueRef, Args: &[ValueRef], Then: BasicBlockRef,
Catch: BasicBlockRef) -> ValueRef {
assert (!*terminated);
assert (!*terminated);;
*terminated = true;
ret llvm::LLVMBuildInvoke(B, Fn, vec::to_ptr(Args), vec::len(Args),
Then, Catch, str::buf(""));
}
fn Unreachable() -> ValueRef {
assert (!*terminated);
assert (!*terminated);;
*terminated = true;
ret llvm::LLVMBuildUnreachable(B);
}
@ -1402,14 +1402,12 @@ obj builder(B: BuilderRef, terminated: @mutable bool,
let T: ValueRef =
llvm::LLVMGetNamedFunction(M, str::buf("llvm.trap"));
assert (T as int != 0);
let Args: [ValueRef] = ~[];
let Args: [ValueRef] = [];
ret llvm::LLVMBuildCall(B, T, vec::to_ptr(Args), vec::len(Args),
str::buf(""));
}
fn is_terminated() -> bool {
ret *terminated;
}
fn is_terminated() -> bool { ret *terminated; }
}
fn new_builder(llbb: BasicBlockRef) -> builder {
@ -1454,7 +1452,7 @@ fn mk_type_names() -> type_names {
}
fn type_to_str(names: type_names, ty: TypeRef) -> str {
ret type_to_str_inner(names, ~[], ty);
ret type_to_str_inner(names, [], ty);
}
fn type_to_str_inner(names: type_names, outer0: &[TypeRef], ty: TypeRef) ->
@ -1462,7 +1460,7 @@ fn type_to_str_inner(names: type_names, outer0: &[TypeRef], ty: TypeRef) ->
if names.type_has_name(ty) { ret names.get_name(ty); }
let outer = outer0 + ~[ty];
let outer = outer0 + [ty];
let kind: int = llvm::LLVMGetTypeKind(ty);
@ -1480,6 +1478,7 @@ fn type_to_str_inner(names: type_names, outer0: &[TypeRef], ty: TypeRef) ->
alt kind {
// FIXME: more enum-as-int constants determined from Core::h;
// horrible, horrible. Complete as needed.
@ -1494,11 +1493,13 @@ fn type_to_str_inner(names: type_names, outer0: &[TypeRef], ty: TypeRef) ->
6 { ret "Label"; }
7 {
ret "i" + std::int::str(llvm::LLVMGetIntTypeWidth(ty) as int);
}
8 {
let s = "fn(";
let out_ty: TypeRef = llvm::LLVMGetReturnType(ty);
@ -1512,6 +1513,7 @@ fn type_to_str_inner(names: type_names, outer0: &[TypeRef], ty: TypeRef) ->
}
9 {
let s: str = "{";
let n_elts: uint = llvm::LLVMCountStructElementTypes(ty);
@ -1523,12 +1525,14 @@ fn type_to_str_inner(names: type_names, outer0: &[TypeRef], ty: TypeRef) ->
}
10 {
let el_ty = llvm::LLVMGetElementType(ty);
ret "[" + type_to_str_inner(names, outer, el_ty) + "]";
}
11 {
let i: uint = 0u;
for tout: TypeRef in outer0 {
@ -1543,12 +1547,13 @@ fn type_to_str_inner(names: type_names, outer0: &[TypeRef], ty: TypeRef) ->
}
12 {
ret "Opaque";
}
13 { ret "Vector"; }
14 { ret "Metadata"; }
_ { log_err #fmt("unknown TypeKind %d", kind as int); fail; }
_ { log_err #fmt["unknown TypeKind %d", kind as int]; fail; }
}
}

@ -67,7 +67,7 @@ fn visit_item(e: env, i: &@ast::item) {
}
let cstore = e.sess.get_cstore();
if !cstore::add_used_library(cstore, m.native_name) { ret; }
for a: ast::attribute in
for a: ast::attribute in
attr::find_attrs_by_name(i.attrs, "link_args") {
alt attr::get_meta_item_value_str(attr::attr_meta(a)) {
some(linkarg) { cstore::add_used_link_args(cstore, linkarg); }
@ -93,12 +93,12 @@ fn metadata_matches(crate_data: &@[u8], metas: &[@ast::meta_item]) -> bool {
let attrs = decoder::get_crate_attributes(crate_data);
let linkage_metas = attr::find_linkage_metas(attrs);
log #fmt("matching %u metadata requirements against %u items",
vec::len(metas), vec::len(linkage_metas));
log #fmt["matching %u metadata requirements against %u items",
vec::len(metas), vec::len(linkage_metas)];
for needed: @ast::meta_item in metas {
if !attr::contains(linkage_metas, needed) {
log #fmt("missing %s", pprust::meta_item_to_str(*needed));
log #fmt["missing %s", pprust::meta_item_to_str(*needed)];
ret false;
}
}
@ -116,26 +116,26 @@ fn default_native_lib_naming(sess: session::session, static: bool) ->
}
fn find_library_crate(sess: &session::session, ident: &ast::ident,
metas: &[@ast::meta_item],
library_search_paths: &[str]) ->
option::t<{ident: str, data: @[u8]}> {
metas: &[@ast::meta_item], library_search_paths: &[str])
-> option::t<{ident: str, data: @[u8]}> {
attr::require_unique_names(sess, metas);
// FIXME: Probably want a warning here since the user
// is using the wrong type of meta item
let crate_name = {
let name_items = attr::find_meta_items_by_name(metas, "name");
alt vec::last(name_items) {
some(i) {
alt attr::get_meta_item_value_str(i) {
some(n) { n }
_ { ident }
let crate_name =
{
let name_items = attr::find_meta_items_by_name(metas, "name");
alt vec::last(name_items) {
some(i) {
alt attr::get_meta_item_value_str(i) {
some(n) { n }
_ { ident }
}
}
none. { ident }
}
}
none. { ident }
}
};
};
let nn = default_native_lib_naming(sess, sess.get_opts().static);
let x =
@ -157,23 +157,23 @@ fn find_library_crate_aux(nn: &{prefix: str, suffix: str}, crate_name: str,
// manually filtering fs::list_dir here.
for library_search_path: str in library_search_paths {
log #fmt("searching %s", library_search_path);
log #fmt["searching %s", library_search_path];
for path: str in fs::list_dir(library_search_path) {
log #fmt("searching %s", path);
log #fmt["searching %s", path];
let f: str = fs::basename(path);
if !(str::starts_with(f, prefix) && str::ends_with(f, nn.suffix))
{
log #fmt("skipping %s, doesn't look like %s*%s", path, prefix,
nn.suffix);
log #fmt["skipping %s, doesn't look like %s*%s", path, prefix,
nn.suffix];
cont;
}
alt get_metadata_section(path) {
option::some(cvec) {
if !metadata_matches(cvec, metas) {
log #fmt("skipping %s, metadata doesn't match", path);
log #fmt["skipping %s, metadata doesn't match", path];
cont;
}
log #fmt("found %s with matching metadata", path);
log #fmt["found %s with matching metadata", path];
ret some({ident: path, data: cvec});
}
_ { }
@ -204,15 +204,14 @@ fn get_metadata_section(filename: str) -> option::t<@[u8]> {
}
fn load_library_crate(sess: &session::session, span: span, ident: &ast::ident,
metas: &[@ast::meta_item],
library_search_paths: &[str]) ->
{ident: str, data: @[u8]} {
metas: &[@ast::meta_item], library_search_paths: &[str])
-> {ident: str, data: @[u8]} {
alt find_library_crate(sess, ident, metas, library_search_paths) {
some(t) { ret t; }
none. {
sess.span_fatal(span, #fmt("can't find crate for '%s'", ident));
sess.span_fatal(span, #fmt["can't find crate for '%s'", ident]);
}
}
}
@ -253,7 +252,7 @@ fn resolve_crate_deps(e: env, cdata: &@[u8]) -> cstore::cnum_map {
for dep: decoder::crate_dep in decoder::get_crate_deps(cdata) {
let extrn_cnum = dep.cnum;
let cname = dep.ident;
log #fmt("resolving dep %s", cname);
log #fmt["resolving dep %s", cname];
if e.crate_cache.contains_key(cname) {
log "already have it";
// We've already seen this crate
@ -264,7 +263,7 @@ fn resolve_crate_deps(e: env, cdata: &@[u8]) -> cstore::cnum_map {
// This is a new one so we've got to load it
// FIXME: Need better error reporting than just a bogus span
let fake_span = ast::dummy_sp();
let local_cnum = resolve_crate(e, cname, ~[], fake_span);
let local_cnum = resolve_crate(e, cname, [], fake_span);
cnum_map.insert(extrn_cnum, local_cnum);
}
}

@ -56,9 +56,9 @@ fn mk_cstore() -> cstore {
let crate_map = map::new_int_hash::<ast::crate_num>();
ret private(@{metas: meta_cache,
use_crate_map: crate_map,
mutable used_crate_files: ~[],
mutable used_libraries: ~[],
mutable used_link_args: ~[]});
mutable used_crate_files: [],
mutable used_libraries: [],
mutable used_link_args: []});
}
fn get_crate_data(cstore: &cstore, cnum: ast::crate_num) -> crate_metadata {
@ -76,7 +76,7 @@ fn have_crate_data(cstore: &cstore, cnum: ast::crate_num) -> bool {
iter iter_crate_data(cstore: &cstore) ->
@{key: ast::crate_num, val: crate_metadata} {
for each kv: @{key: ast::crate_num, val: crate_metadata} in
for each kv: @{key: ast::crate_num, val: crate_metadata} in
p(cstore).metas.items() {
put kv;
}
@ -84,7 +84,7 @@ iter iter_crate_data(cstore: &cstore) ->
fn add_used_crate_file(cstore: &cstore, lib: &str) {
if !vec::member(lib, p(cstore).used_crate_files) {
p(cstore).used_crate_files += ~[lib];
p(cstore).used_crate_files += [lib];
}
}
@ -97,7 +97,7 @@ fn add_used_library(cstore: &cstore, lib: &str) -> bool {
if vec::member(lib, p(cstore).used_libraries) { ret false; }
p(cstore).used_libraries += ~[lib];
p(cstore).used_libraries += [lib];
ret true;
}

@ -33,9 +33,9 @@ export external_resolver;
// def_id for an item defined in another crate, somebody needs to figure out
// what crate that's in and give us a def_id that makes sense for the current
// build.
type external_resolver = fn(&ast::def_id) -> ast::def_id ;
type external_resolver = fn(&ast::def_id) -> ast::def_id;
fn lookup_hash(d: &ebml::doc, eq_fn: fn(&[u8]) -> bool , hash: uint) ->
fn lookup_hash(d: &ebml::doc, eq_fn: fn(&[u8]) -> bool, hash: uint) ->
[ebml::doc] {
let index = ebml::get_doc(d, tag_index);
let table = ebml::get_doc(index, tag_index_table);
@ -44,19 +44,18 @@ fn lookup_hash(d: &ebml::doc, eq_fn: fn(&[u8]) -> bool , hash: uint) ->
let bucket = ebml::doc_at(d.data, pos);
// Awkward logic because we can't ret from foreach yet
let result: [ebml::doc] = ~[];
let result: [ebml::doc] = [];
let belt = tag_index_buckets_bucket_elt;
for each elt: ebml::doc in ebml::tagged_docs(bucket, belt) {
let pos = ebml::be_uint_from_bytes(elt.data, elt.start, 4u);
if eq_fn(vec::slice::<u8>(*elt.data, elt.start + 4u, elt.end)) {
result += ~[ebml::doc_at(d.data, pos)];
result += [ebml::doc_at(d.data, pos)];
}
}
ret result;
}
fn maybe_find_item(item_id: int, items: &ebml::doc) ->
option::t<ebml::doc> {
fn maybe_find_item(item_id: int, items: &ebml::doc) -> option::t<ebml::doc> {
fn eq_item(bytes: &[u8], item_id: int) -> bool {
ret ebml::be_uint_from_bytes(@bytes, 0u, 4u) as int == item_id;
}
@ -64,7 +63,7 @@ fn maybe_find_item(item_id: int, items: &ebml::doc) ->
let found = lookup_hash(items, eqer, hash_node_id(item_id));
if vec::len(found) == 0u {
ret option::none::<ebml::doc>;
} else { ret option::some::<ebml::doc>(found.(0)); }
} else { ret option::some::<ebml::doc>(found[0]); }
}
fn find_item(item_id: int, items: &ebml::doc) -> ebml::doc {
@ -115,19 +114,20 @@ fn item_type(item: &ebml::doc, this_cnum: ast::crate_num, tcx: ty::ctxt,
}
fn item_ty_param_kinds(item: &ebml::doc) -> [ast::kind] {
let ks: [ast::kind] = ~[];
let ks: [ast::kind] = [];
let tp = tag_items_data_item_ty_param_kinds;
for each p: ebml::doc in ebml::tagged_docs(item, tp) {
let dat : [u8] = ebml::doc_data(p);
let dat: [u8] = ebml::doc_data(p);
let vi = ebml::vint_at(dat, 0u);
let i = 0u;
while i < vi.val {
let k = alt dat.(vi.next + i) as char {
'u' { ast::kind_unique }
's' { ast::kind_shared }
'p' { ast::kind_pinned }
};
ks += ~[k];
let k =
alt dat[vi.next + i] as char {
'u' { ast::kind_unique }
's' { ast::kind_shared }
'p' { ast::kind_pinned }
};
ks += [k];
i += 1u;
}
}
@ -136,11 +136,11 @@ fn item_ty_param_kinds(item: &ebml::doc) -> [ast::kind] {
fn tag_variant_ids(item: &ebml::doc, this_cnum: ast::crate_num) ->
[ast::def_id] {
let ids: [ast::def_id] = ~[];
let ids: [ast::def_id] = [];
let v = tag_items_data_item_variant;
for each p: ebml::doc in ebml::tagged_docs(item, v) {
let ext = parse_def_id(ebml::doc_data(p));
ids += ~[{crate: this_cnum, node: ext.node}];
ids += [{crate: this_cnum, node: ext.node}];
}
ret ids;
}
@ -155,10 +155,10 @@ fn resolve_path(path: &[ast::ident], data: @[u8]) -> [ast::def_id] {
let md = ebml::new_doc(data);
let paths = ebml::get_doc(md, tag_paths);
let eqer = bind eq_item(_, s);
let result: [ast::def_id] = ~[];
let result: [ast::def_id] = [];
for doc: ebml::doc in lookup_hash(paths, eqer, hash_path(s)) {
let did_doc = ebml::get_doc(doc, tag_def_id);
result += ~[parse_def_id(ebml::doc_data(did_doc))];
result += [parse_def_id(ebml::doc_data(did_doc))];
}
ret result;
}
@ -203,12 +203,12 @@ fn get_type(data: @[u8], def: ast::def_id, tcx: &ty::ctxt,
let node_id = def.node;
let item = lookup_item(node_id, data);
let t = item_type(item, this_cnum, tcx, extres);
let tp_kinds : [ast::kind];
let tp_kinds: [ast::kind];
let fam_ch = item_family(item);
let has_ty_params = family_has_type_params(fam_ch);
if has_ty_params {
tp_kinds = item_ty_param_kinds(item);
} else { tp_kinds = ~[]; }
} else { tp_kinds = []; }
ret {kinds: tp_kinds, ty: t};
}
@ -231,22 +231,22 @@ fn get_tag_variants(_data: &@[u8], def: ast::def_id, tcx: &ty::ctxt,
cstore::get_crate_data(tcx.sess.get_cstore(), external_crate_id).data;
let items = ebml::get_doc(ebml::new_doc(data), tag_items);
let item = find_item(def.node, items);
let infos: [ty::variant_info] = ~[];
let infos: [ty::variant_info] = [];
let variant_ids = tag_variant_ids(item, external_crate_id);
for did: ast::def_id in variant_ids {
let item = find_item(did.node, items);
let ctor_ty = item_type(item, external_crate_id, tcx, extres);
let arg_tys: [ty::t] = ~[];
let arg_tys: [ty::t] = [];
alt ty::struct(tcx, ctor_ty) {
ty::ty_fn(_, args, _, _, _) {
for a: ty::arg in args { arg_tys += ~[a.ty]; }
for a: ty::arg in args { arg_tys += [a.ty]; }
}
_ {
// Nullary tag variant.
}
}
infos += ~[{args: arg_tys, ctor_ty: ctor_ty, id: did}];
infos += [{args: arg_tys, ctor_ty: ctor_ty, id: did}];
}
ret infos;
}
@ -295,14 +295,14 @@ fn item_family_to_str(fam: u8) -> str {
}
fn get_meta_items(md: &ebml::doc) -> [@ast::meta_item] {
let items: [@ast::meta_item] = ~[];
for each meta_item_doc: ebml::doc in
let items: [@ast::meta_item] = [];
for each meta_item_doc: ebml::doc in
ebml::tagged_docs(md, tag_meta_item_word) {
let nd = ebml::get_doc(meta_item_doc, tag_meta_item_name);
let n = str::unsafe_from_bytes(ebml::doc_data(nd));
items += ~[attr::mk_word_item(n)];
items += [attr::mk_word_item(n)];
}
for each meta_item_doc: ebml::doc in
for each meta_item_doc: ebml::doc in
ebml::tagged_docs(md, tag_meta_item_name_value) {
let nd = ebml::get_doc(meta_item_doc, tag_meta_item_name);
let vd = ebml::get_doc(meta_item_doc, tag_meta_item_value);
@ -310,32 +310,32 @@ fn get_meta_items(md: &ebml::doc) -> [@ast::meta_item] {
let v = str::unsafe_from_bytes(ebml::doc_data(vd));
// FIXME (#611): Should be able to decode meta_name_value variants,
// but currently they can't be encoded
items += ~[attr::mk_name_value_item_str(n, v)];
items += [attr::mk_name_value_item_str(n, v)];
}
for each meta_item_doc: ebml::doc in
for each meta_item_doc: ebml::doc in
ebml::tagged_docs(md, tag_meta_item_list) {
let nd = ebml::get_doc(meta_item_doc, tag_meta_item_name);
let n = str::unsafe_from_bytes(ebml::doc_data(nd));
let subitems = get_meta_items(meta_item_doc);
items += ~[attr::mk_list_item(n, subitems)];
items += [attr::mk_list_item(n, subitems)];
}
ret items;
}
fn get_attributes(md: &ebml::doc) -> [ast::attribute] {
let attrs: [ast::attribute] = ~[];
let attrs: [ast::attribute] = [];
alt ebml::maybe_get_doc(md, tag_attributes) {
option::some(attrs_d) {
for each attr_doc: ebml::doc in
for each attr_doc: ebml::doc in
ebml::tagged_docs(attrs_d, tag_attribute) {
let meta_items = get_meta_items(attr_doc);
// Currently it's only possible to have a single meta item on
// an attribute
assert (vec::len(meta_items) == 1u);
let meta_item = meta_items.(0);
let meta_item = meta_items[0];
attrs +=
~[{node: {style: ast::attr_outer, value: *meta_item},
span: ast::dummy_sp()}];
[{node: {style: ast::attr_outer, value: *meta_item},
span: ast::dummy_sp()}];
}
}
option::none. { }
@ -345,7 +345,7 @@ fn get_attributes(md: &ebml::doc) -> [ast::attribute] {
fn list_meta_items(meta_items: &ebml::doc, out: io::writer) {
for mi: @ast::meta_item in get_meta_items(meta_items) {
out.write_str(#fmt("%s\n", pprust::meta_item_to_str(*mi)));
out.write_str(#fmt["%s\n", pprust::meta_item_to_str(*mi)]);
}
}
@ -353,7 +353,7 @@ fn list_crate_attributes(md: &ebml::doc, out: io::writer) {
out.write_str("=Crate Attributes=\n");
for attr: ast::attribute in get_attributes(md) {
out.write_str(#fmt("%s\n", pprust::attribute_to_str(attr)));
out.write_str(#fmt["%s\n", pprust::attribute_to_str(attr)]);
}
out.write_str("\n\n");
@ -366,14 +366,13 @@ fn get_crate_attributes(data: @[u8]) -> [ast::attribute] {
type crate_dep = {cnum: ast::crate_num, ident: str};
fn get_crate_deps(data: @[u8]) -> [crate_dep] {
let deps: [crate_dep] = ~[];
let deps: [crate_dep] = [];
let cratedoc = ebml::new_doc(data);
let depsdoc = ebml::get_doc(cratedoc, tag_crate_deps);
let crate_num = 1;
for each depdoc: ebml::doc in
ebml::tagged_docs(depsdoc, tag_crate_dep) {
for each depdoc: ebml::doc in ebml::tagged_docs(depsdoc, tag_crate_dep) {
let depname = str::unsafe_from_bytes(ebml::doc_data(depdoc));
deps += ~[{cnum: crate_num, ident: depname}];
deps += [{cnum: crate_num, ident: depname}];
crate_num += 1;
}
ret deps;
@ -383,7 +382,7 @@ fn list_crate_deps(data: @[u8], out: io::writer) {
out.write_str("=External Dependencies=\n");
for dep: crate_dep in get_crate_deps(data) {
out.write_str(#fmt("%d %s\n", dep.cnum, dep.ident));
out.write_str(#fmt["%d %s\n", dep.cnum, dep.ident]);
}
out.write_str("\n");
@ -395,7 +394,7 @@ fn list_crate_items(bytes: &@[u8], md: &ebml::doc, out: io::writer) {
let items = ebml::get_doc(md, tag_items);
let index = ebml::get_doc(paths, tag_index);
let bs = ebml::get_doc(index, tag_index_buckets);
for each bucket: ebml::doc in
for each bucket: ebml::doc in
ebml::tagged_docs(bs, tag_index_buckets_bucket) {
let et = tag_index_buckets_bucket_elt;
for each elt: ebml::doc in ebml::tagged_docs(bucket, et) {
@ -403,8 +402,8 @@ fn list_crate_items(bytes: &@[u8], md: &ebml::doc, out: io::writer) {
let def = ebml::doc_at(bytes, data.pos);
let did_doc = ebml::get_doc(def, tag_def_id);
let did = parse_def_id(ebml::doc_data(did_doc));
out.write_str(#fmt("%s (%s)\n", data.path,
describe_def(items, did)));
out.write_str(#fmt["%s (%s)\n", data.path,
describe_def(items, did)]);
}
}
out.write_str("\n");

@ -51,14 +51,13 @@ fn encode_tag_variant_paths(ebml_w: &ebml::writer, variants: &[variant],
fn add_to_index(ebml_w: &ebml::writer, path: &[str],
index: &mutable [entry<str>], name: &str) {
let full_path = path + ~[name];
let full_path = path + [name];
index +=
~[{val: str::connect(full_path, "::"),
pos: ebml_w.writer.tell()}];
[{val: str::connect(full_path, "::"), pos: ebml_w.writer.tell()}];
}
fn encode_native_module_item_paths(ebml_w: &ebml::writer,
nmod: &native_mod, path: &[str],
fn encode_native_module_item_paths(ebml_w: &ebml::writer, nmod: &native_mod,
path: &[str],
index: &mutable [entry<str>]) {
for nitem: @native_item in nmod.items {
add_to_index(ebml_w, path, index, nitem.ident);
@ -93,7 +92,7 @@ fn encode_module_item_paths(ebml_w: &ebml::writer, module: &_mod,
ebml::start_tag(ebml_w, tag_paths_data_mod);
encode_name(ebml_w, it.ident);
encode_def_id(ebml_w, local_def(it.id));
encode_module_item_paths(ebml_w, _mod, path + ~[it.ident], index);
encode_module_item_paths(ebml_w, _mod, path + [it.ident], index);
ebml::end_tag(ebml_w);
}
item_native_mod(nmod) {
@ -101,7 +100,7 @@ fn encode_module_item_paths(ebml_w: &ebml::writer, module: &_mod,
ebml::start_tag(ebml_w, tag_paths_data_mod);
encode_name(ebml_w, it.ident);
encode_def_id(ebml_w, local_def(it.id));
encode_native_module_item_paths(ebml_w, nmod, path + ~[it.ident],
encode_native_module_item_paths(ebml_w, nmod, path + [it.ident],
index);
ebml::end_tag(ebml_w);
}
@ -148,10 +147,9 @@ fn encode_module_item_paths(ebml_w: &ebml::writer, module: &_mod,
}
}
fn encode_item_paths(ebml_w: &ebml::writer, crate: &@crate) ->
[entry<str>] {
let index: [entry<str>] = ~[];
let path: [str] = ~[];
fn encode_item_paths(ebml_w: &ebml::writer, crate: &@crate) -> [entry<str>] {
let index: [entry<str>] = [];
let path: [str] = [];
ebml::start_tag(ebml_w, tag_paths);
encode_module_item_paths(ebml_w, crate.node.module, path, index);
ebml::end_tag(ebml_w);
@ -162,28 +160,29 @@ fn encode_item_paths(ebml_w: &ebml::writer, crate: &@crate) ->
// Item info table encoding
fn encode_family(ebml_w: &ebml::writer, c: u8) {
ebml::start_tag(ebml_w, tag_items_data_item_family);
ebml_w.writer.write(~[c]);
ebml_w.writer.write([c]);
ebml::end_tag(ebml_w);
}
fn encode_inlineness(ebml_w: &ebml::writer, c: u8) {
ebml::start_tag(ebml_w, tag_items_data_item_inlineness);
ebml_w.writer.write(~[c]);
ebml_w.writer.write([c]);
ebml::end_tag(ebml_w);
}
fn def_to_str(did: &def_id) -> str { ret #fmt("%d:%d", did.crate, did.node); }
fn def_to_str(did: &def_id) -> str { ret #fmt["%d:%d", did.crate, did.node]; }
fn encode_type_param_kinds(ebml_w: &ebml::writer, tps: &[ty_param]) {
ebml::start_tag(ebml_w, tag_items_data_item_ty_param_kinds);
ebml::write_vint(ebml_w.writer, vec::len::<ty_param>(tps));
for tp: ty_param in tps {
let c = alt tp.kind {
kind_unique. { 'u' }
kind_shared. { 's' }
kind_pinned. { 'p' }
};
ebml_w.writer.write(~[c as u8]);
let c =
alt tp.kind {
kind_unique. { 'u' }
kind_shared. { 's' }
kind_pinned. { 'p' }
};
ebml_w.writer.write([c as u8]);
}
ebml::end_tag(ebml_w);
}
@ -229,7 +228,7 @@ fn encode_tag_variant_info(ecx: &@encode_ctxt, ebml_w: &ebml::writer,
index: &mutable [entry<int>],
ty_params: &[ty_param]) {
for variant: variant in variants {
index += ~[{val: variant.node.id, pos: ebml_w.writer.tell()}];
index += [{val: variant.node.id, pos: ebml_w.writer.tell()}];
ebml::start_tag(ebml_w, tag_items_data_item);
encode_def_id(ebml_w, local_def(variant.node.id));
encode_family(ebml_w, 'v' as u8);
@ -245,8 +244,8 @@ fn encode_tag_variant_info(ecx: &@encode_ctxt, ebml_w: &ebml::writer,
}
}
fn encode_info_for_item(ecx: @encode_ctxt, ebml_w: &ebml::writer,
item: @item, index: &mutable [entry<int>]) {
fn encode_info_for_item(ecx: @encode_ctxt, ebml_w: &ebml::writer, item: @item,
index: &mutable [entry<int>]) {
alt item.node {
item_const(_, _) {
ebml::start_tag(ebml_w, tag_items_data_item);
@ -260,8 +259,10 @@ fn encode_info_for_item(ecx: @encode_ctxt, ebml_w: &ebml::writer,
ebml::start_tag(ebml_w, tag_items_data_item);
encode_def_id(ebml_w, local_def(item.id));
encode_family(ebml_w,
alt fd.decl.purity { pure_fn. { 'p' } impure_fn. { 'f' } }
as u8);
alt fd.decl.purity {
pure_fn. { 'p' }
impure_fn. { 'f' }
} as u8);
encode_inlineness(ebml_w,
alt fd.decl.il {
il_normal. { 'n' }
@ -315,7 +316,7 @@ fn encode_info_for_item(ecx: @encode_ctxt, ebml_w: &ebml::writer,
encode_symbol(ecx, ebml_w, item.id);
ebml::end_tag(ebml_w);
index += ~[{val: ctor_id, pos: ebml_w.writer.tell()}];
index += [{val: ctor_id, pos: ebml_w.writer.tell()}];
ebml::start_tag(ebml_w, tag_items_data_item);
encode_def_id(ebml_w, local_def(ctor_id));
encode_family(ebml_w, 'f' as u8);
@ -334,7 +335,7 @@ fn encode_info_for_item(ecx: @encode_ctxt, ebml_w: &ebml::writer,
encode_type(ecx, ebml_w, ty::ty_fn_ret(ecx.ccx.tcx, fn_ty));
ebml::end_tag(ebml_w);
index += ~[{val: ctor_id, pos: ebml_w.writer.tell()}];
index += [{val: ctor_id, pos: ebml_w.writer.tell()}];
ebml::start_tag(ebml_w, tag_items_data_item);
encode_def_id(ebml_w, local_def(ctor_id));
encode_family(ebml_w, 'f' as u8);
@ -369,17 +370,17 @@ fn encode_info_for_native_item(ecx: &@encode_ctxt, ebml_w: &ebml::writer,
fn encode_info_for_items(ecx: &@encode_ctxt, ebml_w: &ebml::writer) ->
[entry<int>] {
let index: [entry<int>] = ~[];
let index: [entry<int>] = [];
ebml::start_tag(ebml_w, tag_items_data);
for each kvp: @{key: node_id, val: middle::ast_map::ast_node} in
for each kvp: @{key: node_id, val: middle::ast_map::ast_node} in
ecx.ccx.ast_map.items() {
alt kvp.val {
middle::ast_map::node_item(i) {
index += ~[{val: kvp.key, pos: ebml_w.writer.tell()}];
index += [{val: kvp.key, pos: ebml_w.writer.tell()}];
encode_info_for_item(ecx, ebml_w, i, index);
}
middle::ast_map::node_native_item(i) {
index += ~[{val: kvp.key, pos: ebml_w.writer.tell()}];
index += [{val: kvp.key, pos: ebml_w.writer.tell()}];
encode_info_for_native_item(ecx, ebml_w, i);
}
_ { }
@ -392,30 +393,30 @@ fn encode_info_for_items(ecx: &@encode_ctxt, ebml_w: &ebml::writer) ->
// Path and definition ID indexing
fn create_index<T>(index: &[entry<T>], hash_fn: fn(&T) -> uint ) ->
fn create_index<T>(index: &[entry<T>], hash_fn: fn(&T) -> uint) ->
[@[entry<T>]] {
let buckets: [@mutable [entry<T>]] = ~[];
for each i: uint in uint::range(0u, 256u) { buckets += ~[@mutable ~[]]; }
let buckets: [@mutable [entry<T>]] = [];
for each i: uint in uint::range(0u, 256u) { buckets += [@mutable []]; }
for elt: entry<T> in index {
let h = hash_fn(elt.val);
*buckets.(h % 256u) += ~[elt];
*buckets[h % 256u] += [elt];
}
let buckets_frozen = ~[];
let buckets_frozen = [];
for bucket: @mutable [entry<T>] in buckets {
buckets_frozen += ~[@*bucket];
buckets_frozen += [@*bucket];
}
ret buckets_frozen;
}
fn encode_index<T>(ebml_w: &ebml::writer, buckets: &[@[entry<T>]],
write_fn: fn(&io::writer, &T) ) {
write_fn: fn(&io::writer, &T)) {
let writer = io::new_writer_(ebml_w.writer);
ebml::start_tag(ebml_w, tag_index);
let bucket_locs: [uint] = ~[];
let bucket_locs: [uint] = [];
ebml::start_tag(ebml_w, tag_index_buckets);
for bucket: @[entry<T>] in buckets {
bucket_locs += ~[ebml_w.writer.tell()];
bucket_locs += [ebml_w.writer.tell()];
ebml::start_tag(ebml_w, tag_index_buckets_bucket);
for elt: entry<T> in *bucket {
ebml::start_tag(ebml_w, tag_index_buckets_bucket_elt);
@ -508,30 +509,30 @@ fn synthesize_crate_attrs(ecx: &@encode_ctxt, crate: &@crate) -> [attribute] {
attr::remove_meta_items_by_name(tmp, "vers")
};
let meta_items = ~[name_item, vers_item] + other_items;
let meta_items = [name_item, vers_item] + other_items;
let link_item = attr::mk_list_item("link", meta_items);
ret attr::mk_attr(link_item);
}
let attrs: [attribute] = ~[];
let attrs: [attribute] = [];
let found_link_attr = false;
for attr: attribute in crate.node.attrs {
attrs +=
if attr::get_attr_name(attr) != "link" {
~[attr]
[attr]
} else {
alt attr.node.value.node {
meta_list(n, l) {
found_link_attr = true;
~[synthesize_link_attr(ecx, l)]
[synthesize_link_attr(ecx, l)]
}
_ { ~[attr] }
_ { [attr] }
}
}
}
if !found_link_attr { attrs += ~[synthesize_link_attr(ecx, ~[])]; }
if !found_link_attr { attrs += [synthesize_link_attr(ecx, [])]; }
ret attrs;
}
@ -543,9 +544,9 @@ fn encode_crate_deps(ebml_w: &ebml::writer, cstore: &cstore::cstore) {
type numname = {crate: crate_num, ident: str};
// Pull the cnums and names out of cstore
let pairs: [mutable numname] = ~[mutable];
let pairs: [mutable numname] = [mutable];
for each hashkv: hashkv in cstore::iter_crate_data(cstore) {
pairs += ~[mutable {crate: hashkv.key, ident: hashkv.val.name}];
pairs += [mutable {crate: hashkv.key, ident: hashkv.val.name}];
}
// Sort by cnum
@ -612,7 +613,7 @@ fn encode_metadata(cx: &@crate_ctxt, crate: &@crate) -> str {
// Pad this, since something (LLVM, presumably) is cutting off the
// remaining % 4 bytes.
buf_w.write(~[0u8, 0u8, 0u8, 0u8]);
buf_w.write([0u8, 0u8, 0u8, 0u8]);
ret string_w.get_str();
}
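
For orientation: the `create_index` function earlier in this file's diff spreads every entry across 256 hash buckets (`hash_fn(val) % 256`) and then freezes the buckets for `encode_index` to serialize. Below is a minimal sketch of that bucketing step in modern Rust, assuming an illustrative `Entry` struct and `hash_fn` closure rather than the compiler's actual types; it is not the 2011 code shown above.

// Minimal modern-Rust sketch of create_index's bucketing (illustrative only).
#[derive(Clone)]
struct Entry<T> {
    val: T,
    pos: usize, // byte offset of the encoded item
}

fn create_index<T: Clone>(index: &[Entry<T>], hash_fn: impl Fn(&T) -> usize) -> Vec<Vec<Entry<T>>> {
    // One bucket per hash residue, mirroring the fixed 256 buckets above.
    let mut buckets: Vec<Vec<Entry<T>>> = vec![Vec::new(); 256];
    for elt in index {
        buckets[hash_fn(&elt.val) % 256].push(elt.clone());
    }
    buckets
}

fn main() {
    let idx = vec![Entry { val: 7usize, pos: 42 }];
    let buckets = create_index(&idx, |v| *v);
    assert_eq!(buckets[7][0].pos, 42); // the entry lands in bucket 7 % 256 == 7
}
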

View file

@ -19,17 +19,17 @@ export parse_ty_data;
// data buffer. Whatever format you choose should not contain pipe characters.
// Callback to translate defs to strs or back:
type str_def = fn(str) -> ast::def_id ;
type str_def = fn(str) -> ast::def_id;
type pstate =
{data: @[u8], crate: int, mutable pos: uint, len: uint, tcx: ty::ctxt};
tag ty_or_bang { a_ty(ty::t); a_bang; }
fn peek(st: @pstate) -> u8 { ret st.data.(st.pos); }
fn peek(st: @pstate) -> u8 { ret st.data[st.pos]; }
fn next(st: @pstate) -> u8 {
let ch = st.data.(st.pos);
let ch = st.data[st.pos];
st.pos = st.pos + 1u;
ret ch;
}
@ -39,7 +39,7 @@ fn parse_ident(st: @pstate, sd: str_def, last: char) -> ast::ident {
ret parse_ident_(st, sd, bind is_last(last, _));
}
fn parse_ident_(st: @pstate, _sd: str_def, is_last: fn(char) -> bool ) ->
fn parse_ident_(st: @pstate, _sd: str_def, is_last: fn(char) -> bool) ->
ast::ident {
let rslt = "";
while !is_last(peek(st) as char) {
@ -65,14 +65,14 @@ fn parse_ty_or_bang(st: @pstate, sd: str_def) -> ty_or_bang {
}
fn parse_constrs(st: @pstate, sd: str_def) -> [@ty::constr] {
let rslt: [@ty::constr] = ~[];
let rslt: [@ty::constr] = [];
alt peek(st) as char {
':' {
do {
next(st);
let one: @ty::constr =
parse_constr::<uint>(st, sd, parse_constr_arg);
rslt += ~[one];
rslt += [one];
} while peek(st) as char == ';'
}
_ { }
@ -82,14 +82,14 @@ fn parse_constrs(st: @pstate, sd: str_def) -> [@ty::constr] {
// FIXME less copy-and-paste
fn parse_ty_constrs(st: @pstate, sd: str_def) -> [@ty::type_constr] {
let rslt: [@ty::type_constr] = ~[];
let rslt: [@ty::type_constr] = [];
alt peek(st) as char {
':' {
do {
next(st);
let one: @ty::type_constr =
parse_constr::<path>(st, sd, parse_ty_constr_arg);
rslt += ~[one];
rslt += [one];
} while peek(st) as char == ';'
}
_ { }
@ -98,24 +98,24 @@ fn parse_ty_constrs(st: @pstate, sd: str_def) -> [@ty::type_constr] {
}
fn parse_path(st: @pstate, sd: str_def) -> ast::path {
let idents: [ast::ident] = ~[];
let idents: [ast::ident] = [];
fn is_last(c: char) -> bool { ret c == '(' || c == ':'; }
idents += ~[parse_ident_(st, sd, is_last)];
idents += [parse_ident_(st, sd, is_last)];
while true {
alt peek(st) as char {
':' { next(st); next(st); }
c {
if c == '(' {
ret respan(ast::dummy_sp(),
{global: false, idents: idents, types: ~[]});
} else { idents += ~[parse_ident_(st, sd, is_last)]; }
{global: false, idents: idents, types: []});
} else { idents += [parse_ident_(st, sd, is_last)]; }
}
}
}
fail "parse_path: ill-formed path";
}
type arg_parser<T> = fn(@pstate, str_def) -> ast::constr_arg_general_<T> ;
type arg_parser<T> = fn(@pstate, str_def) -> ast::constr_arg_general_<T>;
fn parse_constr_arg(st: @pstate, _sd: str_def) -> ast::fn_constr_arg {
alt peek(st) as char {
@ -153,7 +153,7 @@ fn parse_ty_constr_arg(st: @pstate, sd: str_def) ->
fn parse_constr<@T>(st: @pstate, sd: str_def, pser: arg_parser<T>) ->
@ty::constr_general<T> {
let sp = ast::dummy_sp(); // FIXME: use a real span
let args: [@sp_constr_arg<T>] = ~[];
let args: [@sp_constr_arg<T>] = [];
let pth: path = parse_path(st, sd);
let ignore: char = next(st) as char;
assert (ignore as char == '(');
@ -162,7 +162,7 @@ fn parse_constr<@T>(st: @pstate, sd: str_def, pser: arg_parser<T>) ->
do {
an_arg = pser(st, sd);
// FIXME use a real span
args += ~[@respan(sp, an_arg)];
args += [@respan(sp, an_arg)];
ignore = next(st) as char;
} while ignore == ';'
assert (ignore == ')');
@ -197,21 +197,23 @@ fn parse_ty(st: @pstate, sd: str_def) -> ty::t {
't' {
assert (next(st) as char == '[');
let def = parse_def(st, sd);
let params: [ty::t] = ~[];
while peek(st) as char != ']' { params += ~[parse_ty(st, sd)]; }
let params: [ty::t] = [];
while peek(st) as char != ']' { params += [parse_ty(st, sd)]; }
st.pos = st.pos + 1u;
ret ty::mk_tag(st.tcx, def, params);
}
'p' {
let k = alt next(st) as char {
'u' { kind_unique }
's' { kind_shared }
'p' { kind_pinned }
c {
log_err "unexpected char in encoded type param: ";
log_err c; fail
}
};
let k =
alt next(st) as char {
'u' { kind_unique }
's' { kind_shared }
'p' { kind_pinned }
c {
log_err "unexpected char in encoded type param: ";
log_err c;
fail
}
};
ret ty::mk_param(st.tcx, parse_int(st) as uint, k);
}
'@' { ret ty::mk_box(st.tcx, parse_mt(st, sd)); }
@ -220,22 +222,22 @@ fn parse_ty(st: @pstate, sd: str_def) -> ty::t {
'I' { ret ty::mk_vec(st.tcx, parse_mt(st, sd)); }
'R' {
assert (next(st) as char == '[');
let fields: [ty::field] = ~[];
let fields: [ty::field] = [];
while peek(st) as char != ']' {
let name = "";
while peek(st) as char != '=' {
name += str::unsafe_from_byte(next(st));
}
st.pos = st.pos + 1u;
fields += ~[{ident: name, mt: parse_mt(st, sd)}];
fields += [{ident: name, mt: parse_mt(st, sd)}];
}
st.pos = st.pos + 1u;
ret ty::mk_rec(st.tcx, fields);
}
'T' {
assert (next(st) as char == '[');
let params = ~[];
while peek(st) as char != ']' { params += ~[parse_ty(st, sd)]; }
let params = [];
while peek(st) as char != ']' { params += [parse_ty(st, sd)]; }
st.pos = st.pos + 1u;
ret ty::mk_tup(st.tcx, params);
}
@ -268,7 +270,7 @@ fn parse_ty(st: @pstate, sd: str_def) -> ty::t {
}
'O' {
assert (next(st) as char == '[');
let methods: [ty::method] = ~[];
let methods: [ty::method] = [];
while peek(st) as char != ']' {
let proto;
alt next(st) as char {
@ -281,12 +283,12 @@ fn parse_ty(st: @pstate, sd: str_def) -> ty::t {
}
let func = parse_ty_fn(st, sd);
methods +=
~[{proto: proto,
ident: name,
inputs: func.args,
output: func.ty,
cf: func.cf,
constrs: func.cs}];
[{proto: proto,
ident: name,
inputs: func.args,
output: func.ty,
cf: func.cf,
constrs: func.cs}];
}
st.pos += 1u;
ret ty::mk_obj(st.tcx, methods);
@ -295,8 +297,8 @@ fn parse_ty(st: @pstate, sd: str_def) -> ty::t {
assert (next(st) as char == '[');
let def = parse_def(st, sd);
let inner = parse_ty(st, sd);
let params: [ty::t] = ~[];
while peek(st) as char != ']' { params += ~[parse_ty(st, sd)]; }
let params: [ty::t] = [];
while peek(st) as char != ']' { params += [parse_ty(st, sd)]; }
st.pos = st.pos + 1u;
ret ty::mk_res(st.tcx, def, inner, params);
}
@ -375,7 +377,7 @@ fn parse_hex(st: @pstate) -> uint {
fn parse_ty_fn(st: @pstate, sd: str_def) ->
{args: [ty::arg], ty: ty::t, cf: ast::controlflow, cs: [@ty::constr]} {
assert (next(st) as char == '[');
let inputs: [ty::arg] = ~[];
let inputs: [ty::arg] = [];
while peek(st) as char != ']' {
let mode = ty::mo_val;
if peek(st) as char == '&' {
@ -389,7 +391,7 @@ fn parse_ty_fn(st: @pstate, sd: str_def) ->
mode = ty::mo_move;
st.pos += 1u;
}
inputs += ~[{mode: mode, ty: parse_ty(st, sd)}];
inputs += [{mode: mode, ty: parse_ty(st, sd)}];
}
st.pos += 1u; // eat the ']'
let cs = parse_constrs(st, sd);
@ -406,7 +408,7 @@ fn parse_ty_fn(st: @pstate, sd: str_def) ->
fn parse_def_id(buf: &[u8]) -> ast::def_id {
let colon_idx = 0u;
let len = vec::len::<u8>(buf);
while colon_idx < len && buf.(colon_idx) != ':' as u8 { colon_idx += 1u; }
while colon_idx < len && buf[colon_idx] != ':' as u8 { colon_idx += 1u; }
if colon_idx == len {
log_err "didn't find ':' when parsing def id";
fail;
@ -414,10 +416,10 @@ fn parse_def_id(buf: &[u8]) -> ast::def_id {
let crate_part = vec::slice::<u8>(buf, 0u, colon_idx);
let def_part = vec::slice::<u8>(buf, colon_idx + 1u, len);
let crate_part_vec = ~[];
let def_part_vec = ~[];
for b: u8 in crate_part { crate_part_vec += ~[b]; }
for b: u8 in def_part { def_part_vec += ~[b]; }
let crate_part_vec = [];
let def_part_vec = [];
for b: u8 in crate_part { crate_part_vec += [b]; }
for b: u8 in def_part { def_part_vec += [b]; }
let crate_num = uint::parse_buf(crate_part_vec, 10u) as int;
let def_num = uint::parse_buf(def_part_vec, 10u) as int;
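
For orientation: `parse_def_id` above scans a `crate_num:def_num` byte buffer for the first ':' and parses the two decimal halves. Below is a minimal sketch of the same logic in modern Rust, assuming an `Option` return in place of the original's log-and-fail error handling; it is illustrative only, not the decoder's real interface.

// Minimal modern-Rust sketch of parse_def_id (illustrative only): "3:17" -> (3, 17).
fn parse_def_id(buf: &[u8]) -> Option<(i64, i64)> {
    // The original logs an error and fails when no ':' is found.
    let colon = buf.iter().position(|&b| b == b':')?;
    let crate_num = std::str::from_utf8(&buf[..colon]).ok()?.parse().ok()?;
    let def_num = std::str::from_utf8(&buf[colon + 1..]).ok()?.parse().ok()?;
    Some((crate_num, def_num))
}

fn main() {
    assert_eq!(parse_def_id(b"3:17"), Some((3, 17)));
    assert_eq!(parse_def_id(b"no-colon"), None);
}
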

View file

@ -17,9 +17,10 @@ export ac_no_abbrevs;
export ac_use_abbrevs;
export enc_ty;
type ctxt = // Def -> str Callback:
type ctxt =
// Def -> str Callback:
// The type context.
{ds: fn(&def_id) -> str , tcx: ty::ctxt, abbrevs: abbrev_ctxt};
{ds: fn(&def_id) -> str, tcx: ty::ctxt, abbrevs: abbrev_ctxt};
// Compact string representation for ty.t values. API ty_str & parse_from_str.
// Extra parameters are for converting to/from def_ids in the string rep.
@ -151,7 +152,7 @@ fn enc_sty(w: &io::writer, cx: &@ctxt, st: &ty::sty) {
native_abi_llvm. { w.write_char('l'); }
native_abi_x86stdcall. { w.write_char('s'); }
}
enc_ty_fn(w, cx, args, out, return, ~[]);
enc_ty_fn(w, cx, args, out, return, []);
}
ty::ty_obj(methods) {
w.write_str("O[");
@ -176,7 +177,7 @@ fn enc_sty(w: &io::writer, cx: &@ctxt, st: &ty::sty) {
w.write_str(cx.ds(def));
w.write_char('|');
}
ty::ty_param(id,k) {
ty::ty_param(id, k) {
alt k {
kind_unique. { w.write_str("pu"); }
kind_shared. { w.write_str("ps"); }
@ -210,9 +211,7 @@ fn enc_ty_fn(w: &io::writer, cx: &@ctxt, args: &[ty::arg], out: &ty::t,
w.write_char('&');
if mut { w.write_char('m'); }
}
ty::mo_move. {
w.write_char('-');
}
ty::mo_move. { w.write_char('-'); }
ty::mo_val. { }
}
enc_ty(w, cx, arg.ty);

View file

@ -42,12 +42,13 @@ fn check_crate(tcx: ty::ctxt, crate: &@ast::crate) {
// Stores information about object fields and function
// arguments that's otherwise not easily available.
let cx = @{tcx: tcx, local_map: std::map::new_int_hash()};
let v = @{visit_fn: bind visit_fn(cx, _, _, _, _, _, _, _),
visit_item: bind visit_item(cx, _, _, _),
visit_expr: bind visit_expr(cx, _, _, _),
visit_decl: bind visit_decl(cx, _, _, _)
with *visit::default_visitor::<scope>()};
visit::visit_crate(*crate, @~[], visit::mk_vt(v));
let v =
@{visit_fn: bind visit_fn(cx, _, _, _, _, _, _, _),
visit_item: bind visit_item(cx, _, _, _),
visit_expr: bind visit_expr(cx, _, _, _),
visit_decl: bind visit_decl(cx, _, _, _)
with *visit::default_visitor::<scope>()};
visit::visit_crate(*crate, @[], visit::mk_vt(v));
tcx.sess.abort_if_errors();
}
@ -57,27 +58,36 @@ fn visit_fn(cx: &@ctx, f: &ast::_fn, _tp: &[ast::ty_param], _sp: &span,
for arg_: ast::arg in f.decl.inputs {
cx.local_map.insert(arg_.id, arg(arg_.mode));
}
let scope = alt (f.proto) {
// Blocks need to obey any restrictions from the enclosing scope.
ast::proto_block. { sc }
// Closures need to prohibit writing to any of the upvars.
// This doesn't seem like a particularly clean way to do this.
ast::proto_closure. {
let dnums = ~[];
for each nid in freevars::get_freevar_defs(cx.tcx, id).keys() {
dnums += ~[nid];
}
@~[@{root_vars: ~[],
// I'm not sure if there is anything sensical to put here
block_defnum: 0,
bindings: dnums,
tys: ~[],
depends_on: ~[],
mutable ok: valid}]
}
// Non capturing functions start out fresh.
_ { @~[] }
};
let scope =
alt f.proto {
// Blocks need to obey any restrictions from the enclosing scope.
ast::proto_block. {
sc
}
// Closures need to prohibit writing to any of the upvars.
// This doesn't seem like a particularly clean way to do this.
ast::proto_closure. {
let dnums = [];
for each nid in freevars::get_freevar_defs(cx.tcx, id).keys() {
dnums += [nid];
};
@[
// I'm not sure if there is anything sensical to put here
@{root_vars: [],
block_defnum: 0,
bindings: dnums,
tys: [],
depends_on: [],
mutable ok: valid}]
}
// Non capturing functions start out fresh.
_ {
@[]
}
};
v.visit_block(f.body, scope, v);
}
@ -168,14 +178,14 @@ fn check_call(cx: &ctx, f: &@ast::expr, args: &[@ast::expr], sc: &scope) ->
{root_vars: [node_id], unsafe_ts: [ty::t]} {
let fty = ty::expr_ty(cx.tcx, f);
let arg_ts = fty_args(cx, fty);
let roots: [node_id] = ~[];
let mut_roots: [{arg: uint, node: node_id}] = ~[];
let unsafe_ts: [ty::t] = ~[];
let unsafe_t_offsets: [uint] = ~[];
let roots: [node_id] = [];
let mut_roots: [{arg: uint, node: node_id}] = [];
let unsafe_ts: [ty::t] = [];
let unsafe_t_offsets: [uint] = [];
let i = 0u;
for arg_t: ty::arg in arg_ts {
if arg_t.mode != ty::mo_val {
let arg = args.(i);
let arg = args[i];
let root = expr_root(cx, arg, false);
if arg_t.mode == ty::mo_alias(true) {
alt path_def(cx, arg) {
@ -183,24 +193,27 @@ fn check_call(cx: &ctx, f: &@ast::expr, args: &[@ast::expr], sc: &scope) ->
let dnum = ast::def_id_of_def(def).node;
if def_is_local(def, true) {
if is_immutable_alias(cx, sc, dnum) {
cx.tcx.sess.span_err
(arg.span, "passing an immutable alias \
by mutable alias");
cx.tcx.sess.span_err(
arg.span,
"passing an immutable alias \
by mutable alias");
} else if is_immutable_objfield(cx, dnum) {
cx.tcx.sess.span_err
(arg.span, "passing an immutable object \
field by mutable alias");
cx.tcx.sess.span_err(
arg.span,
"passing an immutable object \
field by mutable alias");
}
} else {
cx.tcx.sess.span_err
(arg.span,
"passing a static item by mutable alias");
cx.tcx.sess.span_err(
arg.span,
"passing a static item by mutable alias");
}
mut_roots += ~[{arg: i, node: dnum}];
mut_roots += [{arg: i, node: dnum}];
}
_ {
if !mut_field(root.ds) {
let m = "passing a temporary value or \
let m =
"passing a temporary value or \
immutable field by mutable alias";
cx.tcx.sess.span_err(arg.span, m);
}
@ -208,11 +221,11 @@ fn check_call(cx: &ctx, f: &@ast::expr, args: &[@ast::expr], sc: &scope) ->
}
}
alt path_def_id(cx, root.ex) {
some(did) { roots += ~[did.node]; }
some(did) { roots += [did.node]; }
_ { }
}
alt inner_mut(root.ds) {
some(t) { unsafe_ts += ~[t]; unsafe_t_offsets += ~[i]; }
some(t) { unsafe_ts += [t]; unsafe_t_offsets += [i]; }
_ { }
}
}
@ -223,9 +236,9 @@ fn check_call(cx: &ctx, f: &@ast::expr, args: &[@ast::expr], sc: &scope) ->
ast::expr_path(_) {
if def_is_local(cx.tcx.def_map.get(f.id), true) {
cx.tcx.sess.span_err(f.span,
#fmt("function may alias with \
#fmt["function may alias with \
argument %u, which is not immutably rooted",
unsafe_t_offsets.(0)));
unsafe_t_offsets[0]]);
}
}
_ { }
@ -233,17 +246,17 @@ fn check_call(cx: &ctx, f: &@ast::expr, args: &[@ast::expr], sc: &scope) ->
}
let j = 0u;
for unsafe: ty::t in unsafe_ts {
let offset = unsafe_t_offsets.(j);
let offset = unsafe_t_offsets[j];
j += 1u;
let i = 0u;
for arg_t: ty::arg in arg_ts {
let mut_alias = arg_t.mode == ty::mo_alias(true);
if i != offset &&
ty_can_unsafely_include(cx, unsafe, arg_t.ty, mut_alias) {
cx.tcx.sess.span_err(args.(i).span,
#fmt("argument %u may alias with \
cx.tcx.sess.span_err(args[i].span,
#fmt["argument %u may alias with \
argument %u, which is not immutably rooted",
i, offset));
i, offset]);
}
i += 1u;
}
@ -265,7 +278,7 @@ fn check_call(cx: &ctx, f: &@ast::expr, args: &[@ast::expr], sc: &scope) ->
if mut_alias_to_root {
cx.tcx.sess.span_err(args.(root.arg).span,
cx.tcx.sess.span_err(args[root.arg].span,
"passing a mutable alias to a \
variable that roots another alias");
}
@ -281,14 +294,14 @@ fn check_tail_call(cx: &ctx, call: &@ast::expr) {
if arg_t.mode != ty::mo_val {
let mut_a = arg_t.mode == ty::mo_alias(true);
let ok = true;
alt args.(i).node {
alt args[i].node {
ast::expr_path(_) {
let def = cx.tcx.def_map.get(args.(i).id);
let def = cx.tcx.def_map.get(args[i].id);
let dnum = ast::def_id_of_def(def).node;
alt cx.local_map.find(dnum) {
some(arg(ast::alias(mut))) {
if mut_a && !mut {
cx.tcx.sess.span_err(args.(i).span,
cx.tcx.sess.span_err(args[i].span,
"passing an immutable \
alias by mutable alias");
}
@ -299,7 +312,7 @@ fn check_tail_call(cx: &ctx, call: &@ast::expr) {
_ { ok = false; }
}
if !ok {
cx.tcx.sess.span_err(args.(i).span,
cx.tcx.sess.span_err(args[i].span,
"can not pass a local value by \
alias to a tail call");
}
@ -313,26 +326,28 @@ fn check_alt(cx: &ctx, input: &@ast::expr, arms: &[ast::arm], sc: &scope,
visit::visit_expr(input, sc, v);
let root = expr_root(cx, input, true);
let roots =
alt path_def_id(cx, root.ex) { some(did) { ~[did.node] } _ { ~[] } };
alt path_def_id(cx, root.ex) { some(did) { [did.node] } _ { [] } };
let forbidden_tp: [ty::t] =
alt inner_mut(root.ds) { some(t) { ~[t] } _ { ~[] } };
alt inner_mut(root.ds) { some(t) { [t] } _ { [] } };
for a: ast::arm in arms {
let dnums = arm_defnums(a);
let new_sc = sc;
if vec::len(dnums) > 0u {
new_sc = @(*sc + ~[@{root_vars: roots,
block_defnum: dnums.(vec::len(dnums) - 1u),
bindings: dnums,
tys: forbidden_tp,
depends_on: deps(sc, roots),
mutable ok: valid}]);
new_sc =
@(*sc +
[@{root_vars: roots,
block_defnum: dnums[vec::len(dnums) - 1u],
bindings: dnums,
tys: forbidden_tp,
depends_on: deps(sc, roots),
mutable ok: valid}]);
}
visit::visit_arm(a, new_sc, v);
}
}
fn arm_defnums(arm: &ast::arm) -> [node_id] {
ret ast::pat_binding_ids(arm.pats.(0));
ret ast::pat_binding_ids(arm.pats[0]);
}
fn check_for_each(cx: &ctx, local: &@ast::local, call: &@ast::expr,
@ -342,13 +357,14 @@ fn check_for_each(cx: &ctx, local: &@ast::local, call: &@ast::expr,
ast::expr_call(f, args) {
let data = check_call(cx, f, args, sc);
let bindings = ast::pat_binding_ids(local.node.pat);
let new_sc = @{root_vars: data.root_vars,
block_defnum: bindings.(vec::len(bindings) - 1u),
bindings: bindings,
tys: data.unsafe_ts,
depends_on: deps(sc, data.root_vars),
mutable ok: valid};
visit::visit_block(blk, @(*sc + ~[new_sc]), v);
let new_sc =
@{root_vars: data.root_vars,
block_defnum: bindings[vec::len(bindings) - 1u],
bindings: bindings,
tys: data.unsafe_ts,
depends_on: deps(sc, data.root_vars),
mutable ok: valid};
visit::visit_block(blk, @(*sc + [new_sc]), v);
}
}
}
@ -358,15 +374,13 @@ fn check_for(cx: &ctx, local: &@ast::local, seq: &@ast::expr, blk: &ast::blk,
visit::visit_expr(seq, sc, v);
let root = expr_root(cx, seq, false);
let root_def =
alt path_def_id(cx, root.ex) { some(did) { ~[did.node] } _ { ~[] } };
let unsafe = alt inner_mut(root.ds) { some(t) { ~[t] } _ { ~[] } };
alt path_def_id(cx, root.ex) { some(did) { [did.node] } _ { [] } };
let unsafe = alt inner_mut(root.ds) { some(t) { [t] } _ { [] } };
// If this is a mutable vector, don't allow it to be touched.
let seq_t = ty::expr_ty(cx.tcx, seq);
alt ty::struct(cx.tcx, seq_t) {
ty::ty_vec(mt) {
if mt.mut != ast::imm { unsafe = ~[seq_t]; }
}
ty::ty_vec(mt) { if mt.mut != ast::imm { unsafe = [seq_t]; } }
ty::ty_str. | ty::ty_istr. {/* no-op */ }
_ {
cx.tcx.sess.span_unimpl(seq.span,
@ -375,13 +389,14 @@ fn check_for(cx: &ctx, local: &@ast::local, seq: &@ast::expr, blk: &ast::blk,
}
}
let bindings = ast::pat_binding_ids(local.node.pat);
let new_sc = @{root_vars: root_def,
block_defnum: bindings.(vec::len(bindings) - 1u),
bindings: bindings,
tys: unsafe,
depends_on: deps(sc, root_def),
mutable ok: valid};
visit::visit_block(blk, @(*sc + ~[new_sc]), v);
let new_sc =
@{root_vars: root_def,
block_defnum: bindings[vec::len(bindings) - 1u],
bindings: bindings,
tys: unsafe,
depends_on: deps(sc, root_def),
mutable ok: valid};
visit::visit_block(blk, @(*sc + [new_sc]), v);
}
fn check_var(cx: &ctx, ex: &@ast::expr, p: &ast::path, id: ast::node_id,
@ -391,6 +406,7 @@ fn check_var(cx: &ctx, ex: &@ast::expr, p: &ast::path, id: ast::node_id,
let my_defnum = ast::def_id_of_def(def).node;
let var_t = ty::expr_ty(cx.tcx, ex);
for r: restrict in *sc {
// excludes variables introduced since the alias was made
// FIXME This does not work anymore, now that we have macros.
if my_defnum < r.block_defnum {
@ -399,7 +415,7 @@ fn check_var(cx: &ctx, ex: &@ast::expr, p: &ast::path, id: ast::node_id,
r.ok = val_taken(ex.span, p);
}
}
} else if (vec::member(my_defnum, r.bindings)) {
} else if vec::member(my_defnum, r.bindings) {
test_scope(cx, sc, r, p);
}
}
@ -411,7 +427,7 @@ fn check_lval(cx: &@ctx, dest: &@ast::expr, sc: &scope, v: &vt<scope>) {
let dnum = ast::def_id_of_def(cx.tcx.def_map.get(dest.id)).node;
if is_immutable_alias(*cx, sc, dnum) {
cx.tcx.sess.span_err(dest.span, "assigning to immutable alias");
} else if (is_immutable_objfield(*cx, dnum)) {
} else if is_immutable_objfield(*cx, dnum) {
cx.tcx.sess.span_err(dest.span,
"assigning to immutable obj field");
}
@ -425,9 +441,9 @@ fn check_lval(cx: &@ctx, dest: &@ast::expr, sc: &scope, v: &vt<scope>) {
let root = expr_root(*cx, dest, false);
if vec::len(*root.ds) == 0u {
cx.tcx.sess.span_err(dest.span, "assignment to non-lvalue");
} else if (!root.ds.(0).mut) {
} else if !root.ds[0].mut {
let name =
alt root.ds.(0).kind {
alt root.ds[0].kind {
unbox. { "box" }
field. { "field" }
index. { "vec content" }
@ -475,9 +491,7 @@ fn is_immutable_alias(cx: &ctx, sc: &scope, dnum: node_id) -> bool {
some(arg(ast::alias(false))) { ret true; }
_ { }
}
for r: restrict in *sc {
if vec::member(dnum, r.bindings) { ret true; }
}
for r: restrict in *sc { if vec::member(dnum, r.bindings) { ret true; } }
ret false;
}
@ -489,17 +503,18 @@ fn test_scope(cx: &ctx, sc: &scope, r: &restrict, p: &ast::path) {
let prob = r.ok;
for dep: uint in r.depends_on {
if prob != valid { break; }
prob = sc.(dep).ok;
prob = sc[dep].ok;
}
if prob != valid {
let msg = alt prob {
overwritten(sp, wpt) {
{span: sp, msg: "overwriting " + ast::path_name(wpt)}
}
val_taken(sp, vpt) {
{span: sp, msg: "taking the value of " + ast::path_name(vpt)}
}
};
let msg =
alt prob {
overwritten(sp, wpt) {
{span: sp, msg: "overwriting " + ast::path_name(wpt)}
}
val_taken(sp, vpt) {
{span: sp, msg: "taking the value of " + ast::path_name(vpt)}
}
};
cx.tcx.sess.span_err(msg.span,
msg.msg + " will invalidate alias " +
ast::path_name(p) + ", which is still used");
@ -508,10 +523,10 @@ fn test_scope(cx: &ctx, sc: &scope, r: &restrict, p: &ast::path) {
fn deps(sc: &scope, roots: &[node_id]) -> [uint] {
let i = 0u;
let result = ~[];
let result = [];
for r: restrict in *sc {
for dn: node_id in roots {
if vec::member(dn, r.bindings) { result += ~[i]; }
if vec::member(dn, r.bindings) { result += [i]; }
}
i += 1u;
}
@ -530,37 +545,37 @@ type deref = @{mut: bool, kind: deref_t, outer_t: ty::t};
fn expr_root(cx: &ctx, ex: @ast::expr, autoderef: bool) ->
{ex: @ast::expr, ds: @[deref]} {
fn maybe_auto_unbox(cx: &ctx, t: ty::t) -> {t: ty::t, ds: [deref]} {
let ds = ~[];
let ds = [];
while true {
alt ty::struct(cx.tcx, t) {
ty::ty_box(mt) {
ds += ~[@{mut: mt.mut != ast::imm, kind: unbox, outer_t: t}];
ds += [@{mut: mt.mut != ast::imm, kind: unbox, outer_t: t}];
t = mt.ty;
}
ty::ty_uniq(mt) {
ds += ~[@{mut: false, kind: unbox, outer_t: t}];
ds += [@{mut: false, kind: unbox, outer_t: t}];
}
ty::ty_res(_, inner, tps) {
ds += ~[@{mut: false, kind: unbox, outer_t: t}];
ds += [@{mut: false, kind: unbox, outer_t: t}];
t = ty::substitute_type_params(cx.tcx, tps, inner);
}
ty::ty_tag(did, tps) {
let variants = ty::tag_variants(cx.tcx, did);
if vec::len(variants) != 1u ||
vec::len(variants.(0).args) != 1u {
vec::len(variants[0].args) != 1u {
break;
}
ds += ~[@{mut: false, kind: unbox, outer_t: t}];
ds += [@{mut: false, kind: unbox, outer_t: t}];
t =
ty::substitute_type_params(cx.tcx, tps,
variants.(0).args.(0));
variants[0].args[0]);
}
_ { break; }
}
}
ret {t: t, ds: ds};
}
let ds: [deref] = ~[];
let ds: [deref] = [];
while true {
alt { ex.node } {
ast::expr_field(base, ident) {
@ -577,7 +592,7 @@ fn expr_root(cx: &ctx, ex: @ast::expr, autoderef: bool) ->
}
ty::ty_obj(_) { }
}
ds += ~[@{mut: mut, kind: field, outer_t: auto_unbox.t}];
ds += [@{mut: mut, kind: field, outer_t: auto_unbox.t}];
ds += auto_unbox.ds;
ex = base;
}
@ -586,9 +601,9 @@ fn expr_root(cx: &ctx, ex: @ast::expr, autoderef: bool) ->
alt ty::struct(cx.tcx, auto_unbox.t) {
ty::ty_vec(mt) {
ds +=
~[@{mut: mt.mut != ast::imm,
kind: index,
outer_t: auto_unbox.t}];
[@{mut: mt.mut != ast::imm,
kind: index,
outer_t: auto_unbox.t}];
}
}
ds += auto_unbox.ds;
@ -605,7 +620,7 @@ fn expr_root(cx: &ctx, ex: @ast::expr, autoderef: bool) ->
ty::ty_tag(_, _) { }
ty::ty_ptr(mt) { mut = mt.mut != ast::imm; }
}
ds += ~[@{mut: mut, kind: unbox, outer_t: base_t}];
ds += [@{mut: mut, kind: unbox, outer_t: base_t}];
ex = base;
} else { break; }
}
@ -631,9 +646,9 @@ fn inner_mut(ds: &@[deref]) -> option::t<ty::t> {
fn path_def(cx: &ctx, ex: &@ast::expr) -> option::t<ast::def> {
ret alt ex.node {
ast::expr_path(_) { some(cx.tcx.def_map.get(ex.id)) }
_ { none }
}
ast::expr_path(_) { some(cx.tcx.def_map.get(ex.id)) }
_ { none }
}
}
fn path_def_id(cx: &ctx, ex: &@ast::expr) -> option::t<ast::def_id> {
@ -673,25 +688,23 @@ fn ty_can_unsafely_include(cx: &ctx, needle: ty::t, haystack: ty::t,
ret false;
}
ty::ty_tup(ts) {
for t in ts {
if helper(tcx, needle, t, mut) {
ret true;
}
}
for t in ts { if helper(tcx, needle, t, mut) { ret true; } }
ret false;
}
// These may contain anything.
ty::ty_fn(_, _, _, _, _) {
ret true;
}
ty::ty_obj(_) { ret true; }
// A type param may include everything, but can only be
// treated as opaque downstream, and is thus safe unless we
// saw mutable fields, in which case the whole thing can be
// overwritten.
ty::ty_param(_,_) {
ty::ty_param(_, _) {
ret mut;
}
_ { ret false; }

View file

@ -60,14 +60,14 @@ fn new_smallintmap_int_adapter<@V>() -> std::map::hashmap<int, V> {
// interface.
// FIXME: hashmap and smallintmap should support the same interface.
fn new_smallintmap_adapter<@K,
@V>(key_idx: fn(&K) -> uint ,
idx_key: fn(&uint) -> K ) ->
@V>(key_idx: fn(&K) -> uint,
idx_key: fn(&uint) -> K) ->
std::map::hashmap<K, V> {
obj adapter<@K,
@V>(map: smallintmap::smallintmap<V>,
key_idx: fn(&K) -> uint ,
idx_key: fn(&uint) -> K ) {
key_idx: fn(&K) -> uint,
idx_key: fn(&uint) -> K) {
fn size() -> uint { fail }
@ -128,45 +128,44 @@ mod test {
#[test]
fn test_node_span_item() {
let expected: codemap::span = mk_sp(20u, 30u);
let node = node_item(@{ident: "test",
attrs: ~[],
id: 0,
node: item_mod({view_items: ~[],
items: ~[]}),
span: expected});
assert node_span(node) == expected;
let node =
node_item(@{ident: "test",
attrs: [],
id: 0,
node: item_mod({view_items: [], items: []}),
span: expected});
assert (node_span(node) == expected);
}
#[test]
fn test_node_span_obj_ctor() {
let expected: codemap::span = mk_sp(20u, 30u);
let node = node_obj_ctor(@{ident: "test",
attrs: ~[],
id: 0,
node: item_mod({view_items: ~[],
items: ~[]}),
span: expected});
assert node_span(node) == expected;
let node =
node_obj_ctor(@{ident: "test",
attrs: [],
id: 0,
node: item_mod({view_items: [], items: []}),
span: expected});
assert (node_span(node) == expected);
}
#[test]
fn test_node_span_native_item() {
let expected: codemap::span = mk_sp(20u, 30u);
let node = node_native_item(@{ident: "test",
attrs: ~[],
node: native_item_ty,
id: 0,
span: expected});
assert node_span(node) == expected;
let node =
node_native_item(@{ident: "test",
attrs: [],
node: native_item_ty,
id: 0,
span: expected});
assert (node_span(node) == expected);
}
#[test]
fn test_node_span_expr() {
let expected: codemap::span = mk_sp(20u, 30u);
let node = node_expr(@{id: 0,
node: expr_break,
span: expected});
assert node_span(node) == expected;
let node = node_expr(@{id: 0, node: expr_break, span: expected});
assert (node_span(node) == expected);
}
}

View file

@ -5,7 +5,7 @@ fn check_crate(tcx: &ty::ctxt, crate: &@crate) {
let v =
@{visit_expr: bind check_expr(tcx, _, _, _),
visit_local: bind check_local(tcx, _, _, _)
with *visit::default_visitor::<()>()};
with *visit::default_visitor::<()>()};
visit::visit_crate(*crate, (), visit::mk_vt(v));
tcx.sess.abort_if_errors();
}
@ -22,7 +22,7 @@ fn check_arms(tcx: &ty::ctxt, arms: &[arm]) {
let reachable = true;
let j = 0;
while j < i {
for prev_pat: @pat in arms.(j).pats {
for prev_pat: @pat in arms[j].pats {
if pattern_supersedes(tcx, prev_pat, arm_pat) {
reachable = false;
}
@ -38,11 +38,10 @@ fn check_arms(tcx: &ty::ctxt, arms: &[arm]) {
}
fn pattern_supersedes(tcx: &ty::ctxt, a: &@pat, b: &@pat) -> bool {
fn patterns_supersede(tcx: &ty::ctxt, as: &[@pat], bs: &[@pat]) ->
bool {
fn patterns_supersede(tcx: &ty::ctxt, as: &[@pat], bs: &[@pat]) -> bool {
let i = 0;
for a: @pat in as {
if !pattern_supersedes(tcx, a, bs.(i)) { ret false; }
if !pattern_supersedes(tcx, a, bs[i]) { ret false; }
i += 1;
}
ret true;
@ -119,19 +118,13 @@ fn is_refutable(tcx: &ty::ctxt, pat: &@pat) -> bool {
ret false;
}
pat_tup(elts) {
for elt in elts {
if is_refutable(tcx, elt) { ret true; }
}
for elt in elts { if is_refutable(tcx, elt) { ret true; } }
ret false;
}
pat_tag(_, args) {
let vdef = variant_def_ids(tcx.def_map.get(pat.id));
if std::vec::len(ty::tag_variants(tcx, vdef.tg)) != 1u {
ret true;
}
for p: @pat in args {
if is_refutable(tcx, p) { ret true; }
}
if std::vec::len(ty::tag_variants(tcx, vdef.tg)) != 1u { ret true; }
for p: @pat in args { if is_refutable(tcx, p) { ret true; } }
ret false;
}
}

View file

@ -38,59 +38,62 @@ type freevar_map = hashmap<ast::node_id, freevar_info>;
// of the AST, we take a walker function that we invoke with a visitor
// in order to start the search.
fn collect_freevars(def_map: &resolve::def_map, sess: &session::session,
walker: &fn(&visit::vt<()>) ,
walker: &fn(&visit::vt<()>),
initial_decls: [ast::node_id]) -> freevar_info {
let decls = new_int_hash();
for decl: ast::node_id in initial_decls { set_add(decls, decl); }
let refs = @mutable ~[];
let refs = @mutable [];
let walk_fn = lambda(f: &ast::_fn, _tps: &[ast::ty_param], _sp: &span,
_i: &ast::fn_ident, _nid: ast::node_id) {
for a: ast::arg in f.decl.inputs { set_add(decls, a.id); }
};
let walk_expr = lambda(expr: &@ast::expr) {
alt expr.node {
ast::expr_path(path) {
if !def_map.contains_key(expr.id) {
sess.span_fatal(expr.span,
"internal error in collect_freevars");
let walk_fn =
lambda (f: &ast::_fn, _tps: &[ast::ty_param], _sp: &span,
_i: &ast::fn_ident, _nid: ast::node_id) {
for a: ast::arg in f.decl.inputs { set_add(decls, a.id); }
};
let walk_expr =
lambda (expr: &@ast::expr) {
alt expr.node {
ast::expr_path(path) {
if !def_map.contains_key(expr.id) {
sess.span_fatal(expr.span,
"internal error in collect_freevars");
}
alt def_map.get(expr.id) {
ast::def_arg(did) { *refs += [expr.id]; }
ast::def_local(did) { *refs += [expr.id]; }
ast::def_binding(did) { *refs += [expr.id]; }
_ {/* no-op */ }
}
}
_ { }
}
alt def_map.get(expr.id) {
ast::def_arg(did) { *refs += ~[expr.id]; }
ast::def_local(did) { *refs += ~[expr.id]; }
ast::def_binding(did) { *refs += ~[expr.id]; }
_ {/* no-op */ }
};
let walk_local =
lambda (local: &@ast::local) {
for each b: @ast::pat in ast::pat_bindings(local.node.pat) {
set_add(decls, b.id);
}
}
_ { }
}
};
let walk_local = lambda(local: &@ast::local) {
for each b: @ast::pat in ast::pat_bindings(local.node.pat) {
set_add(decls, b.id);
}
};
let walk_pat = lambda(p: &@ast::pat) {
alt p.node { ast::pat_bind(_) { set_add(decls, p.id); } _ { } }
};
walker(visit::mk_simple_visitor
(@{visit_local: walk_local,
visit_pat: walk_pat,
visit_expr: walk_expr,
visit_fn: walk_fn
with *visit::default_simple_visitor()}));
};
let walk_pat =
lambda (p: &@ast::pat) {
alt p.node { ast::pat_bind(_) { set_add(decls, p.id); } _ { } }
};
walker(visit::mk_simple_visitor(@{visit_local: walk_local,
visit_pat: walk_pat,
visit_expr: walk_expr,
visit_fn: walk_fn
with
*visit::default_simple_visitor()}));
// Calculate (refs - decls). This is the set of captured upvars.
// We build a vec of the node ids of the uses and a set of the
// node ids of the definitions.
let canonical_refs = ~[];
let canonical_refs = [];
let defs = new_int_hash();
for ref_id_: ast::node_id in *refs {
let ref_id = ref_id_;
let def_id = ast::def_id_of_def(def_map.get(ref_id)).node;
if !decls.contains_key(def_id) && !defs.contains_key(def_id) {
canonical_refs += ~[ref_id];
canonical_refs += [ref_id];
set_add(defs, def_id);
}
}
@ -106,32 +109,34 @@ fn annotate_freevars(sess: &session::session, def_map: &resolve::def_map,
crate: &@ast::crate) -> freevar_map {
let freevars = new_int_hash();
let walk_fn = lambda(f: &ast::_fn, tps: &[ast::ty_param], sp: &span,
i: &ast::fn_ident, nid: ast::node_id) {
let start_walk = lambda(v: &visit::vt<()>) {
v.visit_fn(f, tps, sp, i, nid, (), v);
let walk_fn =
lambda (f: &ast::_fn, tps: &[ast::ty_param], sp: &span,
i: &ast::fn_ident, nid: ast::node_id) {
let start_walk =
lambda (v: &visit::vt<()>) {
v.visit_fn(f, tps, sp, i, nid, (), v);
};
let vars = collect_freevars(def_map, sess, start_walk, []);
freevars.insert(nid, vars);
};
let walk_expr =
lambda (expr: &@ast::expr) {
alt expr.node {
ast::expr_for_each(local, _, body) {
let start_walk =
lambda (v: &visit::vt<()>) {
v.visit_block(body, (), v);
};
let bound = ast::pat_binding_ids(local.node.pat);
let vars = collect_freevars(def_map, sess, start_walk, bound);
freevars.insert(body.node.id, vars);
}
_ { }
}
};
let vars = collect_freevars(def_map, sess, start_walk, ~[]);
freevars.insert(nid, vars);
};
let walk_expr = lambda(expr: &@ast::expr) {
alt expr.node {
ast::expr_for_each(local, _, body) {
let start_walk = lambda(v: &visit::vt<()>) {
v.visit_block(body, (), v);
};
let bound = ast::pat_binding_ids(local.node.pat);
let vars =
collect_freevars(def_map, sess, start_walk, bound);
freevars.insert(body.node.id, vars);
}
_ { }
}
};
let visitor =
visit::mk_simple_visitor(@{visit_fn: walk_fn,
visit_expr: walk_expr
visit::mk_simple_visitor(@{visit_fn: walk_fn, visit_expr: walk_expr
with *visit::default_simple_visitor()});
visit::visit_crate(*crate, (), visitor);
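
For orientation: `collect_freevars` in this file's diff records every variable reference made while walking the code, then subtracts the locally declared ids and keeps only the first reference to each remaining definition; those are the captured upvars. Below is a minimal sketch of that set-difference step in modern Rust, assuming a `(use_id, def_id)` pair representation chosen purely for illustration rather than the actual freevar data structures.

use std::collections::HashSet;

// Illustrative sketch: refs are (use_id, def_id) pairs; any def_id that is
// neither declared locally nor already seen contributes one captured upvar.
fn captured_upvars(refs: &[(u32, u32)], decls: &HashSet<u32>) -> Vec<u32> {
    let mut seen = HashSet::new();
    let mut captured = Vec::new();
    for &(use_id, def_id) in refs {
        if !decls.contains(&def_id) && seen.insert(def_id) {
            captured.push(use_id);
        }
    }
    captured
}

fn main() {
    let decls: HashSet<u32> = HashSet::from([1, 2]);
    // def 1 is local, def 3 is free; only its first use (id 11) is recorded.
    assert_eq!(captured_upvars(&[(10, 1), (11, 3), (12, 3)], &decls), vec![11]);
}
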

View file

@ -16,15 +16,13 @@ import std::vec;
import lll = lib::llvm::llvm;
type ctxt = @{ mutable next_tydesc_num: uint };
type ctxt = @{mutable next_tydesc_num: uint};
fn mk_ctxt() -> ctxt {
ret @{ mutable next_tydesc_num: 0u };
}
fn mk_ctxt() -> ctxt { ret @{mutable next_tydesc_num: 0u}; }
fn add_global(ccx: &@crate_ctxt, llval: ValueRef, name: str) -> ValueRef {
let llglobal = lll::LLVMAddGlobal(ccx.llmod, val_ty(llval),
str::buf(name));
let llglobal =
lll::LLVMAddGlobal(ccx.llmod, val_ty(llval), str::buf(name));
lll::LLVMSetInitializer(llglobal, llval);
lll::LLVMSetGlobalConstant(llglobal, True);
ret llglobal;
@ -33,7 +31,7 @@ fn add_global(ccx: &@crate_ctxt, llval: ValueRef, name: str) -> ValueRef {
fn add_gc_root(cx: &@block_ctxt, llval: ValueRef, ty: ty::t) -> @block_ctxt {
let bcx = cx;
if !type_is_gc_relevant(bcx_tcx(cx), ty) ||
ty::type_has_dynamic_size(bcx_tcx(cx), ty) {
ty::type_has_dynamic_size(bcx_tcx(cx), ty) {
ret bcx;
}
@ -52,48 +50,45 @@ fn add_gc_root(cx: &@block_ctxt, llval: ValueRef, ty: ty::t) -> @block_ctxt {
let llvalptr = bcx.build.PointerCast(llval, T_ptr(T_ptr(T_i8())));
alt td_r.kind {
tk_derived. {
// It's a derived type descriptor. First, spill it.
let lltydescptr = trans::alloca(bcx, val_ty(lltydesc));
bcx.build.Store(lltydesc, lltydescptr);
tk_derived. {
// It's a derived type descriptor. First, spill it.
let lltydescptr = trans::alloca(bcx, val_ty(lltydesc));
bcx.build.Store(lltydesc, lltydescptr);
let number = gc_cx.next_tydesc_num;
gc_cx.next_tydesc_num += 1u;
let number = gc_cx.next_tydesc_num;
gc_cx.next_tydesc_num += 1u;
let lldestindex = add_global(bcx_ccx(bcx),
C_struct(~[C_int(0),
C_uint(number)]),
"rust_gc_tydesc_dest_index");
let llsrcindex = add_global(bcx_ccx(bcx),
C_struct(~[C_int(1), C_uint(number)]),
"rust_gc_tydesc_src_index");
let lldestindex =
add_global(bcx_ccx(bcx), C_struct([C_int(0), C_uint(number)]),
"rust_gc_tydesc_dest_index");
let llsrcindex =
add_global(bcx_ccx(bcx), C_struct([C_int(1), C_uint(number)]),
"rust_gc_tydesc_src_index");
lldestindex = lll::LLVMConstPointerCast(lldestindex,
T_ptr(T_i8()));
llsrcindex = lll::LLVMConstPointerCast(llsrcindex,
T_ptr(T_i8()));
lldestindex = lll::LLVMConstPointerCast(lldestindex, T_ptr(T_i8()));
llsrcindex = lll::LLVMConstPointerCast(llsrcindex, T_ptr(T_i8()));
lltydescptr = bcx.build.PointerCast(lltydescptr,
T_ptr(T_ptr(T_i8())));
lltydescptr =
bcx.build.PointerCast(lltydescptr, T_ptr(T_ptr(T_i8())));
bcx.build.Call(gcroot, ~[ lltydescptr, lldestindex ]);
bcx.build.Call(gcroot, ~[ llvalptr, llsrcindex ]);
}
tk_param. {
bcx_tcx(cx).sess.bug("we should never be trying to root values " +
"of a type parameter");
}
tk_static. {
// Static type descriptor.
bcx.build.Call(gcroot, [lltydescptr, lldestindex]);
bcx.build.Call(gcroot, [llvalptr, llsrcindex]);
}
tk_param. {
bcx_tcx(cx).sess.bug("we should never be trying to root values " +
"of a type parameter");
}
tk_static. {
// Static type descriptor.
let llstaticgcmeta = add_global(bcx_ccx(bcx),
C_struct(~[C_int(2), lltydesc]),
"rust_gc_tydesc_static_gc_meta");
let llstaticgcmetaptr = lll::LLVMConstPointerCast(llstaticgcmeta,
T_ptr(T_i8()));
let llstaticgcmeta =
add_global(bcx_ccx(bcx), C_struct([C_int(2), lltydesc]),
"rust_gc_tydesc_static_gc_meta");
let llstaticgcmetaptr =
lll::LLVMConstPointerCast(llstaticgcmeta, T_ptr(T_i8()));
bcx.build.Call(gcroot, ~[ llvalptr, llstaticgcmetaptr ]);
}
bcx.build.Call(gcroot, [llvalptr, llstaticgcmetaptr]);
}
}
ret bcx;
@ -101,45 +96,52 @@ fn add_gc_root(cx: &@block_ctxt, llval: ValueRef, ty: ty::t) -> @block_ctxt {
fn type_is_gc_relevant(cx: &ty::ctxt, ty: &ty::t) -> bool {
alt ty::struct(cx, ty) {
ty::ty_nil. | ty::ty_bot. | ty::ty_bool. | ty::ty_int. |
ty::ty_float. | ty::ty_uint. | ty::ty_machine(_) | ty::ty_char. |
ty::ty_istr. | ty::ty_type. | ty::ty_native(_) | ty::ty_ptr(_) |
ty::ty_type. | ty::ty_native(_) {
ret false;
}
ty::ty_nil. | ty::ty_bot. | ty::ty_bool. | ty::ty_int. | ty::ty_float. |
ty::ty_uint. | ty::ty_machine(_) | ty::ty_char. | ty::ty_istr. |
ty::ty_type. | ty::ty_native(_) | ty::ty_ptr(_) | ty::ty_type. |
ty::ty_native(_) {
ret false;
}
ty::ty_rec(fields) {
for f in fields {
if type_is_gc_relevant(cx, f.mt.ty) { ret true; }
ty::ty_rec(fields) {
for f in fields { if type_is_gc_relevant(cx, f.mt.ty) { ret true; } }
ret false;
}
ty::ty_tup(elts) {
for elt in elts { if type_is_gc_relevant(cx, elt) { ret true; } }
ret false;
}
ty::ty_tag(did, tps) {
let variants = ty::tag_variants(cx, did);
for variant in variants {
for aty in variant.args {
let arg_ty = ty::substitute_type_params(cx, tps, aty);
if type_is_gc_relevant(cx, arg_ty) { ret true; }
}
ret false;
}
ty::ty_tup(elts) {
for elt in elts {
if type_is_gc_relevant(cx, elt) { ret true; }
}
ret false;
}
ret false;
}
ty::ty_tag(did, tps) {
let variants = ty::tag_variants(cx, did);
for variant in variants {
for aty in variant.args {
let arg_ty = ty::substitute_type_params(cx, tps, aty);
if type_is_gc_relevant(cx, arg_ty) { ret true; }
}
}
ret false;
}
ty::ty_vec(tm) { ret type_is_gc_relevant(cx, tm.ty); }
ty::ty_constr(sub, _) { ret type_is_gc_relevant(cx, sub); }
ty::ty_vec(tm) {
ret type_is_gc_relevant(cx, tm.ty);
}
ty::ty_constr(sub, _) { ret type_is_gc_relevant(cx, sub); }
ty::ty_str. | ty::ty_box(_) | ty::ty_uniq(_) |
ty::ty_fn(_,_,_,_,_) | ty::ty_native_fn(_,_,_) | ty::ty_obj(_) |
ty::ty_param(_,_) | ty::ty_res(_,_,_) { ret true; }
ty::ty_var(_) { fail "ty_var in type_is_gc_relevant"; }
ty::ty_str. | ty::ty_box(_) | ty::ty_uniq(_) | ty::ty_fn(_, _, _, _, _)
| ty::ty_native_fn(_, _, _) | ty::ty_obj(_) | ty::ty_param(_, _) |
ty::ty_res(_, _, _) {
ret true;
}
ty::ty_var(_) {
fail "ty_var in type_is_gc_relevant";
}
}
}

View file

@ -101,35 +101,30 @@ fn kind_to_str(k: kind) -> str {
}
}
fn type_and_kind(tcx: &ty::ctxt, e: &@ast::expr)
-> {ty: ty::t, kind: ast::kind} {
fn type_and_kind(tcx: &ty::ctxt, e: &@ast::expr) ->
{ty: ty::t, kind: ast::kind} {
let t = ty::expr_ty(tcx, e);
let k = ty::type_kind(tcx, t);
{ty: t, kind: k}
}
fn need_expr_kind(tcx: &ty::ctxt, e: &@ast::expr,
k_need: ast::kind, descr: &str) {
fn need_expr_kind(tcx: &ty::ctxt, e: &@ast::expr, k_need: ast::kind,
descr: &str) {
let tk = type_and_kind(tcx, e);
log #fmt("for %s: want %s type, got %s type %s",
descr,
kind_to_str(k_need),
kind_to_str(tk.kind),
util::ppaux::ty_to_str(tcx, tk.ty));
log #fmt["for %s: want %s type, got %s type %s", descr,
kind_to_str(k_need), kind_to_str(tk.kind),
util::ppaux::ty_to_str(tcx, tk.ty)];
if ! kind_lteq(k_need, tk.kind) {
if !kind_lteq(k_need, tk.kind) {
let s =
#fmt("mismatched kinds for %s: needed %s type, got %s type %s",
descr,
kind_to_str(k_need),
kind_to_str(tk.kind),
util::ppaux::ty_to_str(tcx, tk.ty));
#fmt["mismatched kinds for %s: needed %s type, got %s type %s",
descr, kind_to_str(k_need), kind_to_str(tk.kind),
util::ppaux::ty_to_str(tcx, tk.ty)];
tcx.sess.span_err(e.span, s);
}
}
fn need_shared_lhs_rhs(tcx: &ty::ctxt,
a: &@ast::expr, b: &@ast::expr,
fn need_shared_lhs_rhs(tcx: &ty::ctxt, a: &@ast::expr, b: &@ast::expr,
op: &str) {
need_expr_kind(tcx, a, ast::kind_shared, op + " lhs");
need_expr_kind(tcx, b, ast::kind_shared, op + " rhs");
@ -142,6 +137,7 @@ fn check_expr(tcx: &ty::ctxt, e: &@ast::expr) {
ast::expr_swap(a, b) { need_shared_lhs_rhs(tcx, a, b, "<->"); }
ast::expr_call(callee, _) {
let tpt = ty::expr_ty_params_and_ty(tcx, callee);
// If we have typarams, we're calling an item; we need to check
// that all the types we're supplying as typarams conform to the
// typaram kind constraints on that item.
@ -149,17 +145,16 @@ fn check_expr(tcx: &ty::ctxt, e: &@ast::expr) {
let callee_def = ast::def_id_of_def(tcx.def_map.get(callee.id));
let item_tk = ty::lookup_item_type(tcx, callee_def);
let i = 0;
assert vec::len(item_tk.kinds) == vec::len(tpt.params);
assert (vec::len(item_tk.kinds) == vec::len(tpt.params));
for k_need: ast::kind in item_tk.kinds {
let t = tpt.params.(i);
let t = tpt.params[i];
let k = ty::type_kind(tcx, t);
if ! kind_lteq(k_need, k) {
let s = #fmt("mismatched kinds for typaram %d: \
if !kind_lteq(k_need, k) {
let s =
#fmt["mismatched kinds for typaram %d: \
needed %s type, got %s type %s",
i,
kind_to_str(k_need),
kind_to_str(k),
util::ppaux::ty_to_str(tcx, t));
i, kind_to_str(k_need), kind_to_str(k),
util::ppaux::ty_to_str(tcx, t)];
tcx.sess.span_err(e.span, s);
}
i += 1;
@ -171,9 +166,9 @@ fn check_expr(tcx: &ty::ctxt, e: &@ast::expr) {
}
fn check_crate(tcx: &ty::ctxt, crate: &@ast::crate) {
let visit = visit::mk_simple_visitor
(@{visit_expr: bind check_expr(tcx, _)
with *visit::default_simple_visitor()});
let visit =
visit::mk_simple_visitor(@{visit_expr: bind check_expr(tcx, _)
with *visit::default_simple_visitor()});
visit::visit_crate(*crate, (), visit);
tcx.sess.abort_if_errors();
}

View file

@ -65,9 +65,11 @@ tag import_state {
todo(ast::node_id, ast::ident, [ast::ident], codemap::span, scopes);
resolving(span);
resolved(option::t<def>,
/* value */
/* value */
option::t<def>,
/* type */
/* type */
option::t<def>); /* module */
}
@ -135,7 +137,7 @@ tag dir { inside; outside; }
tag namespace { ns_value; ns_type; ns_module; }
fn resolve_crate(sess: session, amap: &ast_map::map, crate: @ast::crate) ->
{def_map: def_map, ext_map: ext_map} {
{def_map: def_map, ext_map: ext_map} {
let e =
@{cstore: sess.get_cstore(),
def_map: new_int_hash::<def>(),
@ -144,7 +146,7 @@ fn resolve_crate(sess: session, amap: &ast_map::map, crate: @ast::crate) ->
mod_map: new_int_hash::<@indexed_mod>(),
ext_map: new_def_hash::<[ident]>(),
ext_cache: new_ext_hash(),
mutable reported: ~[],
mutable reported: [],
sess: sess};
map_crate(e, crate);
resolve_imports(*e);
@ -169,7 +171,7 @@ fn map_crate(e: &@env, c: &@ast::crate) {
e.mod_map.insert(-1,
@{m: some(c.node.module),
index: index_mod(c.node.module),
mutable glob_imports: ~[],
mutable glob_imports: [],
glob_imported_names: new_str_hash::<import_state>()});
fn index_vi(e: @env, i: &@ast::view_item, sc: &scopes, _v: &vt<scopes>) {
alt i.node {
@ -180,8 +182,8 @@ fn map_crate(e: &@env, c: &@ast::crate) {
for ident in idents {
e.imports.insert(ident.node.id,
todo(ident.node.id, ident.node.name,
mod_path + ~[ident.node.name],
ident.span, sc));
mod_path + [ident.node.name],
ident.span, sc));
}
}
_ { }
@ -195,7 +197,7 @@ fn map_crate(e: &@env, c: &@ast::crate) {
e.mod_map.insert(i.id,
@{m: some(md),
index: index_mod(md),
mutable glob_imports: ~[],
mutable glob_imports: [],
glob_imported_names: s});
}
ast::item_native_mod(nmd) {
@ -203,7 +205,7 @@ fn map_crate(e: &@env, c: &@ast::crate) {
e.mod_map.insert(i.id,
@{m: none::<ast::_mod>,
index: index_nmod(nmd),
mutable glob_imports: ~[],
mutable glob_imports: [],
glob_imported_names: s});
}
_ { }
@ -237,12 +239,13 @@ fn map_crate(e: &@env, c: &@ast::crate) {
}
alt vi.node {
//if it really is a glob import, that is
ast::view_item_import_glob(path, _) {
let imp = follow_import(*e, sc, path, vi.span);
if option::is_some(imp) {
find_mod(e, sc).glob_imports +=
~[{def: option::get(imp), item: vi}];
[{def: option::get(imp), item: vi}];
}
}
_ { }
@ -255,8 +258,7 @@ fn resolve_imports(e: &env) {
{
alt it.val {
todo(node_id, name, path, span, scopes) {
resolve_import(e, local_def(node_id),
name, path, span, scopes);
resolve_import(e, local_def(node_id), name, path, span, scopes);
}
resolved(_, _, _) { }
}
@ -318,8 +320,8 @@ fn resolve_names(e: &@env, c: &@ast::crate) {
e.def_map.insert(pat.id, option::get(fnd));
}
_ {
e.sess.span_err
(p.span, "not a tag variant: " + ast::path_name(p));
e.sess.span_err(p.span,
"not a tag variant: " + ast::path_name(p));
}
}
}
@ -349,7 +351,7 @@ fn visit_fn_with_scope(e: &@env, f: &ast::_fn, tp: &[ast::ty_param],
// is this a main fn declaration?
alt name {
some(nm) {
if is_main_name(~[nm]) && !e.sess.get_opts().library {
if is_main_name([nm]) && !e.sess.get_opts().library {
// This is a main function -- set it in the session
// as the main ID
e.sess.set_main_id(id);
@ -361,9 +363,7 @@ fn visit_fn_with_scope(e: &@env, f: &ast::_fn, tp: &[ast::ty_param],
// here's where we need to set up the mapping
// for f's constrs in the table.
for c: @ast::constr in f.decl.constraints {
resolve_constr(e, c, sc, v);
}
for c: @ast::constr in f.decl.constraints { resolve_constr(e, c, sc, v); }
visit::visit_fn(f, tp, sp, name, id,
cons(scope_fn(f.decl, f.proto, tp), @sc), v);
}
@ -372,24 +372,22 @@ fn visit_block_with_scope(b: &ast::blk, sc: &scopes, v: &vt<scopes>) {
let pos = @mutable 0u, loc = @mutable 0u;
let block_sc = cons(scope_block(b, pos, loc), @sc);
for stmt in b.node.stmts {
v.visit_stmt(stmt, block_sc, v);
*pos += 1u;
v.visit_stmt(stmt, block_sc, v);;
*pos += 1u;;
*loc = 0u;
}
visit::visit_expr_opt(b.node.expr, block_sc, v);
}
fn visit_decl_with_scope(d: &@decl, sc: &scopes, v: &vt<scopes>) {
let loc_pos = alt list::car(sc) {
scope_block(_, _, pos) { pos }
_ { @mutable 0u }
};
let loc_pos =
alt list::car(sc) {
scope_block(_, _, pos) { pos }
_ { @mutable 0u }
};
alt d.node {
decl_local(locs) {
for loc in locs {
v.visit_local(loc, sc, v);
*loc_pos += 1u;
}
for loc in locs { v.visit_local(loc, sc, v);; *loc_pos += 1u; }
}
decl_item(it) { v.visit_item(it, sc, v); }
}
@ -408,7 +406,7 @@ fn visit_expr_with_scope(x: &@ast::expr, sc: &scopes, v: &vt<scopes>) {
v.visit_block(blk, new_sc, v);
}
ast::expr_fn(f) {
visit::visit_expr(x, cons(scope_fn(f.decl, f.proto, ~[]), @sc), v);
visit::visit_expr(x, cons(scope_fn(f.decl, f.proto, []), @sc), v);
}
_ { visit::visit_expr(x, sc, v); }
}
@ -417,12 +415,12 @@ fn visit_expr_with_scope(x: &@ast::expr, sc: &scopes, v: &vt<scopes>) {
fn follow_import(e: &env, sc: &scopes, path: &[ident], sp: &span) ->
option::t<def> {
let path_len = vec::len(path);
let dcur = lookup_in_scope_strict(e, sc, sp, path.(0), ns_module);
let dcur = lookup_in_scope_strict(e, sc, sp, path[0], ns_module);
let i = 1u;
while true && option::is_some(dcur) {
if i == path_len { break; }
dcur =
lookup_in_mod_strict(e, sc, option::get(dcur), sp, path.(i),
lookup_in_mod_strict(e, sc, option::get(dcur), sp, path[i],
ns_module, outside);
i += 1u;
}
@ -461,23 +459,22 @@ fn resolve_import(e: &env, defid: ast::def_id, name: &ast::ident,
ids: &[ast::ident], sp: &codemap::span, sc_in: &scopes) {
e.imports.insert(defid.node, resolving(sp));
let n_idents = vec::len(ids);
let end_id = ids.(n_idents - 1u);
let end_id = ids[n_idents - 1u];
// Ignore the current scope if this import would shadow itself.
let sc =
if str::eq(name, ids.(0)) { std::list::cdr(sc_in) } else { sc_in };
if str::eq(name, ids[0]) { std::list::cdr(sc_in) } else { sc_in };
if n_idents == 1u {
register(e, defid, sp, end_id, sc_in,
lookup_in_scope(e, sc, sp, end_id, ns_value),
lookup_in_scope(e, sc, sp, end_id, ns_type),
lookup_in_scope(e, sc, sp, end_id, ns_module));
remove_if_unresolved(e.imports, defid.node);
} else {
let // FIXME (issue #521)
dcur =
alt lookup_in_scope(e, sc, sp, ids.(0), ns_module) {
} else { // FIXME (issue #521)
let dcur =
alt lookup_in_scope(e, sc, sp, ids[0], ns_module) {
some(dcur) { dcur }
none. {
unresolved_err(e, sc, sp, ids.(0), ns_name(ns_module));
unresolved_err(e, sc, sp, ids[0], ns_name(ns_module));
remove_if_unresolved(e.imports, defid.node);
ret ()
}
@ -488,20 +485,18 @@ fn resolve_import(e: &env, defid: ast::def_id, name: &ast::ident,
register(e, defid, sp, end_id, sc_in,
lookup_in_mod(e, dcur, sp, end_id, ns_value,
outside),
lookup_in_mod(e, dcur, sp, end_id, ns_type,
outside),
lookup_in_mod(e, dcur, sp, end_id, ns_type, outside),
lookup_in_mod(e, dcur, sp, end_id, ns_module,
outside));
remove_if_unresolved(e.imports, defid.node);
break;
} else {
dcur =
alt lookup_in_mod(e, dcur, sp, ids.(i), ns_module,
outside) {
alt lookup_in_mod(e, dcur, sp, ids[i], ns_module, outside)
{
some(dcur) { dcur }
none. {
unresolved_err(e, sc, sp, ids.(i),
ns_name(ns_module));
unresolved_err(e, sc, sp, ids[i], ns_name(ns_module));
remove_if_unresolved(e.imports, defid.node);
ret () // FIXME (issue #521)
}
@ -563,7 +558,7 @@ fn unresolved_err(e: &env, sc: &scopes, sp: &span, name: &ident, kind: &str) {
for rs: {ident: str, sc: scope} in e.reported {
if str::eq(rs.ident, name) && err_scope == rs.sc { ret; }
}
e.reported += ~[{ident: name, sc: err_scope}];
e.reported += [{ident: name, sc: err_scope}];
e.sess.span_err(sp, mk_unresolved_msg(name, kind));
}
@ -572,7 +567,7 @@ fn unresolved_fatal(e: &env, sp: &span, id: &ident, kind: &str) -> ! {
}
fn mk_unresolved_msg(id: &ident, kind: &str) -> str {
ret #fmt("unresolved %s: %s", kind, id);
ret #fmt["unresolved %s: %s", kind, id];
}
// Lookup helpers
@ -587,13 +582,13 @@ fn lookup_path_strict(e: &env, sc: &scopes, sp: &span, pth: &ast::path_,
} else { first_scope = sc; }
let dcur =
lookup_in_scope_strict(e, first_scope, sp, pth.idents.(0), headns);
lookup_in_scope_strict(e, first_scope, sp, pth.idents[0], headns);
let i = 1u;
while i < n_idents && option::is_some(dcur) {
let curns = if n_idents == i + 1u { ns } else { ns_module };
dcur =
lookup_in_mod_strict(e, sc, option::get(dcur), sp, pth.idents.(i),
lookup_in_mod_strict(e, sc, option::get(dcur), sp, pth.idents[i],
curns, outside);
i += 1u;
}
@ -630,7 +625,7 @@ fn def_is_obj_field(d: &def) -> bool {
}
fn def_is_ty_arg(d: &def) -> bool {
ret alt d { ast::def_ty_arg(_,_) { true } _ { false } };
ret alt d { ast::def_ty_arg(_, _) { true } _ { false } };
}
fn lookup_in_scope(e: &env, sc: scopes, sp: &span, name: &ident,
@ -675,7 +670,7 @@ fn lookup_in_scope(e: &env, sc: scopes, sp: &span, name: &ident,
if ns == ns_value {
alt lookup_in_pat(name, local.node.pat) {
some(did) { ret some(ast::def_local(did)); }
_ {}
_ { }
}
}
}
@ -685,7 +680,7 @@ fn lookup_in_scope(e: &env, sc: scopes, sp: &span, name: &ident,
scope_arm(a) {
if ns == ns_value {
ret option::map(ast::def_binding,
lookup_in_pat(name, a.pats.(0)));
lookup_in_pat(name, a.pats[0]));
}
}
}
@ -736,7 +731,7 @@ fn lookup_in_ty_params(name: &ident, ty_params: &[ast::ty_param]) ->
option::t<def> {
let i = 0u;
for tp: ast::ty_param in ty_params {
if str::eq(tp.ident, name) { ret some(ast::def_ty_arg(i,tp.kind)); }
if str::eq(tp.ident, name) { ret some(ast::def_ty_arg(i, tp.kind)); }
i += 1u;
}
ret none::<def>;
@ -746,9 +741,7 @@ fn lookup_in_pat(name: &ident, pat: &@ast::pat) -> option::t<def_id> {
let found = none;
for each bound in ast::pat_bindings(pat) {
let p_name = alt bound.node { ast::pat_bind(n) { n } };
if str::eq(p_name, name) {
found = some(local_def(bound.id));
}
if str::eq(p_name, name) { found = some(local_def(bound.id)); }
}
ret found;
}
@ -791,7 +784,7 @@ fn lookup_in_block(name: &ident, b: &ast::blk_, pos: uint, loc_pos: uint,
let i = vec::len(b.stmts);
while i > 0u {
i -= 1u;
let st = b.stmts.(i);
let st = b.stmts[i];
alt st.node {
ast::stmt_decl(d, _) {
alt d.node {
@ -800,11 +793,11 @@ fn lookup_in_block(name: &ident, b: &ast::blk_, pos: uint, loc_pos: uint,
let j = vec::len(locs);
while j > 0u {
j -= 1u;
let loc = locs.(j);
let loc = locs[j];
if ns == ns_value && (i < pos || j < loc_pos) {
alt lookup_in_pat(name, loc.node.pat) {
some(did) { ret some(ast::def_local(did)); }
_ {}
_ { }
}
}
}
@ -817,7 +810,7 @@ fn lookup_in_block(name: &ident, b: &ast::blk_, pos: uint, loc_pos: uint,
if str::eq(it.ident, name) {
ret some(ast::def_ty(local_def(it.id)));
}
} else if (ns == ns_value) {
} else if ns == ns_value {
for v: ast::variant in variants {
if str::eq(v.node.name, name) {
let i = v.node.id;
@ -904,7 +897,7 @@ fn lookup_in_mod(e: &env, m: &def, sp: &span, name: &ident, ns: namespace,
let cached = e.ext_cache.find({did: defid, ident: name, ns: ns});
if !is_none(cached) { ret cached; }
let path = ~[name];
let path = [name];
if defid.node != -1 { path = e.ext_map.get(defid) + path; }
let fnd = lookup_external(e, defid.crate, path, ns);
if !is_none(fnd) {
@ -923,8 +916,7 @@ fn lookup_in_mod(e: &env, m: &def, sp: &span, name: &ident, ns: namespace,
}
}
fn found_view_item(e: &env, vi: @ast::view_item) ->
option::t<def> {
fn found_view_item(e: &env, vi: @ast::view_item) -> option::t<def> {
alt vi.node {
ast::view_item_use(_, _, id) {
let cnum = cstore::get_use_stmt_cnum(e.cstore, id);
@ -936,8 +928,7 @@ fn found_view_item(e: &env, vi: @ast::view_item) ->
fn lookup_import(e: &env, defid: def_id, ns: namespace) -> option::t<def> {
alt e.imports.get(defid.node) {
todo(node_id, name, path, span, scopes) {
resolve_import(e, local_def(node_id),
name, path, span, scopes);
resolve_import(e, local_def(node_id), name, path, span, scopes);
ret lookup_import(e, defid, ns);
}
resolving(sp) { e.sess.span_err(sp, "cyclic import"); ret none; }
@ -998,15 +989,15 @@ fn lookup_glob_in_mod(e: &env, info: @indexed_mod, sp: &span, id: &ident,
let matches =
vec::filter_map(bind lookup_in_mod_(e, _, sp, id, ns, dr),
{ info.glob_imports });
{ info.glob_imports });
if vec::len(matches) == 0u {
ret none;
} else if (vec::len(matches) == 1u) {
ret some(matches.(0).def);
} else if vec::len(matches) == 1u {
ret some(matches[0].def);
} else {
for match: glob_imp_def in matches {
let sp = match.item.span;
e.sess.span_note(sp, #fmt("'%s' is imported here", id));
e.sess.span_note(sp, #fmt["'%s' is imported here", id]);
}
e.sess.span_fatal(sp,
"'" + id + "' is glob-imported from" +
@ -1051,7 +1042,7 @@ fn lookup_in_mie(e: &env, mie: &mod_index_entry, ns: namespace) ->
alt item.node {
ast::item_tag(variants, _) {
if ns == ns_value {
let vid = variants.(variant_idx).node.id;
let vid = variants[variant_idx].node.id;
ret some(ast::def_variant(local_def(item.id),
local_def(vid)));
} else { ret none::<def>; }
@ -1090,15 +1081,16 @@ fn index_mod(md: &ast::_mod) -> mod_index {
let index = new_str_hash::<list<mod_index_entry>>();
for it: @ast::view_item in md.view_items {
alt it.node {
ast::view_item_use(ident, _, _)
{
ast::view_item_use(ident, _, _) {
add_to_index(index, ident, mie_view_item(it));
}
ast::view_item_import(ident, _, id) {
add_to_index(index, ident, mie_import_ident(id, it.span));
}
ast::view_item_import_from(_, idents, _) {
for ident in idents {
add_to_index(index, ident.node.name,
@ -1106,6 +1098,7 @@ fn index_mod(md: &ast::_mod) -> mod_index {
}
}
//globbed imports have to be resolved lazily.
ast::view_item_import_glob(_, _) | ast::view_item_export(_, _) {
}
@ -1191,20 +1184,19 @@ fn lookup_external(e: &env, cnum: int, ids: &[ident], ns: namespace) ->
fn check_for_collisions(e: &@env, c: &ast::crate) {
// Module indices make checking those relatively simple -- just check each
// name for multiple entities in the same namespace.
for each m: @{key: ast::node_id, val: @indexed_mod}
in e.mod_map.items() {
for each name: @{key: ident, val: list<mod_index_entry>}
in m.val.index.items() {
for each m: @{key: ast::node_id, val: @indexed_mod} in e.mod_map.items() {
for each name: @{key: ident, val: list<mod_index_entry>} in
m.val.index.items() {
check_mod_name(*e, name.key, name.val);
}
}
// Other scopes have to be checked the hard way.
let v = @{visit_item: bind check_item(e, _, _, _),
visit_block: bind check_block(e, _, _, _),
visit_arm: bind check_arm(e, _, _, _),
visit_expr: bind check_expr(e, _, _, _),
visit_ty: bind check_ty(e, _, _, _)
with *visit::default_visitor()};
let v =
@{visit_item: bind check_item(e, _, _, _),
visit_block: bind check_block(e, _, _, _),
visit_arm: bind check_arm(e, _, _, _),
visit_expr: bind check_expr(e, _, _, _),
visit_ty: bind check_ty(e, _, _, _) with *visit::default_visitor()};
visit::visit_crate(c, (), visit::mk_vt(v));
}
@ -1252,16 +1244,16 @@ fn mie_span(mie: &mod_index_entry) -> span {
fn check_item(e: &@env, i: &@ast::item, x: &(), v: &vt<()>) {
fn typaram_names(tps: &[ast::ty_param]) -> [ident] {
let x: [ast::ident] = ~[];
for tp: ast::ty_param in tps { x += ~[tp.ident] }
let x: [ast::ident] = [];
for tp: ast::ty_param in tps { x += [tp.ident] }
ret x;
}
visit::visit_item(i, x, v);
alt i.node {
ast::item_fn(f, ty_params) {
check_fn(*e, i.span, f);
ensure_unique(*e, i.span, typaram_names(ty_params),
ident_id, "type parameter");
ensure_unique(*e, i.span, typaram_names(ty_params), ident_id,
"type parameter");
}
ast::item_obj(ob, ty_params, _) {
fn field_name(field: &ast::obj_field) -> ident { ret field.ident; }
@ -1269,12 +1261,12 @@ fn check_item(e: &@env, i: &@ast::item, x: &(), v: &vt<()>) {
for m: @ast::method in ob.methods {
check_fn(*e, m.span, m.node.meth);
}
ensure_unique(*e, i.span, typaram_names(ty_params),
ident_id, "type parameter");
ensure_unique(*e, i.span, typaram_names(ty_params), ident_id,
"type parameter");
}
ast::item_tag(_, ty_params) {
ensure_unique(*e, i.span, typaram_names(ty_params),
ident_id, "type parameter");
ensure_unique(*e, i.span, typaram_names(ty_params), ident_id,
"type parameter");
}
_ { }
}
@ -1290,25 +1282,25 @@ fn check_pat(ch: checker, p: &@ast::pat) {
fn check_arm(e: &@env, a: &ast::arm, x: &(), v: &vt<()>) {
visit::visit_arm(a, x, v);
let ch0 = checker(*e, "binding");
check_pat(ch0, a.pats.(0));
check_pat(ch0, a.pats[0]);
let seen0 = ch0.seen;
let i = vec::len(a.pats);
while i > 1u {
i -= 1u;
let ch = checker(*e, "binding");
check_pat(ch, a.pats.(i));
check_pat(ch, a.pats[i]);
// Ensure the bindings introduced in this pattern are the same as in
// the first pattern.
if vec::len(ch.seen) != vec::len(seen0) {
e.sess.span_err(a.pats.(i).span,
e.sess.span_err(a.pats[i].span,
"inconsistent number of bindings");
} else {
for name: ident in ch.seen {
if is_none(vec::find(bind str::eq(name, _), seen0)) {
// Fight the alias checker
let name_ = name;
e.sess.span_err(a.pats.(i).span,
e.sess.span_err(a.pats[i].span,
"binding " + name_ +
" does not occur in first pattern");
}
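
The consistency check above corresponds to what later Rust still enforces for multi-pattern arms; a minimal sketch in modern syntax (hypothetical function, not part of this crate):

    fn demo(p: (i32, i32)) -> i32 {
        match p {
            // Both alternatives bind `x`, so the arm body can use it either way.
            (x, 0) | (0, x) => x,
            // (x, 1) | (1, y) => x, // rejected: alternatives bind different names
            _ => -1,
        }
    }
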
@ -1395,7 +1387,7 @@ fn check_ty(e: &@env, ty: &@ast::ty, x: &(), v: &vt<()>) {
type checker = @{mutable seen: [ident], kind: str, sess: session};
fn checker(e: &env, kind: str) -> checker {
let seen: [ident] = ~[];
let seen: [ident] = [];
ret @{mutable seen: seen, kind: kind, sess: e.sess};
}
@ -1408,12 +1400,12 @@ fn check_name(ch: &checker, sp: &span, name: &ident) {
}
fn add_name(ch: &checker, sp: &span, name: &ident) {
check_name(ch, sp, name);
ch.seen += ~[name];
ch.seen += [name];
}
fn ident_id(i: &ident) -> ident { ret i; }
fn ensure_unique<T>(e: &env, sp: &span, elts: &[T], id: fn(&T) -> ident ,
fn ensure_unique<T>(e: &env, sp: &span, elts: &[T], id: fn(&T) -> ident,
kind: &str) {
let ch = checker(e, kind);
for elt: T in elts { add_name(ch, sp, id(elt)); }


@ -34,61 +34,65 @@ import std::str;
import ty_ctxt = middle::ty::ctxt;
type res_info = { did: ast::def_id, t: ty::t };
type res_info = {did: ast::def_id, t: ty::t};
type ctxt = {
mutable next_tag_id: u16,
pad: u16,
tag_id_to_index: hashmap<ast::def_id,u16>,
mutable tag_order: [ast::def_id],
resources: interner::interner<res_info>,
llshapetablesty: TypeRef,
llshapetables: ValueRef
};
type ctxt =
{mutable next_tag_id: u16,
pad: u16,
tag_id_to_index: hashmap<ast::def_id, u16>,
mutable tag_order: [ast::def_id],
resources: interner::interner<res_info>,
llshapetablesty: TypeRef,
llshapetables: ValueRef};
const shape_u8 : u8 = 0u8;
const shape_u16 : u8 = 1u8;
const shape_u32 : u8 = 2u8;
const shape_u64 : u8 = 3u8;
const shape_i8 : u8 = 4u8;
const shape_i16 : u8 = 5u8;
const shape_i32 : u8 = 6u8;
const shape_i64 : u8 = 7u8;
const shape_f32 : u8 = 8u8;
const shape_f64 : u8 = 9u8;
const shape_evec : u8 = 10u8;
const shape_ivec : u8 = 11u8;
const shape_tag : u8 = 12u8;
const shape_box : u8 = 13u8;
const shape_struct : u8 = 17u8;
const shape_fn : u8 = 18u8;
const shape_obj : u8 = 19u8;
const shape_res : u8 = 20u8;
const shape_var : u8 = 21u8;
const shape_uniq : u8 = 22u8;
const shape_u8: u8 = 0u8;
const shape_u16: u8 = 1u8;
const shape_u32: u8 = 2u8;
const shape_u64: u8 = 3u8;
const shape_i8: u8 = 4u8;
const shape_i16: u8 = 5u8;
const shape_i32: u8 = 6u8;
const shape_i64: u8 = 7u8;
const shape_f32: u8 = 8u8;
const shape_f64: u8 = 9u8;
const shape_evec: u8 = 10u8;
const shape_ivec: u8 = 11u8;
const shape_tag: u8 = 12u8;
const shape_box: u8 = 13u8;
const shape_struct: u8 = 17u8;
const shape_fn: u8 = 18u8;
const shape_obj: u8 = 19u8;
const shape_res: u8 = 20u8;
const shape_var: u8 = 21u8;
const shape_uniq: u8 = 22u8;
// FIXME: This is a bad API in trans_common.
fn C_u8(n : u8) -> ValueRef { ret trans_common::C_u8(n as uint); }
fn C_u8(n: u8) -> ValueRef { ret trans_common::C_u8(n as uint); }
fn hash_res_info(ri : &res_info) -> uint {
fn hash_res_info(ri: &res_info) -> uint {
let h = 5381u;
h *= 33u; h += (ri.did.crate as uint);
h *= 33u; h += (ri.did.node as uint);
h *= 33u; h += (ri.t as uint);
h *= 33u;
h += ri.did.crate as uint;
h *= 33u;
h += ri.did.node as uint;
h *= 33u;
h += ri.t as uint;
ret h;
}
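
A minimal modern-Rust sketch of the same multiply-by-33 combining (the slice of parts stands in for the crate, node, and type components; it is not the real res_info type):

    fn hash_parts(parts: &[usize]) -> usize {
        let mut h: usize = 5381;
        for &p in parts {
            // h = h * 33 + p, with wrapping arithmetic in place of unchecked uint math
            h = h.wrapping_mul(33).wrapping_add(p);
        }
        h
    }

This hash pairs with eq_res_info so that the interner created in mk_ctxt below can deduplicate identical resource instantiations.
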
fn eq_res_info(a : &res_info, b : &res_info) -> bool {
fn eq_res_info(a: &res_info, b: &res_info) -> bool {
ret a.did.crate == b.did.crate && a.did.node == b.did.node && a.t == b.t;
}
fn mk_global(ccx : &@crate_ctxt, name : &str, llval : ValueRef) -> ValueRef {
let llglobal = lib::llvm::llvm::LLVMAddGlobal(ccx.llmod, val_ty(llval),
str::buf(name));
fn mk_global(ccx: &@crate_ctxt, name: &str, llval: ValueRef) -> ValueRef {
let llglobal =
lib::llvm::llvm::LLVMAddGlobal(ccx.llmod, val_ty(llval),
str::buf(name));
lib::llvm::llvm::LLVMSetInitializer(llglobal, llval);
lib::llvm::llvm::LLVMSetGlobalConstant(llglobal, True);
lib::llvm::llvm::LLVMSetLinkage(llglobal, lib::llvm::LLVMInternalLinkage
as lib::llvm::llvm::Linkage);
lib::llvm::llvm::LLVMSetLinkage(llglobal,
lib::llvm::LLVMInternalLinkage as
lib::llvm::llvm::Linkage);
ret llglobal;
}
@ -99,18 +103,18 @@ fn mk_global(ccx : &@crate_ctxt, name : &str, llval : ValueRef) -> ValueRef {
//
// TODO: Use this in dynamic_size_of() as well.
fn largest_variants(ccx : &@crate_ctxt, tag_id : &ast::def_id) -> [uint] {
fn largest_variants(ccx: &@crate_ctxt, tag_id: &ast::def_id) -> [uint] {
// Compute the minimum and maximum size and alignment for each variant.
//
// TODO: We could do better here; e.g. we know that any variant that
// contains (T,T) must be as least as large as any variant that contains
// just T.
let ranges = ~[];
let ranges = [];
let variants = ty::tag_variants(ccx.tcx, tag_id);
for variant : ty::variant_info in variants {
for variant: ty::variant_info in variants {
let bounded = true;
let { a: min_size, b: min_align } = { a: 0u, b: 0u };
for elem_t : ty::t in variant.args {
let {a: min_size, b: min_align} = {a: 0u, b: 0u};
for elem_t: ty::t in variant.args {
if ty::type_contains_params(ccx.tcx, elem_t) {
// TODO: We could do better here; this causes us to
// conservatively assume that (int, T) has minimum size 0,
@ -123,34 +127,34 @@ fn largest_variants(ccx : &@crate_ctxt, tag_id : &ast::def_id) -> [uint] {
}
}
ranges += ~[{ size: { min: min_size, bounded: bounded },
align: { min: min_align, bounded: bounded } }];
ranges +=
[{size: {min: min_size, bounded: bounded},
align: {min: min_align, bounded: bounded}}];
}
// Initialize the candidate set to contain all variants.
let candidates = ~[mutable];
for variant in variants { candidates += ~[mutable true]; }
let candidates = [mutable];
for variant in variants { candidates += [mutable true]; }
// Do a pairwise comparison among all variants still in the candidate set.
// Throw out any variant that we know has size and alignment at least as
// small as some other variant.
let i = 0u;
while i < vec::len(ranges) - 1u {
if candidates.(i) {
if candidates[i] {
let j = i + 1u;
while (j < vec::len(ranges)) {
if candidates.(j) {
if ranges.(i).size.bounded && ranges.(i).align.bounded &&
ranges.(j).size.bounded &&
ranges.(j).align.bounded {
if ranges.(i).size >= ranges.(j).size &&
ranges.(i).align >= ranges.(j).align {
while j < vec::len(ranges) {
if candidates[j] {
if ranges[i].size.bounded && ranges[i].align.bounded &&
ranges[j].size.bounded && ranges[j].align.bounded {
if ranges[i].size >= ranges[j].size &&
ranges[i].align >= ranges[j].align {
// Throw out j.
candidates.(j) = false;
} else if ranges.(j).size >= ranges.(i).size &&
ranges.(j).align >= ranges.(j).align {
candidates[j] = false;
} else if ranges[j].size >= ranges[i].size &&
ranges[j].align >= ranges[j].align {
// Throw out i.
candidates.(i) = false;
candidates[i] = false;
}
}
}
@ -161,10 +165,10 @@ fn largest_variants(ccx : &@crate_ctxt, tag_id : &ast::def_id) -> [uint] {
}
// Return the resulting set.
let result = ~[];
let result = [];
i = 0u;
while i < vec::len(candidates) {
if candidates.(i) { result += ~[i]; }
if candidates[i] { result += [i]; }
i += 1u;
}
ret result;
@ -175,23 +179,24 @@ fn largest_variants(ccx : &@crate_ctxt, tag_id : &ast::def_id) -> [uint] {
//
// TODO: Migrate trans over to use this.
fn round_up(size : u16, align : u8) -> u16 {
assert align >= 1u8;
fn round_up(size: u16, align: u8) -> u16 {
assert (align >= 1u8);
let alignment = align as u16;
ret ((size-1u16) + alignment) & !(alignment-1u16);
ret size - 1u16 + alignment & !(alignment - 1u16);
}
type size_align = { size: u16, align: u8 };
type size_align = {size: u16, align: u8};
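
The round_up helper above relies on align being a power of two (and size being nonzero); a rough modern-Rust equivalent with a couple of spot checks:

    fn round_up(size: u16, align: u16) -> u16 {
        // exact only when `align` is a power of two
        (size - 1 + align) & !(align - 1)
    }
    // round_up(5, 4) == 8, round_up(12, 8) == 16, round_up(16, 8) == 16
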
fn compute_static_tag_size(ccx : &@crate_ctxt, largest_variants : &[uint],
did : &ast::def_id) -> size_align {
let max_size = 0u16; let max_align = 1u8;
fn compute_static_tag_size(ccx: &@crate_ctxt, largest_variants: &[uint],
did: &ast::def_id) -> size_align {
let max_size = 0u16;
let max_align = 1u8;
let variants = ty::tag_variants(ccx.tcx, did);
for vid : uint in largest_variants {
for vid: uint in largest_variants {
// We increment a "virtual data pointer" to compute the size.
let lltys = ~[];
for typ : ty::t in variants.(vid).args {
lltys += ~[trans::type_of(ccx, dummy_sp(), typ)];
let lltys = [];
for typ: ty::t in variants[vid].args {
lltys += [trans::type_of(ccx, dummy_sp(), typ)];
}
let llty = trans_common::T_struct(lltys);
@ -205,24 +210,17 @@ fn compute_static_tag_size(ccx : &@crate_ctxt, largest_variants : &[uint],
// Add space for the tag if applicable.
// FIXME (issue #792): This is wrong. If the tag starts with an 8 byte
// aligned quantity, we don't align it.
if vec::len(variants) > 1u {
max_size += 4u16;
max_align = 4u8;
}
if vec::len(variants) > 1u { max_size += 4u16; max_align = 4u8; }
ret { size: max_size, align: max_align };
ret {size: max_size, align: max_align};
}
tag tag_kind {
tk_unit;
tk_enum;
tk_complex;
}
tag tag_kind { tk_unit; tk_enum; tk_complex; }
fn tag_kind(ccx : &@crate_ctxt, did : &ast::def_id) -> tag_kind {
fn tag_kind(ccx: &@crate_ctxt, did: &ast::def_id) -> tag_kind {
let variants = ty::tag_variants(ccx.tcx, did);
if vec::len(variants) == 0u { ret tk_complex; }
for v : ty::variant_info in variants {
for v: ty::variant_info in variants {
if vec::len(v.args) > 0u { ret tk_complex; }
}
if vec::len(variants) == 1u { ret tk_unit; }
@ -231,92 +229,101 @@ fn tag_kind(ccx : &@crate_ctxt, did : &ast::def_id) -> tag_kind {
// Returns the code corresponding to the pointer size on this architecture.
fn s_int(_tcx : &ty_ctxt) -> u8 {
ret shape_i32; // TODO: x86-64
fn s_int(_tcx: &ty_ctxt) -> u8 {
ret shape_i32; // TODO: x86-64
}
fn s_uint(_tcx : &ty_ctxt) -> u8 {
ret shape_u32; // TODO: x86-64
fn s_uint(_tcx: &ty_ctxt) -> u8 {
ret shape_u32; // TODO: x86-64
}
fn s_float(_tcx : &ty_ctxt) -> u8 {
ret shape_f64; // TODO: x86-64
fn s_float(_tcx: &ty_ctxt) -> u8 {
ret shape_f64; // TODO: x86-64
}
fn mk_ctxt(llmod : ModuleRef) -> ctxt {
fn mk_ctxt(llmod: ModuleRef) -> ctxt {
let llshapetablesty = trans_common::T_named_struct("shapes");
let llshapetables =
lib::llvm::llvm::LLVMAddGlobal(llmod, llshapetablesty,
str::buf("shapes"));
ret {
mutable next_tag_id: 0u16,
pad: 0u16,
tag_id_to_index: common::new_def_hash(),
mutable tag_order: ~[],
resources: interner::mk(hash_res_info, eq_res_info),
llshapetablesty: llshapetablesty,
llshapetables: llshapetables
};
ret {mutable next_tag_id: 0u16,
pad: 0u16,
tag_id_to_index: common::new_def_hash(),
mutable tag_order: [],
resources: interner::mk(hash_res_info, eq_res_info),
llshapetablesty: llshapetablesty,
llshapetables: llshapetables};
}
fn add_bool(dest : &mutable [u8], val : bool) {
dest += ~[if val { 1u8 } else { 0u8 }];
fn add_bool(dest: &mutable [u8], val: bool) {
dest += [if val { 1u8 } else { 0u8 }];
}
fn add_u16(dest : &mutable [u8], val : u16) {
dest += ~[(val & 0xffu16) as u8, (val >> 8u16) as u8];
fn add_u16(dest: &mutable [u8], val: u16) {
dest += [val & 0xffu16 as u8, val >> 8u16 as u8];
}
fn add_substr(dest : &mutable [u8], src : &[u8]) {
fn add_substr(dest: &mutable [u8], src: &[u8]) {
add_u16(dest, vec::len(src) as u16);
dest += src;
}
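
The byte layout these helpers produce is little-endian, with a u16 length prefix in front of each substring; a self-contained sketch in modern Rust (not the dialect used above):

    fn add_u16(dest: &mut Vec<u8>, val: u16) {
        dest.push((val & 0xff) as u8); // low byte first
        dest.push((val >> 8) as u8);   // then high byte
    }

    fn add_substr(dest: &mut Vec<u8>, src: &[u8]) {
        add_u16(dest, src.len() as u16); // u16 length prefix
        dest.extend_from_slice(src);
    }
    // add_u16(&mut v, 0x1234) appends [0x34, 0x12].
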
fn shape_of(ccx : &@crate_ctxt, t : ty::t) -> [u8] {
let s = ~[];
fn shape_of(ccx: &@crate_ctxt, t: ty::t) -> [u8] {
let s = [];
alt ty::struct(ccx.tcx, t) {
ty::ty_nil. | ty::ty_bool. | ty::ty_machine(ast::ty_u8.) | ty::ty_bot. {
s += ~[shape_u8];
s += [shape_u8];
}
ty::ty_int. { s += ~[s_int(ccx.tcx)]; }
ty::ty_float. { s += ~[s_float(ccx.tcx)]; }
ty::ty_int. {
s += [s_int(ccx.tcx)];
}
ty::ty_float. { s += [s_float(ccx.tcx)]; }
ty::ty_uint. | ty::ty_ptr(_) | ty::ty_type. | ty::ty_native(_) {
s += ~[s_uint(ccx.tcx)];
s += [s_uint(ccx.tcx)];
}
ty::ty_machine(ast::ty_i8.) { s += ~[shape_i8]; }
ty::ty_machine(ast::ty_u16.) { s += ~[shape_u16]; }
ty::ty_machine(ast::ty_i16.) { s += ~[shape_i16]; }
ty::ty_machine(ast::ty_u32.) | ty::ty_char. { s += ~[shape_u32]; }
ty::ty_machine(ast::ty_i32.) { s += ~[shape_i32]; }
ty::ty_machine(ast::ty_u64.) { s += ~[shape_u64]; }
ty::ty_machine(ast::ty_i64.) { s += ~[shape_i64]; }
ty::ty_str. { s += ~[shape_evec, 1u8, 1u8, 0u8, shape_u8]; }
ty::ty_istr. { s += ~[shape_ivec, 1u8, 1u8, 0u8, shape_u8]; }
ty::ty_machine(ast::ty_i8.) {
s += [shape_i8];
}
ty::ty_machine(ast::ty_u16.) { s += [shape_u16]; }
ty::ty_machine(ast::ty_i16.) { s += [shape_i16]; }
ty::ty_machine(ast::ty_u32.) | ty::ty_char. { s += [shape_u32]; }
ty::ty_machine(ast::ty_i32.) { s += [shape_i32]; }
ty::ty_machine(ast::ty_u64.) { s += [shape_u64]; }
ty::ty_machine(ast::ty_i64.) { s += [shape_i64]; }
ty::ty_str. {
s += [shape_evec, 1u8, 1u8, 0u8, shape_u8];
}
ty::ty_istr. { s += [shape_ivec, 1u8, 1u8, 0u8, shape_u8]; }
ty::ty_tag(did, tps) {
alt tag_kind(ccx, did) {
tk_unit. {
// FIXME: For now we do this.
s += ~[shape_u32];
s += [shape_u32];
}
tk_enum. { s += ~[shape_u32]; }
tk_enum. { s += [shape_u32]; }
tk_complex. {
s += ~[shape_tag];
s += [shape_tag];
let sub = ~[];
let sub = [];
let id;
alt ccx.shape_cx.tag_id_to_index.find(did) {
none. {
id = ccx.shape_cx.next_tag_id;
ccx.shape_cx.tag_id_to_index.insert(did, id);
ccx.shape_cx.tag_order += ~[did];
ccx.shape_cx.tag_order += [did];
ccx.shape_cx.next_tag_id += 1u16;
}
some(existing_id) { id = existing_id; }
@ -324,7 +331,7 @@ fn shape_of(ccx : &@crate_ctxt, t : ty::t) -> [u8] {
add_u16(sub, id as u16);
add_u16(sub, vec::len(tps) as u16);
for tp : ty::t in tps {
for tp: ty::t in tps {
let subshape = shape_of(ccx, tp);
add_u16(sub, vec::len(subshape) as u16);
sub += subshape;
@ -335,91 +342,97 @@ fn shape_of(ccx : &@crate_ctxt, t : ty::t) -> [u8] {
}
}
ty::ty_box(mt) {
s += ~[shape_box];
s += [shape_box];
add_substr(s, shape_of(ccx, mt.ty));
}
ty::ty_uniq(subt) {
s += ~[shape_uniq];
s += [shape_uniq];
add_substr(s, shape_of(ccx, subt));
}
ty::ty_vec(mt) {
s += ~[shape_ivec];
s += [shape_ivec];
add_bool(s, ty::type_is_pod(ccx.tcx, mt.ty));
add_size_hint(ccx, s, mt.ty);
add_substr(s, shape_of(ccx, mt.ty));
}
ty::ty_rec(fields) {
s += ~[shape_struct];
let sub = ~[];
for f : field in fields { sub += shape_of(ccx, f.mt.ty); }
s += [shape_struct];
let sub = [];
for f: field in fields { sub += shape_of(ccx, f.mt.ty); }
add_substr(s, sub);
}
ty::ty_tup(elts) {
s += ~[shape_struct];
let sub = ~[];
s += [shape_struct];
let sub = [];
for elt in elts { sub += shape_of(ccx, elt); }
add_substr(s, sub);
}
ty::ty_fn(_,_,_,_,_) { s += ~[shape_fn]; }
ty::ty_native_fn(_,_,_) { s += ~[shape_u32]; }
ty::ty_obj(_) { s += ~[shape_obj]; }
ty::ty_fn(_, _, _, _, _) {
s += [shape_fn];
}
ty::ty_native_fn(_, _, _) { s += [shape_u32]; }
ty::ty_obj(_) { s += [shape_obj]; }
ty::ty_res(did, raw_subt, tps) {
let subt = ty::substitute_type_params(ccx.tcx, tps, raw_subt);
let ri = { did: did, t: subt };
let ri = {did: did, t: subt};
let id = interner::intern(ccx.shape_cx.resources, ri);
s += ~[shape_res];
s += [shape_res];
add_u16(s, id as u16);
add_u16(s, vec::len(tps) as u16);
let sub = ~[];
for tp : ty::t in tps { add_substr(s, sub); }
let sub = [];
for tp: ty::t in tps { add_substr(s, sub); }
add_substr(s, sub);
add_substr(s, shape_of(ccx, subt));
}
ty::ty_var(n) { fail "shape_of ty_var"; }
ty::ty_param(n,_) { s += ~[shape_var, n as u8]; }
ty::ty_var(n) {
fail "shape_of ty_var";
}
ty::ty_param(n, _) { s += [shape_var, n as u8]; }
}
ret s;
}
fn add_size_hint(ccx : &@crate_ctxt, s : &mutable [u8], typ : ty::t) {
if (ty::type_has_dynamic_size(ccx.tcx, typ)) {
s += ~[ 0u8, 0u8, 0u8 ];
ret;
}
fn add_size_hint(ccx: &@crate_ctxt, s: &mutable [u8], typ: ty::t) {
if ty::type_has_dynamic_size(ccx.tcx, typ) { s += [0u8, 0u8, 0u8]; ret; }
let llty = trans::type_of(ccx, dummy_sp(), typ);
add_u16(s, trans::llsize_of_real(ccx, llty) as u16);
s += ~[ trans::llalign_of_real(ccx, llty) as u8 ];
s += [trans::llalign_of_real(ccx, llty) as u8];
}
// FIXME: We might discover other variants as we traverse these. Handle this.
fn shape_of_variant(ccx : &@crate_ctxt, v : &ty::variant_info) -> [u8] {
let s = ~[];
for t : ty::t in v.args { s += shape_of(ccx, t); }
fn shape_of_variant(ccx: &@crate_ctxt, v: &ty::variant_info) -> [u8] {
let s = [];
for t: ty::t in v.args { s += shape_of(ccx, t); }
ret s;
}
fn gen_tag_shapes(ccx : &@crate_ctxt) -> ValueRef {
fn gen_tag_shapes(ccx: &@crate_ctxt) -> ValueRef {
// Loop over all the tag variants and write their shapes into a data
// buffer. As we do this, it's possible for us to discover new tags, so we
// must do this first.
let i = 0u;
let data = ~[]; let offsets = ~[];
while (i < vec::len(ccx.shape_cx.tag_order)) {
let did = ccx.shape_cx.tag_order.(i);
let data = [];
let offsets = [];
while i < vec::len(ccx.shape_cx.tag_order) {
let did = ccx.shape_cx.tag_order[i];
let variants = ty::tag_variants(ccx.tcx, did);
for v : ty::variant_info in variants {
offsets += ~[vec::len(data) as u16];
for v: ty::variant_info in variants {
offsets += [vec::len(data) as u16];
let variant_shape = shape_of_variant(ccx, v);
add_substr(data, variant_shape);
@ -432,13 +445,14 @@ fn gen_tag_shapes(ccx : &@crate_ctxt) -> ValueRef {
// info records for each tag) and the info space (which contains offsets
// to each variant shape). As we do so, build up the header.
let header = ~[]; let info = ~[];
let header = [];
let info = [];
let header_sz = 2u16 * ccx.shape_cx.next_tag_id;
let data_sz = vec::len(data) as u16;
let info_sz = 0u16;
for did_ : ast::def_id in ccx.shape_cx.tag_order {
let did = did_; // Satisfy alias checker.
for did_: ast::def_id in ccx.shape_cx.tag_order {
let did = did_; // Satisfy alias checker.
let variants = ty::tag_variants(ccx.tcx, did);
add_u16(header, header_sz + info_sz);
info_sz += 2u16 * ((vec::len(variants) as u16) + 2u16) + 3u16;
@ -448,25 +462,25 @@ fn gen_tag_shapes(ccx : &@crate_ctxt) -> ValueRef {
// variant. Also construct the largest-variant table for each tag, which
// contains the variants that the size-of operation needs to look at.
let lv_table = ~[];
let lv_table = [];
i = 0u;
for did_ : ast::def_id in ccx.shape_cx.tag_order {
let did = did_; // Satisfy alias checker.
for did_: ast::def_id in ccx.shape_cx.tag_order {
let did = did_; // Satisfy alias checker.
let variants = ty::tag_variants(ccx.tcx, did);
add_u16(info, vec::len(variants) as u16);
// Construct the largest-variants table.
add_u16(info, header_sz + info_sz + data_sz +
(vec::len(lv_table) as u16));
add_u16(info,
header_sz + info_sz + data_sz + (vec::len(lv_table) as u16));
let lv = largest_variants(ccx, did);
add_u16(lv_table, vec::len(lv) as u16);
for v : uint in lv { add_u16(lv_table, v as u16); }
for v: uint in lv { add_u16(lv_table, v as u16); }
// Determine whether the tag has dynamic size.
let dynamic = false;
for variant : ty::variant_info in variants {
for typ : ty::t in variant.args {
for variant: ty::variant_info in variants {
for typ: ty::t in variant.args {
if ty::type_has_dynamic_size(ccx.tcx, typ) { dynamic = true; }
}
}
@ -475,24 +489,22 @@ fn gen_tag_shapes(ccx : &@crate_ctxt) -> ValueRef {
// Otherwise, write a placeholder.
let size_align;
if dynamic {
size_align = { size: 0u16, align: 0u8 };
} else {
size_align = compute_static_tag_size(ccx, lv, did);
}
size_align = {size: 0u16, align: 0u8};
} else { size_align = compute_static_tag_size(ccx, lv, did); }
add_u16(info, size_align.size);
info += ~[size_align.align];
info += [size_align.align];
// Now write in the offset of each variant.
for v : ty::variant_info in variants {
add_u16(info, header_sz + info_sz + offsets.(i));
for v: ty::variant_info in variants {
add_u16(info, header_sz + info_sz + offsets[i]);
i += 1u;
}
}
assert (i == vec::len(offsets));
assert (header_sz == (vec::len(header) as u16));
assert (info_sz == (vec::len(info) as u16));
assert (data_sz == (vec::len(data) as u16));
assert (header_sz == vec::len(header) as u16);
assert (info_sz == vec::len(info) as u16);
assert (data_sz == vec::len(data) as u16);
header += info;
header += data;
@ -501,32 +513,33 @@ fn gen_tag_shapes(ccx : &@crate_ctxt) -> ValueRef {
ret mk_global(ccx, "tag_shapes", C_bytes(header));
}
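
The offsets recorded while the data section is built are relative to the start of data; the header_sz + info_sz + offsets[i] additions above rebase them to the start of the whole table. A small sketch of that rebasing, assuming the same header | info | data layout:

    // Offsets into `data` become table-absolute once the two sections that
    // precede it are accounted for.
    fn rebase(data_offsets: &[u16], header_sz: u16, info_sz: u16) -> Vec<u16> {
        data_offsets.iter().map(|&o| header_sz + info_sz + o).collect()
    }
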
fn gen_resource_shapes(ccx : &@crate_ctxt) -> ValueRef {
let dtors = ~[];
fn gen_resource_shapes(ccx: &@crate_ctxt) -> ValueRef {
let dtors = [];
let i = 0u;
let len = interner::len(ccx.shape_cx.resources);
while i < len {
let ri = interner::get(ccx.shape_cx.resources, i);
dtors += ~[trans_common::get_res_dtor(ccx, dummy_sp(), ri.did, ri.t)];
dtors += [trans_common::get_res_dtor(ccx, dummy_sp(), ri.did, ri.t)];
i += 1u;
}
ret mk_global(ccx, "resource_shapes", C_struct(dtors));
}
fn gen_shape_tables(ccx : &@crate_ctxt) {
fn gen_shape_tables(ccx: &@crate_ctxt) {
let lltagstable = gen_tag_shapes(ccx);
let llresourcestable = gen_resource_shapes(ccx);
trans_common::set_struct_body(ccx.shape_cx.llshapetablesty,
~[val_ty(lltagstable),
val_ty(llresourcestable)]);
[val_ty(lltagstable),
val_ty(llresourcestable)]);
let lltables = C_named_struct(ccx.shape_cx.llshapetablesty,
~[lltagstable, llresourcestable]);
let lltables =
C_named_struct(ccx.shape_cx.llshapetablesty,
[lltagstable, llresourcestable]);
lib::llvm::llvm::LLVMSetInitializer(ccx.shape_cx.llshapetables, lltables);
lib::llvm::llvm::LLVMSetGlobalConstant(ccx.shape_cx.llshapetables, True);
lib::llvm::llvm::LLVMSetLinkage(ccx.shape_cx.llshapetables,
lib::llvm::LLVMInternalLinkage as
lib::llvm::llvm::Linkage);
lib::llvm::llvm::Linkage);
}

File diff suppressed because it is too large


@ -72,24 +72,24 @@ fn matches_always(p: &@ast::pat) -> bool {
fn bind_for_pat(p: &@ast::pat, br: &match_branch, val: ValueRef) {
alt p.node {
ast::pat_bind(name) { br.bound += ~[{ident: name, val: val}]; }
ast::pat_bind(name) { br.bound += [{ident: name, val: val}]; }
_ { }
}
}
type enter_pat = fn(&@ast::pat) -> option::t<[@ast::pat]> ;
type enter_pat = fn(&@ast::pat) -> option::t<[@ast::pat]>;
fn enter_match(m: &match, col: uint, val: ValueRef, e: &enter_pat) -> match {
let result = ~[];
let result = [];
for br: match_branch in m {
alt e(br.pats.(col)) {
alt e(br.pats[col]) {
some(sub) {
let pats =
vec::slice(br.pats, 0u, col) + sub +
vec::slice(br.pats, col + 1u, vec::len(br.pats));
let new_br = @{pats: pats with *br};
result += ~[new_br];
bind_for_pat(br.pats.(col), new_br, val);
result += [new_br];
bind_for_pat(br.pats[col], new_br, val);
}
none. { }
}
@ -99,7 +99,7 @@ fn enter_match(m: &match, col: uint, val: ValueRef, e: &enter_pat) -> match {
fn enter_default(m: &match, col: uint, val: ValueRef) -> match {
fn e(p: &@ast::pat) -> option::t<[@ast::pat]> {
ret if matches_always(p) { some(~[]) } else { none };
ret if matches_always(p) { some([]) } else { none };
}
ret enter_match(m, col, val, e);
}
@ -116,7 +116,7 @@ fn enter_opt(ccx: &@crate_ctxt, m: &match, opt: &opt, col: uint,
} else { none };
}
ast::pat_lit(l) {
ret if opt_eq(lit(l), opt) { some(~[]) } else { none };
ret if opt_eq(lit(l), opt) { some([]) } else { none };
}
_ { ret some(vec::init_elt(dummy, size)); }
}
@ -131,13 +131,13 @@ fn enter_rec(m: &match, col: uint, fields: &[ast::ident], val: ValueRef) ->
option::t<[@ast::pat]> {
alt p.node {
ast::pat_rec(fpats, _) {
let pats = ~[];
let pats = [];
for fname: ast::ident in fields {
let pat = dummy;
for fpat: ast::field_pat in fpats {
if str::eq(fpat.ident, fname) { pat = fpat.pat; break; }
}
pats += ~[pat];
pats += [pat];
}
ret some(pats);
}
@ -149,8 +149,8 @@ fn enter_rec(m: &match, col: uint, fields: &[ast::ident], val: ValueRef) ->
fn enter_tup(m: &match, col: uint, val: ValueRef, n_elts: uint) -> match {
let dummy = @{id: 0, node: ast::pat_wild, span: dummy_sp()};
fn e(dummy: &@ast::pat, n_elts: uint, p: &@ast::pat)
-> option::t<[@ast::pat]> {
fn e(dummy: &@ast::pat, n_elts: uint, p: &@ast::pat) ->
option::t<[@ast::pat]> {
alt p.node {
ast::pat_tup(elts) { ret some(elts); }
_ { ret some(vec::init_elt(dummy, n_elts)); }
@ -163,8 +163,8 @@ fn enter_box(m: &match, col: uint, val: ValueRef) -> match {
let dummy = @{id: 0, node: ast::pat_wild, span: dummy_sp()};
fn e(dummy: &@ast::pat, p: &@ast::pat) -> option::t<[@ast::pat]> {
alt p.node {
ast::pat_box(sub) { ret some(~[sub]); }
_ { ret some(~[dummy]); }
ast::pat_box(sub) { ret some([sub]); }
_ { ret some([dummy]); }
}
}
ret enter_match(m, col, val, bind e(dummy, _));
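
For illustration with hypothetical rows: if column col holds @p in one branch and _ in another, enter_box narrows that column to p and a fresh wildcard respectively, so the rest of the compilation works on the unboxed value that compile_submatch loads just before recursing.
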
@ -173,15 +173,15 @@ fn enter_box(m: &match, col: uint, val: ValueRef) -> match {
fn get_options(ccx: &@crate_ctxt, m: &match, col: uint) -> [opt] {
fn add_to_set(set: &mutable [opt], val: &opt) {
for l: opt in set { if opt_eq(l, val) { ret; } }
set += ~[val];
set += [val];
}
let found = ~[];
let found = [];
for br: match_branch in m {
alt br.pats.(col).node {
alt br.pats[col].node {
ast::pat_lit(l) { add_to_set(found, lit(l)); }
ast::pat_tag(_, _) {
add_to_set(found, variant_opt(ccx, br.pats.(col).id));
add_to_set(found, variant_opt(ccx, br.pats[col].id));
}
_ { }
}
@ -190,20 +190,20 @@ fn get_options(ccx: &@crate_ctxt, m: &match, col: uint) -> [opt] {
}
fn extract_variant_args(bcx: @block_ctxt, pat_id: ast::node_id,
vdefs: &{tg: def_id, var: def_id}, val: ValueRef)
-> {vals: [ValueRef], bcx: @block_ctxt} {
vdefs: &{tg: def_id, var: def_id}, val: ValueRef) ->
{vals: [ValueRef], bcx: @block_ctxt} {
let ccx = bcx.fcx.lcx.ccx;
let ty_param_substs = ty::node_id_to_type_params(ccx.tcx, pat_id);
let blobptr = val;
let variants = ty::tag_variants(ccx.tcx, vdefs.tg);
let args = ~[];
let args = [];
let size =
vec::len(ty::tag_variant_with_id(ccx.tcx, vdefs.tg, vdefs.var).args);
if size > 0u && vec::len(variants) != 1u {
let tagptr =
bcx.build.PointerCast(val,
trans_common::T_opaque_tag_ptr(ccx.tn));
blobptr = bcx.build.GEP(tagptr, ~[C_int(0), C_int(1)]);
blobptr = bcx.build.GEP(tagptr, [C_int(0), C_int(1)]);
}
let i = 0u;
while i < size {
@ -211,20 +211,20 @@ fn extract_variant_args(bcx: @block_ctxt, pat_id: ast::node_id,
trans::GEP_tag(bcx, blobptr, vdefs.tg, vdefs.var, ty_param_substs,
i as int);
bcx = r.bcx;
args += ~[r.val];
args += [r.val];
i += 1u;
}
ret {vals: args, bcx: bcx};
}
fn collect_record_fields(m: &match, col: uint) -> [ast::ident] {
let fields = ~[];
let fields = [];
for br: match_branch in m {
alt br.pats.(col).node {
alt br.pats[col].node {
ast::pat_rec(fs, _) {
for f: ast::field_pat in fs {
if !vec::any(bind str::eq(f.ident, _), fields) {
fields += ~[f.ident];
fields += [f.ident];
}
}
}
@ -236,14 +236,14 @@ fn collect_record_fields(m: &match, col: uint) -> [ast::ident] {
fn any_box_pat(m: &match, col: uint) -> bool {
for br: match_branch in m {
alt br.pats.(col).node { ast::pat_box(_) { ret true; } _ { } }
alt br.pats[col].node { ast::pat_box(_) { ret true; } _ { } }
}
ret false;
}
fn any_tup_pat(m: &match, col: uint) -> bool {
for br: match_branch in m {
alt br.pats.(col).node { ast::pat_tup(_) { ret true; } _ { } }
alt br.pats[col].node { ast::pat_tup(_) { ret true; } _ { } }
}
ret false;
}
@ -252,13 +252,13 @@ type exit_node = {bound: bind_map, from: BasicBlockRef, to: BasicBlockRef};
type mk_fail = fn() -> BasicBlockRef;
fn pick_col(m: &match) -> uint {
let scores = vec::init_elt_mut(0u, vec::len(m.(0).pats));
let scores = vec::init_elt_mut(0u, vec::len(m[0].pats));
for br: match_branch in m {
let i = 0u;
for p: @ast::pat in br.pats {
alt p.node {
ast::pat_lit(_) | ast::pat_tag(_, _) { scores.(i) += 1u; }
_ {}
ast::pat_lit(_) | ast::pat_tag(_, _) { scores[i] += 1u; }
_ { }
}
i += 1u;
}
@ -272,10 +272,7 @@ fn pick_col(m: &match) -> uint {
if score == 0u { ret i; }
// If no irrefutable ones are found, we pick the one with the biggest
// branching factor.
if score > max_score {
max_score = score;
best_col = i;
}
if score > max_score { max_score = score; best_col = i; }
i += 1u;
}
ret best_col;
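
As a worked illustration (hypothetical arms): for the rows (0, some(_)), (1, none.), (_, none.), no column is free of refutable patterns, so pick_col falls through to the branching-factor rule; the second column carries tag patterns in all three rows (score 3) against two literals in the first (score 2), so it is chosen and compile_submatch switches on the tag discriminant first.
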
@ -284,22 +281,24 @@ fn pick_col(m: &match) -> uint {
fn compile_submatch(bcx: @block_ctxt, m: &match, vals: [ValueRef],
f: &mk_fail, exits: &mutable [exit_node]) {
if vec::len(m) == 0u { bcx.build.Br(f()); ret; }
if vec::len(m.(0).pats) == 0u {
exits += ~[{bound: m.(0).bound, from: bcx.llbb, to: m.(0).body}];
bcx.build.Br(m.(0).body);
if vec::len(m[0].pats) == 0u {
exits += [{bound: m[0].bound, from: bcx.llbb, to: m[0].body}];
bcx.build.Br(m[0].body);
ret;
}
let col = pick_col(m);
let val = vals.(col);
let vals_left = vec::slice(vals, 0u, col) +
vec::slice(vals, col + 1u, vec::len(vals));
let val = vals[col];
let vals_left =
vec::slice(vals, 0u, col) +
vec::slice(vals, col + 1u, vec::len(vals));
let ccx = bcx.fcx.lcx.ccx;
let pat_id = 0;
for br: match_branch in m {
// Find a real id (we're adding placeholder wildcard patterns, but
// each column is guaranteed to have at least one real pattern)
if pat_id == 0 { pat_id = br.pats.(col).id; }
if pat_id == 0 { pat_id = br.pats[col].id; }
}
let rec_fields = collect_record_fields(m, col);
@ -308,12 +307,12 @@ fn compile_submatch(bcx: @block_ctxt, m: &match, vals: [ValueRef],
let rec_ty = ty::node_id_to_monotype(ccx.tcx, pat_id);
let fields =
alt ty::struct(ccx.tcx, rec_ty) { ty::ty_rec(fields) { fields } };
let rec_vals = ~[];
let rec_vals = [];
for field_name: ast::ident in rec_fields {
let ix: uint =
ty::field_idx(ccx.sess, dummy_sp(), field_name, fields);
let r = trans::GEP_tup_like(bcx, rec_ty, val, ~[0, ix as int]);
rec_vals += ~[r.val];
let r = trans::GEP_tup_like(bcx, rec_ty, val, [0, ix as int]);
rec_vals += [r.val];
bcx = r.bcx;
}
compile_submatch(bcx, enter_rec(m, col, rec_fields, val),
@ -323,13 +322,14 @@ fn compile_submatch(bcx: @block_ctxt, m: &match, vals: [ValueRef],
if any_tup_pat(m, col) {
let tup_ty = ty::node_id_to_monotype(ccx.tcx, pat_id);
let n_tup_elts = alt ty::struct(ccx.tcx, tup_ty) {
ty::ty_tup(elts) { vec::len(elts) }
};
let tup_vals = ~[], i = 0u;
let n_tup_elts =
alt ty::struct(ccx.tcx, tup_ty) {
ty::ty_tup(elts) { vec::len(elts) }
};
let tup_vals = [], i = 0u;
while i < n_tup_elts {
let r = trans::GEP_tup_like(bcx, tup_ty, val, ~[0, i as int]);
tup_vals += ~[r.val];
let r = trans::GEP_tup_like(bcx, tup_ty, val, [0, i as int]);
tup_vals += [r.val];
bcx = r.bcx;
i += 1u;
}
@ -343,9 +343,9 @@ fn compile_submatch(bcx: @block_ctxt, m: &match, vals: [ValueRef],
let box = bcx.build.Load(val);
let unboxed =
bcx.build.InBoundsGEP(box,
~[C_int(0),
C_int(back::abi::box_rc_field_body)]);
compile_submatch(bcx, enter_box(m, col, val), ~[unboxed] + vals_left,
[C_int(0),
C_int(back::abi::box_rc_field_body)]);
compile_submatch(bcx, enter_box(m, col, val), [unboxed] + vals_left,
f, exits);
ret;
}
@ -356,14 +356,16 @@ fn compile_submatch(bcx: @block_ctxt, m: &match, vals: [ValueRef],
let kind = no_branch;
let test_val = val;
if vec::len(opts) > 0u {
alt opts.(0) {
alt opts[0] {
var(_, vdef) {
if vec::len(ty::tag_variants(ccx.tcx, vdef.tg)) == 1u {
kind = single;
} else {
let tagptr = bcx.build.PointerCast
(val, trans_common::T_opaque_tag_ptr(ccx.tn));
let discrimptr = bcx.build.GEP(tagptr, ~[C_int(0), C_int(0)]);
let tagptr =
bcx.build.PointerCast(
val,
trans_common::T_opaque_tag_ptr(ccx.tn));
let discrimptr = bcx.build.GEP(tagptr, [C_int(0), C_int(0)]);
test_val = bcx.build.Load(discrimptr);
kind = switch;
}
@ -398,15 +400,15 @@ fn compile_submatch(bcx: @block_ctxt, m: &match, vals: [ValueRef],
let r = trans_opt(bcx, opt);
bcx = r.bcx;
let t = ty::node_id_to_type(ccx.tcx, pat_id);
let eq = trans::trans_compare(bcx, ast::eq, test_val, t,
r.val, t);
let eq =
trans::trans_compare(bcx, ast::eq, test_val, t, r.val, t);
bcx = new_sub_block_ctxt(bcx, "next");
eq.bcx.build.CondBr(eq.val, opt_cx.llbb, bcx.llbb);
}
_ { }
}
let size = 0u;
let unpacked = ~[];
let unpacked = [];
alt opt {
var(_, vdef) {
let args = extract_variant_args(opt_cx, pat_id, vdef, val);
@ -441,18 +443,18 @@ fn make_phi_bindings(bcx: &@block_ctxt, map: &[exit_node],
let our_block = bcx.llbb as uint;
let success = true;
for each item: @{key: ast::ident, val: ast::node_id} in ids.items() {
let llbbs = ~[];
let vals = ~[];
let llbbs = [];
let vals = [];
for ex: exit_node in map {
if ex.to as uint == our_block {
alt assoc(item.key, ex.bound) {
some(val) { llbbs += ~[ex.from]; vals += ~[val]; }
some(val) { llbbs += [ex.from]; vals += [val]; }
none. { }
}
}
}
if vec::len(vals) > 0u {
let phi = bcx.build.Phi(val_ty(vals.(0)), vals, llbbs);
let phi = bcx.build.Phi(val_ty(vals[0]), vals, llbbs);
bcx.fcx.lllocals.insert(item.val, phi);
} else { success = false; }
}
@ -461,25 +463,23 @@ fn make_phi_bindings(bcx: &@block_ctxt, map: &[exit_node],
fn trans_alt(cx: &@block_ctxt, expr: &@ast::expr, arms: &[ast::arm],
output: &trans::out_method) -> result {
let bodies = ~[];
let match: match = ~[];
let bodies = [];
let match: match = [];
let er = trans::trans_expr(cx, expr);
if (ty::type_is_bot(bcx_tcx(cx), ty::expr_ty(bcx_tcx(cx), expr))) {
if ty::type_is_bot(bcx_tcx(cx), ty::expr_ty(bcx_tcx(cx), expr)) {
// No need to generate code for alt,
// since the disc diverges.
if (!cx.build.is_terminated()) {
if !cx.build.is_terminated() {
ret rslt(cx, cx.build.Unreachable());
}
else {
ret er;
}
} else { ret er; }
}
for a: ast::arm in arms {
let body = new_scope_block_ctxt(cx, "case_body");
bodies += ~[body];
bodies += [body];
for p: @ast::pat in a.pats {
match += ~[@{pats: ~[p], body: body.llbb, mutable bound: ~[]}];
match += [@{pats: [p], body: body.llbb, mutable bound: []}];
}
}
@ -489,26 +489,26 @@ fn trans_alt(cx: &@block_ctxt, expr: &@ast::expr, arms: &[ast::arm],
done: @mutable option::t<BasicBlockRef>) -> BasicBlockRef {
alt *done { some(bb) { ret bb; } _ { } }
let fail_cx = new_sub_block_ctxt(cx, "case_fallthrough");
trans::trans_fail(fail_cx, some(sp), "non-exhaustive match failure");
trans::trans_fail(fail_cx, some(sp), "non-exhaustive match failure");;
*done = some(fail_cx.llbb);
ret fail_cx.llbb;
}
let exit_map = ~[];
let exit_map = [];
let t = trans::node_id_type(cx.fcx.lcx.ccx, expr.id);
let v = trans::spill_if_immediate(er.bcx, er.val, t);
compile_submatch(er.bcx, match, ~[v],
bind mk_fail(cx, expr.span, fail_cx), exit_map);
compile_submatch(er.bcx, match, [v], bind mk_fail(cx, expr.span, fail_cx),
exit_map);
let i = 0u;
let arm_results = ~[];
let arm_results = [];
for a: ast::arm in arms {
let body_cx = bodies.(i);
if make_phi_bindings(body_cx, exit_map, ast::pat_id_map(a.pats.(0))) {
let body_cx = bodies[i];
if make_phi_bindings(body_cx, exit_map, ast::pat_id_map(a.pats[0])) {
let block_res = trans::trans_block(body_cx, a.body, output);
arm_results += ~[block_res];
arm_results += [block_res];
} else { // Unreachable
arm_results += ~[rslt(body_cx, C_nil())];
arm_results += [rslt(body_cx, C_nil())];
}
i += 1u;
}
@ -518,8 +518,7 @@ fn trans_alt(cx: &@block_ctxt, expr: &@ast::expr, arms: &[ast::arm],
// Not alt-related, but similar to the pattern-munging code above
fn bind_irrefutable_pat(bcx: @block_ctxt, pat: &@ast::pat, val: ValueRef,
table: hashmap<ast::node_id, ValueRef>,
make_copy: bool)
-> @block_ctxt {
make_copy: bool) -> @block_ctxt {
let ccx = bcx.fcx.lcx.ccx;
alt pat.node {
ast::pat_bind(_) {
@ -532,9 +531,7 @@ fn bind_irrefutable_pat(bcx: @block_ctxt, pat: &@ast::pat, val: ValueRef,
bcx = trans::copy_ty(bcx, loaded, ty).bcx;
table.insert(pat.id, alloc);
trans_common::add_clean(bcx, alloc, ty);
} else {
table.insert(pat.id, val);
}
} else { table.insert(pat.id, val); }
}
ast::pat_tag(_, sub) {
if vec::len(sub) == 0u { ret bcx; }
@ -542,8 +539,7 @@ fn bind_irrefutable_pat(bcx: @block_ctxt, pat: &@ast::pat, val: ValueRef,
let args = extract_variant_args(bcx, pat.id, vdefs, val);
let i = 0;
for argval: ValueRef in args.vals {
bcx = bind_irrefutable_pat(bcx, sub.(i), argval, table,
make_copy);
bcx = bind_irrefutable_pat(bcx, sub[i], argval, table, make_copy);
i += 1;
}
}
@ -554,7 +550,7 @@ fn bind_irrefutable_pat(bcx: @block_ctxt, pat: &@ast::pat, val: ValueRef,
for f: ast::field_pat in fields {
let ix: uint =
ty::field_idx(ccx.sess, pat.span, f.ident, rec_fields);
let r = trans::GEP_tup_like(bcx, rec_ty, val, ~[0, ix as int]);
let r = trans::GEP_tup_like(bcx, rec_ty, val, [0, ix as int]);
bcx = bind_irrefutable_pat(r.bcx, f.pat, r.val, table, make_copy);
}
}
@ -562,18 +558,20 @@ fn bind_irrefutable_pat(bcx: @block_ctxt, pat: &@ast::pat, val: ValueRef,
let tup_ty = ty::node_id_to_monotype(ccx.tcx, pat.id);
let i = 0u;
for elem in elems {
let r = trans::GEP_tup_like(bcx, tup_ty, val, ~[0, i as int]);
let r = trans::GEP_tup_like(bcx, tup_ty, val, [0, i as int]);
bcx = bind_irrefutable_pat(r.bcx, elem, r.val, table, make_copy);
i += 1u;
}
}
ast::pat_box(inner) {
let box = bcx.build.Load(val);
let unboxed = bcx.build.InBoundsGEP
(box, ~[C_int(0), C_int(back::abi::box_rc_field_body)]);
let unboxed =
bcx.build.InBoundsGEP(box,
[C_int(0),
C_int(back::abi::box_rc_field_body)]);
bcx = bind_irrefutable_pat(bcx, inner, unboxed, table, true);
}
ast::pat_wild. | ast::pat_lit(_) {}
ast::pat_wild. | ast::pat_lit(_) { }
}
ret bcx;
}


@ -71,9 +71,9 @@ type derived_tydesc_info = {lltydesc: ValueRef, escapes: bool};
type glue_fns = {no_op_type_glue: ValueRef};
tag tydesc_kind {
tk_static; // Static (monomorphic) type descriptor.
tk_param; // Type parameter.
tk_derived; // Derived from a typaram or another derived tydesc.
tk_static; // Static (monomorphic) type descriptor.
tk_param; // Type parameter.
tk_derived; // Derived from a typaram or another derived tydesc.
}
type tydesc_info =
@ -111,46 +111,44 @@ type stats =
fn_times: @mutable [{ident: str, time: int}]};
// Crate context. Every crate we compile has one of these.
type crate_ctxt = {
sess: session::session,
llmod: ModuleRef,
td: target_data,
tn: type_names,
externs: hashmap<str, ValueRef>,
intrinsics: hashmap<str, ValueRef>,
type crate_ctxt =
// A mapping from the def_id of each item in this crate to the address
// of the first instruction of the item's definition in the executable
// we're generating.
item_ids: hashmap<ast::node_id, ValueRef>,
ast_map: ast_map::map,
item_symbols: hashmap<ast::node_id, str>,
mutable main_fn: option::t<ValueRef>,
link_meta: link::link_meta,
// TODO: hashmap<tup(tag_id,subtys), @tag_info>
tag_sizes: hashmap<ty::t, uint>,
discrims: hashmap<ast::node_id, ValueRef>,
discrim_symbols: hashmap<ast::node_id, str>,
fn_pairs: hashmap<ast::node_id, ValueRef>,
consts: hashmap<ast::node_id, ValueRef>,
obj_methods: hashmap<ast::node_id, ()>,
tydescs: hashmap<ty::t, @tydesc_info>,
module_data: hashmap<str, ValueRef>,
lltypes: hashmap<ty::t, TypeRef>,
glues: @glue_fns,
names: namegen,
sha: std::sha1::sha1,
type_sha1s: hashmap<ty::t, str>,
type_short_names: hashmap<ty::t, str>,
tcx: ty::ctxt,
stats: stats,
upcalls: @upcall::upcalls,
rust_object_type: TypeRef,
tydesc_type: TypeRef,
task_type: TypeRef,
shape_cx: shape::ctxt,
gc_cx: gc::ctxt
};
{sess: session::session,
llmod: ModuleRef,
td: target_data,
tn: type_names,
externs: hashmap<str, ValueRef>,
intrinsics: hashmap<str, ValueRef>,
item_ids: hashmap<ast::node_id, ValueRef>,
ast_map: ast_map::map,
item_symbols: hashmap<ast::node_id, str>,
mutable main_fn: option::t<ValueRef>,
link_meta: link::link_meta,
tag_sizes: hashmap<ty::t, uint>,
discrims: hashmap<ast::node_id, ValueRef>,
discrim_symbols: hashmap<ast::node_id, str>,
fn_pairs: hashmap<ast::node_id, ValueRef>,
consts: hashmap<ast::node_id, ValueRef>,
obj_methods: hashmap<ast::node_id, ()>,
tydescs: hashmap<ty::t, @tydesc_info>,
module_data: hashmap<str, ValueRef>,
lltypes: hashmap<ty::t, TypeRef>,
glues: @glue_fns,
names: namegen,
sha: std::sha1::sha1,
type_sha1s: hashmap<ty::t, str>,
type_short_names: hashmap<ty::t, str>,
tcx: ty::ctxt,
stats: stats,
upcalls: @upcall::upcalls,
rust_object_type: TypeRef,
tydesc_type: TypeRef,
task_type: TypeRef,
shape_cx: shape::ctxt,
gc_cx: gc::ctxt};
type local_ctxt =
{path: [str],
@ -164,12 +162,11 @@ type val_self_pair = {v: ValueRef, t: ty::t};
// Function context. Every LLVM function we create will have one of
// these.
type fn_ctxt = {
type fn_ctxt =
// The ValueRef returned from a call to llvm::LLVMAddFunction; the
// address of the first instruction in the sequence of
// instructions for this function that will go in the .text
// section of the executable we're generating.
llfn: ValueRef,
// The three implicit arguments that arrive in the function we're
// creating. For instance, foo(int, int) is really foo(ret*,
@ -179,16 +176,13 @@ type fn_ctxt = {
// convenience.
// Points to the current task.
lltaskptr: ValueRef,
// Points to the current environment (bindings of variables to
// values), if this is a regular function; points to the current
// object, if this is a method.
llenv: ValueRef,
// Points to where the return value of this function should end
// up.
llretptr: ValueRef,
// The next three elements: "hoisted basic blocks" containing
// administrative activities that have to happen in only one place in
@ -196,20 +190,16 @@ type fn_ctxt = {
// A block for all the function's static allocas, so that LLVM
// will coalesce them into a single alloca call.
mutable llstaticallocas: BasicBlockRef,
// A block containing code that copies incoming arguments to space
// already allocated by code in one of the llallocas blocks.
// (LLVM requires that arguments be copied to local allocas before
// allowing most any operation to be performed on them.)
mutable llcopyargs: BasicBlockRef,
// The first block containing derived tydescs received from the
// runtime. See description of derived_tydescs, below.
mutable llderivedtydescs_first: BasicBlockRef,
// The last block of the llderivedtydescs group.
mutable llderivedtydescs: BasicBlockRef,
// A block for all of the dynamically sized allocas. This must be
// after llderivedtydescs, because these sometimes depend on
@ -219,52 +209,41 @@ type fn_ctxt = {
// for incoming function arguments? Or is it merely the block
// containing code that copies incoming args to space already
// alloca'd by code in llallocas?
mutable lldynamicallocas: BasicBlockRef,
mutable llreturn: BasicBlockRef,
// The token used to clear the dynamic allocas at the end of this frame.
mutable llobstacktoken: option::t<ValueRef>,
// The 'self' object currently in use in this function, if there
// is one.
mutable llself: option::t<val_self_pair>,
// If this function is actually a iter, a block containing the
// code called whenever the iter calls 'put'.
mutable lliterbody: option::t<ValueRef>,
// If this function is actually a iter, the type of the function
// that that we call when we call 'put'. Having to track this is
// pretty irritating. We have to do it because we need the type if
// we are going to put the iterbody into a closure (if it appears
// in a for-each inside of an iter).
mutable iterbodyty: option::t<ty::t>,
// The next four items: hash tables mapping from AST def_ids to
// LLVM-stuff-in-the-frame.
// Maps arguments to allocas created for them in llallocas.
llargs: hashmap<ast::node_id, ValueRef>,
// Maps fields in objects to pointers into the interior of
// llself's body.
llobjfields: hashmap<ast::node_id, ValueRef>,
// Maps the def_ids for local variables to the allocas created for
// them in llallocas.
lllocals: hashmap<ast::node_id, ValueRef>,
// The same as above, but for variables accessed via the frame
// pointer we pass into an iter, for access to the static
// environment of the iter-calling frame.
llupvars: hashmap<ast::node_id, ValueRef>,
// For convenience, a vector of the incoming tydescs for each of
// this functions type parameters, fetched via llvm::LLVMGetParam.
// For example, for a function foo::<A, B, C>(), lltydescs contains
// the ValueRefs for the tydescs for A, B, and C.
mutable lltydescs: [ValueRef],
// Derived tydescs are tydescs created at runtime, for types that
// involve type parameters inside type constructors. For example,
@ -275,31 +254,48 @@ type fn_ctxt = {
// when information about both "[T]" and "T" are available. When
// such a tydesc is created, we cache it in the derived_tydescs
// table for the next time that such a tydesc is needed.
derived_tydescs: hashmap<ty::t, derived_tydesc_info>,
// The node_id of the function, or -1 if it doesn't correspond to
// a user-defined function.
id: ast::node_id,
// The source span where this function comes from, for error
// reporting.
sp: span,
// This function's enclosing local context.
lcx: @local_ctxt
};
{llfn: ValueRef,
lltaskptr: ValueRef,
llenv: ValueRef,
llretptr: ValueRef,
mutable llstaticallocas: BasicBlockRef,
mutable llcopyargs: BasicBlockRef,
mutable llderivedtydescs_first: BasicBlockRef,
mutable llderivedtydescs: BasicBlockRef,
mutable lldynamicallocas: BasicBlockRef,
mutable llreturn: BasicBlockRef,
mutable llobstacktoken: option::t<ValueRef>,
mutable llself: option::t<val_self_pair>,
mutable lliterbody: option::t<ValueRef>,
mutable iterbodyty: option::t<ty::t>,
llargs: hashmap<ast::node_id, ValueRef>,
llobjfields: hashmap<ast::node_id, ValueRef>,
lllocals: hashmap<ast::node_id, ValueRef>,
llupvars: hashmap<ast::node_id, ValueRef>,
mutable lltydescs: [ValueRef],
derived_tydescs: hashmap<ty::t, derived_tydesc_info>,
id: ast::node_id,
sp: span,
lcx: @local_ctxt};
tag cleanup {
clean(fn(&@block_ctxt) -> result );
clean_temp(ValueRef, fn(&@block_ctxt) -> result );
clean(fn(&@block_ctxt) -> result);
clean_temp(ValueRef, fn(&@block_ctxt) -> result);
}
fn add_clean(cx: &@block_ctxt, val: ValueRef, ty: ty::t) {
find_scope_cx(cx).cleanups += ~[clean(bind drop_slot(_, val, ty))];
find_scope_cx(cx).cleanups += [clean(bind drop_slot(_, val, ty))];
}
fn add_clean_temp(cx: &@block_ctxt, val: ValueRef, ty: ty::t) {
find_scope_cx(cx).cleanups +=
~[clean_temp(val, bind drop_ty(_, val, ty))];
find_scope_cx(cx).cleanups += [clean_temp(val, bind drop_ty(_, val, ty))];
}
// Note that this only works for temporaries. We should, at some point, move
@ -326,22 +322,23 @@ fn revoke_clean(cx: &@block_ctxt, val: ValueRef) {
sc_cx.cleanups =
std::vec::slice(sc_cx.cleanups, 0u, found as uint) +
std::vec::slice(sc_cx.cleanups, (found as uint) + 1u,
std::vec::len(sc_cx.cleanups));
std::vec::len(sc_cx.cleanups));
}
fn get_res_dtor(ccx : &@crate_ctxt, sp : &span, did : &ast::def_id,
inner_t : ty::t) -> ValueRef {
fn get_res_dtor(ccx: &@crate_ctxt, sp: &span, did: &ast::def_id,
inner_t: ty::t) -> ValueRef {
if did.crate == ast::local_crate {
alt ccx.fn_pairs.find(did.node) {
some(x) { ret x; }
_ { ccx.tcx.sess.bug("get_res_dtor: can't find resource dtor!"); }
some(x) { ret x; }
_ { ccx.tcx.sess.bug("get_res_dtor: can't find resource dtor!"); }
}
}
let params = csearch::get_type_param_count(ccx.sess.get_cstore(), did);
let f_t = trans::type_of_fn(ccx, sp, ast::proto_fn,
~[{ mode: ty::mo_alias(false), ty: inner_t }],
ty::mk_nil(ccx.tcx), params);
let f_t =
trans::type_of_fn(ccx, sp, ast::proto_fn,
[{mode: ty::mo_alias(false), ty: inner_t}],
ty::mk_nil(ccx.tcx), params);
ret trans::get_extern_const(ccx.externs, ccx.llmod,
csearch::get_symbol(ccx.sess.get_cstore(),
did),
@ -378,36 +375,35 @@ tag block_kind {
// code. Each basic block we generate is attached to a function, typically
// with many basic blocks per function. All the basic blocks attached to a
// function are organized as a directed graph.
type block_ctxt = {
type block_ctxt =
// The BasicBlockRef returned from a call to
// llvm::LLVMAppendBasicBlock(llfn, name), which adds a basic
// block to the function pointed to by llfn. We insert
// instructions into that block by way of this block context.
llbb: BasicBlockRef,
// The llvm::builder object serving as an interface to LLVM's
// LLVMBuild* functions.
build: builder,
// The block pointing to this one in the function's digraph.
parent: block_parent,
// The 'kind' of basic block this is.
kind: block_kind,
// A list of functions that run at the end of translating this
// block, cleaning up any variables that were introduced in the
// block and need to go out of scope at the end of it.
mutable cleanups: [cleanup],
// The source span where this block comes from, for error
// reporting.
sp: span,
// The function context for the function to which this block is
// attached.
fcx: @fn_ctxt
};
{llbb: BasicBlockRef,
build: builder,
parent: block_parent,
kind: block_kind,
mutable cleanups: [cleanup],
sp: span,
fcx: @fn_ctxt};
// FIXME: we should be able to use option::t<@block_parent> here but
// the infinite-tag check in rustboot gets upset.
@ -417,7 +413,7 @@ type result = {bcx: @block_ctxt, val: ValueRef};
type result_t = {bcx: @block_ctxt, val: ValueRef, ty: ty::t};
fn extend_path(cx: @local_ctxt, name: &str) -> @local_ctxt {
ret @{path: cx.path + ~[name] with *cx};
ret @{path: cx.path + [name] with *cx};
}
fn rslt(bcx: @block_ctxt, val: ValueRef) -> result {
@ -438,7 +434,7 @@ fn struct_elt(llstructty: TypeRef, n: uint) -> TypeRef {
assert (n < elt_count);
let elt_tys = std::vec::init_elt(T_nil(), elt_count);
llvm::LLVMGetStructElementTypes(llstructty, std::vec::to_ptr(elt_tys));
ret llvm::LLVMGetElementType(elt_tys.(n));
ret llvm::LLVMGetElementType(elt_tys[n]);
}
fn find_scope_cx(cx: &@block_ctxt) -> @block_ctxt {
@ -459,8 +455,8 @@ fn bcx_tcx(bcx: &@block_ctxt) -> ty::ctxt { ret bcx.fcx.lcx.ccx.tcx; }
fn bcx_ccx(bcx: &@block_ctxt) -> @crate_ctxt { ret bcx.fcx.lcx.ccx; }
fn bcx_lcx(bcx: &@block_ctxt) -> @local_ctxt { ret bcx.fcx.lcx; }
fn bcx_fcx(bcx: &@block_ctxt) -> @fn_ctxt { ret bcx.fcx; }
fn fcx_ccx(fcx: &@fn_ctxt) -> @crate_ctxt { ret fcx.lcx.ccx; }
fn fcx_tcx(fcx: &@fn_ctxt) -> ty::ctxt { ret fcx.lcx.ccx.tcx; }
fn fcx_ccx(fcx: &@fn_ctxt) -> @crate_ctxt { ret fcx.lcx.ccx; }
fn fcx_tcx(fcx: &@fn_ctxt) -> ty::ctxt { ret fcx.lcx.ccx.tcx; }
fn lcx_ccx(lcx: &@local_ctxt) -> @crate_ctxt { ret lcx.ccx; }
fn ccx_tcx(ccx: &@crate_ctxt) -> ty::ctxt { ret ccx.tcx; }
@ -527,7 +523,7 @@ fn T_fn(inputs: &[TypeRef], output: TypeRef) -> TypeRef {
}
fn T_fn_pair(cx: &crate_ctxt, tfn: TypeRef) -> TypeRef {
ret T_struct(~[T_ptr(tfn), T_opaque_closure_ptr(cx)]);
ret T_struct([T_ptr(tfn), T_opaque_closure_ptr(cx)]);
}
fn T_ptr(t: TypeRef) -> TypeRef { ret llvm::LLVMPointerType(t, 0u); }
@ -547,7 +543,7 @@ fn set_struct_body(t: TypeRef, elts: &[TypeRef]) {
False);
}
fn T_empty_struct() -> TypeRef { ret T_struct(~[]); }
fn T_empty_struct() -> TypeRef { ret T_struct([]); }
// NB: This will return something different every time it's called. If
// you need a generic object type that matches the type of your
@ -556,25 +552,27 @@ fn T_empty_struct() -> TypeRef { ret T_struct(~[]); }
fn T_rust_object() -> TypeRef {
let t = T_named_struct("rust_object");
let e = T_ptr(T_empty_struct());
set_struct_body(t, ~[e, e]);
set_struct_body(t, [e, e]);
ret t;
}
fn T_task() -> TypeRef {
let t = T_named_struct("task");
let // Refcount
// Delegate pointer
// Stack segment pointer
// Runtime SP
// Rust SP
// GC chain
// Refcount
// Delegate pointer
// Stack segment pointer
// Runtime SP
// Rust SP
// GC chain
// Domain pointer
// Crate cache pointer
elems =
~[T_int(), T_int(), T_int(), T_int(), T_int(), T_int(), T_int(),
T_int()];
// Domain pointer
// Crate cache pointer
let elems =
[T_int(), T_int(), T_int(), T_int(), T_int(), T_int(), T_int(),
T_int()];
set_struct_body(t, elems);
ret t;
}
@ -586,7 +584,7 @@ fn T_tydesc_field(cx: &crate_ctxt, field: int) -> TypeRef {
std::vec::init_elt::<TypeRef>(T_nil(), abi::n_tydesc_fields as uint);
llvm::LLVMGetStructElementTypes(cx.tydesc_type,
std::vec::to_ptr::<TypeRef>(tydesc_elts));
let t = llvm::LLVMGetElementType(tydesc_elts.(field));
let t = llvm::LLVMGetElementType(tydesc_elts[field]);
ret t;
}
@ -611,16 +609,16 @@ fn T_tydesc(taskptr_type: TypeRef) -> TypeRef {
let tydescpp = T_ptr(T_ptr(tydesc));
let pvoid = T_ptr(T_i8());
let glue_fn_ty =
T_ptr(T_fn(~[T_ptr(T_nil()), taskptr_type, T_ptr(T_nil()), tydescpp,
pvoid], T_void()));
T_ptr(T_fn([T_ptr(T_nil()), taskptr_type, T_ptr(T_nil()), tydescpp,
pvoid], T_void()));
let cmp_glue_fn_ty =
T_ptr(T_fn(~[T_ptr(T_i1()), taskptr_type, T_ptr(tydesc), tydescpp,
pvoid, pvoid, T_i8()], T_void()));
T_ptr(T_fn([T_ptr(T_i1()), taskptr_type, T_ptr(tydesc), tydescpp,
pvoid, pvoid, T_i8()], T_void()));
let elems =
~[tydescpp, T_int(), T_int(), glue_fn_ty, glue_fn_ty, glue_fn_ty,
glue_fn_ty, glue_fn_ty, glue_fn_ty, glue_fn_ty, cmp_glue_fn_ty,
T_ptr(T_i8()), T_ptr(T_i8()), T_int()];
[tydescpp, T_int(), T_int(), glue_fn_ty, glue_fn_ty, glue_fn_ty,
glue_fn_ty, glue_fn_ty, glue_fn_ty, glue_fn_ty, cmp_glue_fn_ty,
T_ptr(T_i8()), T_ptr(T_i8()), T_int()];
set_struct_body(tydesc, elems);
ret tydesc;
}
@ -628,13 +626,13 @@ fn T_tydesc(taskptr_type: TypeRef) -> TypeRef {
fn T_array(t: TypeRef, n: uint) -> TypeRef { ret llvm::LLVMArrayType(t, n); }
fn T_evec(t: TypeRef) -> TypeRef {
ret T_struct(~[T_int(), // Refcount
T_int(), // Alloc
T_int(), // Fill
ret T_struct([T_int(), // Refcount
T_int(), // Alloc
T_int(), // Fill
T_int(), // Pad
// Body elements
T_array(t, 0u)]);
T_int(), // Pad
// Body elements
T_array(t, 0u)]);
}
fn T_opaque_vec_ptr() -> TypeRef { ret T_ptr(T_evec(T_int())); }
@ -644,24 +642,24 @@ fn T_opaque_vec_ptr() -> TypeRef { ret T_ptr(T_evec(T_int())); }
//
// TODO: Support user-defined vector sizes.
fn T_ivec(t: TypeRef) -> TypeRef {
ret T_struct(~[T_int(), // Length ("fill"; if zero, heapified)
T_int(), // Alloc
T_array(t, abi::ivec_default_length)]); // Body elements
ret T_struct([T_int(), // Length ("fill"; if zero, heapified)
T_int(), // Alloc
T_array(t, abi::ivec_default_length)]); // Body elements
}
// Note that the size of this one is in bytes.
fn T_opaque_ivec() -> TypeRef {
ret T_struct(~[T_int(), // Length ("fill"; if zero, heapified)
T_int(), // Alloc
T_array(T_i8(), 0u)]); // Body elements
ret T_struct([T_int(), // Length ("fill"; if zero, heapified)
T_int(), // Alloc
T_array(T_i8(), 0u)]); // Body elements
}
fn T_ivec_heap_part(t: TypeRef) -> TypeRef {
ret T_struct(~[T_int(), // Real length
T_array(t, 0u)]); // Body elements
ret T_struct([T_int(), // Real length
T_array(t, 0u)]); // Body elements
}
@ -669,36 +667,36 @@ fn T_ivec_heap_part(t: TypeRef) -> TypeRef {
// Interior vector on the heap, also known as the "stub". Cast to this when
// the allocated length (second element of T_ivec above) is zero.
fn T_ivec_heap(t: TypeRef) -> TypeRef {
ret T_struct(~[T_int(), // Length (zero)
T_int(), // Alloc
T_ptr(T_ivec_heap_part(t))]); // Pointer
ret T_struct([T_int(), // Length (zero)
T_int(), // Alloc
T_ptr(T_ivec_heap_part(t))]); // Pointer
}
fn T_opaque_ivec_heap_part() -> TypeRef {
ret T_struct(~[T_int(), // Real length
T_array(T_i8(), 0u)]); // Body elements
ret T_struct([T_int(), // Real length
T_array(T_i8(), 0u)]); // Body elements
}
fn T_opaque_ivec_heap() -> TypeRef {
ret T_struct(~[T_int(), // Length (zero)
T_int(), // Alloc
T_ptr(T_opaque_ivec_heap_part())]); // Pointer
ret T_struct([T_int(), // Length (zero)
T_int(), // Alloc
T_ptr(T_opaque_ivec_heap_part())]); // Pointer
}
fn T_str() -> TypeRef { ret T_evec(T_i8()); }
fn T_box(t: TypeRef) -> TypeRef { ret T_struct(~[T_int(), t]); }
fn T_box(t: TypeRef) -> TypeRef { ret T_struct([T_int(), t]); }
fn T_port(_t: TypeRef) -> TypeRef {
ret T_struct(~[T_int()]); // Refcount
ret T_struct([T_int()]); // Refcount
}
fn T_chan(_t: TypeRef) -> TypeRef {
ret T_struct(~[T_int()]); // Refcount
ret T_struct([T_int()]); // Refcount
}
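
For orientation, the layouts built by T_evec, T_ivec and T_ivec_heap above amount to a fill word, an alloc word, and either inline body elements or, once the vector is heapified, a pointer to a heap part that carries the real length. A rough sketch of those shapes in modern Rust follows; the names and the inline length are invented for illustration and are not part of this patch:

    // Illustrative shapes only; names and the inline length are invented here.
    const IVEC_DEFAULT_LENGTH: usize = 4; // stand-in for abi::ivec_default_length

    #[repr(C)]
    struct Ivec<T> {
        fill: usize,                     // length ("fill"); zero means the body was heapified
        alloc: usize,                    // allocated size
        elems: [T; IVEC_DEFAULT_LENGTH], // inline body elements
    }

    #[repr(C)]
    struct IvecHeapPart<T> {
        fill: usize,   // the real length, once the body lives on the heap
        elems: [T; 0], // body elements follow
    }

    #[repr(C)]
    struct IvecHeap<T> {
        fill: usize,               // zero in the stub
        alloc: usize,
        ptr: *mut IvecHeapPart<T>, // pointer to the heap part
    }

A zero length word in the header is what the comments above mean by "heapified": the stub keeps only the two header words plus a pointer, and the real length moves to the heap part.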
@ -716,14 +714,13 @@ fn T_typaram(tn: &type_names) -> TypeRef {
fn T_typaram_ptr(tn: &type_names) -> TypeRef { ret T_ptr(T_typaram(tn)); }
fn T_closure_ptr(cx: &crate_ctxt, llbindings_ty: TypeRef,
n_ty_params: uint) -> TypeRef {
fn T_closure_ptr(cx: &crate_ctxt, llbindings_ty: TypeRef, n_ty_params: uint)
-> TypeRef {
// NB: keep this in sync with code in trans_bind; we're making
// an LLVM typeref structure that has the same "shape" as the ty::t
// it constructs.
ret T_ptr(T_box(T_struct(~[T_ptr(cx.tydesc_type),
llbindings_ty,
T_captured_tydescs(cx, n_ty_params)])));
ret T_ptr(T_box(T_struct([T_ptr(cx.tydesc_type), llbindings_ty,
T_captured_tydescs(cx, n_ty_params)])));
}
fn T_opaque_closure_ptr(cx: &crate_ctxt) -> TypeRef {
@ -737,7 +734,7 @@ fn T_opaque_closure_ptr(cx: &crate_ctxt) -> TypeRef {
fn T_tag(tn: &type_names, size: uint) -> TypeRef {
let s = "tag_" + uint::to_str(size, 10u);
if tn.name_has_type(s) { ret tn.get_type(s); }
let t = T_struct(~[T_int(), T_array(T_i8(), size)]);
let t = T_struct([T_int(), T_array(T_i8(), size)]);
tn.associate(s, t);
ret t;
}
@ -745,7 +742,7 @@ fn T_tag(tn: &type_names, size: uint) -> TypeRef {
fn T_opaque_tag(tn: &type_names) -> TypeRef {
let s = "opaque_tag";
if tn.name_has_type(s) { ret tn.get_type(s); }
let t = T_struct(~[T_int(), T_i8()]);
let t = T_struct([T_int(), T_i8()]);
tn.associate(s, t);
ret t;
}
@ -763,8 +760,8 @@ fn T_obj_ptr(cx: &crate_ctxt, n_captured_tydescs: uint) -> TypeRef {
// type. The dynamically-sized fields follow the captured tydescs.
fn T_obj(cx: &crate_ctxt, n_captured_tydescs: uint) -> TypeRef {
ret T_struct(~[T_ptr(cx.tydesc_type),
T_captured_tydescs(cx, n_captured_tydescs)]);
ret T_struct([T_ptr(cx.tydesc_type),
T_captured_tydescs(cx, n_captured_tydescs)]);
}
ret T_ptr(T_box(T_obj(cx, n_captured_tydescs)));
}
@ -832,14 +829,15 @@ fn C_cstr(cx: &@crate_ctxt, s: &str) -> ValueRef {
// A rust boxed-and-length-annotated string.
fn C_str(cx: &@crate_ctxt, s: &str) -> ValueRef {
let len = str::byte_len(s);
let // 'alloc'
// 'fill'
// 'pad'
box =
C_struct(~[C_int(abi::const_refcount as int), C_int(len + 1u as int),
C_int(len + 1u as int), C_int(0),
llvm::LLVMConstString(str::buf(s), len, False)]);
let len =
str::byte_len(s); // 'alloc'
// 'fill'
// 'pad'
let box =
C_struct([C_int(abi::const_refcount as int), C_int(len + 1u as int),
C_int(len + 1u as int), C_int(0),
llvm::LLVMConstString(str::buf(s), len, False)]);
let g =
llvm::LLVMAddGlobal(cx.llmod, val_ty(box),
str::buf(cx.names.next("str")));
@ -856,8 +854,8 @@ fn C_postr(s: &str) -> ValueRef {
fn C_zero_byte_arr(size: uint) -> ValueRef {
let i = 0u;
let elts: [ValueRef] = ~[];
while i < size { elts += ~[C_u8(0u)]; i += 1u; }
let elts: [ValueRef] = [];
while i < size { elts += [C_u8(0u)]; i += 1u; }
ret llvm::LLVMConstArray(T_i8(), std::vec::to_ptr(elts),
std::vec::len(elts));
}
@ -873,19 +871,19 @@ fn C_named_struct(T: TypeRef, elts: &[ValueRef]) -> ValueRef {
}
fn C_array(ty: TypeRef, elts: &[ValueRef]) -> ValueRef {
ret llvm::LLVMConstArray(ty, std::vec::to_ptr(elts),
std::vec::len(elts));
ret llvm::LLVMConstArray(ty, std::vec::to_ptr(elts), std::vec::len(elts));
}
fn C_bytes(bytes : &[u8]) -> ValueRef {
fn C_bytes(bytes: &[u8]) -> ValueRef {
ret llvm::LLVMConstString(unsafe::reinterpret_cast(vec::to_ptr(bytes)),
vec::len(bytes), False);
}
fn C_shape(ccx : &@crate_ctxt, bytes : &[u8]) -> ValueRef {
fn C_shape(ccx: &@crate_ctxt, bytes: &[u8]) -> ValueRef {
let llshape = C_bytes(bytes);
let llglobal = llvm::LLVMAddGlobal(ccx.llmod, val_ty(llshape),
str::buf(ccx.names.next("shape")));
let llglobal =
llvm::LLVMAddGlobal(ccx.llmod, val_ty(llshape),
str::buf(ccx.names.next("shape")));
llvm::LLVMSetInitializer(llglobal, llshape);
llvm::LLVMSetGlobalConstant(llglobal, True);
llvm::LLVMSetLinkage(llglobal,
@ -43,10 +43,10 @@ fn trans_obj(cx: @local_ctxt, sp: &span, ob: &ast::_obj,
// The fields of our object will become the arguments to the function
// we're creating.
let fn_args: [ast::arg] = ~[];
let fn_args: [ast::arg] = [];
for f: ast::obj_field in ob.fields {
fn_args +=
~[{mode: ast::alias(false), ty: f.ty, ident: f.ident, id: f.id}];
[{mode: ast::alias(false), ty: f.ty, ident: f.ident, id: f.id}];
}
let fcx = new_fn_ctxt(cx, sp, llctor_decl);
@ -77,15 +77,14 @@ fn trans_obj(cx: @local_ctxt, sp: &span, ob: &ast::_obj,
// abi::obj_field_vtbl and abi::obj_field_box simply specify words 0 and 1
// of 'pair'.
let pair_vtbl =
bcx.build.GEP(pair, ~[C_int(0), C_int(abi::obj_field_vtbl)]);
let pair_box =
bcx.build.GEP(pair, ~[C_int(0), C_int(abi::obj_field_box)]);
bcx.build.GEP(pair, [C_int(0), C_int(abi::obj_field_vtbl)]);
let pair_box = bcx.build.GEP(pair, [C_int(0), C_int(abi::obj_field_box)]);
// Make a vtable for this object: a static array of pointers to functions.
// It will be located in the read-only memory of the executable we're
// creating and will contain ValueRefs for all of this object's methods.
// create_vtbl returns a pointer to the vtable, which we store.
let vtbl = create_vtbl(cx, sp, self_ty, ob, ty_params, none, ~[]);
let vtbl = create_vtbl(cx, sp, self_ty, ob, ty_params, none, []);
vtbl = bcx.build.PointerCast(vtbl, T_ptr(T_empty_struct()));
bcx.build.Store(vtbl, pair_vtbl);
@ -103,18 +102,18 @@ fn trans_obj(cx: @local_ctxt, sp: &span, ob: &ast::_obj,
// Store null into pair, if no args or typarams.
bcx.build.Store(C_null(llbox_ty), pair_box);
} else {
let obj_fields: [ty::t] = ~[];
for a: ty::arg in arg_tys { obj_fields += ~[a.ty]; }
let obj_fields: [ty::t] = [];
for a: ty::arg in arg_tys { obj_fields += [a.ty]; }
let tps: [ty::t] = ~[];
let tps: [ty::t] = [];
let tydesc_ty = ty::mk_type(ccx.tcx);
for tp: ast::ty_param in ty_params { tps += ~[tydesc_ty]; }
for tp: ast::ty_param in ty_params { tps += [tydesc_ty]; }
// Synthesize an object body type and hand it off to
// trans_malloc_boxed, which allocates a box, including space for a
// refcount.
let body_ty: ty::t = create_object_body_type(ccx.tcx, obj_fields, tps,
none);
let body_ty: ty::t =
create_object_body_type(ccx.tcx, obj_fields, tps, none);
let box = trans_malloc_boxed(bcx, body_ty);
bcx = box.bcx;
let body = box.body;
@ -129,8 +128,7 @@ fn trans_obj(cx: @local_ctxt, sp: &span, ob: &ast::_obj,
// later.
let body_tydesc =
GEP_tup_like(bcx, body_ty, body,
~[0, abi::obj_body_elt_tydesc]);
GEP_tup_like(bcx, body_ty, body, [0, abi::obj_body_elt_tydesc]);
bcx = body_tydesc.bcx;
let ti = none::<@tydesc_info>;
let body_td = get_tydesc(bcx, body_ty, true, ti).result;
@ -148,16 +146,15 @@ fn trans_obj(cx: @local_ctxt, sp: &span, ob: &ast::_obj,
// Copy typarams into captured typarams.
let body_typarams =
GEP_tup_like(bcx, body_ty, body,
~[0, abi::obj_body_elt_typarams]);
GEP_tup_like(bcx, body_ty, body, [0, abi::obj_body_elt_typarams]);
bcx = body_typarams.bcx;
// TODO: can we just get typarams_ty out of body_ty instead?
let typarams_ty: ty::t = ty::mk_tup(ccx.tcx, tps);
let i: int = 0;
for tp: ast::ty_param in ty_params {
let typaram = bcx.fcx.lltydescs.(i);
let typaram = bcx.fcx.lltydescs[i];
let capture =
GEP_tup_like(bcx, typarams_ty, body_typarams.val, ~[0, i]);
GEP_tup_like(bcx, typarams_ty, body_typarams.val, [0, i]);
bcx = capture.bcx;
bcx = copy_val(bcx, INIT, capture.val, typaram, tydesc_ty).bcx;
i += 1;
@ -165,20 +162,19 @@ fn trans_obj(cx: @local_ctxt, sp: &span, ob: &ast::_obj,
// Copy args into body fields.
let body_fields =
GEP_tup_like(bcx, body_ty, body,
~[0, abi::obj_body_elt_fields]);
GEP_tup_like(bcx, body_ty, body, [0, abi::obj_body_elt_fields]);
bcx = body_fields.bcx;
i = 0;
for f: ast::obj_field in ob.fields {
alt bcx.fcx.llargs.find(f.id) {
some(arg1) {
let arg = load_if_immediate(bcx, arg1, arg_tys.(i).ty);
let arg = load_if_immediate(bcx, arg1, arg_tys[i].ty);
// TODO: can we just get fields_ty out of body_ty instead?
let fields_ty: ty::t = ty::mk_tup(ccx.tcx, obj_fields);
let field =
GEP_tup_like(bcx, fields_ty, body_fields.val, ~[0, i]);
GEP_tup_like(bcx, fields_ty, body_fields.val, [0, i]);
bcx = field.bcx;
bcx = copy_val(bcx, INIT, field.val, arg, arg_tys.(i).ty).bcx;
bcx = copy_val(bcx, INIT, field.val, arg, arg_tys[i].ty).bcx;
i += 1;
}
none. {
@ -210,16 +206,16 @@ fn trans_anon_obj(bcx: @block_ctxt, sp: &span, anon_obj: &ast::anon_obj,
// Fields. FIXME (part of issue #538): Where do we fill in the field
// *values* from the outer object?
let additional_fields: [ast::anon_obj_field] = ~[];
let additional_field_vals: [result] = ~[];
let additional_field_tys: [ty::t] = ~[];
let additional_fields: [ast::anon_obj_field] = [];
let additional_field_vals: [result] = [];
let additional_field_tys: [ty::t] = [];
alt anon_obj.fields {
none. { }
some(fields) {
additional_fields = fields;
for f: ast::anon_obj_field in fields {
additional_field_tys += ~[node_id_type(ccx, f.id)];
additional_field_vals += ~[trans_expr(bcx, f.expr)];
additional_field_tys += [node_id_type(ccx, f.id)];
additional_field_vals += [trans_expr(bcx, f.expr)];
}
}
}
@ -236,7 +232,7 @@ fn trans_anon_obj(bcx: @block_ctxt, sp: &span, anon_obj: &ast::anon_obj,
let wrapper_obj: ast::_obj =
{fields:
std::vec::map(ast::obj_field_from_anon_obj_field,
additional_fields),
additional_fields),
methods: anon_obj.methods};
let inner_obj_ty: ty::t;
@ -251,7 +247,7 @@ fn trans_anon_obj(bcx: @block_ctxt, sp: &span, anon_obj: &ast::anon_obj,
// just pass the outer object to create_vtbl(). Our vtable won't need
// to have any forwarding slots.
vtbl =
create_vtbl(bcx.fcx.lcx, sp, outer_obj_ty, wrapper_obj, ~[], none,
create_vtbl(bcx.fcx.lcx, sp, outer_obj_ty, wrapper_obj, [], none,
additional_field_tys);
}
some(e) {
@ -269,7 +265,7 @@ fn trans_anon_obj(bcx: @block_ctxt, sp: &span, anon_obj: &ast::anon_obj,
// forwarding slot. And, of course, we need to create a normal vtable
// entry for every method being added.
vtbl =
create_vtbl(bcx.fcx.lcx, sp, outer_obj_ty, wrapper_obj, ~[],
create_vtbl(bcx.fcx.lcx, sp, outer_obj_ty, wrapper_obj, [],
some(inner_obj_ty), additional_field_tys);
}
}
@ -283,9 +279,8 @@ fn trans_anon_obj(bcx: @block_ctxt, sp: &span, anon_obj: &ast::anon_obj,
// Grab onto the first and second elements of the pair.
let pair_vtbl =
bcx.build.GEP(pair, ~[C_int(0), C_int(abi::obj_field_vtbl)]);
let pair_box =
bcx.build.GEP(pair, ~[C_int(0), C_int(abi::obj_field_box)]);
bcx.build.GEP(pair, [C_int(0), C_int(abi::obj_field_vtbl)]);
let pair_box = bcx.build.GEP(pair, [C_int(0), C_int(abi::obj_field_box)]);
vtbl = bcx.build.PointerCast(vtbl, T_ptr(T_empty_struct()));
bcx.build.Store(vtbl, pair_vtbl);
@ -307,9 +302,9 @@ fn trans_anon_obj(bcx: @block_ctxt, sp: &span, anon_obj: &ast::anon_obj,
// Synthesize a type for the object body and hand it off to
// trans_malloc_boxed, which allocates a box, including space for a
// refcount.
let body_ty: ty::t = create_object_body_type(ccx.tcx,
additional_field_tys,
~[], some(inner_obj_ty));
let body_ty: ty::t =
create_object_body_type(ccx.tcx, additional_field_tys, [],
some(inner_obj_ty));
let box = trans_malloc_boxed(bcx, body_ty);
bcx = box.bcx;
let body = box.body;
@ -323,8 +318,7 @@ fn trans_anon_obj(bcx: @block_ctxt, sp: &span, anon_obj: &ast::anon_obj,
// the types of the object's fields, so that the fields can be freed
// later.
let body_tydesc =
GEP_tup_like(bcx, body_ty, body,
~[0, abi::obj_body_elt_tydesc]);
GEP_tup_like(bcx, body_ty, body, [0, abi::obj_body_elt_tydesc]);
bcx = body_tydesc.bcx;
let ti = none::<@tydesc_info>;
let body_td = get_tydesc(bcx, body_ty, true, ti).result;
@ -338,23 +332,20 @@ fn trans_anon_obj(bcx: @block_ctxt, sp: &span, anon_obj: &ast::anon_obj,
// function in its closure: the fields were passed to the object
// constructor and are now available to the object's methods.
let body_fields =
GEP_tup_like(bcx, body_ty, body,
~[0, abi::obj_body_elt_fields]);
GEP_tup_like(bcx, body_ty, body, [0, abi::obj_body_elt_fields]);
bcx = body_fields.bcx;
let i: int = 0;
for f: ast::anon_obj_field in additional_fields {
// FIXME (part of issue #538): make this work eventually, when we
// have additional field exprs in the AST.
load_if_immediate(bcx, additional_field_vals.(i).val,
additional_field_tys.(i));
let fields_ty: ty::t = ty::mk_tup(ccx.tcx,
additional_field_tys);
let field =
GEP_tup_like(bcx, fields_ty, body_fields.val, ~[0, i]);
load_if_immediate(bcx, additional_field_vals[i].val,
additional_field_tys[i]);
let fields_ty: ty::t = ty::mk_tup(ccx.tcx, additional_field_tys);
let field = GEP_tup_like(bcx, fields_ty, body_fields.val, [0, i]);
bcx = field.bcx;
bcx =
copy_val(bcx, INIT, field.val, additional_field_vals.(i).val,
additional_field_tys.(i)).bcx;
copy_val(bcx, INIT, field.val, additional_field_vals[i].val,
additional_field_tys[i]).bcx;
i += 1;
}
@ -370,7 +361,7 @@ fn trans_anon_obj(bcx: @block_ctxt, sp: &span, anon_obj: &ast::anon_obj,
let body_inner_obj =
GEP_tup_like(bcx, body_ty, body,
~[0, abi::obj_body_elt_inner_obj]);
[0, abi::obj_body_elt_inner_obj]);
bcx = body_inner_obj.bcx;
bcx =
copy_val(bcx, INIT, body_inner_obj.val, inner_obj_val.val,
@ -390,6 +381,7 @@ fn trans_anon_obj(bcx: @block_ctxt, sp: &span, anon_obj: &ast::anon_obj,
// Used only inside create_vtbl and create_backwarding_vtbl to distinguish
// different kinds of slots we'll have to create.
tag vtbl_mthd {
// Normal methods are complete AST nodes, but for forwarding methods, the
// only information we'll have about them is their type.
normal_mthd(@ast::method);
@ -421,9 +413,8 @@ fn vtbl_mthd_lteq(a: &vtbl_mthd, b: &vtbl_mthd) -> bool {
// filtering_fn: Used by create_vtbl to filter a list of methods to remove the
// ones that we don't need forwarding slots for.
fn filtering_fn(cx: @local_ctxt, m: &vtbl_mthd,
addtl_meths: [@ast::method]) ->
option::t<vtbl_mthd> {
fn filtering_fn(cx: @local_ctxt, m: &vtbl_mthd, addtl_meths: [@ast::method])
-> option::t<vtbl_mthd> {
// Since m is a fwding_mthd, and we're checking to see if it's in
// addtl_meths (which only contains normal_mthds), we can't just check if
@ -448,10 +439,10 @@ fn filtering_fn(cx: @local_ctxt, m: &vtbl_mthd,
// object, and return a pointer to it.
fn create_vtbl(cx: @local_ctxt, sp: &span, outer_obj_ty: ty::t,
ob: &ast::_obj, ty_params: &[ast::ty_param],
inner_obj_ty: option::t<ty::t>,
additional_field_tys: &[ty::t]) -> ValueRef {
inner_obj_ty: option::t<ty::t>, additional_field_tys: &[ty::t])
-> ValueRef {
let llmethods: [ValueRef] = ~[];
let llmethods: [ValueRef] = [];
alt inner_obj_ty {
none. {
@ -460,12 +451,12 @@ fn create_vtbl(cx: @local_ctxt, sp: &span, outer_obj_ty: ty::t,
// Sort and process all the methods.
let meths =
std::sort::merge_sort::<@ast::method>
(bind ast_mthd_lteq(_, _), ob.methods);
std::sort::merge_sort::<@ast::method>(bind ast_mthd_lteq(_, _),
ob.methods);
for m: @ast::method in meths {
llmethods += ~[process_normal_mthd(cx, m, outer_obj_ty,
ty_params)];
llmethods +=
[process_normal_mthd(cx, m, outer_obj_ty, ty_params)];
}
}
some(inner_obj_ty) {
@ -478,13 +469,13 @@ fn create_vtbl(cx: @local_ctxt, sp: &span, outer_obj_ty: ty::t,
// we take the set difference of { methods on the original object }
// and { methods being added, whether entirely new or overriding }.
let meths: [vtbl_mthd] = ~[];
let meths: [vtbl_mthd] = [];
// Gather up methods on the inner object.
alt ty::struct(cx.ccx.tcx, inner_obj_ty) {
ty::ty_obj(inner_obj_methods) {
for m: ty::method in inner_obj_methods {
meths += ~[fwding_mthd(@m)];
meths += [fwding_mthd(@m)];
}
}
_ {
@ -500,12 +491,12 @@ fn create_vtbl(cx: @local_ctxt, sp: &span, outer_obj_ty: ty::t,
// And now add the additional ones, both overriding ones and entirely
// new ones. These will just be normal methods.
for m: @ast::method in ob.methods { meths += ~[normal_mthd(m)]; }
for m: @ast::method in ob.methods { meths += [normal_mthd(m)]; }
// Sort all the methods and process them.
meths =
std::sort::merge_sort::<vtbl_mthd>
(bind vtbl_mthd_lteq(_, _), meths);
std::sort::merge_sort::<vtbl_mthd>(bind vtbl_mthd_lteq(_, _),
meths);
// To create forwarding methods, we'll need a "backwarding" vtbl. See
// create_backwarding_vtbl and process_bkwding_method for details.
@ -516,13 +507,13 @@ fn create_vtbl(cx: @local_ctxt, sp: &span, outer_obj_ty: ty::t,
alt m {
normal_mthd(nm) {
llmethods +=
~[process_normal_mthd(cx, nm, outer_obj_ty, ty_params)];
[process_normal_mthd(cx, nm, outer_obj_ty, ty_params)];
}
fwding_mthd(fm) {
llmethods +=
~[process_fwding_mthd(cx, sp, fm, ty_params, inner_obj_ty,
backwarding_vtbl,
additional_field_tys)];
[process_fwding_mthd(cx, sp, fm, ty_params, inner_obj_ty,
backwarding_vtbl,
additional_field_tys)];
}
}
}
@ -542,40 +533,36 @@ fn create_backwarding_vtbl(cx: @local_ctxt, sp: &span, inner_obj_ty: ty::t,
// object, and it needs to forward them to the corresponding slots on the
// outer object. All we know about either one are their types.
let llmethods: [ValueRef] = ~[];
let meths: [ty::method]= ~[];
let llmethods: [ValueRef] = [];
let meths: [ty::method] = [];
// Gather up methods on the inner object.
alt ty::struct(cx.ccx.tcx, inner_obj_ty) {
ty::ty_obj(inner_obj_methods) {
for m: ty::method in inner_obj_methods {
meths += ~[m];
}
}
_ {
// Shouldn't happen.
cx.ccx.sess.bug("create_backwarding_vtbl(): trying to extend a \
ty::ty_obj(inner_obj_methods) {
for m: ty::method in inner_obj_methods { meths += [m]; }
}
_ {
// Shouldn't happen.
cx.ccx.sess.bug("create_backwarding_vtbl(): trying to extend a \
non-object");
}
}
}
// Methods should have already been sorted, so no need to do so again.
for m: ty::method in meths {
// We pass outer_obj_ty to process_fwding_mthd() because it's the one
// being forwarded to.
llmethods += ~[process_bkwding_mthd(
cx, sp, @m, ~[], outer_obj_ty, ~[])];
llmethods += [process_bkwding_mthd(cx, sp, @m, [], outer_obj_ty, [])];
}
ret finish_vtbl(cx, llmethods, "backwarding_vtbl");
}
// finish_vtbl: Given a vector of vtable entries, create the table in
// read-only memory and return a pointer to it.
fn finish_vtbl(cx: @local_ctxt, llmethods: [ValueRef], name: str)
-> ValueRef {
fn finish_vtbl(cx: @local_ctxt, llmethods: [ValueRef], name: str) ->
ValueRef {
let vtbl = C_struct(llmethods);
let vtbl_name = mangle_internal_name_by_path(cx.ccx, cx.path + ~[name]);
let vtbl_name = mangle_internal_name_by_path(cx.ccx, cx.path + [name]);
let gvar =
llvm::LLVMAddGlobal(cx.ccx.llmod, val_ty(vtbl), str::buf(vtbl_name));
llvm::LLVMSetInitializer(gvar, vtbl);
@ -600,17 +587,17 @@ fn finish_vtbl(cx: @local_ctxt, llmethods: [ValueRef], name: str)
// the corresponding method on inner does, calls that method on outer, and
// returns the value returned from that call.
fn process_bkwding_mthd(cx: @local_ctxt, sp: &span, m: @ty::method,
ty_params: &[ast::ty_param], outer_obj_ty: ty::t,
_additional_field_tys: &[ty::t]) -> ValueRef {
ty_params: &[ast::ty_param], outer_obj_ty: ty::t,
_additional_field_tys: &[ty::t]) -> ValueRef {
// Create a local context that's aware of the name of the method we're
// creating.
let mcx: @local_ctxt = @{path: cx.path + ~["method", m.ident] with *cx};
let mcx: @local_ctxt = @{path: cx.path + ["method", m.ident] with *cx};
// Make up a name for the backwarding function.
let fn_name: str = "backwarding_fn";
let s: str = mangle_internal_name_by_path_and_seq(mcx.ccx, mcx.path,
fn_name);
let s: str =
mangle_internal_name_by_path_and_seq(mcx.ccx, mcx.path, fn_name);
// Get the backwarding function's type and declare it.
let llbackwarding_fn_ty: TypeRef =
@ -630,19 +617,17 @@ fn process_bkwding_mthd(cx: @local_ctxt, sp: &span, m: @ty::method,
// self-stack to get to the one we really want.
// Cast to self-stack's type.
let llenv = bcx.build.PointerCast(
fcx.llenv,
T_ptr(T_struct(~[cx.ccx.rust_object_type,
T_ptr(cx.ccx.rust_object_type)])));
let llself_obj_ptr = bcx.build.GEP(llenv,
~[C_int(0),
C_int(1)]);
let llenv =
bcx.build.PointerCast(
fcx.llenv,
T_ptr(T_struct([cx.ccx.rust_object_type,
T_ptr(cx.ccx.rust_object_type)])));
let llself_obj_ptr = bcx.build.GEP(llenv, [C_int(0), C_int(1)]);
llself_obj_ptr = bcx.build.Load(llself_obj_ptr);
// Cast it back to pointer-to-object-type, so LLVM won't complain.
llself_obj_ptr = bcx.build.PointerCast(llself_obj_ptr,
T_ptr(cx.ccx.rust_object_type));
llself_obj_ptr =
bcx.build.PointerCast(llself_obj_ptr, T_ptr(cx.ccx.rust_object_type));
// The 'llretptr' that will arrive in the backwarding function we're
// creating also needs to be the correct type. Cast it to the method's
@ -670,13 +655,12 @@ fn process_bkwding_mthd(cx: @local_ctxt, sp: &span, m: @ty::method,
let vtbl_type = T_ptr(T_array(T_ptr(T_nil()), ix + 1u));
let llouter_obj_vtbl =
bcx.build.GEP(llself_obj_ptr,
~[C_int(0), C_int(abi::obj_field_vtbl)]);
bcx.build.GEP(llself_obj_ptr, [C_int(0), C_int(abi::obj_field_vtbl)]);
llouter_obj_vtbl = bcx.build.Load(llouter_obj_vtbl);
llouter_obj_vtbl = bcx.build.PointerCast(llouter_obj_vtbl, vtbl_type);
let llouter_mthd =
bcx.build.GEP(llouter_obj_vtbl, ~[C_int(0), C_int(ix as int)]);
bcx.build.GEP(llouter_obj_vtbl, [C_int(0), C_int(ix as int)]);
// Set up the outer method to be called.
let outer_mthd_ty = ty::method_ty_to_fn_ty(cx.ccx.tcx, *m);
@ -692,7 +676,7 @@ fn process_bkwding_mthd(cx: @local_ctxt, sp: &span, m: @ty::method,
// Set up the three implicit arguments to the outer method we'll need to
// call.
let self_arg = llself_obj_ptr;
let llouter_mthd_args: [ValueRef] = ~[llretptr, fcx.lltaskptr, self_arg];
let llouter_mthd_args: [ValueRef] = [llretptr, fcx.lltaskptr, self_arg];
// Copy the explicit arguments that are being passed into the forwarding
// function (they're in fcx.llargs) to llouter_mthd_args.
@ -703,7 +687,7 @@ fn process_bkwding_mthd(cx: @local_ctxt, sp: &span, m: @ty::method,
if arg.mode == ty::mo_val {
passed_arg = load_if_immediate(bcx, passed_arg, arg.ty);
}
llouter_mthd_args += ~[passed_arg];
llouter_mthd_args += [passed_arg];
a += 1u;
}
@ -737,12 +721,12 @@ fn process_fwding_mthd(cx: @local_ctxt, sp: &span, m: @ty::method,
// Create a local context that's aware of the name of the method we're
// creating.
let mcx: @local_ctxt = @{path: cx.path + ~["method", m.ident] with *cx};
let mcx: @local_ctxt = @{path: cx.path + ["method", m.ident] with *cx};
// Make up a name for the forwarding function.
let fn_name: str = "forwarding_fn";
let s: str = mangle_internal_name_by_path_and_seq(mcx.ccx, mcx.path,
fn_name);
let s: str =
mangle_internal_name_by_path_and_seq(mcx.ccx, mcx.path, fn_name);
// Get the forwarding function's type and declare it.
let llforwarding_fn_ty: TypeRef =
@ -776,7 +760,7 @@ fn process_fwding_mthd(cx: @local_ctxt, sp: &span, m: @ty::method,
// First, grab the box out of the self_obj. It contains a refcount and a
// body.
let llself_obj_box =
bcx.build.GEP(llself_obj_ptr, ~[C_int(0), C_int(abi::obj_field_box)]);
bcx.build.GEP(llself_obj_ptr, [C_int(0), C_int(abi::obj_field_box)]);
llself_obj_box = bcx.build.Load(llself_obj_box);
let ccx = bcx_ccx(bcx);
@ -786,13 +770,13 @@ fn process_fwding_mthd(cx: @local_ctxt, sp: &span, m: @ty::method,
// Now, reach into the box and grab the body.
let llself_obj_body =
bcx.build.GEP(llself_obj_box,
~[C_int(0), C_int(abi::box_rc_field_body)]);
[C_int(0), C_int(abi::box_rc_field_body)]);
// Now, we need to figure out exactly what type the body is supposed to be
// cast to.
let body_ty: ty::t = create_object_body_type(cx.ccx.tcx,
additional_field_tys, ~[],
some(inner_obj_ty));
let body_ty: ty::t =
create_object_body_type(cx.ccx.tcx, additional_field_tys, [],
some(inner_obj_ty));
// And cast to that type.
llself_obj_body =
bcx.build.PointerCast(llself_obj_body,
@ -801,7 +785,7 @@ fn process_fwding_mthd(cx: @local_ctxt, sp: &span, m: @ty::method,
// Now, reach into the body and grab the inner_obj.
let llinner_obj =
GEP_tup_like(bcx, body_ty, llself_obj_body,
~[0, abi::obj_body_elt_inner_obj]);
[0, abi::obj_body_elt_inner_obj]);
bcx = llinner_obj.bcx;
// And, now, somewhere in inner_obj is a vtable with an entry for the
@ -810,12 +794,11 @@ fn process_fwding_mthd(cx: @local_ctxt, sp: &span, m: @ty::method,
// call it.
let llinner_obj_vtbl =
bcx.build.GEP(llinner_obj.val,
~[C_int(0), C_int(abi::obj_field_vtbl)]);
[C_int(0), C_int(abi::obj_field_vtbl)]);
llinner_obj_vtbl = bcx.build.Load(llinner_obj_vtbl);
let llinner_obj_body =
bcx.build.GEP(llinner_obj.val,
~[C_int(0), C_int(abi::obj_field_box)]);
bcx.build.GEP(llinner_obj.val, [C_int(0), C_int(abi::obj_field_box)]);
llinner_obj_body = bcx.build.Load(llinner_obj_body);
// Get the index of the method we want.
@ -836,7 +819,7 @@ fn process_fwding_mthd(cx: @local_ctxt, sp: &span, m: @ty::method,
llinner_obj_vtbl = bcx.build.PointerCast(llinner_obj_vtbl, vtbl_type);
let llorig_mthd =
bcx.build.GEP(llinner_obj_vtbl, ~[C_int(0), C_int(ix as int)]);
bcx.build.GEP(llinner_obj_vtbl, [C_int(0), C_int(ix as int)]);
// Set up the original method to be called.
let orig_mthd_ty = ty::method_ty_to_fn_ty(cx.ccx.tcx, *m);
@ -850,21 +833,21 @@ fn process_fwding_mthd(cx: @local_ctxt, sp: &span, m: @ty::method,
llorig_mthd = bcx.build.Load(llorig_mthd);
// Set up the self-stack.
let self_stack = alloca(bcx, T_struct(~[cx.ccx.rust_object_type,
T_ptr(cx.ccx.rust_object_type)]));
self_stack = populate_self_stack(bcx,
self_stack,
llself_obj_ptr,
backwarding_vtbl,
llinner_obj_body);
let self_stack =
alloca(bcx,
T_struct([cx.ccx.rust_object_type,
T_ptr(cx.ccx.rust_object_type)]));
self_stack =
populate_self_stack(bcx, self_stack, llself_obj_ptr, backwarding_vtbl,
llinner_obj_body);
// Cast self_stack back to pointer-to-object-type to make LLVM happy.
self_stack = bcx.build.PointerCast(self_stack,
T_ptr(cx.ccx.rust_object_type));
self_stack =
bcx.build.PointerCast(self_stack, T_ptr(cx.ccx.rust_object_type));
// Set up the three implicit arguments to the original method we'll need
// to call.
let llorig_mthd_args: [ValueRef] = ~[llretptr, fcx.lltaskptr, self_stack];
let llorig_mthd_args: [ValueRef] = [llretptr, fcx.lltaskptr, self_stack];
// Copy the explicit arguments that are being passed into the forwarding
// function (they're in fcx.llargs) to llorig_mthd_args.
@ -875,7 +858,7 @@ fn process_fwding_mthd(cx: @local_ctxt, sp: &span, m: @ty::method,
if arg.mode == ty::mo_val {
passed_arg = load_if_immediate(bcx, passed_arg, arg.ty);
}
llorig_mthd_args += ~[passed_arg];
llorig_mthd_args += [passed_arg];
a += 1u;
}
@ -901,12 +884,14 @@ fn create_object_body_type(tcx: &ty::ctxt, fields_ty: &[ty::t],
let body_ty: ty::t;
alt maybe_inner_obj_ty {
some(inner_obj_ty) {
body_ty = ty::mk_tup(tcx, ~[tydesc_ty, typarams_ty_tup,
fields_ty_tup, inner_obj_ty]);
body_ty =
ty::mk_tup(tcx,
[tydesc_ty, typarams_ty_tup, fields_ty_tup,
inner_obj_ty]);
}
none {
body_ty = ty::mk_tup(tcx, ~[tydesc_ty, typarams_ty_tup,
fields_ty_tup]);
body_ty =
ty::mk_tup(tcx, [tydesc_ty, typarams_ty_tup, fields_ty_tup]);
}
}
@ -927,7 +912,7 @@ fn process_normal_mthd(cx: @local_ctxt, m: @ast::method, self_ty: ty::t,
}
}
let mcx: @local_ctxt =
@{path: cx.path + ~["method", m.node.ident] with *cx};
@{path: cx.path + ["method", m.node.ident] with *cx};
let s: str = mangle_internal_name_by_path(mcx.ccx, mcx.path);
let llfn: ValueRef = decl_internal_fastcall_fn(cx.ccx.llmod, s, llfnty);
@ -949,32 +934,23 @@ fn process_normal_mthd(cx: @local_ctxt, m: @ast::method, self_ty: ty::t,
// via the llenv argument, and we want the forwarding function to call a
// method on a "self" that's inner-obj-shaped, but we also want to hold onto
// the outer obj for potential use later by backwarding functions.
fn populate_self_stack(bcx: @block_ctxt,
self_stack: ValueRef, outer_obj: ValueRef,
backwarding_vtbl: ValueRef, inner_obj_body: ValueRef)
-> ValueRef {
fn populate_self_stack(bcx: @block_ctxt, self_stack: ValueRef,
outer_obj: ValueRef, backwarding_vtbl: ValueRef,
inner_obj_body: ValueRef) -> ValueRef {
// Drop the outer obj into the second slot.
let self_pair_ptr = bcx.build.GEP(self_stack,
~[C_int(0),
C_int(1)]);
let self_pair_ptr = bcx.build.GEP(self_stack, [C_int(0), C_int(1)]);
bcx.build.Store(outer_obj, self_pair_ptr);
// Drop in the backwarding vtbl.
let wrapper_pair = bcx.build.GEP(self_stack,
~[C_int(0),
C_int(0)]);
let wrapper_vtbl_ptr = bcx.build.GEP(wrapper_pair,
~[C_int(0),
C_int(0)]);
let wrapper_pair = bcx.build.GEP(self_stack, [C_int(0), C_int(0)]);
let wrapper_vtbl_ptr = bcx.build.GEP(wrapper_pair, [C_int(0), C_int(0)]);
let backwarding_vtbl_cast =
bcx.build.PointerCast(backwarding_vtbl, T_ptr(T_empty_struct()));
bcx.build.Store(backwarding_vtbl_cast, wrapper_vtbl_ptr);
// Drop in the inner obj body.
let wrapper_body_ptr = bcx.build.GEP(wrapper_pair,
~[C_int(0),
C_int(1)]);
let wrapper_body_ptr = bcx.build.GEP(wrapper_pair, [C_int(0), C_int(1)]);
bcx.build.Store(inner_obj_body, wrapper_body_ptr);
ret self_stack;
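
To make the GEPs above easier to follow: populate_self_stack fills a two-slot structure whose first slot is a wrapper object (backwarding vtable plus inner-object body) and whose second slot points back at the outer object. A hypothetical modern-Rust sketch of that shape, with field names invented for clarity, mirrors T_struct([rust_object_type, T_ptr(rust_object_type)]) and the GEP indices used here:

    // Hypothetical field names; only the shape is taken from the code above.
    #[repr(C)]
    struct RustObject {
        vtbl: *const u8, // word 0: vtable pointer (abi::obj_field_vtbl)
        body: *mut u8,   // word 1: boxed body pointer (abi::obj_field_box)
    }

    #[repr(C)]
    struct SelfStack {
        wrapper: RustObject,    // slot 0: backwarding vtbl + inner-object body
        outer: *mut RustObject, // slot 1: the original outer object, kept for backwarding
    }

The forwarding function hands the wrapper slot to the inner method as its "self", while a backwarding function reaches through the second slot to recover the outer object, which is the GEP(llenv, [C_int(0), C_int(1)]) load in process_bkwding_mthd above.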
@ -23,21 +23,21 @@ import aux::crate_ctxt;
import aux::add_node;
import middle::tstate::ann::empty_ann;
fn collect_ids_expr(e: &@expr, rs: @mutable [node_id]) { *rs += ~[e.id]; }
fn collect_ids_expr(e: &@expr, rs: @mutable [node_id]) { *rs += [e.id]; }
fn collect_ids_block(b: &blk, rs: @mutable [node_id]) { *rs += ~[b.node.id]; }
fn collect_ids_block(b: &blk, rs: @mutable [node_id]) { *rs += [b.node.id]; }
fn collect_ids_stmt(s: &@stmt, rs: @mutable [node_id]) {
alt s.node {
stmt_decl(_, id) {
log "node_id " + int::str(id);
log_stmt(*s);
*rs += ~[id];
log_stmt(*s);;
*rs += [id];
}
stmt_expr(_, id) {
log "node_id " + int::str(id);
log_stmt(*s);
*rs += ~[id];
log_stmt(*s);;
*rs += [id];
}
_ { }
}
@ -67,7 +67,7 @@ fn init_vecs(ccx: &crate_ctxt, node_ids: &[node_id], len: uint) {
fn visit_fn(ccx: &crate_ctxt, num_constraints: uint, f: &_fn,
tps: &[ty_param], sp: &span, i: &fn_ident, id: node_id) {
let node_ids: @mutable [node_id] = @mutable ~[];
let node_ids: @mutable [node_id] = @mutable [];
node_ids_in_fn(f, tps, sp, i, id, node_ids);
let node_id_vec = *node_ids;
init_vecs(ccx, node_id_vec, num_constraints);
@ -122,7 +122,7 @@ fn tos(v: &[uint]) -> str {
for i: uint in v {
if i == 0u {
rslt += "0";
} else if (i == 1u) { rslt += "1"; } else { rslt += "?"; }
} else if i == 1u { rslt += "1"; } else { rslt += "?"; }
}
ret rslt;
}
@ -239,45 +239,45 @@ type norm_constraint = {bit_num: uint, c: sp_constr};
type constr_map = @std::map::hashmap<def_id, constraint>;
/* Contains stuff that has to be computed up front */
/* For easy access, the fn_info stores two special constraints for each
function. i_return holds if all control paths in this function terminate
in either a return expression, or an appropriate tail expression.
i_diverge holds if all control paths in this function terminate in a fail
or diverging call.
It might be tempting to use a single constraint C for both properties,
where C represents i_return and !C represents i_diverge. This is
inadvisable, because then the sense of the bit depends on context. If we're
inside a ! function, that reverses the sense of the bit: C would be
i_diverge and !C would be i_return. That's awkward, because we have to
pass extra context around to functions that shouldn't care.
Okay, suppose C represents i_return and !C represents i_diverge, regardless
of context. Consider this code:
if (foo) { ret; } else { fail; }
C is true in the consequent and false in the alternative. What's T `join`
F, then? ? doesn't work, because this code should definitely-return if the
context is a returning function (and be definitely-rejected if the context
is a ! function). F doesn't work, because then the code gets incorrectly
rejected if the context is a returning function. T would work, but it
doesn't make sense for T `join` F to be T (consider init constraints, for
example).;
So we need context. And so it seems clearer to just have separate
constraints.
*/
type fn_info =
{constrs: constr_map,
num_constraints: uint,
cf: controlflow,
/* For easy access, the fn_info stores two special constraints for each
function. i_return holds if all control paths in this function terminate
in either a return expression, or an appropriate tail expression.
i_diverge holds if all control paths in this function terminate in a fail
or diverging call.
It might be tempting to use a single constraint C for both properties,
where C represents i_return and !C represents i_diverge. This is
inadvisable, because then the sense of the bit depends on context. If we're
inside a ! function, that reverses the sense of the bit: C would be
i_diverge and !C would be i_return. That's awkward, because we have to
pass extra context around to functions that shouldn't care.
Okay, suppose C represents i_return and !C represents i_diverge, regardless
of context. Consider this code:
if (foo) { ret; } else { fail; }
C is true in the consequent and false in the alternative. What's T `join`
F, then? ? doesn't work, because this code should definitely-return if the
context is a returning function (and be definitely-rejected if the context
is a ! function). F doesn't work, because then the code gets incorrectly
rejected if the context is a returning function. T would work, but it
doesn't make sense for T `join` F to be T (consider init constraints, for
example).;
So we need context. And so it seems clearer to just have separate
constraints.
*/
i_return: tsconstr,
i_diverge: tsconstr,
/* list, accumulated during pre/postcondition
/* list, accumulated during pre/postcondition
computation, of all local variables that may be
used */
// Doesn't seem to work without the @ -- bug
// Doesn't seem to work without the @ -- bug
used_vars: @mutable [node_id]};
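
The comment block above argues for tracking i_return and i_diverge as two separate constraints rather than one bit whose meaning flips with context. A small self-contained illustration of the branch join it describes, written as simplified modern Rust (the real analysis works over three-valued bit vectors):

    // Toy model of joining the return/diverge facts across the branches of
    //   if foo { ret; } else { fail; }
    // Field names mirror i_return / i_diverge above; semantics are simplified.
    #[derive(Clone, Copy)]
    struct PathFacts {
        i_return: bool,  // every path ends in ret or an appropriate tail expression
        i_diverge: bool, // every path ends in fail or a diverging call
    }

    // A branch join keeps only what both arms guarantee.
    fn join(a: PathFacts, b: PathFacts) -> PathFacts {
        PathFacts {
            i_return: a.i_return && b.i_return,
            i_diverge: a.i_diverge && b.i_diverge,
        }
    }

    fn main() {
        let then_arm = PathFacts { i_return: true, i_diverge: false };
        // A fail-ing arm diverges, and (as noted later in this patch)
        // i_diverge implies i_return, so the arm satisfies both.
        let else_arm = PathFacts { i_return: true, i_diverge: true };
        let after_if = join(then_arm, else_arm);
        assert!(after_if.i_return);
        assert!(!after_if.i_diverge);
    }

Because each fact keeps a fixed meaning, the join needs no information about whether the enclosing function is a returning function or a ! function.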
fn tsconstr_to_def_id(t: &tsconstr) -> def_id {
@ -285,9 +285,10 @@ fn tsconstr_to_def_id(t: &tsconstr) -> def_id {
}
fn tsconstr_to_node_id(t: &tsconstr) -> node_id {
alt t { ninit(id, _) { id }
npred(_, id, _) {
fail "tsconstr_to_node_id called on pred constraint" } }
alt t {
ninit(id, _) { id }
npred(_, id, _) { fail "tsconstr_to_node_id called on pred constraint" }
}
}
/* mapping from node ID to typestate annotation */
@ -298,10 +299,7 @@ type node_ann_table = @mutable [mutable ts_ann];
type fn_info_map = @std::map::hashmap<node_id, fn_info>;
type fn_ctxt =
{enclosing: fn_info,
id: node_id,
name: ident,
ccx: crate_ctxt};
{enclosing: fn_info, id: node_id, name: ident, ccx: crate_ctxt};
type crate_ctxt = {tcx: ty::ctxt, node_anns: node_ann_table, fm: fn_info_map};
@ -315,12 +313,12 @@ fn add_node(ccx: &crate_ctxt, i: node_id, a: &ts_ann) {
if sz <= i as uint {
vec::grow_mut(*ccx.node_anns, (i as uint) - sz + 1u, empty_ann(0u));
}
ccx.node_anns.(i) = a;
ccx.node_anns[i] = a;
}
fn get_ts_ann(ccx: &crate_ctxt, i: node_id) -> option::t<ts_ann> {
if i as uint < vec::len(*ccx.node_anns) {
ret some::<ts_ann>(ccx.node_anns.(i));
ret some::<ts_ann>(ccx.node_anns[i]);
} else { ret none::<ts_ann>; }
}
@ -507,7 +505,7 @@ fn pure_exp(ccx: &crate_ctxt, id: node_id, p: &prestate) -> bool {
fn num_constraints(m: fn_info) -> uint { ret m.num_constraints; }
fn new_crate_ctxt(cx: ty::ctxt) -> crate_ctxt {
let na: [mutable ts_ann] = ~[mutable];
let na: [mutable ts_ann] = [mutable];
ret {tcx: cx, node_anns: @mutable na, fm: @new_int_hash::<fn_info>()};
}
@ -524,7 +522,7 @@ fn controlflow_expr(ccx: &crate_ctxt, e: @expr) -> controlflow {
fn constraints_expr(cx: &ty::ctxt, e: @expr) -> [@ty::constr] {
alt ty::struct(cx, ty::node_id_to_type(cx, e.id)) {
ty::ty_fn(_, _, _, _, cs) { ret cs; }
_ { ret ~[]; }
_ { ret []; }
}
}
@ -557,14 +555,14 @@ fn node_id_to_def_upvar(cx: &fn_ctxt, id: node_id) -> option::t<def> {
fn norm_a_constraint(id: def_id, c: &constraint) -> [norm_constraint] {
alt c {
cinit(n, sp, i) {
ret ~[{bit_num: n, c: respan(sp, ninit(id.node, i))}];
ret [{bit_num: n, c: respan(sp, ninit(id.node, i))}];
}
cpred(p, descs) {
let rslt: [norm_constraint] = ~[];
let rslt: [norm_constraint] = [];
for pd: pred_args in *descs {
rslt +=
~[{bit_num: pd.node.bit_num,
c: respan(pd.span, npred(p, id, pd.node.args))}];
[{bit_num: pd.node.bit_num,
c: respan(pd.span, npred(p, id, pd.node.args))}];
}
ret rslt;
}
@ -575,8 +573,8 @@ fn norm_a_constraint(id: def_id, c: &constraint) -> [norm_constraint] {
// Tried to write this as an iterator, but I got a
// non-exhaustive match in trans.
fn constraints(fcx: &fn_ctxt) -> [norm_constraint] {
let rslt: [norm_constraint] = ~[];
for each p: @{key: def_id, val: constraint} in
let rslt: [norm_constraint] = [];
for each p: @{key: def_id, val: constraint} in
fcx.enclosing.constrs.items() {
rslt += norm_a_constraint(p.key, p.val);
}
@ -614,12 +612,12 @@ fn expr_to_constr_arg(tcx: ty::ctxt, e: &@expr) -> @constr_arg_use {
alt tcx.def_map.find(e.id) {
some(def_local(l_id)) {
ret @respan(p.span,
carg_ident({ident: p.node.idents.(0),
carg_ident({ident: p.node.idents[0],
node: l_id.node}));
}
some(def_arg(a_id)) {
ret @respan(p.span,
carg_ident({ident: p.node.idents.(0),
carg_ident({ident: p.node.idents[0],
node: a_id.node}));
}
_ {
@ -638,17 +636,17 @@ fn expr_to_constr_arg(tcx: ty::ctxt, e: &@expr) -> @constr_arg_use {
}
}
fn exprs_to_constr_args(tcx: ty::ctxt, args: &[@expr]) ->
[@constr_arg_use] {
fn exprs_to_constr_args(tcx: ty::ctxt, args: &[@expr]) -> [@constr_arg_use] {
let f = bind expr_to_constr_arg(tcx, _);
let rslt: [@constr_arg_use] = ~[];
for e: @expr in args { rslt += ~[f(e)]; }
let rslt: [@constr_arg_use] = [];
for e: @expr in args { rslt += [f(e)]; }
rslt
}
fn expr_to_constr(tcx: ty::ctxt, e: &@expr) -> sp_constr {
alt e.node {
// FIXME change the first pattern to expr_path to test a
// typechecker bug
expr_call(operator, args) {
@ -681,9 +679,9 @@ fn pred_args_to_str(p: &pred_args) -> str {
fn substitute_constr_args(cx: &ty::ctxt, actuals: &[@expr], c: &@ty::constr)
-> tsconstr {
let rslt: [@constr_arg_use] = ~[];
let rslt: [@constr_arg_use] = [];
for a: @constr_arg in c.node.args {
rslt += ~[substitute_arg(cx, actuals, a)];
rslt += [substitute_arg(cx, actuals, a)];
}
ret npred(c.node.path, c.node.id, rslt);
}
@ -694,7 +692,7 @@ fn substitute_arg(cx: &ty::ctxt, actuals: &[@expr], a: @constr_arg) ->
alt a.node {
carg_ident(i) {
if i < num_actuals {
ret expr_to_constr_arg(cx, actuals.(i));
ret expr_to_constr_arg(cx, actuals[i]);
} else {
cx.sess.span_fatal(a.span, "Constraint argument out of bounds");
}
@ -704,11 +702,11 @@ fn substitute_arg(cx: &ty::ctxt, actuals: &[@expr], a: @constr_arg) ->
}
}
fn pred_args_matches(pattern: &[constr_arg_general_<inst>],
desc: &pred_args) -> bool {
fn pred_args_matches(pattern: &[constr_arg_general_<inst>], desc: &pred_args)
-> bool {
let i = 0u;
for c: @constr_arg_use in desc.node.args {
let n = pattern.(i);
let n = pattern[i];
alt c.node {
carg_ident(p) {
alt n {
@ -729,8 +727,8 @@ fn pred_args_matches(pattern: &[constr_arg_general_<inst>],
ret true;
}
fn find_instance_(pattern: &[constr_arg_general_<inst>],
descs: &[pred_args]) -> option::t<uint> {
fn find_instance_(pattern: &[constr_arg_general_<inst>], descs: &[pred_args])
-> option::t<uint> {
for d: pred_args in descs {
if pred_args_matches(pattern, d) { ret some(d.node.bit_num); }
}
@ -743,7 +741,7 @@ type subst = [{from: inst, to: inst}];
fn find_instances(_fcx: &fn_ctxt, subst: &subst, c: &constraint) ->
[{from: uint, to: uint}] {
let rslt = ~[];
let rslt = [];
if vec::len(subst) == 0u { ret rslt; }
alt c {
@ -754,7 +752,7 @@ fn find_instances(_fcx: &fn_ctxt, subst: &subst, c: &constraint) ->
let old_bit_num = d.node.bit_num;
let new = replace(subst, d);
alt find_instance_(new, *descs) {
some(d1) { rslt += ~[{from: old_bit_num, to: d1}]; }
some(d1) { rslt += [{from: old_bit_num, to: d1}]; }
_ { }
}
}
@ -791,18 +789,18 @@ fn insts_to_str(stuff: &[constr_arg_general_<inst>]) -> str {
}
fn replace(subst: subst, d: pred_args) -> [constr_arg_general_<inst>] {
let rslt: [constr_arg_general_<inst>] = ~[];
let rslt: [constr_arg_general_<inst>] = [];
for c: @constr_arg_use in d.node.args {
alt c.node {
carg_ident(p) {
alt find_in_subst(p.node, subst) {
some(new) { rslt += ~[carg_ident(new)]; }
_ { rslt += ~[c.node]; }
some(new) { rslt += [carg_ident(new)]; }
_ { rslt += [c.node]; }
}
}
_ {
// log_err "##";
rslt += ~[c.node];
rslt += [c.node];
}
}
}
@ -893,15 +891,15 @@ fn copy_in_poststate_two(fcx: &fn_ctxt, src_post: &poststate,
ty: oper_type) {
let subst;
alt ty {
oper_swap. { subst = ~[{from: dest, to: src}, {from: src, to: dest}]; }
oper_swap. { subst = [{from: dest, to: src}, {from: src, to: dest}]; }
oper_assign_op. {
ret; // Don't do any propagation
}
_ { subst = ~[{from: src, to: dest}]; }
_ { subst = [{from: src, to: dest}]; }
}
for each p: @{key: def_id, val: constraint} in
for each p: @{key: def_id, val: constraint} in
fcx.enclosing.constrs.items() {
// replace any occurrences of the src def_id with the
// dest def_id
@ -1000,7 +998,7 @@ fn constraint_mentions(_fcx: &fn_ctxt, c: &norm_constraint, v: node_id) ->
bool {
ret alt c.c.node {
ninit(id, _) { v == id }
npred(_, _, args) { args_mention(args, any_eq, ~[v]) }
npred(_, _, args) { args_mention(args, any_eq, [v]) }
};
}
@ -1008,11 +1006,11 @@ fn non_init_constraint_mentions(_fcx: &fn_ctxt, c: &norm_constraint,
v: &node_id) -> bool {
ret alt c.c.node {
ninit(_, _) { false }
npred(_, _, args) { args_mention(args, any_eq, ~[v]) }
npred(_, _, args) { args_mention(args, any_eq, [v]) }
};
}
fn args_mention<T>(args: &[@constr_arg_use], q: fn(&[T], node_id) -> bool ,
fn args_mention<T>(args: &[@constr_arg_use], q: fn(&[T], node_id) -> bool,
s: &[T]) -> bool {
/*
FIXME
@ -1038,7 +1036,7 @@ fn args_mention<T>(args: &[@constr_arg_use], q: fn(&[T], node_id) -> bool ,
ret false;
}
fn use_var(fcx: &fn_ctxt, v: &node_id) { *fcx.enclosing.used_vars += ~[v]; }
fn use_var(fcx: &fn_ctxt, v: &node_id) { *fcx.enclosing.used_vars += [v]; }
// FIXME: This should be a function in std::vec::.
fn vec_contains(v: &@mutable [node_id], i: &node_id) -> bool {
@ -1057,9 +1055,9 @@ fn do_nothing<T>(_f: &_fn, _tp: &[ty_param], _sp: &span, _i: &fn_ident,
fn args_to_constr_args(sp: &span, args: &[arg]) -> [@constr_arg_use] {
let actuals: [@constr_arg_use] = ~[];
let actuals: [@constr_arg_use] = [];
for a: arg in args {
actuals += ~[@respan(sp, carg_ident({ident: a.ident, node: a.id}))];
actuals += [@respan(sp, carg_ident({ident: a.ident, node: a.id}))];
}
ret actuals;
}
@ -1079,56 +1077,49 @@ fn ast_constr_to_sp_constr(tcx: &ty::ctxt, args: &[arg], c: &@constr) ->
type binding = {lhs: [inst], rhs: option::t<initializer>};
fn local_to_bindings(loc : &@local) -> binding {
let lhs = ~[];
fn local_to_bindings(loc: &@local) -> binding {
let lhs = [];
for each p: @pat in pat_bindings(loc.node.pat) {
let ident = alt p.node { pat_bind(name) { name } };
lhs += ~[{ident: ident, node: p.id}];
lhs += [{ident: ident, node: p.id}];
}
{lhs: lhs,
rhs: loc.node.init}
{lhs: lhs, rhs: loc.node.init}
}
fn locals_to_bindings(locals : &[@local]) -> [binding] {
fn locals_to_bindings(locals: &[@local]) -> [binding] {
vec::map(local_to_bindings, locals)
}
fn callee_modes(fcx: &fn_ctxt, callee: node_id) -> [ty::mode] {
let ty = ty::type_autoderef(fcx.ccx.tcx,
ty::node_id_to_type(fcx.ccx.tcx, callee));
let ty =
ty::type_autoderef(fcx.ccx.tcx,
ty::node_id_to_type(fcx.ccx.tcx, callee));
alt ty::struct(fcx.ccx.tcx, ty) {
ty::ty_fn(_, args, _, _, _)
| ty::ty_native_fn(_, args, _) {
let modes = ~[];
for arg: ty::arg in args {
modes += ~[arg.mode];
}
ty::ty_fn(_, args, _, _, _) | ty::ty_native_fn(_, args, _) {
let modes = [];
for arg: ty::arg in args { modes += [arg.mode]; }
ret modes;
}
_ {
// Shouldn't happen; callee should be ty_fn.
fcx.ccx.tcx.sess.bug("non-fn callee type in callee_modes: "
+ util::ppaux::ty_to_str(fcx.ccx.tcx, ty));
fcx.ccx.tcx.sess.bug("non-fn callee type in callee_modes: " +
util::ppaux::ty_to_str(fcx.ccx.tcx, ty));
}
}
}
}
fn callee_arg_init_ops(fcx: &fn_ctxt, callee: node_id) -> [init_op] {
fn mode_to_op(m: &ty::mode) -> init_op {
alt m {
ty::mo_move. { init_move }
_ { init_assign }
}
alt m { ty::mo_move. { init_move } _ { init_assign } }
}
vec::map(mode_to_op, callee_modes(fcx, callee))
}
fn anon_bindings(ops: &[init_op], es : &[@expr]) -> [binding] {
let bindings: [binding] = ~[];
fn anon_bindings(ops: &[init_op], es: &[@expr]) -> [binding] {
let bindings: [binding] = [];
let i = 0;
for op: init_op in ops {
bindings += ~[{lhs: ~[],
rhs: some({op:op, expr: es.(i)})}];
bindings += [{lhs: [], rhs: some({op: op, expr: es[i]})}];
i += 1;
}
ret bindings;
@ -79,7 +79,7 @@ fn seq_tritv(p: &postcond, q: &postcond) {
fn seq_postconds(fcx: &fn_ctxt, ps: &[postcond]) -> postcond {
let sz = vec::len(ps);
if sz >= 1u {
let prev = tritv_clone(ps.(0));
let prev = tritv_clone(ps[0]);
for p: postcond in vec::slice(ps, 1u, sz) { seq_tritv(prev, p); }
ret prev;
} else { ret ann::empty_poststate(num_constraints(fcx.enclosing)); }
@ -97,7 +97,7 @@ fn seq_preconds(fcx: &fn_ctxt, pps: &[pre_and_post]) -> precond {
first: &pre_and_post) -> precond {
let sz: uint = vec::len(pps);
if sz >= 1u {
let second = pps.(0);
let second = pps[0];
assert (pps_len(second) == num_constraints(fcx.enclosing));
let second_pre = clone(second.precondition);
difference(second_pre, first.postcondition);
@ -113,7 +113,7 @@ fn seq_preconds(fcx: &fn_ctxt, pps: &[pre_and_post]) -> precond {
if sz >= 1u {
let first = pps.(0);
let first = pps[0];
assert (pps_len(first) == num_vars);
ret seq_preconds_go(fcx, vec::slice(pps, 1u, sz), first);
} else { ret true_precond(num_vars); }
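
seq_postconds and seq_preconds above fold a sequence of pre/post pairs together: each later precondition is first reduced by what the earlier step already established (the difference call), and postconditions are overlaid in order. A simplified model of that composition with plain sets in modern Rust; this is an assumed reading of the fold, and the compiler's version uses three-valued bit vectors that can also retract facts:

    use std::collections::HashSet;

    struct PrePost {
        pre: HashSet<u32>,  // constraint bits required before this step
        post: HashSet<u32>, // constraint bits established after it
    }

    // Sequencing step a followed by step b: b's requirements are reduced by what a
    // establishes (mirroring the difference() call above); the remainder is assumed
    // to join a's own precondition. Postconditions simply accumulate here.
    fn seq(a: &PrePost, b: &PrePost) -> PrePost {
        let still_needed: HashSet<u32> = b.pre.difference(&a.post).cloned().collect();
        PrePost {
            pre: a.pre.union(&still_needed).cloned().collect(),
            post: a.post.union(&b.post).cloned().collect(),
        }
    }

    fn main() {
        // Step 1 establishes constraint 0; step 2 needs constraints 0 and 1.
        let a = PrePost { pre: HashSet::new(), post: [0].into_iter().collect() };
        let b = PrePost { pre: [0, 1].into_iter().collect(), post: [2].into_iter().collect() };
        let ab = seq(&a, &b);
        assert!(ab.pre.contains(&1) && !ab.pre.contains(&0)); // only bit 1 is still required up front
        assert!(ab.post.contains(&0) && ab.post.contains(&2));
    }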
@ -150,7 +150,7 @@ fn relax_precond_stmt(s: &@stmt, cx: &relax_ctxt,
visit::visit_stmt(s, cx, vt);
}
type relax_ctxt = {fcx:fn_ctxt, i:node_id};
type relax_ctxt = {fcx: fn_ctxt, i: node_id};
fn relax_precond_block_inner(b: &blk, cx: &relax_ctxt,
vt: &visit::vt<relax_ctxt>) {
@ -158,16 +158,16 @@ fn relax_precond_block_inner(b: &blk, cx: &relax_ctxt,
visit::visit_block(b, cx, vt);
}
fn relax_precond_block(fcx: &fn_ctxt, i: node_id, b:&blk) {
fn relax_precond_block(fcx: &fn_ctxt, i: node_id, b: &blk) {
let cx = {fcx: fcx, i: i};
let visitor = visit::default_visitor::<relax_ctxt>();
visitor =
@{visit_block: relax_precond_block_inner,
visit_expr: relax_precond_expr,
visit_stmt: relax_precond_stmt,
visit_item: (fn (_i: &@item, _cx: &relax_ctxt,
_vt: &visit::vt<relax_ctxt>) {})
with *visitor};
visit_item:
fn (_i: &@item, _cx: &relax_ctxt, _vt: &visit::vt<relax_ctxt>) {
} with *visitor};
let v1 = visit::mk_vt(visitor);
v1.visit_block(b, cx, v1);
}
@ -217,7 +217,7 @@ fn clear_in_poststate_expr(fcx: &fn_ctxt, e: &@expr, t: &poststate) {
}
}
fn kill_poststate_(fcx : &fn_ctxt, c : &tsconstr, post : &poststate) -> bool {
fn kill_poststate_(fcx: &fn_ctxt, c: &tsconstr, post: &poststate) -> bool {
log "kill_poststate_";
ret clear_in_poststate_(bit_num(fcx, c), post);
}
@ -241,8 +241,8 @@ fn clear_in_prestate_ident(fcx: &fn_ctxt, id: &node_id, ident: &ident,
ret kill_prestate(fcx, parent, ninit(id, ident));
}
fn clear_in_poststate_ident_(fcx : &fn_ctxt, id : &node_id, ident : &ident,
post : &poststate) -> bool {
fn clear_in_poststate_ident_(fcx: &fn_ctxt, id: &node_id, ident: &ident,
post: &poststate) -> bool {
ret kill_poststate_(fcx, ninit(id, ident), post);
}
@ -52,8 +52,8 @@ fn check_unused_vars(fcx: &fn_ctxt) {
for c: norm_constraint in constraints(fcx) {
alt c.c.node {
ninit(id, v) {
if !vec_contains(fcx.enclosing.used_vars, id) &&
v.(0) != ('_' as u8) {
if !vec_contains(fcx.enclosing.used_vars, id) && v[0] != '_' as u8
{
fcx.ccx.tcx.sess.span_warn(c.c.span, "unused variable " + v);
}
}
@ -143,17 +143,18 @@ fn check_states_against_conditions(fcx: &fn_ctxt, f: &_fn,
/* Check that the return value is initialized */
let post = aux::block_poststate(fcx.ccx, f.body);
if f.proto == ast::proto_fn &&
!promises(fcx, post, fcx.enclosing.i_return) &&
!type_is_nil(fcx.ccx.tcx, ret_ty_of_fn(fcx.ccx.tcx, id)) &&
f.decl.cf == return {
!promises(fcx, post, fcx.enclosing.i_return) &&
!type_is_nil(fcx.ccx.tcx, ret_ty_of_fn(fcx.ccx.tcx, id)) &&
f.decl.cf == return {
fcx.ccx.tcx.sess.span_err(f.body.span,
"In function " + fcx.name +
", not all control paths \
"In function " + fcx.name +
", not all control paths \
return a value");
fcx.ccx.tcx.sess.span_fatal(f.decl.output.span,
"see declared return type of '" +
ty_to_str(f.decl.output) + "'");
} else if (f.decl.cf == noreturn) {
} else if f.decl.cf == noreturn {
// check that this really always fails
// Note that it's ok for i_diverge and i_return to both be true.
// In fact, i_diverge implies i_return. (But not vice versa!)
@ -16,16 +16,17 @@ type ctxt = {cs: @mutable [sp_constr], tcx: ty::ctxt};
fn collect_local(loc: &@local, cx: &ctxt, v: &visit::vt<ctxt>) {
for each p: @pat in pat_bindings(loc.node.pat) {
let ident = alt p.node { pat_bind(id) { id } };
log "collect_local: pushing " + ident;
*cx.cs += ~[respan(loc.span, ninit(p.id, ident))];
log "collect_local: pushing " + ident;;
*cx.cs += [respan(loc.span, ninit(p.id, ident))];
}
visit::visit_local(loc, cx, v);
}
fn collect_pred(e: &@expr, cx: &ctxt, v: &visit::vt<ctxt>) {
alt e.node {
expr_check(_, ch) { *cx.cs += ~[expr_to_constr(cx.tcx, ch)]; }
expr_if_check(ex, _, _) { *cx.cs += ~[expr_to_constr(cx.tcx, ex)]; }
expr_check(_, ch) { *cx.cs += [expr_to_constr(cx.tcx, ch)]; }
expr_if_check(ex, _, _) { *cx.cs += [expr_to_constr(cx.tcx, ex)]; }
// If it's a call, generate appropriate instances of the
// call's constraints.
@ -34,7 +35,7 @@ fn collect_pred(e: &@expr, cx: &ctxt, v: &visit::vt<ctxt>) {
let ct: sp_constr =
respan(c.span,
aux::substitute_constr_args(cx.tcx, operands, c));
*cx.cs += ~[ct];
*cx.cs += [ct];
}
}
_ { }
@ -45,7 +46,7 @@ fn collect_pred(e: &@expr, cx: &ctxt, v: &visit::vt<ctxt>) {
fn find_locals(tcx: &ty::ctxt, f: &_fn, tps: &[ty_param], sp: &span,
i: &fn_ident, id: node_id) -> ctxt {
let cx: ctxt = {cs: @mutable ~[], tcx: tcx};
let cx: ctxt = {cs: @mutable [], tcx: tcx};
let visitor = visit::default_visitor::<ctxt>();
visitor =
@ -70,13 +71,13 @@ fn add_constraint(tcx: &ty::ctxt, c: sp_constr, next: uint, tbl: constr_map)
" as a variable and a pred");
}
cpred(_, pds) {
*pds += ~[respan(c.span, {args: args, bit_num: next})];
*pds += [respan(c.span, {args: args, bit_num: next})];
}
}
}
none. {
let rslt: @mutable [pred_args] =
@mutable ~[respan(c.span, {args: args, bit_num: next})];
@mutable [respan(c.span, {args: args, bit_num: next})];
tbl.insert(d_id, cpred(p, rslt));
}
}
@ -111,18 +112,18 @@ fn mk_fn_info(ccx: &crate_ctxt, f: &_fn, tp: &[ty_param], f_sp: &span,
/* Need to add constraints for args too, b/c they
can be deinitialized */
for a:arg in f.decl.inputs {
next = add_constraint(cx.tcx, respan(f_sp,
ninit(a.id, a.ident)),
next, res_map);
for a: arg in f.decl.inputs {
next =
add_constraint(cx.tcx, respan(f_sp, ninit(a.id, a.ident)), next,
res_map);
}
/* add the special i_diverge and i_return constraints
(see the type definition for auxiliary::fn_info for an explanation) */
// use the name of the function for the "return" constraint
next = add_constraint(cx.tcx, respan(f_sp, ninit(id, name)), next,
res_map);
next =
add_constraint(cx.tcx, respan(f_sp, ninit(id, name)), next, res_map);
// and the name of the function, with a '!' appended to it, for the
// "diverges" constraint
let diverges_id = ccx.tcx.sess.next_node_id();
@ -130,13 +131,14 @@ fn mk_fn_info(ccx: &crate_ctxt, f: &_fn, tp: &[ty_param], f_sp: &span,
add_constraint(cx.tcx, respan(f_sp, ninit(diverges_id, diverges_name)),
next, res_map);
let v: @mutable [node_id] = @mutable ~[];
let v: @mutable [node_id] = @mutable [];
let rslt =
{constrs: res_map,
num_constraints:
// add 2 to account for the i_return and i_diverge constraints
vec::len(*cx.cs) + vec::len(f.decl.constraints)
+ vec::len(f.decl.inputs) + 2u,
num_constraints:
vec::len(*cx.cs) + vec::len(f.decl.constraints) +
vec::len(f.decl.inputs) + 2u,
cf: f.decl.cf,
i_return: ninit(id, name),
i_diverge: ninit(diverges_id, diverges_name),
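Worked example (illustrative numbers, not from the patch): the reformatted num_constraints field above is still vec::len(*cx.cs) + vec::len(f.decl.constraints) + vec::len(f.decl.inputs) + 2u, so a function with three collected local constraints, one declared constraint, and two arguments carries 3 + 1 + 2 + 2 = 8 constraint bits, the final two being the i_return and i_diverge entries set up just before.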

View file

@ -59,13 +59,14 @@ fn find_pre_post_item(ccx: &crate_ctxt, i: &item) {
alt i.node {
item_const(_, e) {
// make a fake fcx
let v: @mutable [node_id] = @mutable ~[];
let v: @mutable [node_id] = @mutable [];
let fake_fcx =
{enclosing:
{
// just bogus
enclosing:
{constrs: @new_def_hash::<constraint>(),
num_constraints: 0u,
cf: return,
// just bogus
i_return: ninit(0, ""),
i_diverge: ninit(0, ""),
used_vars: v},
@ -104,7 +105,7 @@ fn find_pre_post_item(ccx: &crate_ctxt, i: &item) {
fn find_pre_post_exprs(fcx: &fn_ctxt, args: &[@expr], id: node_id) {
if vec::len::<@expr>(args) > 0u {
log "find_pre_post_exprs: oper =";
log_expr(*args.(0));
log_expr(*args[0]);
}
fn do_one(fcx: fn_ctxt, e: &@expr) { find_pre_post_expr(fcx, e); }
for e: @expr in args { do_one(fcx, e); }
@ -132,8 +133,7 @@ fn find_pre_post_loop(fcx: &fn_ctxt, l: &@local, index: &@expr, body: &blk,
}
let loop_precond =
seq_preconds(fcx,
~[expr_pp(fcx.ccx, index), block_pp(fcx.ccx, body)]);
seq_preconds(fcx, [expr_pp(fcx.ccx, index), block_pp(fcx.ccx, body)]);
let loop_postcond =
intersect_states(expr_postcond(fcx.ccx, index),
block_postcond(fcx.ccx, body));
@ -159,8 +159,8 @@ fn join_then_else(fcx: &fn_ctxt, antec: &@expr, conseq: &blk,
let precond_res =
seq_preconds(fcx,
~[expr_pp(fcx.ccx, antec),
block_pp(fcx.ccx, conseq)]);
[expr_pp(fcx.ccx, antec),
block_pp(fcx.ccx, conseq)]);
set_pre_and_post(fcx.ccx, id, precond_res,
expr_poststate(fcx.ccx, antec));
}
@ -173,12 +173,11 @@ fn join_then_else(fcx: &fn_ctxt, antec: &@expr, conseq: &blk,
find_pre_post_expr(fcx, altern);
let precond_false_case =
seq_preconds(fcx,
~[expr_pp(fcx.ccx, antec),
expr_pp(fcx.ccx, altern)]);
[expr_pp(fcx.ccx, antec), expr_pp(fcx.ccx, altern)]);
let postcond_false_case =
seq_postconds(fcx,
~[expr_postcond(fcx.ccx, antec),
expr_postcond(fcx.ccx, altern)]);
[expr_postcond(fcx.ccx, antec),
expr_postcond(fcx.ccx, altern)]);
/* Be sure to set the bit for the check condition here,
so that it's *not* set in the alternative. */
@ -191,15 +190,15 @@ fn join_then_else(fcx: &fn_ctxt, antec: &@expr, conseq: &blk,
}
let precond_true_case =
seq_preconds(fcx,
~[expr_pp(fcx.ccx, antec),
block_pp(fcx.ccx, conseq)]);
[expr_pp(fcx.ccx, antec),
block_pp(fcx.ccx, conseq)]);
let postcond_true_case =
seq_postconds(fcx,
~[expr_postcond(fcx.ccx, antec),
block_postcond(fcx.ccx, conseq)]);
[expr_postcond(fcx.ccx, antec),
block_postcond(fcx.ccx, conseq)]);
let precond_res =
seq_postconds(fcx, ~[precond_true_case, precond_false_case]);
seq_postconds(fcx, [precond_true_case, precond_false_case]);
let postcond_res =
intersect_states(postcond_true_case, postcond_false_case);
set_pre_and_post(fcx.ccx, id, precond_res, postcond_res);
@ -220,10 +219,10 @@ fn gen_if_local(fcx: &fn_ctxt, lhs: @expr, rhs: @expr, larger_id: node_id,
gen(fcx, larger_id,
ninit(d_id.node, path_to_ident(fcx.ccx.tcx, pth)));
}
_ { find_pre_post_exprs(fcx, ~[lhs, rhs], larger_id); }
_ { find_pre_post_exprs(fcx, [lhs, rhs], larger_id); }
}
}
_ { find_pre_post_exprs(fcx, ~[lhs, rhs], larger_id); }
_ { find_pre_post_exprs(fcx, [lhs, rhs], larger_id); }
}
}
@ -303,13 +302,12 @@ fn handle_var(fcx: &fn_ctxt, rslt: &pre_and_post, id: node_id, name: ident) {
}
}
fn forget_args_moved_in(fcx: &fn_ctxt, parent: &@expr,
modes: &[ty::mode],
fn forget_args_moved_in(fcx: &fn_ctxt, parent: &@expr, modes: &[ty::mode],
operands: &[@expr]) {
let i = 0u;
for mode: ty::mode in modes {
if mode == ty::mo_move {
forget_in_postcond(fcx, parent.id, operands.(i).id);
forget_in_postcond(fcx, parent.id, operands[i].id);
}
i += 1u;
}
@ -324,8 +322,10 @@ fn find_pre_post_expr(fcx: &fn_ctxt, e: @expr) {
alt e.node {
expr_call(operator, operands) {
let /* copy */args = operands;
args += ~[operator];
/* copy */
let args = operands;
args += [operator];
find_pre_post_exprs(fcx, args, e.id);
/* see if the call has any constraints on its type */
@ -377,12 +377,10 @@ fn find_pre_post_expr(fcx: &fn_ctxt, e: @expr) {
}
expr_rec(fields, maybe_base) {
let es = field_exprs(fields);
alt maybe_base { none. {/* no-op */ } some(b) { es += ~[b]; } }
alt maybe_base { none. {/* no-op */ } some(b) { es += [b]; } }
find_pre_post_exprs(fcx, es, e.id);
}
expr_tup(elts) {
find_pre_post_exprs(fcx, elts, e.id);
}
expr_tup(elts) { find_pre_post_exprs(fcx, elts, e.id); }
expr_copy(a) {
find_pre_post_expr(fcx, a);
copy_pre_post(fcx.ccx, e.id, a);
@ -394,7 +392,7 @@ fn find_pre_post_expr(fcx: &fn_ctxt, e: @expr) {
/* Different from expr_assign in that the lhs *must*
already be initialized */
find_pre_post_exprs(fcx, ~[lhs, rhs], e.id);
find_pre_post_exprs(fcx, [lhs, rhs], e.id);
forget_in_postcond_still_init(fcx, e.id, lhs.id);
}
expr_lit(_) { clear_pp(expr_pp(fcx.ccx, e)); }
@ -426,12 +424,11 @@ fn find_pre_post_expr(fcx: &fn_ctxt, e: @expr) {
find_pre_post_expr(fcx, l);
find_pre_post_expr(fcx, r);
let overall_pre =
seq_preconds(fcx,
~[expr_pp(fcx.ccx, l), expr_pp(fcx.ccx, r)]);
seq_preconds(fcx, [expr_pp(fcx.ccx, l), expr_pp(fcx.ccx, r)]);
set_precondition(node_id_to_ts_ann(fcx.ccx, e.id), overall_pre);
set_postcondition(node_id_to_ts_ann(fcx.ccx, e.id),
expr_postcond(fcx.ccx, l));
} else { find_pre_post_exprs(fcx, ~[l, r], e.id); }
} else { find_pre_post_exprs(fcx, [l, r], e.id); }
}
expr_unary(_, operand) {
find_pre_post_expr(fcx, operand);
@ -446,8 +443,8 @@ fn find_pre_post_expr(fcx: &fn_ctxt, e: @expr) {
find_pre_post_block(fcx, body);
set_pre_and_post(fcx.ccx, e.id,
seq_preconds(fcx,
~[expr_pp(fcx.ccx, test),
block_pp(fcx.ccx, body)]),
[expr_pp(fcx.ccx, test),
block_pp(fcx.ccx, body)]),
intersect_states(expr_postcond(fcx.ccx, test),
block_postcond(fcx.ccx, body)));
}
@ -456,8 +453,8 @@ fn find_pre_post_expr(fcx: &fn_ctxt, e: @expr) {
find_pre_post_expr(fcx, test);
let loop_postcond =
seq_postconds(fcx,
~[block_postcond(fcx.ccx, body),
expr_postcond(fcx.ccx, test)]);
[block_postcond(fcx.ccx, body),
expr_postcond(fcx.ccx, test)]);
/* conservative approximation: if the body
could break or cont, the test may never be executed */
@ -466,8 +463,8 @@ fn find_pre_post_expr(fcx: &fn_ctxt, e: @expr) {
}
set_pre_and_post(fcx.ccx, e.id,
seq_preconds(fcx,
~[block_pp(fcx.ccx, body),
expr_pp(fcx.ccx, test)]),
[block_pp(fcx.ccx, body),
expr_pp(fcx.ccx, test)]),
loop_postcond);
}
expr_for(d, index, body) {
@ -476,18 +473,18 @@ fn find_pre_post_expr(fcx: &fn_ctxt, e: @expr) {
expr_for_each(d, index, body) {
find_pre_post_loop(fcx, d, index, body, e.id);
}
expr_index(val, sub) { find_pre_post_exprs(fcx, ~[val, sub], e.id); }
expr_index(val, sub) { find_pre_post_exprs(fcx, [val, sub], e.id); }
expr_alt(ex, alts) {
find_pre_post_expr(fcx, ex);
fn do_an_alt(fcx: &fn_ctxt, an_alt: &arm) -> pre_and_post {
find_pre_post_block(fcx, an_alt.body);
ret block_pp(fcx.ccx, an_alt.body);
}
let alt_pps = ~[];
for a: arm in alts { alt_pps += ~[do_an_alt(fcx, a)]; }
let alt_pps = [];
for a: arm in alts { alt_pps += [do_an_alt(fcx, a)]; }
fn combine_pp(antec: pre_and_post, fcx: fn_ctxt, pp: &pre_and_post,
next: &pre_and_post) -> pre_and_post {
union(pp.precondition, seq_preconds(fcx, ~[antec, next]));
union(pp.precondition, seq_preconds(fcx, [antec, next]));
intersect(pp.postcondition, next.postcondition);
ret pp;
}
@ -536,22 +533,20 @@ fn find_pre_post_expr(fcx: &fn_ctxt, e: @expr) {
}
expr_bind(operator, maybe_args) {
let args = ~[];
let args = [];
let cmodes = callee_modes(fcx, operator.id);
let modes = ~[];
let modes = [];
let i = 0;
for expr_opt: option::t<@expr> in maybe_args {
alt expr_opt {
none. {/* no-op */ }
some(expr) {
modes += ~[cmodes.(i)];
args += ~[expr];
}
some(expr) { modes += [cmodes[i]]; args += [expr]; }
}
i += 1;
}
args += ~[operator]; /* ??? order of eval? */
args += [operator]; /* ??? order of eval? */
forget_args_moved_in(fcx, e, modes, args);
find_pre_post_exprs(fcx, args, e.id);
}
@ -567,7 +562,7 @@ fn find_pre_post_expr(fcx: &fn_ctxt, e: @expr) {
none. { clear_pp(expr_pp(fcx.ccx, e)); }
}
}
expr_uniq(sub) { find_pre_post_exprs(fcx, ~[sub], e.id); }
expr_uniq(sub) { find_pre_post_exprs(fcx, [sub], e.id); }
}
}
@ -603,23 +598,25 @@ fn find_pre_post_stmt(fcx: &fn_ctxt, s: &stmt) {
/* FIXME: This won't be necessary when typestate
works well enough for pat_bindings to return a
refinement-typed thing. */
let ident = alt pat.node {
pat_bind(n) { n }
_ {
fcx.ccx.tcx.sess.span_bug(pat.span,
"Impossible LHS");
}
};
let ident =
alt pat.node {
pat_bind(n) { n }
_ {
fcx.ccx.tcx.sess.span_bug(pat.span,
"Impossible LHS");
}
};
alt p {
some(p) {
copy_in_postcond(fcx, id,
{ident: ident, node: pat.id},
{ident:
path_to_ident(fcx.ccx.tcx, p),
path_to_ident(fcx.ccx.tcx,
p),
node: an_init.expr.id},
op_to_oper_ty(an_init.op));
}
none. {}
none. { }
}
gen(fcx, id, ninit(pat.id, ident));
}
@ -645,10 +642,9 @@ fn find_pre_post_stmt(fcx: &fn_ctxt, s: &stmt) {
fcx.ccx.tcx.sess.span_bug(pat.span,
"Impossible LHS");
}
};
}
}
copy_pre_post_(fcx.ccx, id,
prev_pp.precondition,
copy_pre_post_(fcx.ccx, id, prev_pp.precondition,
prev_pp.postcondition);
}
none. {
@ -694,33 +690,33 @@ fn find_pre_post_block(fcx: &fn_ctxt, b: blk) {
let nv = num_constraints(fcx.enclosing);
fn do_one_(fcx: fn_ctxt, s: &@stmt) {
find_pre_post_stmt(fcx, *s);
/*
log_err "pre_post for stmt:";
log_stmt_err(*s);
log_err "is:";
log_pp_err(stmt_pp(fcx.ccx, *s));
*/
/*
log_err "pre_post for stmt:";
log_stmt_err(*s);
log_err "is:";
log_pp_err(stmt_pp(fcx.ccx, *s));
*/
}
for s: @stmt in b.node.stmts { do_one_(fcx, s); }
fn do_inner_(fcx: fn_ctxt, e: &@expr) { find_pre_post_expr(fcx, e); }
let do_inner = bind do_inner_(fcx, _);
option::map::<@expr, ()>(do_inner, b.node.expr);
let pps: [pre_and_post] = ~[];
for s: @stmt in b.node.stmts { pps += ~[stmt_pp(fcx.ccx, *s)]; }
let pps: [pre_and_post] = [];
for s: @stmt in b.node.stmts { pps += [stmt_pp(fcx.ccx, *s)]; }
alt b.node.expr {
none. {/* no-op */ }
some(e) { pps += ~[expr_pp(fcx.ccx, e)]; }
some(e) { pps += [expr_pp(fcx.ccx, e)]; }
}
let block_precond = seq_preconds(fcx, pps);
let postconds = ~[];
for pp: pre_and_post in pps { postconds += ~[get_post(pp)]; }
let postconds = [];
for pp: pre_and_post in pps { postconds += [get_post(pp)]; }
/* A block may be empty, so this next line ensures that the postconds
vector is non-empty. */
postconds += ~[block_precond];
postconds += [block_precond];
let block_postcond = empty_poststate(nv);
/* conservative approximation */

View file

@ -30,9 +30,9 @@ import util::common::log_stmt_err;
import util::common::log_expr_err;
fn handle_move_or_copy(fcx: &fn_ctxt, post: &poststate, rhs_path: &path,
rhs_id: &node_id, instlhs: &inst, init_op: &init_op) {
rhs_id: &node_id, instlhs: &inst, init_op: &init_op) {
let rhs_d = local_node_id_to_def_id(fcx, rhs_id);
alt (rhs_d) {
alt rhs_d {
some(rhsid) {
// RHS is a local var
let instrhs =
@ -46,19 +46,19 @@ fn handle_move_or_copy(fcx: &fn_ctxt, post: &poststate, rhs_path: &path,
}
}
fn seq_states(fcx: &fn_ctxt, pres: &prestate, bindings: &[binding])
-> {changed: bool, post: poststate} {
fn seq_states(fcx: &fn_ctxt, pres: &prestate, bindings: &[binding]) ->
{changed: bool, post: poststate} {
let changed = false;
let post = tritv_clone(pres);
for b:binding in bindings {
alt (b.rhs) {
for b: binding in bindings {
alt b.rhs {
some(an_init) {
// an expression, with or without a destination
changed |= find_pre_post_state_expr(fcx, post, an_init.expr)
|| changed;
changed |=
find_pre_post_state_expr(fcx, post, an_init.expr) || changed;
post = tritv_clone(expr_poststate(fcx.ccx, an_init.expr));
for i: inst in b.lhs {
alt (an_init.expr.node) {
alt an_init.expr.node {
expr_path(p) {
handle_move_or_copy(fcx, post, p, an_init.expr.id, i,
an_init.op);
@ -67,6 +67,7 @@ fn seq_states(fcx: &fn_ctxt, pres: &prestate, bindings: &[binding])
}
set_in_poststate_ident(fcx, i.node, i.ident, post);
}
// Forget the RHS if we just moved it.
if an_init.op == init_move {
forget_in_poststate(fcx, post, an_init.expr.id);
@ -168,18 +169,17 @@ fn find_pre_post_state_call(fcx: &fn_ctxt, pres: &prestate, a: &@expr,
// FIXME: This could be a typestate constraint
if vec::len(bs) != vec::len(ops) {
fcx.ccx.tcx.sess.span_bug(a.span,
#fmt("mismatched arg lengths: \
#fmt["mismatched arg lengths: \
%u exprs vs. %u ops",
vec::len(bs), vec::len(ops)));
vec::len(bs), vec::len(ops)]);
}
ret find_pre_post_state_exprs(fcx, expr_poststate(fcx.ccx, a), id,
ops, bs, cf)
|| changed;
ret find_pre_post_state_exprs(fcx, expr_poststate(fcx.ccx, a), id, ops,
bs, cf) || changed;
}
fn find_pre_post_state_exprs(fcx: &fn_ctxt, pres: &prestate, id: node_id,
ops: &[init_op], es: &[@expr],
cf: controlflow) -> bool {
ops: &[init_op], es: &[@expr], cf: controlflow)
-> bool {
let rs = seq_states(fcx, pres, anon_bindings(ops, es));
let changed = rs.changed | set_prestate_ann(fcx.ccx, id, pres);
/* if this is a failing call, it sets everything as initialized */
@ -315,8 +315,8 @@ fn find_pre_post_state_expr(fcx: &fn_ctxt, pres: &prestate, e: @expr) ->
expr_vec(elts, _) {
ret find_pre_post_state_exprs(fcx, pres, e.id,
vec::init_elt(init_assign,
vec::len(elts)),
elts, return);
vec::len(elts)), elts,
return);
}
expr_call(operator, operands) {
ret find_pre_post_state_call(fcx, pres, operator, e.id,
@ -325,17 +325,14 @@ fn find_pre_post_state_expr(fcx: &fn_ctxt, pres: &prestate, e: @expr) ->
controlflow_expr(fcx.ccx, operator));
}
expr_bind(operator, maybe_args) {
let args = ~[];
let args = [];
let callee_ops = callee_arg_init_ops(fcx, operator.id);
let ops = ~[];
let ops = [];
let i = 0;
for a_opt: option::t<@expr> in maybe_args {
alt a_opt {
none. {/* no-op */ }
some(a) {
ops += ~[callee_ops.(i)];
args += ~[a];
}
some(a) { ops += [callee_ops[i]]; args += [a]; }
}
i += 1;
}
@ -366,9 +363,8 @@ fn find_pre_post_state_expr(fcx: &fn_ctxt, pres: &prestate, e: @expr) ->
let changed =
find_pre_post_state_exprs(fcx, pres, e.id,
vec::init_elt(init_assign,
vec::len(fields)),
field_exprs(fields),
return);
vec::len(fields)),
field_exprs(fields), return);
alt maybe_base {
none. {/* do nothing */ }
some(base) {
@ -383,12 +379,10 @@ fn find_pre_post_state_expr(fcx: &fn_ctxt, pres: &prestate, e: @expr) ->
expr_tup(elts) {
ret find_pre_post_state_exprs(fcx, pres, e.id,
vec::init_elt(init_assign,
vec::len(elts)),
elts, return);
}
expr_copy(a) {
ret find_pre_post_state_sub(fcx, pres, a, e.id, none);
vec::len(elts)), elts,
return);
}
expr_copy(a) { ret find_pre_post_state_sub(fcx, pres, a, e.id, none); }
expr_move(lhs, rhs) {
ret find_pre_post_state_two(fcx, pres, lhs, rhs, e.id, oper_move);
}
@ -405,14 +399,14 @@ fn find_pre_post_state_expr(fcx: &fn_ctxt, pres: &prestate, e: @expr) ->
/* normally, everything is true if execution continues after
a ret expression (since execution never continues locally
after a ret expression */
// FIXME should factor this out
// FIXME should factor this out
let post = false_postcond(num_constrs);
// except for the "diverges" bit...
kill_poststate_(fcx, fcx.enclosing.i_diverge, post);
set_poststate_ann(fcx.ccx, e.id, post);
alt maybe_ret_val {
alt maybe_ret_val {
none. {/* do nothing */ }
some(ret_val) {
changed |= find_pre_post_state_expr(fcx, pres, ret_val);
@ -565,14 +559,12 @@ fn find_pre_post_state_expr(fcx: &fn_ctxt, pres: &prestate, e: @expr) ->
woo! */
let post = false_postcond(num_constrs);
alt fcx.enclosing.cf {
noreturn. {
kill_poststate_(fcx, ninit(fcx.id, fcx.name), post);
}
_ {}
noreturn. { kill_poststate_(fcx, ninit(fcx.id, fcx.name), post); }
_ { }
}
ret set_prestate_ann(fcx.ccx, e.id, pres) |
set_poststate_ann(fcx.ccx, e.id, post)
| alt maybe_fail_val {
set_poststate_ann(fcx.ccx, e.id, post) |
alt maybe_fail_val {
none. { false }
some(fail_val) {
find_pre_post_state_expr(fcx, pres, fail_val)
@ -603,74 +595,73 @@ fn find_pre_post_state_expr(fcx: &fn_ctxt, pres: &prestate, e: @expr) ->
}
}
fn find_pre_post_state_stmt(fcx: &fn_ctxt, pres: &prestate, s: @stmt)
-> bool {
fn find_pre_post_state_stmt(fcx: &fn_ctxt, pres: &prestate, s: @stmt) ->
bool {
let stmt_ann = stmt_to_ann(fcx.ccx, *s);
/*
log_err ("[" + fcx.name + "]");
log_err "*At beginning: stmt = ";
log_stmt_err(*s);
log_err "*prestate = ";
log_tritv_err(fcx, stmt_ann.states.prestate);
log_err "*poststate =";
log_tritv_err(fcx, stmt_ann.states.poststate);
log_err "pres = ";
log_tritv_err(fcx, pres);
*/
/*
log_err ("[" + fcx.name + "]");
log_err "*At beginning: stmt = ";
log_stmt_err(*s);
log_err "*prestate = ";
log_tritv_err(fcx, stmt_ann.states.prestate);
log_err "*poststate =";
log_tritv_err(fcx, stmt_ann.states.poststate);
log_err "pres = ";
log_tritv_err(fcx, pres);
*/
alt (s.node) {
alt s.node {
stmt_decl(adecl, id) {
alt (adecl.node) {
alt adecl.node {
decl_local(alocals) {
set_prestate(stmt_ann, pres);
let c_and_p = seq_states(fcx, pres,
locals_to_bindings(alocals));
let c_and_p = seq_states(fcx, pres, locals_to_bindings(alocals));
/* important to do this in one step to ensure
termination (don't want to set changed to true
for intermediate changes) */
let changed = (set_poststate(stmt_ann, c_and_p.post)
| c_and_p.changed);
let changed =
set_poststate(stmt_ann, c_and_p.post) | c_and_p.changed;
/*
log_err "Summary: stmt = ";
log_stmt_err(*s);
log_err "prestate = ";
log_tritv_err(fcx, stmt_ann.states.prestate);
log_err "poststate =";
log_tritv_err(fcx, stmt_ann.states.poststate);
log_err "changed =";
log_err changed;
*/
/*
log_err "Summary: stmt = ";
log_stmt_err(*s);
log_err "prestate = ";
log_tritv_err(fcx, stmt_ann.states.prestate);
log_err "poststate =";
log_tritv_err(fcx, stmt_ann.states.poststate);
log_err "changed =";
log_err changed;
*/
ret changed;
}
decl_item(an_item) {
ret set_prestate(stmt_ann, pres) |
set_poststate(stmt_ann, pres);
ret set_prestate(stmt_ann, pres) | set_poststate(stmt_ann, pres);
/* the outer visitor will recurse into the item */
}
}
}
stmt_expr(ex, _) {
let changed = find_pre_post_state_expr(fcx, pres, ex) |
let changed =
find_pre_post_state_expr(fcx, pres, ex) |
set_prestate(stmt_ann, expr_prestate(fcx.ccx, ex)) |
set_poststate(stmt_ann, expr_poststate(fcx.ccx, ex));
/*
log_err "Finally:";
log_stmt_err(*s);
log_err("prestate = ");
// log_err(bitv::to_str(stmt_ann.states.prestate));
log_tritv_err(fcx, stmt_ann.states.prestate);
log_err("poststate =");
// log_err(bitv::to_str(stmt_ann.states.poststate));
log_tritv_err(fcx, stmt_ann.states.poststate);
log_err("changed =");
*/
/*
log_err "Finally:";
log_stmt_err(*s);
log_err("prestate = ");
log_err(bitv::to_str(stmt_ann.states.prestate));
log_tritv_err(fcx, stmt_ann.states.prestate);
log_err("poststate =");
log_err(bitv::to_str(stmt_ann.states.poststate));
log_tritv_err(fcx, stmt_ann.states.poststate);
log_err("changed =");
*/
ret changed;
ret changed;
}
_ { ret false; }
}
@ -706,18 +697,18 @@ fn find_pre_post_state_block(fcx: &fn_ctxt, pres0: &prestate, b: &blk) ->
set_poststate_ann(fcx.ccx, b.node.id, post);
/*
log_err "For block:";
log_block_err(b);
log_err "poststate = ";
log_states_err(block_states(fcx.ccx, b));
log_err "pres0:";
log_tritv_err(fcx, pres0);
log_err "post:";
log_tritv_err(fcx, post);
log_err "changed = ";
log_err changed;
*/
/*
log_err "For block:";
log_block_err(b);
log_err "poststate = ";
log_states_err(block_states(fcx.ccx, b));
log_err "pres0:";
log_tritv_err(fcx, pres0);
log_err "post:";
log_tritv_err(fcx, post);
log_err "changed = ";
log_err changed;
*/
ret changed;
}
@ -731,7 +722,7 @@ fn find_pre_post_state_fn(fcx: &fn_ctxt, f: &_fn) -> bool {
// Arguments start out initialized
let block_pre = block_prestate(fcx.ccx, f.body);
for a:arg in f.decl.inputs {
for a: arg in f.decl.inputs {
set_in_prestate_constr(fcx, ninit(a.id, a.ident), block_pre);
}

View file

@ -54,9 +54,10 @@ fn trit_minus(a: trit, b: trit) -> trit {
alt b {
ttrue. { dont_care }
tfalse. { ttrue }
/* internally contradictory, but
I guess it'll get flagged? */
dont_care. {
/* internally contradictory, but
I guess it'll get flagged? */
dont_care. {
ttrue
}
}
@ -64,7 +65,8 @@ fn trit_minus(a: trit, b: trit) -> trit {
tfalse. {
alt b {
ttrue. { tfalse }
/* see above comment */
/* see above comment */
_ {
tfalse
}
@ -80,7 +82,8 @@ fn trit_or(a: trit, b: trit) -> trit {
tfalse. {
alt b {
ttrue. { dont_care }
/* FIXME: ?????? */
/* FIXME: ?????? */
_ {
tfalse
}
@ -97,15 +100,18 @@ fn trit_or(a: trit, b: trit) -> trit {
fn trit_and(a: trit, b: trit) -> trit {
alt a {
dont_care. { b }
// also seems wrong for case b = ttrue
// also seems wrong for case b = ttrue
ttrue. {
alt b {
dont_care. { ttrue }
// ??? Seems wrong
// ??? Seems wrong
ttrue. {
ttrue
}
// false wins, since if something is uninit
// on one path, we care
// (Rationale: it's always safe to assume that
@ -117,6 +123,7 @@ fn trit_and(a: trit, b: trit) -> trit {
}
}
// Rationale: if it's uninit on one path,
// we can consider it as uninit on all paths
tfalse. {
@ -180,7 +187,7 @@ fn tritv_get(v: &t, i: uint) -> trit {
let b1 = bitv::get(v.uncertain, i);
let b2 = bitv::get(v.val, i);
assert (!(b1 && b2));
if b1 { dont_care } else if (b2) { ttrue } else { tfalse }
if b1 { dont_care } else if b2 { ttrue } else { tfalse }
}
fn tritv_set(i: uint, v: &t, t: trit) -> bool {
@ -241,14 +248,14 @@ fn tritv_doesntcare(v: &t) -> bool {
fn to_vec(v: &t) -> [uint] {
let i: uint = 0u;
let rslt: [uint] = ~[];
let rslt: [uint] = [];
while i < v.nbits {
rslt +=
~[alt tritv_get(v, i) {
dont_care. { 2u }
ttrue. { 1u }
tfalse. { 0u }
}];
[alt tritv_get(v, i) {
dont_care. { 2u }
ttrue. { 1u }
tfalse. { 0u }
}];
i += 1u;
}
ret rslt;
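The tritv hunks above change only the vector and indexing syntax; the underlying encoding — one "uncertain" bit plus one "val" bit per constraint, with to_vec reporting dont_care/ttrue/tfalse as 2/1/0 — is untouched. A minimal sketch of that read path, written in present-day Rust purely for illustration (the patch itself is in the pre-1.0 dialect):

    // Mirrors the tritv_get/to_vec logic shown above; illustrative only.
    enum Trit { DontCare, True, False }

    fn tritv_get(uncertain: &[bool], val: &[bool], i: usize) -> Trit {
        // The two bits may never both be set for the same index.
        assert!(!(uncertain[i] && val[i]));
        if uncertain[i] { Trit::DontCare } else if val[i] { Trit::True } else { Trit::False }
    }

    fn to_vec(uncertain: &[bool], val: &[bool]) -> Vec<usize> {
        // Same numbering as the original: dont_care = 2, ttrue = 1, tfalse = 0.
        (0..uncertain.len())
            .map(|i| match tritv_get(uncertain, val, i) {
                Trit::DontCare => 2,
                Trit::True => 1,
                Trit::False => 0,
            })
            .collect()
    }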

File diff suppressed because it is too large

File diff suppressed because it is too large

View file

@ -49,6 +49,7 @@ tag def {
def_arg(def_id);
def_local(def_id);
def_variant(def_id, /* tag */def_id);
/* variant */
def_ty(def_id);
def_ty_arg(uint, kind);
@ -56,6 +57,7 @@ tag def {
def_use(def_id);
def_native_ty(def_id);
def_native_fn(def_id);
/* A "fake" def for upvars. This never appears in the def_map, but
* freevars::def_lookup will return it for a def that is an upvar.
* It contains the actual def. */
@ -77,7 +79,7 @@ fn def_id_of_def(d: def) -> def_id {
def_local(id) { ret id; }
def_variant(_, id) { ret id; }
def_ty(id) { ret id; }
def_ty_arg(_,_) { fail; }
def_ty_arg(_, _) { fail; }
def_binding(id) { ret id; }
def_use(id) { ret id; }
def_native_ty(id) { ret id; }
@ -100,10 +102,7 @@ type crate_ =
tag crate_directive_ {
cdir_src_mod(ident, option::t<filename>, [attribute]);
cdir_dir_mod(ident,
option::t<filename>,
[@crate_directive],
[attribute]);
cdir_dir_mod(ident, option::t<filename>, [@crate_directive], [attribute]);
cdir_view_item(@view_item);
cdir_syntax(path);
cdir_auth(path, _auth);
@ -155,30 +154,22 @@ iter pat_bindings(pat: &@pat) -> @pat {
alt pat.node {
pat_bind(_) { put pat; }
pat_tag(_, sub) {
for p in sub {
for each b in pat_bindings(p) { put b; }
}
for p in sub { for each b in pat_bindings(p) { put b; } }
}
pat_rec(fields, _) {
for f in fields {
for each b in pat_bindings(f.pat) { put b; }
}
for f in fields { for each b in pat_bindings(f.pat) { put b; } }
}
pat_tup(elts) {
for elt in elts {
for each b in pat_bindings(elt) { put b; }
}
for elt in elts { for each b in pat_bindings(elt) { put b; } }
}
pat_box(sub) {
for each b in pat_bindings(sub) { put b; }
}
pat_wild. | pat_lit(_) {}
pat_box(sub) { for each b in pat_bindings(sub) { put b; } }
pat_wild. | pat_lit(_) { }
}
}
fn pat_binding_ids(pat: &@pat) -> [node_id] {
let found = ~[];
for each b in pat_bindings(pat) { found += ~[b.id]; }
let found = [];
for each b in pat_bindings(pat) { found += [b.id]; }
ret found;
}
@ -258,6 +249,7 @@ type stmt = spanned<stmt_>;
tag stmt_ {
stmt_decl(@decl, node_id);
stmt_expr(@expr, node_id);
// These only exist in crate-level blocks.
stmt_crate_directive(@crate_directive);
}
@ -266,10 +258,8 @@ tag init_op { init_assign; init_move; }
type initializer = {op: init_op, expr: @expr};
type local_ = {ty: @ty,
pat: @pat, // FIXME: should really be a refinement on pat
init: option::t<initializer>,
id: node_id};
type local_ = // FIXME: should really be a refinement on pat
{ty: @ty, pat: @pat, init: option::t<initializer>, id: node_id};
type local = spanned<local_>;
@ -312,6 +302,7 @@ tag expr_ {
expr_alt(@expr, [arm]);
expr_fn(_fn);
expr_block(blk);
/*
* FIXME: many of these @exprs should be constrained with
* is_lval once we have constrained types working.
@ -331,10 +322,13 @@ tag expr_ {
expr_put(option::t<@expr>);
expr_be(@expr);
expr_log(int, @expr);
/* just an assert, no significance to typestate */
expr_assert(@expr);
/* preds that typestate is aware of */
expr_check(check_mode, @expr);
/* FIXME Would be nice if expr_check desugared
to expr_if_check. */
expr_if_check(@expr, blk, option::t<@expr>);
@ -428,10 +422,11 @@ tag ty_ {
ty_bot; /* return type of ! functions and type of
ret/fail/break/cont. there is no syntax
for this type. */
/* bot represents the value of functions that don't return a value
locally to their context. in contrast, things like log that do
return, but don't return a meaningful value, have result type nil. */
ty_bool;
ty_bool;
ty_int;
ty_uint;
ty_float;
@ -453,6 +448,7 @@ tag ty_ {
ty_type;
ty_constr(@ty, [@ty_constr]);
ty_mac(mac);
// ty_infer means the type should be inferred instead of it having been
// specified. This should only appear at the "top level" of a type and not
// nested in one.
@ -514,6 +510,7 @@ tag purity {
tag controlflow {
noreturn; // functions with return type _|_ that always
// raise an error or exit (i.e. never return to the caller)
return; // everything else
}
@ -531,9 +528,9 @@ type _obj = {fields: [obj_field], methods: [@method]};
type anon_obj =
// New fields and methods, if they exist.
// inner_obj: the original object being extended, if it exists.
{fields: option::t<[anon_obj_field]>,
methods: [@method],
// inner_obj: the original object being extended, if it exists.
inner_obj: option::t<@expr>};
type _mod = {view_items: [@view_item], items: [@item]};
@ -601,10 +598,14 @@ tag item_ {
item_ty(@ty, [ty_param]);
item_tag([variant], [ty_param]);
item_obj(_obj, [ty_param], /* constructor id */node_id);
item_res(_fn, /* dtor */
node_id, /* dtor id */
item_res(_fn,
/* dtor */
node_id,
/* dtor id */
[ty_param],
node_id /* ctor id */);
/* ctor id */
node_id);
}
type native_item =
@ -637,11 +638,7 @@ fn is_exported(i: ident, m: _mod) -> bool {
for vi: @ast::view_item in m.view_items {
alt vi.node {
ast::view_item_export(ids, _) {
for id in ids {
if str::eq(i, id) {
ret true;
}
}
for id in ids { if str::eq(i, id) { ret true; } }
count += 1u;
}
_ {/* fall through */ }
@ -670,7 +667,7 @@ fn eq_ty(a: &@ty, b: &@ty) -> bool { ret std::box::ptr_eq(a, b); }
fn hash_ty(t: &@ty) -> uint { ret t.span.lo << 16u + t.span.hi; }
fn block_from_expr(e: @expr) -> blk {
let blk_ = {stmts: ~[], expr: option::some::<@expr>(e), id: e.id};
let blk_ = {stmts: [], expr: option::some::<@expr>(e), id: e.id};
ret {node: blk_, span: e.span};
}

View file

@ -23,36 +23,36 @@ type codemap = @{mutable files: [filemap]};
type loc = {filename: filename, line: uint, col: uint};
fn new_codemap() -> codemap { ret @{mutable files: ~[]}; }
fn new_codemap() -> codemap { ret @{mutable files: []}; }
fn new_filemap(filename: filename, start_pos_ch: uint, start_pos_byte: uint)
-> filemap {
ret @{name: filename,
start_pos: {ch: start_pos_ch, byte: start_pos_byte},
mutable lines: ~[{ch: start_pos_ch, byte: start_pos_byte}]};
mutable lines: [{ch: start_pos_ch, byte: start_pos_byte}]};
}
fn next_line(file: filemap, chpos: uint, byte_pos: uint) {
file.lines += ~[{ch: chpos, byte: byte_pos}];
file.lines += [{ch: chpos, byte: byte_pos}];
}
type lookup_fn = fn(file_pos) -> uint ;
type lookup_fn = fn(file_pos) -> uint;
fn lookup_pos(map: codemap, pos: uint, lookup: lookup_fn) -> loc {
let a = 0u;
let b = vec::len(map.files);
while b - a > 1u {
let m = (a + b) / 2u;
if lookup(map.files.(m).start_pos) > pos { b = m; } else { a = m; }
if lookup(map.files[m].start_pos) > pos { b = m; } else { a = m; }
}
let f = map.files.(a);
let f = map.files[a];
a = 0u;
b = vec::len(f.lines);
while b - a > 1u {
let m = (a + b) / 2u;
if lookup(f.lines.(m)) > pos { b = m; } else { a = m; }
if lookup(f.lines[m]) > pos { b = m; } else { a = m; }
}
ret {filename: f.name, line: a + 1u, col: pos - lookup(f.lines.(a))};
ret {filename: f.name, line: a + 1u, col: pos - lookup(f.lines[a])};
}
fn lookup_char_pos(map: codemap, pos: uint) -> loc {
@ -65,7 +65,8 @@ fn lookup_byte_pos(map: codemap, pos: uint) -> loc {
ret lookup_pos(map, pos, lookup);
}
tag opt_span { //hack (as opposed to option::t), to make `span` compile
tag opt_span {
//hack (as opposed to option::t), to make `span` compile
os_none;
os_some(@span);
}
@ -75,13 +76,14 @@ fn span_to_str(sp: &span, cm: &codemap) -> str {
let cur = sp;
let res = "";
let prev_file = none;
while(true) {
while true {
let lo = lookup_char_pos(cm, cur.lo);
let hi = lookup_char_pos(cm, cur.hi);
res += #fmt("%s:%u:%u:%u:%u",
if some(lo.filename) == prev_file { "-" }
else { lo.filename },
lo.line, lo.col, hi.line, hi.col);
res +=
#fmt["%s:%u:%u:%u:%u",
if some(lo.filename) == prev_file {
"-"
} else { lo.filename }, lo.line, lo.col, hi.line, hi.col];
alt cur.expanded_from {
os_none. { break; }
os_some(new_sp) {
@ -110,11 +112,9 @@ fn emit_diagnostic(sp: &option::t<span>, msg: &str, kind: &str, color: u8,
if term::color_supported() {
term::fg(io::stdout().get_buf_writer(), color);
}
io::stdout().write_str(#fmt("%s:", kind));
if term::color_supported() {
term::reset(io::stdout().get_buf_writer());
}
io::stdout().write_str(#fmt(" %s\n", msg));
io::stdout().write_str(#fmt["%s:", kind]);
if term::color_supported() { term::reset(io::stdout().get_buf_writer()); }
io::stdout().write_str(#fmt[" %s\n", msg]);
maybe_highlight_lines(sp, cm, maybe_lines);
}
@ -143,14 +143,14 @@ fn maybe_highlight_lines(sp: &option::t<span>, cm: &codemap,
}
// Print the offending lines
for line: uint in display_lines {
io::stdout().write_str(#fmt("%s:%u ", fm.name, line + 1u));
io::stdout().write_str(#fmt["%s:%u ", fm.name, line + 1u]);
let s = get_line(fm, line as int, file);
if !str::ends_with(s, "\n") { s += "\n"; }
io::stdout().write_str(s);
}
if elided {
let last_line = display_lines.(vec::len(display_lines) - 1u);
let s = #fmt("%s:%u ", fm.name, last_line + 1u);
let last_line = display_lines[vec::len(display_lines) - 1u];
let s = #fmt["%s:%u ", fm.name, last_line + 1u];
let indent = str::char_len(s);
let out = "";
while indent > 0u { out += " "; indent -= 1u; }
@ -163,7 +163,7 @@ fn maybe_highlight_lines(sp: &option::t<span>, cm: &codemap,
if vec::len(lines.lines) == 1u {
let lo = lookup_char_pos(cm, option::get(sp).lo);
let digits = 0u;
let num = lines.lines.(0) / 10u;
let num = lines.lines[0] / 10u;
// how many digits must be indent past?
while num > 0u { num /= 10u; digits += 1u; }
@ -202,18 +202,18 @@ type file_lines = {name: str, lines: [uint]};
fn span_to_lines(sp: span, cm: codemap::codemap) -> @file_lines {
let lo = lookup_char_pos(cm, sp.lo);
let hi = lookup_char_pos(cm, sp.hi);
let lines = ~[];
let lines = [];
for each i: uint in uint::range(lo.line - 1u, hi.line as uint) {
lines += ~[i];
lines += [i];
}
ret @{name: lo.filename, lines: lines};
}
fn get_line(fm: filemap, line: int, file: &str) -> str {
let begin: uint = fm.lines.(line).byte - fm.start_pos.byte;
let begin: uint = fm.lines[line].byte - fm.start_pos.byte;
let end: uint;
if line as uint < vec::len(fm.lines) - 1u {
end = fm.lines.(line + 1).byte - fm.start_pos.byte;
end = fm.lines[line + 1].byte - fm.start_pos.byte;
} else {
// If we're not done parsing the file, we're at the limit of what's
// parsed. If we just slice the rest of the string, we'll print out
@ -232,7 +232,6 @@ fn get_filemap(cm: codemap, filename: str) -> filemap {
// (or expected function, found _|_)
fail; // ("asking for " + filename + " which we don't know about");
}
//
// Local Variables:
// mode: rust
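The codemap changes in this file are likewise syntax-only; lookup_pos (in the hunk near the top of this file) still performs two binary searches, first over the files' start positions and then over the chosen file's line table, returning a 1-based line and a column relative to that line's start. A rough equivalent in present-day Rust, illustrative only and not part of the patch:

    // Sketch of the lookup_pos strategy shown above; names and types are invented.
    struct FileMap { name: String, start_pos: usize, lines: Vec<usize> }

    fn lookup_pos(files: &[FileMap], pos: usize) -> (String, usize, usize) {
        // Find the last file whose start_pos is <= pos.
        let (mut a, mut b) = (0usize, files.len());
        while b - a > 1 {
            let m = (a + b) / 2;
            if files[m].start_pos > pos { b = m } else { a = m }
        }
        let f = &files[a];
        // Same search over that file's recorded line-start offsets.
        let (mut a, mut b) = (0usize, f.lines.len());
        while b - a > 1 {
            let m = (a + b) / 2;
            if f.lines[m] > pos { b = m } else { a = m }
        }
        // 1-based line number, column measured from the line start.
        (f.name.clone(), a + 1, pos - f.lines[a])
    }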

View file

@ -7,10 +7,10 @@ import std::map::new_str_hash;
import codemap;
type syntax_expander =
fn(&ext_ctxt, span, @ast::expr, option::t<str>) -> @ast::expr ;
fn(&ext_ctxt, span, @ast::expr, option::t<str>) -> @ast::expr;
type macro_def = {ident: str, ext: syntax_extension};
type macro_definer =
fn(&ext_ctxt, span, @ast::expr, option::t<str>) -> macro_def ;
fn(&ext_ctxt, span, @ast::expr, option::t<str>) -> macro_def;
tag syntax_extension {
normal(syntax_expander);
@ -34,20 +34,22 @@ fn syntax_expander_table() -> hashmap<str, syntax_extension> {
ret syntax_expanders;
}
obj ext_ctxt(sess: @session, crate_file_name_hack: str,
obj ext_ctxt(sess: @session,
crate_file_name_hack: str,
mutable backtrace: codemap::opt_span) {
fn crate_file_name() -> str { ret crate_file_name_hack; }
fn session() -> @session { ret sess; }
fn print_backtrace() {
}
fn print_backtrace() { }
fn backtrace() -> codemap::opt_span { ret backtrace; }
fn bt_push(sp: span) {
backtrace = codemap::os_some(@{lo: sp.lo, hi: sp.hi,
expanded_from: backtrace});
backtrace =
codemap::os_some(@{lo: sp.lo,
hi: sp.hi,
expanded_from: backtrace});
}
fn bt_pop() {
alt backtrace {
@ -67,21 +69,16 @@ obj ext_ctxt(sess: @session, crate_file_name_hack: str,
self.print_backtrace();
sess.span_err(sp, msg);
}
fn span_unimpl(sp:span, msg: str) -> ! {
fn span_unimpl(sp: span, msg: str) -> ! {
self.print_backtrace();
sess.span_unimpl(sp, msg);
}
fn span_bug(sp:span, msg: str) -> ! {
fn span_bug(sp: span, msg: str) -> ! {
self.print_backtrace();
sess.span_bug(sp, msg);
}
fn bug(msg: str) -> ! {
self.print_backtrace();
sess.bug(msg);
}
fn next_id() -> ast::node_id {
ret sess.next_node_id();
}
fn bug(msg: str) -> ! { self.print_backtrace(); sess.bug(msg); }
fn next_id() -> ast::node_id { ret sess.next_node_id(); }
}
@ -93,7 +90,7 @@ fn mk_ctxt(sess: &session) -> ext_ctxt {
// the extensions the file name of the crate being compiled so they can
// use it to guess whether paths should be prepended with "std::". This is
// super-ugly and needs a better solution.
let crate_file_name_hack = sess.get_codemap().files.(0).name;
let crate_file_name_hack = sess.get_codemap().files[0].name;
ret ext_ctxt(@sess, crate_file_name_hack, codemap::os_none);
}
@ -115,7 +112,7 @@ fn expr_to_ident(cx: &ext_ctxt, expr: @ast::expr, error: str) -> ast::ident {
ast::expr_path(p) {
if vec::len(p.node.types) > 0u || vec::len(p.node.idents) != 1u {
cx.span_fatal(expr.span, error);
} else { ret p.node.idents.(0); }
} else { ret p.node.idents[0]; }
}
_ { cx.span_fatal(expr.span, error); }
}

View file

@ -4,18 +4,21 @@ import syntax::ast;
fn expand_syntax_ext(cx: &ext_ctxt, sp: codemap::span, arg: @ast::expr,
_body: option::t<str>) -> @ast::expr {
let args: [@ast::expr] = alt arg.node {
ast::expr_vec(elts, _) { elts }
_ { cx.span_fatal(sp, "#concat_idents requires a vector argument .") }
};
let args: [@ast::expr] =
alt arg.node {
ast::expr_vec(elts, _) { elts }
_ {
cx.span_fatal(sp, "#concat_idents requires a vector argument .")
}
};
let res: ast::ident = "";
for e: @ast::expr in args {
res += expr_to_ident(cx, e, "expected an ident");
}
ret @{id: cx.next_id(),
node: ast::expr_path( {
node: {global: false, idents: ~[res], types: ~[]},
span: sp}),
node:
ast::expr_path({node: {global: false, idents: [res], types: []},
span: sp}),
span: sp};
}

View file

@ -12,17 +12,20 @@ export expand_syntax_ext;
fn expand_syntax_ext(cx: &ext_ctxt, sp: codemap::span, arg: @ast::expr,
_body: option::t<str>) -> @ast::expr {
let args: [@ast::expr] = alt arg.node {
ast::expr_vec(elts, _) { elts }
_ { cx.span_fatal(sp, "#env requires arguments of the form `[...]`.") }
};
let args: [@ast::expr] =
alt arg.node {
ast::expr_vec(elts, _) { elts }
_ {
cx.span_fatal(sp, "#env requires arguments of the form `[...]`.")
}
};
if vec::len::<@ast::expr>(args) != 1u {
cx.span_fatal(sp, "malformed #env call");
}
// FIXME: if this was more thorough it would manufacture an
// option::t<str> rather than just an maybe-empty string.
let var = expr_to_str(cx, args.(0), "#env requires a string");
let var = expr_to_str(cx, args[0], "#env requires a string");
alt generic_os::getenv(var) {
option::none. { ret make_new_str(cx, sp, ""); }
option::some(s) { ret make_new_str(cx, sp, s); }

View file

@ -15,14 +15,14 @@ import syntax::ext::base::*;
fn expand_expr(exts: &hashmap<str, syntax_extension>, cx: &ext_ctxt,
e: &expr_, fld: ast_fold,
orig: &fn(&expr_, ast_fold) -> expr_ ) -> expr_ {
e: &expr_, fld: ast_fold, orig: &fn(&expr_, ast_fold) -> expr_)
-> expr_ {
ret alt e {
expr_mac(mac) {
alt mac.node {
mac_invoc(pth, args, body) {
assert (vec::len(pth.node.idents) > 0u);
let extname = pth.node.idents.(0);
let extname = pth.node.idents[0];
alt exts.find(extname) {
none. {
cx.span_fatal(pth.span,
@ -41,7 +41,7 @@ fn expand_expr(exts: &hashmap<str, syntax_extension>, cx: &ext_ctxt,
some(macro_defining(ext)) {
let named_extension = ext(cx, pth.span, args, body);
exts.insert(named_extension.ident, named_extension.ext);
ast::expr_rec(~[], none)
ast::expr_rec([], none)
}
}
}
@ -65,7 +65,6 @@ fn expand_crate(sess: &session::session, c: &@crate) -> @crate {
ret res;
}
// Local Variables:
// mode: rust
// fill-column: 78;

View file

@ -17,17 +17,20 @@ export expand_syntax_ext;
fn expand_syntax_ext(cx: &ext_ctxt, sp: span, arg: @ast::expr,
_body: option::t<str>) -> @ast::expr {
let args: [@ast::expr] = alt arg.node {
ast::expr_vec(elts, _) { elts }
_ { cx.span_fatal(sp, "#fmt requires arguments of the form `[...]`.") }
};
let args: [@ast::expr] =
alt arg.node {
ast::expr_vec(elts, _) { elts }
_ {
cx.span_fatal(sp, "#fmt requires arguments of the form `[...]`.")
}
};
if vec::len::<@ast::expr>(args) == 0u {
cx.span_fatal(sp, "#fmt requires a format string");
}
let fmt =
expr_to_str(cx, args.(0),
expr_to_str(cx, args[0],
"first argument to #fmt must be a " + "string literal.");
let fmtspan = args.(0).span;
let fmtspan = args[0].span;
log "Format string:";
log fmt;
fn parse_fmt_err_(cx: &ext_ctxt, sp: span, msg: str) -> ! {
@ -66,7 +69,7 @@ fn pieces_to_expr(cx: &ext_ctxt, sp: span, pieces: &[piece],
}
fn make_path_expr(cx: &ext_ctxt, sp: span, idents: &[ast::ident]) ->
@ast::expr {
let path = {global: false, idents: idents, types: ~[]};
let path = {global: false, idents: idents, types: []};
let sp_path = {node: path, span: sp};
let pathexpr = ast::expr_path(sp_path);
ret @{id: cx.next_id(), node: pathexpr, span: sp};
@ -85,13 +88,13 @@ fn pieces_to_expr(cx: &ext_ctxt, sp: span, pieces: &[piece],
fn make_rec_expr(cx: &ext_ctxt, sp: span,
fields: &[{ident: ast::ident, ex: @ast::expr}]) ->
@ast::expr {
let astfields: [ast::field] = ~[];
let astfields: [ast::field] = [];
for field: {ident: ast::ident, ex: @ast::expr} in fields {
let ident = field.ident;
let val = field.ex;
let astfield =
{node: {mut: ast::imm, ident: ident, expr: val}, span: sp};
astfields += ~[astfield];
astfields += [astfield];
}
let recexpr = ast::expr_rec(astfields, option::none::<@ast::expr>);
ret @{id: cx.next_id(), node: recexpr, span: sp};
@ -101,8 +104,8 @@ fn pieces_to_expr(cx: &ext_ctxt, sp: span, pieces: &[piece],
ret str::find(cx.crate_file_name(), "std.rc") >= 0;
}
if compiling_std(cx) {
ret ~["extfmt", "rt", ident];
} else { ret ~["std", "extfmt", "rt", ident]; }
ret ["extfmt", "rt", ident];
} else { ret ["std", "extfmt", "rt", ident]; }
}
fn make_rt_path_expr(cx: &ext_ctxt, sp: span, ident: str) -> @ast::expr {
let path = make_path_vec(cx, ident);
@ -112,9 +115,8 @@ fn pieces_to_expr(cx: &ext_ctxt, sp: span, pieces: &[piece],
// which tells the RT::conv* functions how to perform the conversion
fn make_rt_conv_expr(cx: &ext_ctxt, sp: span, cnv: &conv) -> @ast::expr {
fn make_flags(cx: &ext_ctxt, sp: span, flags: &[flag]) ->
@ast::expr {
let flagexprs: [@ast::expr] = ~[];
fn make_flags(cx: &ext_ctxt, sp: span, flags: &[flag]) -> @ast::expr {
let flagexprs: [@ast::expr] = [];
for f: flag in flags {
let fstr;
alt f {
@ -124,14 +126,14 @@ fn pieces_to_expr(cx: &ext_ctxt, sp: span, pieces: &[piece],
flag_sign_always. { fstr = "flag_sign_always"; }
flag_alternate. { fstr = "flag_alternate"; }
}
flagexprs += ~[make_rt_path_expr(cx, sp, fstr)];
flagexprs += [make_rt_path_expr(cx, sp, fstr)];
}
// FIXME: 0-length vectors can't have their type inferred
// through the rec that these flags are a member of, so
// this is a hack placeholder flag
if vec::len::<@ast::expr>(flagexprs) == 0u {
flagexprs += ~[make_rt_path_expr(cx, sp, "flag_none")];
flagexprs += [make_rt_path_expr(cx, sp, "flag_none")];
}
ret make_vec_expr(cx, sp, flagexprs);
}
@ -143,7 +145,7 @@ fn pieces_to_expr(cx: &ext_ctxt, sp: span, pieces: &[piece],
count_is(c) {
let count_lit = make_new_int(cx, sp, c);
let count_is_path = make_path_vec(cx, "count_is");
let count_is_args = ~[count_lit];
let count_is_args = [count_lit];
ret make_call(cx, sp, count_is_path, count_is_args);
}
_ { cx.span_unimpl(sp, "unimplemented #fmt conversion"); }
@ -168,10 +170,10 @@ fn pieces_to_expr(cx: &ext_ctxt, sp: span, pieces: &[piece],
width_expr: @ast::expr, precision_expr: @ast::expr,
ty_expr: @ast::expr) -> @ast::expr {
ret make_rec_expr(cx, sp,
~[{ident: "flags", ex: flags_expr},
{ident: "width", ex: width_expr},
{ident: "precision", ex: precision_expr},
{ident: "ty", ex: ty_expr}]);
[{ident: "flags", ex: flags_expr},
{ident: "width", ex: width_expr},
{ident: "precision", ex: precision_expr},
{ident: "ty", ex: ty_expr}]);
}
let rt_conv_flags = make_flags(cx, sp, cnv.flags);
let rt_conv_width = make_count(cx, sp, cnv.width);
@ -185,7 +187,7 @@ fn pieces_to_expr(cx: &ext_ctxt, sp: span, pieces: &[piece],
let fname = "conv_" + conv_type;
let path = make_path_vec(cx, fname);
let cnv_expr = make_rt_conv_expr(cx, sp, cnv);
let args = ~[cnv_expr, arg];
let args = [cnv_expr, arg];
ret make_call(cx, arg.span, path, args);
}
fn make_new_conv(cx: &ext_ctxt, sp: span, cnv: conv, arg: @ast::expr) ->
@ -304,7 +306,7 @@ fn pieces_to_expr(cx: &ext_ctxt, sp: span, pieces: &[piece],
ty_octal. { log "type: octal"; }
}
}
let fmt_sp = args.(0).span;
let fmt_sp = args[0].span;
let n = 0u;
let tmp_expr = make_new_str(cx, sp, "");
let nargs = vec::len::<@ast::expr>(args);
@ -323,7 +325,7 @@ fn pieces_to_expr(cx: &ext_ctxt, sp: span, pieces: &[piece],
}
log "Building conversion:";
log_conv(conv);
let arg_expr = args.(n);
let arg_expr = args[n];
let c_expr = make_new_conv(cx, fmt_sp, conv, arg_expr);
tmp_expr = make_add_expr(cx, fmt_sp, tmp_expr, c_expr);
}
@ -332,9 +334,10 @@ fn pieces_to_expr(cx: &ext_ctxt, sp: span, pieces: &[piece],
let expected_nargs = n + 1u; // n conversions + the fmt string
if expected_nargs < nargs {
cx.span_fatal
(sp, #fmt("too many arguments to #fmt. found %u, expected %u",
nargs, expected_nargs));
cx.span_fatal(
sp,
#fmt["too many arguments to #fmt. found %u, expected %u",
nargs, expected_nargs]);
}
ret tmp_expr;
}
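The arity check above, spelled out with illustrative numbers (not from the patch): n counts the conversions consumed while walking the pieces, so expected_nargs is n + 1u for the format string itself. A call like #fmt["%s: %u", name, count, extra] has two conversions and therefore expects three arguments; the fourth makes expected_nargs < nargs true and triggers the "too many arguments to #fmt" error.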

View file

@ -5,16 +5,19 @@ import syntax::ast;
fn expand_syntax_ext(cx: &ext_ctxt, sp: codemap::span, arg: @ast::expr,
_body: option::t<str>) -> @ast::expr {
let args: [@ast::expr] = alt arg.node {
ast::expr_vec(elts, _) { elts }
_ { cx.span_fatal(sp, "#ident_to_str requires a vector argument .") }
};
let args: [@ast::expr] =
alt arg.node {
ast::expr_vec(elts, _) { elts }
_ {
cx.span_fatal(sp, "#ident_to_str requires a vector argument .")
}
};
if vec::len::<@ast::expr>(args) != 1u {
cx.span_fatal(sp, "malformed #ident_to_str call");
}
ret make_new_lit(cx, sp,
ast::lit_str(expr_to_ident(cx, args.(0u),
ast::lit_str(expr_to_ident(cx, args[0u],
"expected an ident"),
ast::sk_rc));

View file

@ -9,5 +9,5 @@ fn expand_syntax_ext(cx: &ext_ctxt, sp: codemap::span, arg: @ast::expr,
std::io::stdout().write_line(print::pprust::expr_to_str(arg));
//trivial expression
ret @{id: cx.next_id(), node: ast::expr_rec(~[], option::none), span: sp};
ret @{id: cx.next_id(), node: ast::expr_rec([], option::none), span: sp};
}

View file

@ -34,7 +34,7 @@ export add_new_extension;
fn path_to_ident(pth: &path) -> option::t<ident> {
if vec::len(pth.node.idents) == 1u && vec::len(pth.node.types) == 0u {
ret some(pth.node.idents.(0u));
ret some(pth.node.idents[0u]);
}
ret none;
}
@ -89,10 +89,10 @@ fn match_error(cx: &ext_ctxt, m: &matchable, expected: &str) -> ! {
// we'll want to return something indicating amount of progress and location
// of failure instead of `none`.
type match_result = option::t<arb_depth<matchable>>;
type selector = fn(&matchable) -> match_result ;
type selector = fn(&matchable) -> match_result;
fn elts_to_ell(cx: &ext_ctxt, elts: &[@expr])
-> {pre: [@expr], rep: option::t<@expr>, post: [@expr]} {
fn elts_to_ell(cx: &ext_ctxt, elts: &[@expr]) ->
{pre: [@expr], rep: option::t<@expr>, post: [@expr]} {
let idx: uint = 0u;
let res = none;
for elt: @expr in elts {
@ -103,10 +103,10 @@ fn elts_to_ell(cx: &ext_ctxt, elts: &[@expr])
if res != none {
cx.span_fatal(m.span, "only one ellipsis allowed");
}
res = some({pre: vec::slice(elts, 0u, idx - 1u),
rep: some(elts.(idx - 1u)),
post: vec::slice(elts, idx + 1u,
vec::len(elts))});
res =
some({pre: vec::slice(elts, 0u, idx - 1u),
rep: some(elts[idx - 1u]),
post: vec::slice(elts, idx + 1u, vec::len(elts))});
}
_ { }
}
@ -116,16 +116,16 @@ fn elts_to_ell(cx: &ext_ctxt, elts: &[@expr])
idx += 1u;
}
ret alt res {
some(val) { val }
none. { {pre: elts, rep: none, post: ~[]} }
}
some(val) { val }
none. { {pre: elts, rep: none, post: []} }
}
}
fn option_flatten_map<T, U>(f: &fn(&T) -> option::t<U>, v: &[T]) ->
option::t<[U]> {
let res = ~[];
let res = [];
for elem: T in v {
alt f(elem) { none. { ret none; } some(fv) { res += ~[fv]; } }
alt f(elem) { none. { ret none; } some(fv) { res += [fv]; } }
}
ret some(res);
}
@ -168,7 +168,7 @@ fn acumm_bindings(_cx: &ext_ctxt, _b_dest: &bindings, _b_src: &bindings) { }
fn pattern_to_selectors(cx: &ext_ctxt, e: @expr) -> binders {
let res: binders =
{real_binders: new_str_hash::<selector>(),
mutable literal_ast_matchers: ~[]};
mutable literal_ast_matchers: []};
//this oughta return binders instead, but macro args are a sequence of
//expressions, rather than a single expression
fn trivial_selector(m: &matchable) -> match_result { ret some(leaf(m)); }
@ -203,7 +203,7 @@ fn use_selectors_to_bind(b: &binders, e: @expr) -> option::t<bindings> {
/* use the bindings on the body to generate the expanded code */
fn transcribe(cx: &ext_ctxt, b: &bindings, body: @expr) -> @expr {
let idx_path: @mutable [uint] = @mutable ~[];
let idx_path: @mutable [uint] = @mutable [];
fn new_id(_old: node_id, cx: &ext_ctxt) -> node_id { ret cx.next_id(); }
fn new_span(cx: &ext_ctxt, sp: &span) -> span {
/* this discards information in the case of macro-defining macros */
@ -236,7 +236,7 @@ fn follow(m: &arb_depth<matchable>, idx_path: @mutable [uint]) ->
for idx: uint in *idx_path {
alt res {
leaf(_) { ret res;/* end of the line */ }
seq(new_ms, _) { res = new_ms.(idx); }
seq(new_ms, _) { res = new_ms[idx]; }
}
}
ret res;
@ -282,13 +282,12 @@ iter free_vars(b: &bindings, e: @expr) -> ident {
/* handle sequences (anywhere in the AST) of exprs, either real or ...ed */
fn transcribe_exprs(cx: &ext_ctxt, b: &bindings, idx_path: @mutable [uint],
recur: fn(&@expr) -> @expr , exprs: [@expr])
-> [@expr] {
recur: fn(&@expr) -> @expr, exprs: [@expr]) -> [@expr] {
alt elts_to_ell(cx, exprs) {
{pre: pre, rep: repeat_me_maybe, post: post} {
let res = vec::map(recur, pre);
alt repeat_me_maybe {
none. {}
none. { }
some(repeat_me) {
let repeat: option::t<{rep_count: uint, name: ident}> = none;
/* we need to walk over all the free vars in lockstep, except for
@ -305,9 +304,10 @@ fn transcribe_exprs(cx: &ext_ctxt, b: &bindings, idx_path: @mutable [uint],
some({rep_count: old_len, name: old_name}) {
let len = vec::len(*ms);
if old_len != len {
let msg = #fmt("'%s' occurs %u times, but ", fv,
len) + #fmt("'%s' occurs %u times",
old_name, old_len);
let msg =
#fmt["'%s' occurs %u times, but ", fv, len] +
#fmt["'%s' occurs %u times", old_name,
old_len];
cx.span_fatal(repeat_me.span, msg);
}
}
@ -319,14 +319,14 @@ fn transcribe_exprs(cx: &ext_ctxt, b: &bindings, idx_path: @mutable [uint],
none. {
cx.span_fatal(repeat_me.span,
"'...' surrounds an expression without any" +
" repeating syntax variables");
" repeating syntax variables");
}
some({rep_count: rc, _}) {
/* Whew, we now know how how many times to repeat */
let idx: uint = 0u;
while idx < rc {
*idx_path += ~[idx];
res += ~[recur(repeat_me)]; // whew!
*idx_path += [idx];
res += [recur(repeat_me)]; // whew!
vec::pop(*idx_path);
idx += 1u;
}
@ -357,9 +357,9 @@ fn transcribe_path(cx: &ext_ctxt, b: &bindings, idx_path: @mutable [uint],
p: &path_, _fld: ast_fold) -> path_ {
// Don't substitute into qualified names.
if vec::len(p.types) > 0u || vec::len(p.idents) != 1u { ret p; }
ret alt follow_for_trans(cx, b.find(p.idents.(0)), idx_path) {
ret alt follow_for_trans(cx, b.find(p.idents[0]), idx_path) {
some(match_ident(id)) {
{global: false, idents: ~[id.node], types: ~[]}
{global: false, idents: [id.node], types: []}
}
some(match_path(a_pth)) { a_pth.node }
some(m) { match_error(cx, m, "a path") }
@ -370,21 +370,20 @@ fn transcribe_path(cx: &ext_ctxt, b: &bindings, idx_path: @mutable [uint],
fn transcribe_expr(cx: &ext_ctxt, b: &bindings, idx_path: @mutable [uint],
e: &ast::expr_, fld: ast_fold,
orig: fn(&ast::expr_, ast_fold) -> ast::expr_ ) ->
orig: fn(&ast::expr_, ast_fold) -> ast::expr_) ->
ast::expr_ {
ret alt e {
expr_path(p) {
// Don't substitute into qualified names.
if vec::len(p.node.types) > 0u || vec::len(p.node.idents) != 1u
{
if vec::len(p.node.types) > 0u || vec::len(p.node.idents) != 1u {
e
}
alt follow_for_trans(cx, b.find(p.node.idents.(0)), idx_path) {
alt follow_for_trans(cx, b.find(p.node.idents[0]), idx_path) {
some(match_ident(id)) {
expr_path(respan(id.span,
{global: false,
idents: ~[id.node],
types: ~[]}))
idents: [id.node],
types: []}))
}
some(match_path(a_pth)) { expr_path(a_pth) }
some(match_expr(a_exp)) { a_exp.node }
@ -398,7 +397,7 @@ fn transcribe_expr(cx: &ext_ctxt, b: &bindings, idx_path: @mutable [uint],
fn transcribe_type(cx: &ext_ctxt, b: &bindings, idx_path: @mutable [uint],
t: &ast::ty_, fld: ast_fold,
orig: fn(&ast::ty_, ast_fold) -> ast::ty_ ) -> ast::ty_ {
orig: fn(&ast::ty_, ast_fold) -> ast::ty_) -> ast::ty_ {
ret alt t {
ast::ty_path(pth, _) {
alt path_to_ident(pth) {
@ -422,12 +421,13 @@ fn transcribe_type(cx: &ext_ctxt, b: &bindings, idx_path: @mutable [uint],
fn transcribe_block(cx: &ext_ctxt, b: &bindings, idx_path: @mutable [uint],
blk: &blk_, fld: ast_fold,
orig: fn(&blk_, ast_fold) -> blk_ ) -> blk_ {
orig: fn(&blk_, ast_fold) -> blk_) -> blk_ {
ret alt block_to_ident(blk) {
some(id) {
alt follow_for_trans(cx, b.find(id), idx_path) {
some(match_block(new_blk)) { new_blk.node }
// possibly allow promotion of ident/path/expr to blocks?
some(m) {
match_error(cx, m, "a block")
@ -452,20 +452,20 @@ fn p_t_s_rec(cx: &ext_ctxt, m: &matchable, s: &selector, b: &binders) {
expr_vec(p_elts, _) {
alt elts_to_ell(cx, p_elts) {
{pre: pre, rep: some(repeat_me), post: post} {
p_t_s_r_length(cx, vec::len(pre) + vec::len(post),
true, s, b);
if(vec::len(pre) > 0u) {
p_t_s_r_length(cx, vec::len(pre) + vec::len(post), true, s,
b);
if vec::len(pre) > 0u {
p_t_s_r_actual_vector(cx, pre, true, s, b);
}
p_t_s_r_ellipses(cx, repeat_me, vec::len(pre), s, b);
if(vec::len(post) > 0u) {
if vec::len(post) > 0u {
cx.span_unimpl(e.span,
"matching after `...` not yet supported");
}
}
{pre: pre, rep: none., post: post} {
if post != ~[] {
if post != [] {
cx.bug("elts_to_ell provided an invalid result");
}
p_t_s_r_length(cx, vec::len(pre), false, s, b);
@ -474,6 +474,7 @@ fn p_t_s_rec(cx: &ext_ctxt, m: &matchable, s: &selector, b: &binders) {
}
}
/* TODO: handle embedded types and blocks, at least */
expr_mac(mac) {
p_t_s_r_mac(cx, mac, s, b);
@ -488,7 +489,7 @@ fn p_t_s_rec(cx: &ext_ctxt, m: &matchable, s: &selector, b: &binders) {
_ { cx.bug("broken traversal in p_t_s_r") }
}
}
b.literal_ast_matchers += ~[bind select(cx, _, e)];
b.literal_ast_matchers += [bind select(cx, _, e)];
}
}
}
@ -545,7 +546,7 @@ fn block_to_ident(blk: &blk_) -> option::t<ident> {
fn p_t_s_r_mac(cx: &ext_ctxt, mac: &ast::mac, s: &selector, b: &binders) {
fn select_pt_1(cx: &ext_ctxt, m: &matchable,
fn_m: fn(&ast::mac) -> match_result ) -> match_result {
fn_m: fn(&ast::mac) -> match_result) -> match_result {
ret alt m {
match_expr(e) {
alt e.node { expr_mac(mac) { fn_m(mac) } _ { none } }
@ -603,17 +604,18 @@ fn p_t_s_r_mac(cx: &ext_ctxt, mac: &ast::mac, s: &selector, b: &binders) {
fn p_t_s_r_ellipses(cx: &ext_ctxt, repeat_me: @expr, offset: uint,
s: &selector, b: &binders) {
fn select(cx: &ext_ctxt, repeat_me: @expr, offset: uint, m: &matchable) ->
match_result {
match_result {
ret alt m {
match_expr(e) {
alt e.node {
expr_vec(arg_elts, _) {
let elts = ~[];
let elts = [];
let idx = offset;
while idx < vec::len(arg_elts) {
elts += ~[leaf(match_expr(arg_elts.(idx)))];
elts += [leaf(match_expr(arg_elts[idx]))];
idx += 1u;
}
// using repeat_me.span is a little wacky, but the
// error we want to report is one in the macro def
some(seq(@elts, repeat_me.span))
@ -631,14 +633,14 @@ fn p_t_s_r_ellipses(cx: &ext_ctxt, repeat_me: @expr, offset: uint,
fn p_t_s_r_length(cx: &ext_ctxt, len: uint, at_least: bool, s: selector,
b: &binders) {
fn len_select(_cx: &ext_ctxt, m: &matchable, at_least: bool, len: uint)
-> match_result {
fn len_select(_cx: &ext_ctxt, m: &matchable, at_least: bool, len: uint) ->
match_result {
ret alt m {
match_expr(e) {
alt e.node {
expr_vec(arg_elts, _) {
let actual_len = vec::len(arg_elts);
if (at_least && actual_len >= len) || actual_len == len {
if at_least && actual_len >= len || actual_len == len {
some(leaf(match_exact))
} else { none }
}
@ -649,7 +651,7 @@ fn p_t_s_r_length(cx: &ext_ctxt, len: uint, at_least: bool, s: selector,
}
}
b.literal_ast_matchers +=
~[compose_sels(s, bind len_select(cx, _, at_least, len))];
[compose_sels(s, bind len_select(cx, _, at_least, len))];
}
fn p_t_s_r_actual_vector(cx: &ext_ctxt, elts: [@expr], _repeat_after: bool,
@ -661,7 +663,7 @@ fn p_t_s_r_actual_vector(cx: &ext_ctxt, elts: [@expr], _repeat_after: bool,
match_expr(e) {
alt e.node {
expr_vec(arg_elts, _) {
some(leaf(match_expr(arg_elts.(idx))))
some(leaf(match_expr(arg_elts[idx])))
}
_ { none }
}
@ -669,7 +671,7 @@ fn p_t_s_r_actual_vector(cx: &ext_ctxt, elts: [@expr], _repeat_after: bool,
_ { cx.bug("broken traversal in p_t_s_r") }
}
}
p_t_s_rec(cx, match_expr(elts.(idx)),
p_t_s_rec(cx, match_expr(elts[idx]),
compose_sels(s, bind select(cx, _, idx)), b);
idx += 1u;
}
@ -677,15 +679,17 @@ fn p_t_s_r_actual_vector(cx: &ext_ctxt, elts: [@expr], _repeat_after: bool,
fn add_new_extension(cx: &ext_ctxt, sp: span, arg: @expr,
_body: option::t<str>) -> base::macro_def {
let args: [@ast::expr] = alt arg.node {
ast::expr_vec(elts, _) { elts }
_ {
cx.span_fatal(sp, "#macro requires arguments of the form `[...]`.")
}
};
let args: [@ast::expr] =
alt arg.node {
ast::expr_vec(elts, _) { elts }
_ {
cx.span_fatal(sp,
"#macro requires arguments of the form `[...]`.")
}
};
let macro_name: option::t<str> = none;
let clauses: [@clause] = ~[];
let clauses: [@clause] = [];
for arg: @expr in args {
alt arg.node {
expr_vec(elts, mut) {
@ -696,7 +700,7 @@ fn add_new_extension(cx: &ext_ctxt, sp: span, arg: @expr,
}
alt elts.(0u).node {
alt elts[0u].node {
expr_mac(mac) {
alt mac.node {
mac_invoc(pth, invoc_arg, body) {
@ -706,8 +710,9 @@ fn add_new_extension(cx: &ext_ctxt, sp: span, arg: @expr,
none. { macro_name = some(id); }
some(other_id) {
if id != other_id {
cx.span_fatal(pth.span, "macro name must be "
+ "consistent");
cx.span_fatal(pth.span,
"macro name must be " +
"consistent");
}
}
}
@ -718,15 +723,15 @@ fn add_new_extension(cx: &ext_ctxt, sp: span, arg: @expr,
}
}
clauses +=
~[@{params: pattern_to_selectors(cx, invoc_arg),
body: elts.(1u)}];
[@{params: pattern_to_selectors(cx, invoc_arg),
body: elts[1u]}];
// FIXME: check duplicates (or just simplify
// the macro arg situation)
}
}
}
_ {
cx.span_fatal(elts.(0u).span,
cx.span_fatal(elts[0u].span,
"extension clause must" +
" start with a macro invocation.");
}
@ -756,9 +761,7 @@ fn add_new_extension(cx: &ext_ctxt, sp: span, arg: @expr,
_body: option::t<str>, clauses: [@clause]) -> @expr {
for c: @clause in clauses {
alt use_selectors_to_bind(c.params, arg) {
some(bindings) {
ret transcribe(cx, bindings, c.body)
}
some(bindings) { ret transcribe(cx, bindings, c.body) }
none. { cont; }
}
}
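A note on elts_to_ell, used throughout the hunks above: the `...` marker binds to the element just before it, so an argument vector written as [x, y, ...] (a hypothetical pattern, for illustration) comes back as {pre: [x], rep: some(y), post: []}, and p_t_s_rec then rejects any elements after the ellipsis through the "matching after `...` not yet supported" branch.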

View file

@ -20,57 +20,57 @@ type ast_fold = @mutable a_f;
type ast_fold_precursor =
//unlike the others, item_ is non-trivial
{fold_crate: fn(&crate_, ast_fold) -> crate_ ,
{fold_crate: fn(&crate_, ast_fold) -> crate_,
fold_crate_directive:
fn(&crate_directive_, ast_fold) -> crate_directive_ ,
fold_view_item: fn(&view_item_, ast_fold) -> view_item_ ,
fold_native_item: fn(&@native_item, ast_fold) -> @native_item ,
fold_item: fn(&@item, ast_fold) -> @item ,
fold_item_underscore: fn(&item_, ast_fold) -> item_ ,
fold_method: fn(&method_, ast_fold) -> method_ ,
fold_block: fn(&blk_, ast_fold) -> blk_ ,
fold_stmt: fn(&stmt_, ast_fold) -> stmt_ ,
fold_arm: fn(&arm, ast_fold) -> arm ,
fold_pat: fn(&pat_, ast_fold) -> pat_ ,
fold_decl: fn(&decl_, ast_fold) -> decl_ ,
fold_expr: fn(&expr_, ast_fold) -> expr_ ,
fold_ty: fn(&ty_, ast_fold) -> ty_ ,
fold_constr: fn(&ast::constr_, ast_fold) -> constr_ ,
fold_fn: fn(&_fn, ast_fold) -> _fn ,
fold_mod: fn(&_mod, ast_fold) -> _mod ,
fold_native_mod: fn(&native_mod, ast_fold) -> native_mod ,
fold_variant: fn(&variant_, ast_fold) -> variant_ ,
fold_ident: fn(&ident, ast_fold) -> ident ,
fold_path: fn(&path_, ast_fold) -> path_ ,
fold_local: fn(&local_, ast_fold) -> local_ ,
map_exprs: fn(fn(&@expr) -> @expr , [@expr]) -> [@expr],
fn(&crate_directive_, ast_fold) -> crate_directive_,
fold_view_item: fn(&view_item_, ast_fold) -> view_item_,
fold_native_item: fn(&@native_item, ast_fold) -> @native_item,
fold_item: fn(&@item, ast_fold) -> @item,
fold_item_underscore: fn(&item_, ast_fold) -> item_,
fold_method: fn(&method_, ast_fold) -> method_,
fold_block: fn(&blk_, ast_fold) -> blk_,
fold_stmt: fn(&stmt_, ast_fold) -> stmt_,
fold_arm: fn(&arm, ast_fold) -> arm,
fold_pat: fn(&pat_, ast_fold) -> pat_,
fold_decl: fn(&decl_, ast_fold) -> decl_,
fold_expr: fn(&expr_, ast_fold) -> expr_,
fold_ty: fn(&ty_, ast_fold) -> ty_,
fold_constr: fn(&ast::constr_, ast_fold) -> constr_,
fold_fn: fn(&_fn, ast_fold) -> _fn,
fold_mod: fn(&_mod, ast_fold) -> _mod,
fold_native_mod: fn(&native_mod, ast_fold) -> native_mod,
fold_variant: fn(&variant_, ast_fold) -> variant_,
fold_ident: fn(&ident, ast_fold) -> ident,
fold_path: fn(&path_, ast_fold) -> path_,
fold_local: fn(&local_, ast_fold) -> local_,
map_exprs: fn(fn(&@expr) -> @expr, [@expr]) -> [@expr],
new_id: fn(node_id) -> node_id,
new_span: fn(&span) -> span};
type a_f =
{fold_crate: fn(&crate) -> crate ,
fold_crate_directive: fn(&@crate_directive) -> @crate_directive ,
fold_view_item: fn(&@view_item) -> @view_item ,
fold_native_item: fn(&@native_item) -> @native_item ,
fold_item: fn(&@item) -> @item ,
fold_item_underscore: fn(&item_) -> item_ ,
fold_method: fn(&@method) -> @method ,
fold_block: fn(&blk) -> blk ,
fold_stmt: fn(&@stmt) -> @stmt ,
fold_arm: fn(&arm) -> arm ,
fold_pat: fn(&@pat) -> @pat ,
fold_decl: fn(&@decl) -> @decl ,
fold_expr: fn(&@expr) -> @expr ,
fold_ty: fn(&@ty) -> @ty ,
fold_constr: fn(&@constr) -> @constr ,
fold_fn: fn(&_fn) -> _fn ,
fold_mod: fn(&_mod) -> _mod ,
fold_native_mod: fn(&native_mod) -> native_mod ,
fold_variant: fn(&variant) -> variant ,
fold_ident: fn(&ident) -> ident ,
fold_path: fn(&path) -> path ,
fold_local: fn(&@local) -> @local ,
map_exprs: fn(fn(&@expr) -> @expr , [@expr]) -> [@expr],
{fold_crate: fn(&crate) -> crate,
fold_crate_directive: fn(&@crate_directive) -> @crate_directive,
fold_view_item: fn(&@view_item) -> @view_item,
fold_native_item: fn(&@native_item) -> @native_item,
fold_item: fn(&@item) -> @item,
fold_item_underscore: fn(&item_) -> item_,
fold_method: fn(&@method) -> @method,
fold_block: fn(&blk) -> blk,
fold_stmt: fn(&@stmt) -> @stmt,
fold_arm: fn(&arm) -> arm,
fold_pat: fn(&@pat) -> @pat,
fold_decl: fn(&@decl) -> @decl,
fold_expr: fn(&@expr) -> @expr,
fold_ty: fn(&@ty) -> @ty,
fold_constr: fn(&@constr) -> @constr,
fold_fn: fn(&_fn) -> _fn,
fold_mod: fn(&_mod) -> _mod,
fold_native_mod: fn(&native_mod) -> native_mod,
fold_variant: fn(&variant) -> variant,
fold_ident: fn(&ident) -> ident,
fold_path: fn(&path) -> path,
fold_local: fn(&@local) -> @local,
map_exprs: fn(fn(&@expr) -> @expr, [@expr]) -> [@expr],
new_id: fn(node_id) -> node_id,
new_span: fn(&span) -> span};
@ -120,7 +120,7 @@ fn fold_meta_item_(mi: &@meta_item, fld: ast_fold) -> @meta_item {
span: mi.span};
}
//used in noop_fold_item and noop_fold_crate
fn fold_attribute_(at: &attribute, fmi: fn(&@meta_item) -> @meta_item ) ->
fn fold_attribute_(at: &attribute, fmi: fn(&@meta_item) -> @meta_item) ->
attribute {
ret {node: {style: at.node.style, value: *fmi(@at.node.value)},
span: at.span};
@ -135,14 +135,14 @@ fn fold_arg_(a: &arg, fld: ast_fold) -> arg {
//used in noop_fold_expr, and possibly elsewhere in the future
fn fold_mac_(m: &mac, fld: ast_fold) -> mac {
ret {node:
alt m.node {
mac_invoc(pth, arg, body) {
mac_invoc(fld.fold_path(pth), fld.fold_expr(arg), body)
}
mac_embed_type(ty) { mac_embed_type(fld.fold_ty(ty)) }
mac_embed_block(blk) { mac_embed_block(fld.fold_block(blk)) }
mac_ellipsis. { mac_ellipsis }
},
alt m.node {
mac_invoc(pth, arg, body) {
mac_invoc(fld.fold_path(pth), fld.fold_expr(arg), body)
}
mac_embed_type(ty) { mac_embed_type(fld.fold_ty(ty)) }
mac_embed_block(blk) { mac_embed_block(fld.fold_block(blk)) }
mac_ellipsis. { mac_ellipsis }
},
span: m.span};
}
@ -200,7 +200,7 @@ fn noop_fold_native_item(ni: &@native_item, fld: ast_fold) -> @native_item {
cf: fdec.cf,
constraints:
vec::map(fld.fold_constr,
fdec.constraints)}, typms)
fdec.constraints)}, typms)
}
},
id: ni.id,
@ -238,8 +238,8 @@ fn noop_fold_item_underscore(i: &item_, fld: ast_fold) -> item_ {
}
item_obj(o, typms, d) {
item_obj({fields: vec::map(fold_obj_field, o.fields),
methods: vec::map(fld.fold_method, o.methods)},
typms, d)
methods: vec::map(fld.fold_method, o.methods)}, typms,
d)
}
item_res(dtor, did, typms, cid) {
item_res(fld.fold_fn(dtor), did, typms, cid)
@ -269,8 +269,7 @@ fn noop_fold_stmt(s: &stmt_, fld: ast_fold) -> stmt_ {
}
fn noop_fold_arm(a: &arm, fld: ast_fold) -> arm {
ret {pats: vec::map(fld.fold_pat, a.pats),
body: fld.fold_block(a.body)};
ret {pats: vec::map(fld.fold_pat, a.pats), body: fld.fold_block(a.body)};
}
fn noop_fold_pat(p: &pat_, fld: ast_fold) -> pat_ {
@ -282,15 +281,13 @@ fn noop_fold_pat(p: &pat_, fld: ast_fold) -> pat_ {
pat_tag(fld.fold_path(pth), vec::map(fld.fold_pat, pats))
}
pat_rec(fields, etc) {
let fs = ~[];
let fs = [];
for f: ast::field_pat in fields {
fs += ~[{ident: f.ident, pat: fld.fold_pat(f.pat)}];
fs += [{ident: f.ident, pat: fld.fold_pat(f.pat)}];
}
pat_rec(fs, etc)
}
pat_tup(elts) {
pat_tup(vec::map(fld.fold_pat, elts))
}
pat_tup(elts) { pat_tup(vec::map(fld.fold_pat, elts)) }
pat_box(inner) { pat_box(fld.fold_pat(inner)) }
};
}
@ -346,9 +343,7 @@ fn noop_fold_expr(e: &expr_, fld: ast_fold) -> expr_ {
expr_rec(vec::map(fold_field, fields),
option::map(fld.fold_expr, maybe_expr))
}
expr_tup(elts) {
expr_tup(vec::map(fld.fold_expr, elts))
}
expr_tup(elts) { expr_tup(vec::map(fld.fold_expr, elts)) }
expr_call(f, args) {
expr_call(fld.fold_expr(f), fld.map_exprs(fld.fold_expr, args))
}
@ -393,9 +388,7 @@ fn noop_fold_expr(e: &expr_, fld: ast_fold) -> expr_ {
expr_move(el, er) {
expr_move(fld.fold_expr(el), fld.fold_expr(er))
}
expr_copy(e) {
expr_copy(fld.fold_expr(e))
}
expr_copy(e) { expr_copy(fld.fold_expr(e)) }
expr_assign(el, er) {
expr_assign(fld.fold_expr(el), fld.fold_expr(er))
}
@ -487,19 +480,20 @@ fn noop_fold_path(p: &path_, fld: ast_fold) -> path_ {
fn noop_fold_local(l: &local_, fld: ast_fold) -> local_ {
ret {ty: fld.fold_ty(l.ty),
pat: fld.fold_pat(l.pat),
init: alt l.init {
option::none::<initializer>. { l.init }
option::some::<initializer>(init) {
option::some::<initializer>({op: init.op,
expr: fld.fold_expr(init.expr)})
}
},
init:
alt l.init {
option::none::<initializer>. { l.init }
option::some::<initializer>(init) {
option::some::<initializer>({op: init.op,
expr: fld.fold_expr(init.expr)})
}
},
id: l.id};
}
/* temporarily eta-expand because of a compiler bug with using `fn<T>` as a
value */
fn noop_map_exprs(f: fn(&@expr) -> @expr , es: [@expr]) -> [@expr] {
fn noop_map_exprs(f: fn(&@expr) -> @expr, es: [@expr]) -> [@expr] {
ret vec::map(f, es);
}
@ -634,14 +628,16 @@ fn make_fold(afp: &ast_fold_precursor) -> ast_fold {
}
fn f_pat(afp: &ast_fold_precursor, f: ast_fold, x: &@pat) -> @pat {
ret @{id: afp.new_id(x.id),
node: afp.fold_pat(x.node, f), span: afp.new_span(x.span)};
node: afp.fold_pat(x.node, f),
span: afp.new_span(x.span)};
}
fn f_decl(afp: &ast_fold_precursor, f: ast_fold, x: &@decl) -> @decl {
ret @{node: afp.fold_decl(x.node, f), span: afp.new_span(x.span)};
}
fn f_expr(afp: &ast_fold_precursor, f: ast_fold, x: &@expr) -> @expr {
ret @{id: afp.new_id(x.id),
node: afp.fold_expr(x.node, f), span: afp.new_span(x.span)};
node: afp.fold_expr(x.node, f),
span: afp.new_span(x.span)};
}
fn f_ty(afp: &ast_fold_precursor, f: ast_fold, x: &@ty) -> @ty {
ret @{node: afp.fold_ty(x.node, f), span: afp.new_span(x.span)};


@ -26,8 +26,7 @@ type ctx =
cfg: ast::crate_cfg};
fn eval_crate_directives(cx: ctx, cdirs: &[@ast::crate_directive],
prefix: str,
view_items: &mutable [@ast::view_item],
prefix: str, view_items: &mutable [@ast::view_item],
items: &mutable [@ast::item]) {
for sub_cdir: @ast::crate_directive in cdirs {
eval_crate_directive(cx, sub_cdir, prefix, view_items, items);
@ -36,8 +35,8 @@ fn eval_crate_directives(cx: ctx, cdirs: &[@ast::crate_directive],
fn eval_crate_directives_to_mod(cx: ctx, cdirs: &[@ast::crate_directive],
prefix: str) -> ast::_mod {
let view_items: [@ast::view_item] = ~[];
let items: [@ast::item] = ~[];
let view_items: [@ast::view_item] = [];
let items: [@ast::item] = [];
eval_crate_directives(cx, cdirs, prefix, view_items, items);
ret {view_items: view_items, items: items};
}
@ -53,7 +52,7 @@ fn eval_crate_directive(cx: ctx, cdir: @ast::crate_directive, prefix: str,
if std::fs::path_is_absolute(file_path) {
file_path
} else { prefix + std::fs::path_sep() + file_path };
if cx.mode == mode_depend { cx.deps += ~[full_path]; ret; }
if cx.mode == mode_depend { cx.deps += [full_path]; ret; }
let p0 =
new_parser_from_file(cx.sess, cx.cfg, full_path, cx.chpos,
cx.byte_pos, SOURCE_FILE);
@ -68,7 +67,7 @@ fn eval_crate_directive(cx: ctx, cdir: @ast::crate_directive, prefix: str,
// Thread defids, chpos and byte_pos through the parsers
cx.chpos = p0.get_chpos();
cx.byte_pos = p0.get_byte_pos();
items += ~[i];
items += [i];
}
ast::cdir_dir_mod(id, dir_opt, cdirs, attrs) {
let path = id;
@ -85,9 +84,9 @@ fn eval_crate_directive(cx: ctx, cdir: @ast::crate_directive, prefix: str,
node: ast::item_mod(m0),
span: cdir.span};
cx.sess.next_id += 1;
items += ~[i];
items += [i];
}
ast::cdir_view_item(vi) { view_items += ~[vi]; }
ast::cdir_view_item(vi) { view_items += [vi]; }
ast::cdir_syntax(pth) { }
ast::cdir_auth(pth, eff) { }
}


@ -14,18 +14,18 @@ import codemap;
type reader =
obj {
fn is_eof() -> bool ;
fn curr() -> char ;
fn next() -> char ;
fn init() ;
fn bump() ;
fn get_str_from(uint) -> str ;
fn get_interner() -> @interner::interner<str> ;
fn get_chpos() -> uint ;
fn get_byte_pos() -> uint ;
fn get_col() -> uint ;
fn get_filemap() -> codemap::filemap ;
fn err(str) ;
fn is_eof() -> bool;
fn curr() -> char;
fn next() -> char;
fn init();
fn bump();
fn get_str_from(uint) -> str;
fn get_interner() -> @interner::interner<str>;
fn get_chpos() -> uint;
fn get_byte_pos() -> uint;
fn get_col() -> uint;
fn get_filemap() -> codemap::filemap;
fn err(str);
};
fn new_reader(cm: &codemap::codemap, src: str, filemap: codemap::filemap,
@ -81,7 +81,7 @@ fn new_reader(cm: &codemap::codemap, src: str, filemap: codemap::filemap,
codemap::emit_error(some(ast::mk_sp(chpos, chpos)), m, cm);
}
}
let strs: [str] = ~[];
let strs: [str] = [];
let rd =
reader(cm, src, str::byte_len(src), 0u, 0u, -1 as char,
filemap.start_pos.ch, strs, filemap, itr);
@ -166,10 +166,7 @@ fn consume_block_comment(rdr: &reader) {
fn digits_to_string(s: str) -> int {
let accum_int: int = 0;
for c: u8 in s {
accum_int *= 10;
accum_int += dec_digit_val(c as char);
}
for c: u8 in s { accum_int *= 10; accum_int += dec_digit_val(c as char); }
ret accum_int;
}
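The digits_to_string hunk above only reflows the loop onto a single line; the underlying logic is plain base-10 accumulation. A minimal sketch of the same idea in present-day Rust (illustrative name, not the lexer's code):

    fn digits_to_int(s: &str) -> i64 {
        let mut accum: i64 = 0;
        for c in s.chars() {
            // Shift the running value one decimal place, then add the digit's value.
            accum *= 10;
            accum += c.to_digit(10).expect("decimal digit") as i64;
        }
        accum
    }

    fn main() {
        assert_eq!(digits_to_int("1234"), 1234);
    }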
@ -177,11 +174,11 @@ fn scan_exponent(rdr: &reader) -> option::t<str> {
let c = rdr.curr();
let rslt = "";
if c == 'e' || c == 'E' {
rslt += str::unsafe_from_bytes(~[c as u8]);
rslt += str::unsafe_from_bytes([c as u8]);
rdr.bump();
c = rdr.curr();
if c == '-' || c == '+' {
rslt += str::unsafe_from_bytes(~[c as u8]);
rslt += str::unsafe_from_bytes([c as u8]);
rdr.bump();
}
let exponent = scan_dec_digits(rdr);
@ -195,7 +192,7 @@ fn scan_dec_digits(rdr: &reader) -> str {
let c = rdr.curr();
let rslt: str = "";
while is_dec_digit(c) || c == '_' {
if c != '_' { rslt += str::unsafe_from_bytes(~[c as u8]); }
if c != '_' { rslt += str::unsafe_from_bytes([c as u8]); }
rdr.bump();
c = rdr.curr();
}
@ -216,7 +213,7 @@ fn scan_number(c: char, rdr: &reader) -> token::token {
rdr.bump();
c = rdr.curr();
}
} else if (c == '0' && n == 'b') {
} else if c == '0' && n == 'b' {
rdr.bump();
rdr.bump();
c = rdr.curr();
@ -290,7 +287,7 @@ fn scan_number(c: char, rdr: &reader) -> token::token {
ret token::LIT_MACH_FLOAT(ast::ty_f32,
intern(*rdr.get_interner(),
float_str));
} else if (c == '6' && n == '4') {
} else if c == '6' && n == '4' {
rdr.bump();
rdr.bump();
ret token::LIT_MACH_FLOAT(ast::ty_f64,
@ -302,14 +299,14 @@ fn scan_number(c: char, rdr: &reader) -> token::token {
}
} else {
ret token::LIT_FLOAT(interner::intern::<str>(*rdr.get_interner(),
float_str));
float_str));
}
}
let maybe_exponent = scan_exponent(rdr);
alt maybe_exponent {
some(s) {
ret token::LIT_FLOAT(interner::intern::<str>(*rdr.get_interner(),
dec_str + s));
dec_str + s));
}
none. { ret token::LIT_INT(accum_int); }
}
@ -321,7 +318,7 @@ fn scan_numeric_escape(rdr: &reader, n_hex_digits: uint) -> char {
let n = rdr.curr();
rdr.bump();
if !is_hex_digit(n) {
rdr.err(#fmt("illegal numeric character escape: %d", n as int));
rdr.err(#fmt["illegal numeric character escape: %d", n as int]);
fail;
}
accum_int *= 16;
@ -351,7 +348,7 @@ fn next_token_inner(rdr: &reader) -> token::token {
if str::eq(accum_str, "_") { ret token::UNDERSCORE; }
let is_mod_name = c == ':' && rdr.next() == ':';
ret token::IDENT(interner::intern::<str>(*rdr.get_interner(),
accum_str), is_mod_name);
accum_str), is_mod_name);
}
if is_dec_digit(c) { ret scan_number(c, rdr); }
fn binop(rdr: &reader, op: token::binop) -> token::token {
@ -363,6 +360,7 @@ fn next_token_inner(rdr: &reader) -> token::token {
}
alt c {
// One-byte tokens.
'?' {
rdr.bump();
@ -401,6 +399,7 @@ fn next_token_inner(rdr: &reader) -> token::token {
} else { ret token::COLON; }
}
// Multi-byte tokens.
'=' {
rdr.bump();
@ -461,7 +460,7 @@ fn next_token_inner(rdr: &reader) -> token::token {
'u' { c2 = scan_numeric_escape(rdr, 4u); }
'U' { c2 = scan_numeric_escape(rdr, 8u); }
c2 {
rdr.err(#fmt("unknown character escape: %d", c2 as int));
rdr.err(#fmt["unknown character escape: %d", c2 as int]);
fail;
}
}
@ -500,7 +499,7 @@ fn next_token_inner(rdr: &reader) -> token::token {
str::push_char(accum_str, scan_numeric_escape(rdr, 8u));
}
c2 {
rdr.err(#fmt("unknown string escape: %d", c2 as int));
rdr.err(#fmt["unknown string escape: %d", c2 as int]);
fail;
}
}
@ -510,7 +509,7 @@ fn next_token_inner(rdr: &reader) -> token::token {
}
rdr.bump();
ret token::LIT_STR(interner::intern::<str>(*rdr.get_interner(),
accum_str));
accum_str));
}
'-' {
if rdr.next() == '>' {
@ -537,7 +536,7 @@ fn next_token_inner(rdr: &reader) -> token::token {
'/' { ret binop(rdr, token::SLASH); }
'^' { ret binop(rdr, token::CARET); }
'%' { ret binop(rdr, token::PERCENT); }
c { rdr.err(#fmt("unkown start of token: %d", c as int)); fail; }
c { rdr.err(#fmt["unkown start of token: %d", c as int]); fail; }
}
}
@ -562,7 +561,7 @@ fn read_to_eol(rdr: &reader) -> str {
fn read_one_line_comment(rdr: &reader) -> str {
let val = read_to_eol(rdr);
assert (val.(0) == '/' as u8 && val.(1) == '/' as u8);
assert (val[0] == '/' as u8 && val[1] == '/' as u8);
ret val;
}
@ -578,8 +577,8 @@ fn consume_non_eol_whitespace(rdr: &reader) {
fn push_blank_line_comment(rdr: &reader, comments: &mutable [cmnt]) {
log ">>> blank-line comment";
let v: [str] = ~[];
comments += ~[{style: blank_line, lines: v, pos: rdr.get_chpos()}];
let v: [str] = [];
comments += [{style: blank_line, lines: v, pos: rdr.get_chpos()}];
}
fn consume_whitespace_counting_blank_lines(rdr: &reader,
@ -595,11 +594,11 @@ fn consume_whitespace_counting_blank_lines(rdr: &reader,
fn read_line_comments(rdr: &reader, code_to_the_left: bool) -> cmnt {
log ">>> line comments";
let p = rdr.get_chpos();
let lines: [str] = ~[];
let lines: [str] = [];
while rdr.curr() == '/' && rdr.next() == '/' {
let line = read_one_line_comment(rdr);
log line;
lines += ~[line];
lines += [line];
consume_non_eol_whitespace(rdr);
}
log "<<< line comments";
@ -610,10 +609,7 @@ fn read_line_comments(rdr: &reader, code_to_the_left: bool) -> cmnt {
fn all_whitespace(s: &str, begin: uint, end: uint) -> bool {
let i: uint = begin;
while i != end {
if !is_whitespace(s.(i) as char) { ret false; }
i += 1u;
}
while i != end { if !is_whitespace(s[i] as char) { ret false; } i += 1u; }
ret true;
}
@ -626,20 +622,20 @@ fn trim_whitespace_prefix_and_push_line(lines: &mutable [str], s: &str,
} else { s1 = ""; }
} else { s1 = s; }
log "pushing line: " + s1;
lines += ~[s1];
lines += [s1];
}
fn read_block_comment(rdr: &reader, code_to_the_left: bool) -> cmnt {
log ">>> block comment";
let p = rdr.get_chpos();
let lines: [str] = ~[];
let lines: [str] = [];
let col: uint = rdr.get_col();
rdr.bump();
rdr.bump();
let curr_line = "/*";
let level: int = 1;
while level > 0 {
log #fmt("=== block comment level %d", level);
log #fmt["=== block comment level %d", level];
if rdr.is_eof() { rdr.err("unterminated block comment"); fail; }
if rdr.curr() == '\n' {
trim_whitespace_prefix_and_push_line(lines, curr_line, col);
@ -683,9 +679,9 @@ fn consume_comment(rdr: &reader, code_to_the_left: bool,
comments: &mutable [cmnt]) {
log ">>> consume comment";
if rdr.curr() == '/' && rdr.next() == '/' {
comments += ~[read_line_comments(rdr, code_to_the_left)];
} else if (rdr.curr() == '/' && rdr.next() == '*') {
comments += ~[read_block_comment(rdr, code_to_the_left)];
comments += [read_line_comments(rdr, code_to_the_left)];
} else if rdr.curr() == '/' && rdr.next() == '*' {
comments += [read_block_comment(rdr, code_to_the_left)];
} else { fail; }
log "<<< consume comment";
}
@ -712,8 +708,8 @@ fn gather_comments_and_literals(cm: &codemap::codemap, path: str,
let src = str::unsafe_from_bytes(srdr.read_whole_stream());
let itr = @interner::mk::<str>(str::hash, str::eq);
let rdr = new_reader(cm, src, codemap::new_filemap(path, 0u, 0u), itr);
let comments: [cmnt] = ~[];
let literals: [lit] = ~[];
let comments: [cmnt] = [];
let literals: [lit] = [];
let first_read: bool = true;
while !rdr.is_eof() {
while true {
@ -731,7 +727,7 @@ fn gather_comments_and_literals(cm: &codemap::codemap, path: str,
}
let tok = next_token(rdr);
if is_lit(tok.tok) {
literals += ~[{lit: rdr.get_str_from(tok.bpos), pos: tok.chpos}];
literals += [{lit: rdr.get_str_from(tok.bpos), pos: tok.chpos}];
}
log "tok: " + token::to_str(rdr, tok.tok);
first_read = false;

File diff suppressed because it is too large


@ -116,6 +116,7 @@ fn to_str(r: lexer::reader, t: token) -> str {
BINOP(op) { ret binop_to_str(op); }
BINOPEQ(op) { ret binop_to_str(op) + "="; }
/* Structural symbols */
AT. {
ret "@";
@ -140,6 +141,7 @@ fn to_str(r: lexer::reader, t: token) -> str {
POUND_LBRACE. { ret "#{"; }
POUND_LT. { ret "#<"; }
/* Literals */
LIT_INT(i) {
ret int::to_str(i, 10u);
@ -165,6 +167,7 @@ fn to_str(r: lexer::reader, t: token) -> str {
}
LIT_BOOL(b) { if b { ret "true"; } else { ret "false"; } }
/* Name components */
IDENT(s, _) {
ret interner::get::<str>(*r.get_interner(), s);


@ -66,7 +66,7 @@ tag token { STRING(str, int); BREAK(break_t); BEGIN(begin_t); END; EOF; }
fn tok_str(t: token) -> str {
alt t {
STRING(s, len) { ret #fmt("STR(%s,%d)", s, len); }
STRING(s, len) { ret #fmt["STR(%s,%d)", s, len]; }
BREAK(_) { ret "BREAK"; }
BEGIN(_) { ret "BEGIN"; }
END. { ret "END"; }
@ -84,7 +84,7 @@ fn buf_str(toks: &[mutable token], szs: &[mutable int], left: uint,
while i != right && L != 0u {
L -= 1u;
if i != left { s += ", "; }
s += #fmt("%d=%s", szs.(i), tok_str(toks.(i)));
s += #fmt["%d=%s", szs[i], tok_str(toks[i])];
i += 1u;
i %= n;
}
@ -103,11 +103,11 @@ fn mk_printer(out: io::writer, linewidth: uint) -> printer {
// fall behind.
let n: uint = 3u * linewidth;
log #fmt("mk_printer %u", linewidth);
log #fmt["mk_printer %u", linewidth];
let token: [mutable token] = vec::init_elt_mut(EOF, n);
let size: [mutable int] = vec::init_elt_mut(0, n);
let scan_stack: [mutable uint] = vec::init_elt_mut(0u, n);
let print_stack: [print_stack_elt] = ~[];
let print_stack: [print_stack_elt] = [];
ret printer(out, n, linewidth as int, // margin
linewidth as int, // space
0u, // left
@ -237,18 +237,18 @@ obj printer(out: io::writer,
// buffered indentation to avoid writing trailing whitespace
mutable pending_indentation: int) {
fn last_token() -> token { ret token.(right); }
fn last_token() -> token { ret token[right]; }
// be very careful with this!
fn replace_last_token(t: token) { token.(right) = t; }
fn replace_last_token(t: token) { token[right] = t; }
fn pretty_print(t: token) {
log #fmt("pp [%u,%u]", left, right);
log #fmt["pp [%u,%u]", left, right];
alt t {
EOF. {
if !scan_stack_empty {
self.check_stack(0);
self.advance_left(token.(left), size.(left));
self.advance_left(token[left], size[left]);
}
self.indent(0);
}
@ -259,20 +259,20 @@ obj printer(out: io::writer,
left = 0u;
right = 0u;
} else { self.advance_right(); }
log #fmt("pp BEGIN/buffer [%u,%u]", left, right);
token.(right) = t;
size.(right) = -right_total;
log #fmt["pp BEGIN/buffer [%u,%u]", left, right];
token[right] = t;
size[right] = -right_total;
self.scan_push(right);
}
END. {
if scan_stack_empty {
log #fmt("pp END/print [%u,%u]", left, right);
log #fmt["pp END/print [%u,%u]", left, right];
self.print(t, 0);
} else {
log #fmt("pp END/buffer [%u,%u]", left, right);
log #fmt["pp END/buffer [%u,%u]", left, right];
self.advance_right();
token.(right) = t;
size.(right) = -1;
token[right] = t;
size[right] = -1;
self.scan_push(right);
}
}
@ -283,22 +283,22 @@ obj printer(out: io::writer,
left = 0u;
right = 0u;
} else { self.advance_right(); }
log #fmt("pp BREAK/buffer [%u,%u]", left, right);
log #fmt["pp BREAK/buffer [%u,%u]", left, right];
self.check_stack(0);
self.scan_push(right);
token.(right) = t;
size.(right) = -right_total;
token[right] = t;
size[right] = -right_total;
right_total += b.blank_space;
}
STRING(s, len) {
if scan_stack_empty {
log #fmt("pp STRING/print [%u,%u]", left, right);
log #fmt["pp STRING/print [%u,%u]", left, right];
self.print(t, len);
} else {
log #fmt("pp STRING/buffer [%u,%u]", left, right);
log #fmt["pp STRING/buffer [%u,%u]", left, right];
self.advance_right();
token.(right) = t;
size.(right) = len;
token[right] = t;
size[right] = len;
right_total += len;
self.check_stream();
}
@ -306,43 +306,40 @@ obj printer(out: io::writer,
}
}
fn check_stream() {
log #fmt("check_stream [%u, %u] with left_total=%d, right_total=%d",
left, right, left_total, right_total);
log #fmt["check_stream [%u, %u] with left_total=%d, right_total=%d",
left, right, left_total, right_total];
if right_total - left_total > space {
log #fmt("scan window is %d, longer than space on line (%d)",
right_total - left_total, space);
log #fmt["scan window is %d, longer than space on line (%d)",
right_total - left_total, space];
if !scan_stack_empty {
if left == scan_stack.(bottom) {
log #fmt("setting %u to infinity and popping", left);
size.(self.scan_pop_bottom()) = size_infinity;
if left == scan_stack[bottom] {
log #fmt["setting %u to infinity and popping", left];
size[self.scan_pop_bottom()] = size_infinity;
}
}
self.advance_left(token.(left), size.(left));
self.advance_left(token[left], size[left]);
if left != right { self.check_stream(); }
}
}
fn scan_push(x: uint) {
log #fmt("scan_push %u", x);
log #fmt["scan_push %u", x];
if scan_stack_empty {
scan_stack_empty = false;
} else { top += 1u; top %= buf_len; assert (top != bottom); }
scan_stack.(top) = x;
scan_stack[top] = x;
}
fn scan_pop() -> uint {
assert (!scan_stack_empty);
let x = scan_stack.(top);
let x = scan_stack[top];
if top == bottom {
scan_stack_empty = true;
} else { top += buf_len - 1u; top %= buf_len; }
ret x;
}
fn scan_top() -> uint {
assert (!scan_stack_empty);
ret scan_stack.(top);
}
fn scan_top() -> uint { assert (!scan_stack_empty); ret scan_stack[top]; }
fn scan_pop_bottom() -> uint {
assert (!scan_stack_empty);
let x = scan_stack.(bottom);
let x = scan_stack[bottom];
if top == bottom {
scan_stack_empty = true;
} else { bottom += 1u; bottom %= buf_len; }
@ -354,7 +351,7 @@ obj printer(out: io::writer,
assert (right != left);
}
fn advance_left(x: token, L: int) {
log #fmt("advnce_left [%u,%u], sizeof(%u)=%d", left, right, left, L);
log #fmt["advnce_left [%u,%u], sizeof(%u)=%d", left, right, left, L];
if L >= 0 {
self.print(x, L);
alt x {
@ -365,47 +362,47 @@ obj printer(out: io::writer,
if left != right {
left += 1u;
left %= buf_len;
self.advance_left(token.(left), size.(left));
self.advance_left(token[left], size[left]);
}
}
}
fn check_stack(k: int) {
if !scan_stack_empty {
let x = self.scan_top();
alt token.(x) {
alt token[x] {
BEGIN(b) {
if k > 0 {
size.(self.scan_pop()) = size.(x) + right_total;
size[self.scan_pop()] = size[x] + right_total;
self.check_stack(k - 1);
}
}
END. {
// paper says + not =, but that makes no sense.
size.(self.scan_pop()) = 1;
size[self.scan_pop()] = 1;
self.check_stack(k + 1);
}
_ {
size.(self.scan_pop()) = size.(x) + right_total;
size[self.scan_pop()] = size[x] + right_total;
if k > 0 { self.check_stack(k); }
}
}
}
}
fn print_newline(amount: int) {
log #fmt("NEWLINE %d", amount);
log #fmt["NEWLINE %d", amount];
out.write_str("\n");
pending_indentation = 0;
self.indent(amount);
}
fn indent(amount: int) {
log #fmt("INDENT %d", amount);
log #fmt["INDENT %d", amount];
pending_indentation += amount;
}
fn top() -> print_stack_elt {
let n = vec::len(print_stack);
let top: print_stack_elt = {offset: 0, pbreak: broken(inconsistent)};
if n != 0u { top = print_stack.(n - 1u); }
if n != 0u { top = print_stack[n - 1u]; }
ret top;
}
fn write_str(s: str) {
@ -416,18 +413,18 @@ obj printer(out: io::writer,
out.write_str(s);
}
fn print(x: token, L: int) {
log #fmt("print %s %d (remaining line space=%d)", tok_str(x), L,
space);
log #fmt["print %s %d (remaining line space=%d)", tok_str(x), L,
space];
log buf_str(token, size, left, right, 6u);
alt x {
BEGIN(b) {
if L > space {
let col = margin - space + b.offset;
log #fmt("print BEGIN -> push broken block at col %d", col);
print_stack += ~[{offset: col, pbreak: broken(b.breaks)}];
log #fmt["print BEGIN -> push broken block at col %d", col];
print_stack += [{offset: col, pbreak: broken(b.breaks)}];
} else {
log "print BEGIN -> push fitting block";
print_stack += ~[{offset: 0, pbreak: fits}];
print_stack += [{offset: 0, pbreak: fits}];
}
}
END. {


@ -32,7 +32,7 @@ tag ann_node {
node_expr(ps, @ast::expr);
node_pat(ps, @ast::pat);
}
type pp_ann = {pre: fn(&ann_node) , post: fn(&ann_node) };
type pp_ann = {pre: fn(&ann_node), post: fn(&ann_node)};
fn no_ann() -> pp_ann {
fn ignore(_node: &ann_node) { }
@ -49,12 +49,12 @@ type ps =
mutable boxes: [pp::breaks],
ann: pp_ann};
fn ibox(s: &ps, u: uint) { s.boxes += ~[pp::inconsistent]; pp::ibox(s.s, u); }
fn ibox(s: &ps, u: uint) { s.boxes += [pp::inconsistent]; pp::ibox(s.s, u); }
fn end(s: &ps) { vec::pop(s.boxes); pp::end(s.s); }
fn rust_printer(writer: io::writer) -> ps {
let boxes: [pp::breaks] = ~[];
let boxes: [pp::breaks] = [];
ret @{s: pp::mk_printer(writer, default_columns),
cm: none::<codemap>,
comments: none::<[lexer::cmnt]>,
@ -75,7 +75,7 @@ const default_columns: uint = 78u;
// copy forward.
fn print_crate(cm: &codemap, crate: @ast::crate, filename: str,
in: io::reader, out: io::writer, ann: &pp_ann) {
let boxes: [pp::breaks] = ~[];
let boxes: [pp::breaks] = [];
let r = lexer::gather_comments_and_literals(cm, filename, in);
let s =
@{s: pp::mk_printer(out, default_columns),
@ -135,12 +135,9 @@ fn attribute_to_str(attr: &ast::attribute) -> str {
be to_str(attr, print_attribute);
}
fn cbox(s: &ps, u: uint) { s.boxes += ~[pp::consistent]; pp::cbox(s.s, u); }
fn cbox(s: &ps, u: uint) { s.boxes += [pp::consistent]; pp::cbox(s.s, u); }
fn box(s: &ps, u: uint, b: pp::breaks) {
s.boxes += ~[b];
pp::box(s.s, u, b);
}
fn box(s: &ps, u: uint, b: pp::breaks) { s.boxes += [b]; pp::box(s.s, u, b); }
fn nbsp(s: &ps) { word(s.s, " "); }
@ -175,22 +172,16 @@ fn bclose_(s: &ps, span: codemap::span, indented: uint) {
fn bclose(s: &ps, span: codemap::span) { bclose_(s, span, indent_unit); }
fn is_begin(s: &ps) -> bool {
alt s.s.last_token() {
pp::BEGIN(_) { true }
_ { false }
}
alt s.s.last_token() { pp::BEGIN(_) { true } _ { false } }
}
fn is_end(s: &ps) -> bool {
alt s.s.last_token() {
pp::END. { true }
_ { false }
}
alt s.s.last_token() { pp::END. { true } _ { false } }
}
fn is_bol(s: &ps) -> bool {
ret s.s.last_token() == pp::EOF ||
s.s.last_token() == pp::hardbreak_tok();
s.s.last_token() == pp::hardbreak_tok();
}
fn hardbreak_if_not_bol(s: &ps) { if !is_bol(s) { hardbreak(s.s); } }
@ -218,7 +209,7 @@ fn synth_comment(s: &ps, text: str) {
word(s.s, "*/");
}
fn commasep<IN>(s: &ps, b: breaks, elts: &[IN], op: fn(&ps, &IN) ) {
fn commasep<IN>(s: &ps, b: breaks, elts: &[IN], op: fn(&ps, &IN)) {
box(s, 0u, b);
let first = true;
for elt: IN in elts {
@ -229,8 +220,8 @@ fn commasep<IN>(s: &ps, b: breaks, elts: &[IN], op: fn(&ps, &IN) ) {
}
fn commasep_cmnt<IN>(s: &ps, b: breaks, elts: &[IN], op: fn(&ps, &IN) ,
get_span: fn(&IN) -> codemap::span ) {
fn commasep_cmnt<IN>(s: &ps, b: breaks, elts: &[IN], op: fn(&ps, &IN),
get_span: fn(&IN) -> codemap::span) {
box(s, 0u, b);
let len = vec::len::<IN>(elts);
let i = 0u;
@ -241,7 +232,7 @@ fn commasep_cmnt<IN>(s: &ps, b: breaks, elts: &[IN], op: fn(&ps, &IN) ,
if i < len {
word(s.s, ",");
maybe_print_trailing_comment(s, get_span(elt),
some(get_span(elts.(i)).hi));
some(get_span(elts[i]).hi));
space_if_not_bol(s);
}
}
@ -290,7 +281,7 @@ fn print_type(s: &ps, ty: &@ast::ty) {
alt mt.mut {
ast::mut. { word_space(s, "mutable"); }
ast::maybe_mut. { word_space(s, "mutable?"); }
ast::imm. {}
ast::imm. { }
}
print_type(s, mt.ty);
word(s.s, "]");
@ -322,9 +313,9 @@ fn print_type(s: &ps, ty: &@ast::ty) {
word(s.s, "}");
}
ast::ty_tup(elts) {
popen(s);
commasep(s, inconsistent, elts, print_type);
pclose(s);
popen(s);
commasep(s, inconsistent, elts, print_type);
pclose(s);
}
ast::ty_fn(proto, inputs, output, cf, constrs) {
print_ty_fn(s, proto, none::<str>, inputs, output, cf, constrs);
@ -371,6 +362,7 @@ fn print_native_item(s: &ps, item: &@ast::native_item) {
}
ast::native_item_fn(lname, decl, typarams) {
print_fn(s, decl, ast::proto_fn, item.ident, typarams,
decl.constraints);
@ -457,8 +449,8 @@ fn print_item(s: &ps, item: &@ast::item) {
ast::item_tag(variants, params) {
let newtype =
vec::len(variants) == 1u &&
str::eq(item.ident, variants.(0).node.name) &&
vec::len(variants.(0).node.args) == 1u;
str::eq(item.ident, variants[0].node.name) &&
vec::len(variants[0].node.args) == 1u;
if newtype {
ibox(s, indent_unit);
word_space(s, "tag");
@ -468,7 +460,7 @@ fn print_item(s: &ps, item: &@ast::item) {
space(s.s);
if newtype {
word_space(s, "=");
print_type(s, variants.(0).node.args.(0).ty);
print_type(s, variants[0].node.args[0].ty);
word(s.s, ";");
end(s);
} else {
@ -509,11 +501,11 @@ fn print_item(s: &ps, item: &@ast::item) {
space(s.s);
bopen(s);
for meth: @ast::method in _obj.methods {
let typarams: [ast::ty_param] = ~[];
let typarams: [ast::ty_param] = [];
hardbreak_if_not_bol(s);
maybe_print_comment(s, meth.span.lo);
print_fn(s, meth.node.meth.decl, meth.node.meth.proto,
meth.node.ident, typarams, ~[]);
meth.node.ident, typarams, []);
word(s.s, " ");
print_block(s, meth.node.meth.body);
}
@ -524,8 +516,8 @@ fn print_item(s: &ps, item: &@ast::item) {
word(s.s, item.ident);
print_type_params(s, tps);
popen(s);
word_space(s, dt.decl.inputs.(0).ident + ":");
print_type(s, dt.decl.inputs.(0).ty);
word_space(s, dt.decl.inputs[0].ident + ":");
print_type(s, dt.decl.inputs[0].ty);
pclose(s);
space(s.s);
print_block(s, dt.body);
@ -620,85 +612,77 @@ fn print_possibly_embedded_block(s: &ps, blk: &ast::blk, embedded: embed_type,
// extra semi to make sure the output retains the same meaning.
fn maybe_protect_block(s: &ps, last: &option::t<@ast::stmt>,
next: &expr_or_stmt) {
let last_expr_is_block = alt last {
option::some(@{node: ast::stmt_expr(e, _), _}) {
alt e.node {
ast::expr_if(_ ,_ ,_)
| ast::expr_alt(_, _)
| ast::expr_block(_) { true }
let last_expr_is_block =
alt last {
option::some(@{node: ast::stmt_expr(e, _), _}) {
alt e.node {
ast::expr_if(_, _, _) | ast::expr_alt(_, _) |
ast::expr_block(_) {
true
}
_ { false }
}
true
}
_ { false }
}
true
}
_ { false }
};
};
if !last_expr_is_block { ret; }
let next_expr_is_ambig = alt next {
expr_(e) { expr_is_ambig(e) }
stmt_(@{node: ast::stmt_expr(e, _), _}) {
expr_is_ambig(e)
}
_ { false }
};
let next_expr_is_ambig =
alt next {
expr_(e) { expr_is_ambig(e) }
stmt_(@{node: ast::stmt_expr(e, _), _}) { expr_is_ambig(e) }
_ { false }
};
if last_expr_is_block && next_expr_is_ambig {
word(s.s, ";");
}
if last_expr_is_block && next_expr_is_ambig { word(s.s, ";"); }
fn expr_is_ambig(ex: @ast::expr) -> bool {
// We're going to walk the expression to the 'left' looking for
// various properties that might indicate ambiguity
// We're going to walk the expression to the 'left' looking for
// various properties that might indicate ambiguity
type env = @mutable bool;
let visitor = visit::mk_vt(@{
visit_expr: visit_expr
with *visit::default_visitor()
});
let env = @mutable false;
visit_expr(ex, env, visitor);
ret *env;
type env = @mutable bool;
let visitor =
visit::mk_vt(@{visit_expr: visit_expr
with *visit::default_visitor()});
let env = @mutable false;
visit_expr(ex, env, visitor);
ret *env;
fn visit_expr(ex: &@ast::expr, e: &env, v: &visit::vt<env>) {
assert *e == false;
fn visit_expr(ex: &@ast::expr, e: &env, v: &visit::vt<env>) {
assert (*e == false);
if expr_is_ambig(ex) {
*e = true;
ret;
}
if expr_is_ambig(ex) { *e = true; ret; }
alt ex.node {
ast::expr_assign(x, _) { v.visit_expr(x, e, v); }
ast::expr_assign_op(_, x, _) { visit_expr(x, e, v); }
ast::expr_move(x, _) { v.visit_expr(x, e, v); }
ast::expr_field(x, _) { v.visit_expr(x, e, v); }
ast::expr_index(x, _) { v.visit_expr(x, e, v); }
ast::expr_binary(op, x, _) {
if need_parens(x, operator_prec(op)) {
*e = true;
ret;
alt ex.node {
ast::expr_assign(x, _) { v.visit_expr(x, e, v); }
ast::expr_assign_op(_, x, _) { visit_expr(x, e, v); }
ast::expr_move(x, _) { v.visit_expr(x, e, v); }
ast::expr_field(x, _) { v.visit_expr(x, e, v); }
ast::expr_index(x, _) { v.visit_expr(x, e, v); }
ast::expr_binary(op, x, _) {
if need_parens(x, operator_prec(op)) { *e = true; ret; }
v.visit_expr(x, e, v);
}
v.visit_expr(x, e, v);
}
ast::expr_cast(x, _) {
if need_parens(x, parse::parser::as_prec) {
*e = true;
ret;
ast::expr_cast(x, _) {
if need_parens(x, parse::parser::as_prec) {
*e = true;
ret;
}
}
ast::expr_ternary(x, _, _) { v.visit_expr(x, e, v); }
_ { }
}
ast::expr_ternary(x, _, _) { v.visit_expr(x, e, v); }
_ { }
}
}
}
fn expr_is_ambig(ex: @ast::expr) -> bool {
alt ex.node {
ast::expr_unary(_, _) { true }
ast::expr_tup(_) { true }
_ { false }
}
}
fn expr_is_ambig(ex: @ast::expr) -> bool {
alt ex.node {
ast::expr_unary(_, _) { true }
ast::expr_tup(_) { true }
_ { false }
}
}
}
}
}
@ -706,10 +690,8 @@ fn print_possibly_embedded_block(s: &ps, blk: &ast::blk, embedded: embed_type,
// ret and fail, without arguments, cannot appear in the discriminant of if,
// alt, do, & while unambiguously without being parenthesized
fn print_maybe_parens_discrim(s: &ps, e: &@ast::expr) {
let disambig = alt e.node {
ast::expr_ret(option::none.) { true }
_ { false }
};
let disambig =
alt e.node { ast::expr_ret(option::none.) { true } _ { false } };
if disambig { popen(s) }
print_expr(s, e);
if disambig { pclose(s) }
@ -727,6 +709,7 @@ fn print_if(s: &ps, test: &@ast::expr, blk: &ast::blk,
some(_else) {
alt _else.node {
// "another else-if"
ast::expr_if(i, t, e) {
cbox(s, indent_unit - 1u);
@ -738,6 +721,7 @@ fn print_if(s: &ps, test: &@ast::expr, blk: &ast::blk,
do_else(s, e);
}
// "final else"
ast::expr_block(b) {
cbox(s, indent_unit - 1u);
@ -758,10 +742,7 @@ fn print_mac(s: &ps, m: &ast::mac) {
ast::mac_invoc(path, arg, body) {
word(s.s, "#");
print_path(s, path, false);
alt (arg.node) {
ast::expr_vec(_,_) {}
_ { word(s.s, " "); }
}
alt arg.node { ast::expr_vec(_, _) { } _ { word(s.s, " "); } }
print_expr(s, arg);
// FIXME: extension 'body'
}
@ -930,6 +911,7 @@ fn print_expr(s: &ps, expr: &@ast::expr) {
bclose_(s, expr.span, alt_indent_unit);
}
ast::expr_fn(f) {
// If the return type is the magic ty_infer, then we need to
// pretty print as a lambda-block
if f.decl.output.node == ast::ty_infer {
@ -939,11 +921,11 @@ fn print_expr(s: &ps, expr: &@ast::expr) {
ibox(s, 0u);
word(s.s, "{");
print_fn_block_args(s, f.decl);
print_possibly_embedded_block(s, f.body,
block_block_fn, indent_unit);
print_possibly_embedded_block(s, f.body, block_block_fn,
indent_unit);
} else {
head(s, proto_to_str(f.proto));
print_fn_args_and_ret(s, f.decl, ~[]);
print_fn_args_and_ret(s, f.decl, []);
space(s.s);
print_block(s, f.body);
}
@ -955,10 +937,7 @@ fn print_expr(s: &ps, expr: &@ast::expr) {
ibox(s, 0u);
print_block(s, blk);
}
ast::expr_copy(e) {
word_space(s, "copy");
print_expr(s, e);
}
ast::expr_copy(e) { word_space(s, "copy"); print_expr(s, e); }
ast::expr_move(lhs, rhs) {
print_expr(s, lhs);
space(s.s);
@ -1070,11 +1049,11 @@ fn print_expr(s: &ps, expr: &@ast::expr) {
// Methods
for meth: @ast::method in anon_obj.methods {
let typarams: [ast::ty_param] = ~[];
let typarams: [ast::ty_param] = [];
hardbreak_if_not_bol(s);
maybe_print_comment(s, meth.span.lo);
print_fn(s, meth.node.meth.decl, meth.node.meth.proto,
meth.node.ident, typarams, ~[]);
meth.node.ident, typarams, []);
word(s.s, " ");
print_block(s, meth.node.meth.body);
}
@ -1082,18 +1061,11 @@ fn print_expr(s: &ps, expr: &@ast::expr) {
// With object
alt anon_obj.inner_obj {
none. { }
some(e) {
space(s.s);
word_space(s, "with");
print_expr(s, e);
}
some(e) { space(s.s); word_space(s, "with"); print_expr(s, e); }
}
bclose(s, expr.span);
}
ast::expr_uniq(expr) {
word(s.s, "~");
print_expr(s, expr);
}
ast::expr_uniq(expr) { word(s.s, "~"); print_expr(s, expr); }
}
s.ann.post(ann_node);
end(s);
@ -1224,7 +1196,7 @@ fn print_fn(s: &ps, decl: ast::fn_decl, proto: ast::proto, name: str,
}
fn print_fn_args_and_ret(s: &ps, decl: &ast::fn_decl,
constrs: [@ast::constr]) {
constrs: [@ast::constr]) {
popen(s);
fn print_arg(s: &ps, x: &ast::arg) {
ibox(s, indent_unit);
@ -1270,7 +1242,7 @@ fn print_kind(s: &ps, kind: ast::kind) {
alt kind {
ast::kind_unique. { word(s.s, "~"); }
ast::kind_shared. { word(s.s, "@"); }
_ { /* fallthrough */ }
_ {/* fallthrough */ }
}
}
@ -1320,7 +1292,7 @@ fn print_view_item(s: &ps, item: &@ast::view_item) {
}
ast::view_item_import(id, ids, _) {
head(s, "import");
if !str::eq(id, ids.(vec::len(ids) - 1u)) {
if !str::eq(id, ids[vec::len(ids) - 1u]) {
word_space(s, id);
word_space(s, "=");
}
@ -1332,13 +1304,10 @@ fn print_view_item(s: &ps, item: &@ast::view_item) {
}
ast::view_item_import_from(mod_path, idents, _) {
head(s, "import");
for elt: str in mod_path {
word(s.s, elt);
word(s.s, "::");
}
for elt: str in mod_path { word(s.s, elt); word(s.s, "::"); }
word(s.s, "{");
commasep(s, inconsistent, idents,
fn(s: &ps, w: &ast::import_ident) {
fn (s: &ps, w: &ast::import_ident) {
word(s.s, w.node.name)
});
word(s.s, "}");
@ -1354,8 +1323,7 @@ fn print_view_item(s: &ps, item: &@ast::view_item) {
}
ast::view_item_export(ids, _) {
head(s, "export");
commasep(s, inconsistent, ids,
fn(s: &ps, w: &str) { word(s.s, w) });
commasep(s, inconsistent, ids, fn (s: &ps, w: &str) { word(s.s, w) });
}
}
word(s.s, ";");
@ -1377,16 +1345,15 @@ fn operator_prec(op: ast::binop) -> int {
fn need_parens(expr: &@ast::expr, outer_prec: int) -> bool {
alt expr.node {
ast::expr_binary(op, _, _) {
operator_prec(op) < outer_prec
}
ast::expr_binary(op, _, _) { operator_prec(op) < outer_prec }
ast::expr_cast(_, _) { parse::parser::as_prec < outer_prec }
ast::expr_ternary(_, _, _) {
parse::parser::ternary_prec < outer_prec
}
ast::expr_ternary(_, _, _) { parse::parser::ternary_prec < outer_prec }
// This may be too conservative in some cases
ast::expr_assign(_, _) { true }
ast::expr_assign(_, _) {
true
}
ast::expr_move(_, _) { true }
ast::expr_swap(_, _) { true }
ast::expr_assign_op(_, _, _) { true }
@ -1485,7 +1452,7 @@ fn print_remaining_comments(s: &ps) {
fn in_cbox(s: &ps) -> bool {
let len = vec::len(s.boxes);
if len == 0u { ret false; }
ret s.boxes.(len - 1u) == pp::consistent;
ret s.boxes[len - 1u] == pp::consistent;
}
fn print_literal(s: &ps, lit: &@ast::lit) {
@ -1502,8 +1469,9 @@ fn print_literal(s: &ps, lit: &@ast::lit) {
print_string(s, st);
}
ast::lit_char(ch) {
word(s.s, "'" + escape_str(
str::unsafe_from_bytes(~[ch as u8]), '\'') + "'");
word(s.s,
"'" + escape_str(str::unsafe_from_bytes([ch as u8]), '\'') +
"'");
}
ast::lit_int(val) { word(s.s, int::str(val)); }
ast::lit_uint(val) { word(s.s, uint::str(val) + "u"); }
@ -1530,7 +1498,7 @@ fn next_lit(s: &ps) -> option::t<lexer::lit> {
alt s.literals {
some(lits) {
if s.cur_lit < vec::len(lits) {
ret some(lits.(s.cur_lit));
ret some(lits[s.cur_lit]);
} else { ret none::<lexer::lit>; }
}
_ { ret none::<lexer::lit>; }
@ -1556,7 +1524,7 @@ fn print_comment(s: &ps, cmnt: lexer::cmnt) {
lexer::mixed. {
assert (vec::len(cmnt.lines) == 1u);
zerobreak(s.s);
word(s.s, cmnt.lines.(0));
word(s.s, cmnt.lines[0]);
zerobreak(s.s);
}
lexer::isolated. {
@ -1571,7 +1539,7 @@ fn print_comment(s: &ps, cmnt: lexer::cmnt) {
lexer::trailing. {
word(s.s, " ");
if vec::len(cmnt.lines) == 1u {
word(s.s, cmnt.lines.(0));
word(s.s, cmnt.lines[0]);
hardbreak(s.s);
} else {
ibox(s, 0u);
@ -1584,10 +1552,11 @@ fn print_comment(s: &ps, cmnt: lexer::cmnt) {
}
lexer::blank_line. {
// We need to do at least one, possibly two hardbreaks.
let is_semi = alt s.s.last_token() {
pp::STRING(s, _) { s == ";" }
_ { false }
};
let is_semi =
alt s.s.last_token() {
pp::STRING(s, _) { s == ";" }
_ { false }
};
if is_semi || is_begin(s) || is_end(s) { hardbreak(s.s) }
hardbreak(s.s);
}
@ -1605,7 +1574,7 @@ fn escape_str(st: str, to_escape: char) -> str {
let len = str::byte_len(st);
let i = 0u;
while i < len {
alt st.(i) as char {
alt st[i] as char {
'\n' { out += "\\n"; }
'\t' { out += "\\t"; }
'\r' { out += "\\r"; }
@ -1622,7 +1591,7 @@ fn escape_str(st: str, to_escape: char) -> str {
ret out;
}
fn to_str<T>(t: &T, f: fn(&ps, &T) ) -> str {
fn to_str<T>(t: &T, f: fn(&ps, &T)) -> str {
let writer = io::string_writer();
let s = rust_printer(writer.get_writer());
f(s, t);
@ -1634,7 +1603,7 @@ fn next_comment(s: &ps) -> option::t<lexer::cmnt> {
alt s.comments {
some(cmnts) {
if s.cur_cmnt < vec::len(cmnts) {
ret some(cmnts.(s.cur_cmnt));
ret some(cmnts[s.cur_cmnt]);
} else { ret none::<lexer::cmnt>; }
}
_ { ret none::<lexer::cmnt>; }
@ -1643,8 +1612,8 @@ fn next_comment(s: &ps) -> option::t<lexer::cmnt> {
// Removing the aliases from the type of f in the next two functions
// triggers memory corruption, but I haven't isolated the bug yet. FIXME
fn constr_args_to_str<T>(f: &fn(&T) -> str ,
args: &[@ast::sp_constr_arg<T>]) -> str {
fn constr_args_to_str<T>(f: &fn(&T) -> str, args: &[@ast::sp_constr_arg<T>])
-> str {
let comma = false;
let s = "(";
for a: @ast::sp_constr_arg<T> in args {
@ -1655,8 +1624,8 @@ fn constr_args_to_str<T>(f: &fn(&T) -> str ,
ret s;
}
fn constr_arg_to_str<T>(f: &fn(&T) -> str, c: &ast::constr_arg_general_<T>)
-> str {
fn constr_arg_to_str<T>(f: &fn(&T) -> str, c: &ast::constr_arg_general_<T>) ->
str {
alt c {
ast::carg_base. { ret "*"; }
ast::carg_ident(i) { ret f(i); }
@ -1686,7 +1655,7 @@ fn ast_ty_fn_constrs_str(constrs: &[@ast::constr]) -> str {
}
fn fn_arg_idx_to_str(decl: &ast::fn_decl, idx: &uint) -> str {
decl.inputs.(idx).ident
decl.inputs[idx].ident
}
fn ast_fn_constr_to_str(decl: &ast::fn_decl, c: &@ast::constr) -> str {
@ -1696,8 +1665,7 @@ fn ast_fn_constr_to_str(decl: &ast::fn_decl, c: &@ast::constr) -> str {
}
// FIXME: fix repeated code
fn ast_fn_constrs_str(decl: &ast::fn_decl,
constrs: &[@ast::constr]) -> str {
fn ast_fn_constrs_str(decl: &ast::fn_decl, constrs: &[@ast::constr]) -> str {
let s = "";
let colon = true;
for c: @ast::constr in constrs {
@ -1717,9 +1685,7 @@ fn proto_to_str(p: &ast::proto) -> str {
}
fn ty_constr_to_str(c: &@ast::ty_constr) -> str {
fn ty_constr_path_to_str(p: &ast::path) -> str {
"*." + path_to_str(p)
}
fn ty_constr_path_to_str(p: &ast::path) -> str { "*." + path_to_str(p) }
ret path_to_str(c.node.path) +
constr_args_to_str::<ast::path>(ty_constr_path_to_str,


@ -18,7 +18,7 @@ type interner<T> =
fn mk<@T>(hasher: hashfn<T>, eqer: eqfn<T>) -> interner<T> {
let m = map::mk_hashmap::<T, uint>(hasher, eqer);
ret {map: m, mutable vect: ~[], hasher: hasher, eqer: eqer};
ret {map: m, mutable vect: [], hasher: hasher, eqer: eqer};
}
fn intern<@T>(itr: &interner<T>, val: &T) -> uint {
@ -27,13 +27,13 @@ fn intern<@T>(itr: &interner<T>, val: &T) -> uint {
none. {
let new_idx = vec::len::<T>(itr.vect);
itr.map.insert(val, new_idx);
itr.vect += ~[val];
itr.vect += [val];
ret new_idx;
}
}
}
fn get<T>(itr: &interner<T>, idx: uint) -> T { ret itr.vect.(idx); }
fn get<T>(itr: &interner<T>, idx: uint) -> T { ret itr.vect[idx]; }
fn len<T>(itr : &interner<T>) -> uint { ret vec::len(itr.vect); }
fn len<T>(itr: &interner<T>) -> uint { ret vec::len(itr.vect); }
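The interner diff above keeps the same two-sided structure, a hash map for value-to-index lookup plus a vector for index-to-value lookup, and only changes the vector syntax. A minimal sketch of that structure in present-day Rust (illustrative names, not the compiler's API):

    use std::collections::HashMap;

    struct Interner {
        map: HashMap<String, usize>, // value -> index
        vect: Vec<String>,           // index -> value
    }

    impl Interner {
        fn new() -> Interner {
            Interner { map: HashMap::new(), vect: Vec::new() }
        }

        // Reuse the existing index if the value was seen before, else append it.
        fn intern(&mut self, val: &str) -> usize {
            if let Some(&idx) = self.map.get(val) {
                return idx;
            }
            let new_idx = self.vect.len();
            self.map.insert(val.to_string(), new_idx);
            self.vect.push(val.to_string());
            new_idx
        }

        fn get(&self, idx: usize) -> &str {
            &self.vect[idx]
        }
    }

    fn main() {
        let mut itr = Interner::new();
        let a = itr.intern("foo");
        assert_eq!(itr.intern("foo"), a); // interning twice yields the same index
        assert_eq!(itr.get(a), "foo");
    }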


@ -32,8 +32,7 @@ type visitor<E> =
visit_expr: fn(&@expr, &E, &vt<E>),
visit_ty: fn(&@ty, &E, &vt<E>),
visit_constr: fn(&path, &span, node_id, &E, &vt<E>),
visit_fn:
fn(&_fn, &[ty_param], &span, &fn_ident, node_id, &E, &vt<E>) };
visit_fn: fn(&_fn, &[ty_param], &span, &fn_ident, node_id, &E, &vt<E>)};
fn default_visitor<E>() -> visitor<E> {
ret @{visit_mod: bind visit_mod::<E>(_, _, _, _),
@ -104,8 +103,8 @@ fn visit_item<E>(i: &@item, e: &E, v: &vt<E>) {
item_obj(ob, _, _) {
for f: obj_field in ob.fields { v.visit_ty(f.ty, e, v); }
for m: @method in ob.methods {
v.visit_fn(m.node.meth, ~[], m.span, some(m.node.ident),
m.node.id, e, v);
v.visit_fn(m.node.meth, [], m.span, some(m.node.ident), m.node.id,
e, v);
}
}
}
@ -132,9 +131,7 @@ fn visit_ty<E>(t: &@ty, e: &E, v: &vt<E>) {
ty_rec(flds) {
for f: ty_field in flds { v.visit_ty(f.node.mt.ty, e, v); }
}
ty_tup(ts) {
for tt in ts { v.visit_ty(tt, e, v); }
}
ty_tup(ts) { for tt in ts { v.visit_ty(tt, e, v); } }
ty_fn(_, args, out, _, constrs) {
for a: ty_arg in args { v.visit_ty(a.node.ty, e, v); }
for c: @constr in constrs {
@ -174,9 +171,7 @@ fn visit_pat<E>(p: &@pat, e: &E, v: &vt<E>) {
pat_rec(fields, _) {
for f: field_pat in fields { v.visit_pat(f.pat, e, v); }
}
pat_tup(elts) {
for elt in elts { v.visit_pat(elt, e, v); }
}
pat_tup(elts) { for elt in elts { v.visit_pat(elt, e, v); } }
pat_box(inner) { v.visit_pat(inner, e, v); }
_ { }
}
@ -249,9 +244,7 @@ fn visit_expr<E>(ex: &@expr, e: &E, v: &vt<E>) {
for f: field in flds { v.visit_expr(f.node.expr, e, v); }
visit_expr_opt(base, e, v);
}
expr_tup(elts) {
for el in elts { v.visit_expr(el, e, v); }
}
expr_tup(elts) { for el in elts { v.visit_expr(el, e, v); } }
expr_call(callee, args) {
v.visit_expr(callee, e, v);
visit_exprs(args, e, v);
@ -291,7 +284,7 @@ fn visit_expr<E>(ex: &@expr, e: &E, v: &vt<E>) {
v.visit_expr(x, e, v);
for a: arm in arms { v.visit_arm(a, e, v); }
}
expr_fn(f) { v.visit_fn(f, ~[], ex.span, none, ex.id, e, v); }
expr_fn(f) { v.visit_fn(f, [], ex.span, none, ex.id, e, v); }
expr_block(b) { v.visit_block(b, e, v); }
expr_assign(a, b) { v.visit_expr(b, e, v); v.visit_expr(a, e, v); }
expr_copy(a) { v.visit_expr(a, e, v); }
@ -328,8 +321,8 @@ fn visit_expr<E>(ex: &@expr, e: &E, v: &vt<E>) {
some(ex) { v.visit_expr(ex, e, v); }
}
for m: @method in anon_obj.methods {
v.visit_fn(m.node.meth, ~[], m.span, some(m.node.ident),
m.node.id, e, v);
v.visit_fn(m.node.meth, [], m.span, some(m.node.ident), m.node.id,
e, v);
}
}
expr_mac(mac) { visit_mac(mac, e, v); }
@ -348,20 +341,20 @@ fn visit_arm<E>(a: &arm, e: &E, v: &vt<E>) {
type simple_visitor =
// takes the components so that one function can be
// generic over constr and ty_constr
@{visit_mod: fn(&_mod, &span) ,
visit_view_item: fn(&@view_item) ,
visit_native_item: fn(&@native_item) ,
visit_item: fn(&@item) ,
visit_local: fn(&@local) ,
visit_block: fn(&ast::blk) ,
visit_stmt: fn(&@stmt) ,
visit_arm: fn(&arm) ,
visit_pat: fn(&@pat) ,
visit_decl: fn(&@decl) ,
visit_expr: fn(&@expr) ,
visit_ty: fn(&@ty) ,
visit_constr: fn(&path, &span, node_id) ,
visit_fn: fn(&_fn, &[ty_param], &span, &fn_ident, node_id) };
@{visit_mod: fn(&_mod, &span),
visit_view_item: fn(&@view_item),
visit_native_item: fn(&@native_item),
visit_item: fn(&@item),
visit_local: fn(&@local),
visit_block: fn(&ast::blk),
visit_stmt: fn(&@stmt),
visit_arm: fn(&arm),
visit_pat: fn(&@pat),
visit_decl: fn(&@decl),
visit_expr: fn(&@expr),
visit_ty: fn(&@ty),
visit_constr: fn(&path, &span, node_id),
visit_fn: fn(&_fn, &[ty_param], &span, &fn_ident, node_id)};
fn default_simple_visitor() -> simple_visitor {
ret @{visit_mod: fn (_m: &_mod, _sp: &span) { },
@ -384,61 +377,61 @@ fn default_simple_visitor() -> simple_visitor {
}
fn mk_simple_visitor(v: &simple_visitor) -> vt<()> {
fn v_mod(f: fn(&_mod, &span) , m: &_mod, sp: &span, e: &(), v: &vt<()>) {
fn v_mod(f: fn(&_mod, &span), m: &_mod, sp: &span, e: &(), v: &vt<()>) {
f(m, sp);
visit_mod(m, sp, e, v);
}
fn v_view_item(f: fn(&@view_item) , vi: &@view_item, e: &(), v: &vt<()>) {
fn v_view_item(f: fn(&@view_item), vi: &@view_item, e: &(), v: &vt<()>) {
f(vi);
visit_view_item(vi, e, v);
}
fn v_native_item(f: fn(&@native_item) , ni: &@native_item, e: &(),
fn v_native_item(f: fn(&@native_item), ni: &@native_item, e: &(),
v: &vt<()>) {
f(ni);
visit_native_item(ni, e, v);
}
fn v_item(f: fn(&@item) , i: &@item, e: &(), v: &vt<()>) {
fn v_item(f: fn(&@item), i: &@item, e: &(), v: &vt<()>) {
f(i);
visit_item(i, e, v);
}
fn v_local(f: fn(&@local) , l: &@local, e: &(), v: &vt<()>) {
fn v_local(f: fn(&@local), l: &@local, e: &(), v: &vt<()>) {
f(l);
visit_local(l, e, v);
}
fn v_block(f: fn(&ast::blk) , bl: &ast::blk, e: &(), v: &vt<()>) {
fn v_block(f: fn(&ast::blk), bl: &ast::blk, e: &(), v: &vt<()>) {
f(bl);
visit_block(bl, e, v);
}
fn v_stmt(f: fn(&@stmt) , st: &@stmt, e: &(), v: &vt<()>) {
fn v_stmt(f: fn(&@stmt), st: &@stmt, e: &(), v: &vt<()>) {
f(st);
visit_stmt(st, e, v);
}
fn v_arm(f: fn(&arm) , a: &arm, e: &(), v: &vt<()>) {
fn v_arm(f: fn(&arm), a: &arm, e: &(), v: &vt<()>) {
f(a);
visit_arm(a, e, v);
}
fn v_pat(f: fn(&@pat) , p: &@pat, e: &(), v: &vt<()>) {
fn v_pat(f: fn(&@pat), p: &@pat, e: &(), v: &vt<()>) {
f(p);
visit_pat(p, e, v);
}
fn v_decl(f: fn(&@decl) , d: &@decl, e: &(), v: &vt<()>) {
fn v_decl(f: fn(&@decl), d: &@decl, e: &(), v: &vt<()>) {
f(d);
visit_decl(d, e, v);
}
fn v_expr(f: fn(&@expr) , ex: &@expr, e: &(), v: &vt<()>) {
fn v_expr(f: fn(&@expr), ex: &@expr, e: &(), v: &vt<()>) {
f(ex);
visit_expr(ex, e, v);
}
fn v_ty(f: fn(&@ty) , ty: &@ty, e: &(), v: &vt<()>) {
fn v_ty(f: fn(&@ty), ty: &@ty, e: &(), v: &vt<()>) {
f(ty);
visit_ty(ty, e, v);
}
fn v_constr(f: fn(&path, &span, node_id) , pt: &path, sp: &span,
fn v_constr(f: fn(&path, &span, node_id), pt: &path, sp: &span,
id: node_id, e: &(), v: &vt<()>) {
f(pt, sp, id);
visit_constr(pt, sp, id, e, v);
}
fn v_fn(f: fn(&_fn, &[ty_param], &span, &fn_ident, node_id) , ff: &_fn,
fn v_fn(f: fn(&_fn, &[ty_param], &span, &fn_ident, node_id), ff: &_fn,
tps: &[ty_param], sp: &span, ident: &fn_ident, id: node_id,
e: &(), v: &vt<()>) {
f(ff, tps, sp, ident, id);


@ -50,8 +50,8 @@ fn new_def_hash<@V>() -> std::map::hashmap<ast::def_id, V> {
fn field_expr(f: &ast::field) -> @ast::expr { ret f.node.expr; }
fn field_exprs(fields: &[ast::field]) -> [@ast::expr] {
let es = ~[];
for f: ast::field in fields { es += ~[f.node.expr]; }
let es = [];
for f: ast::field in fields { es += [f.node.expr]; }
ret es;
}
@ -160,8 +160,7 @@ fn is_main_name(path: &[str]) -> bool {
// FIXME move this to std::float when editing the stdlib no longer
// requires a snapshot
fn float_to_str(num: float, digits: uint) -> str {
let accum = if num < 0.0 { num = -num; "-" }
else { "" };
let accum = if num < 0.0 { num = -num; "-" } else { "" };
let trunc = num as uint;
let frac = num - (trunc as float);
accum += uint::str(trunc);


@ -38,7 +38,7 @@ fn fn_ident_to_string(id: ast::node_id, i: &ast::fn_ident) -> str {
}
fn get_id_ident(cx: &ctxt, id: ast::def_id) -> str {
if (id.crate != ast::local_crate) {
if id.crate != ast::local_crate {
str::connect(cx.ext_map.get(id), "::")
} else {
alt cx.items.find(id.node) {
@ -60,8 +60,8 @@ fn ty_to_str(cx: &ctxt, typ: &t) -> str {
let s = proto_to_str(proto);
alt ident { some(i) { s += " "; s += i; } _ { } }
s += "(";
let strs = ~[];
for a: arg in inputs { strs += ~[fn_input_to_str(cx, a)]; }
let strs = [];
for a: arg in inputs { strs += [fn_input_to_str(cx, a)]; }
s += str::connect(strs, ", ");
s += ")";
if struct(cx, output) != ty_nil {
@ -91,60 +91,59 @@ fn ty_to_str(cx: &ctxt, typ: &t) -> str {
}
alt cname(cx, typ) { some(cs) { ret cs; } _ { } }
ret alt struct(cx, typ) {
ty_native(_) { "native" }
ty_nil. { "()" }
ty_bot. { "_|_" }
ty_bool. { "bool" }
ty_int. { "int" }
ty_float. { "float" }
ty_uint. { "uint" }
ty_machine(tm) { ty_mach_to_str(tm) }
ty_char. { "char" }
ty_str. { "str" }
ty_istr. { "istr" }
ty_box(tm) { "@" + mt_to_str(cx, tm) }
ty_uniq(t) { "~" + ty_to_str(cx, t) }
ty_vec(tm) { "[" + mt_to_str(cx, tm) + "]" }
ty_type. { "type" }
ty_rec(elems) {
let strs: [str] = ~[];
for fld: field in elems { strs += ~[field_to_str(cx, fld)]; }
"{" + str::connect(strs, ",") + "}"
}
ty_tup(elems) {
let strs = ~[];
for elem in elems { strs += ~[ty_to_str(cx, elem)]; }
"(" + str::connect(strs, ",") + ")"
}
ty_tag(id, tps) {
let s = get_id_ident(cx, id);
if vec::len::<t>(tps) > 0u {
let strs: [str] = ~[];
for typ: t in tps { strs += ~[ty_to_str(cx, typ)]; }
s += "[" + str::connect(strs, ",") + "]";
ty_native(_) { "native" }
ty_nil. { "()" }
ty_bot. { "_|_" }
ty_bool. { "bool" }
ty_int. { "int" }
ty_float. { "float" }
ty_uint. { "uint" }
ty_machine(tm) { ty_mach_to_str(tm) }
ty_char. { "char" }
ty_str. { "str" }
ty_istr. { "istr" }
ty_box(tm) { "@" + mt_to_str(cx, tm) }
ty_uniq(t) { "~" + ty_to_str(cx, t) }
ty_vec(tm) { "[" + mt_to_str(cx, tm) + "]" }
ty_type. { "type" }
ty_rec(elems) {
let strs: [str] = [];
for fld: field in elems { strs += [field_to_str(cx, fld)]; }
"{" + str::connect(strs, ",") + "}"
}
ty_tup(elems) {
let strs = [];
for elem in elems { strs += [ty_to_str(cx, elem)]; }
"(" + str::connect(strs, ",") + ")"
}
ty_tag(id, tps) {
let s = get_id_ident(cx, id);
if vec::len::<t>(tps) > 0u {
let strs: [str] = [];
for typ: t in tps { strs += [ty_to_str(cx, typ)]; }
s += "[" + str::connect(strs, ",") + "]";
}
s
}
ty_fn(proto, inputs, output, cf, constrs) {
fn_to_str(cx, proto, none, inputs, output, cf, constrs)
}
ty_native_fn(_, inputs, output) {
fn_to_str(cx, ast::proto_fn, none, inputs, output, ast::return,
[])
}
ty_obj(meths) {
let strs = [];
for m: method in meths { strs += [method_to_str(cx, m)]; }
"obj {\n\t" + str::connect(strs, "\n\t") + "\n}"
}
ty_res(id, _, _) { get_id_ident(cx, id) }
ty_var(v) { "<T" + int::str(v) + ">" }
ty_param(id, _) {
"'" + str::unsafe_from_bytes([('a' as u8) + (id as u8)])
}
_ { ty_to_short_str(cx, typ) }
}
s
}
ty_fn(proto, inputs, output, cf, constrs) {
fn_to_str(cx, proto, none, inputs, output, cf, constrs)
}
ty_native_fn(_, inputs, output) {
fn_to_str(cx, ast::proto_fn, none, inputs, output, ast::return, ~[])
}
ty_obj(meths) {
let strs = ~[];
for m: method in meths { strs += ~[method_to_str(cx, m)]; }
"obj {\n\t" + str::connect(strs, "\n\t") + "\n}"
}
ty_res(id, _, _) {
get_id_ident(cx, id)
}
ty_var(v) { "<T" + int::str(v) + ">" }
ty_param(id,_) {
"'" + str::unsafe_from_bytes(~[('a' as u8) + (id as u8)])
}
_ { ty_to_short_str(cx, typ) }
}
}
fn ty_to_short_str(cx: &ctxt, typ: t) -> str {


@ -1,13 +1,13 @@
use std;
import std::vec;
fn vec_equal<T>(v: &[T], u: &[T], element_equality_test: fn(&T, &T) -> bool )
fn vec_equal<T>(v: &[T], u: &[T], element_equality_test: fn(&T, &T) -> bool)
-> bool {
let Lv = vec::len(v);
if Lv != vec::len(u) { ret false; }
let i = 0u;
while i < Lv {
if !element_equality_test(v.(i), u.(i)) { ret false; }
if !element_equality_test(v[i], u[i]) { ret false; }
i += 1u;
}
ret true;
@ -18,10 +18,10 @@ fn builtin_equal<T>(a: &T, b: &T) -> bool { ret a == b; }
fn main() {
assert (builtin_equal(5, 5));
assert (!builtin_equal(5, 4));
assert (!vec_equal(~[5, 5], ~[5], builtin_equal));
assert (!vec_equal(~[5, 5], ~[5, 4], builtin_equal));
assert (!vec_equal(~[5, 5], ~[4, 5], builtin_equal));
assert (vec_equal(~[5, 5], ~[5, 5], builtin_equal));
assert (!vec_equal([5, 5], [5], builtin_equal));
assert (!vec_equal([5, 5], [5, 4], builtin_equal));
assert (!vec_equal([5, 5], [4, 5], builtin_equal));
assert (vec_equal([5, 5], [5, 5], builtin_equal));
log_err "Pass";
}
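The test above only swaps ~[...] literals for [...]; the routine it exercises is a length check followed by an element-wise comparison with a caller-supplied predicate. An equivalent sketch in present-day Rust (illustrative, not the test's actual code):

    fn vec_equal<T>(v: &[T], u: &[T], eq: impl Fn(&T, &T) -> bool) -> bool {
        // Lengths must agree before comparing elements pairwise.
        if v.len() != u.len() {
            return false;
        }
        v.iter().zip(u.iter()).all(|(a, b)| eq(a, b))
    }

    fn main() {
        assert!(!vec_equal(&[5, 5], &[5], |a, b| a == b));
        assert!(!vec_equal(&[5, 5], &[5, 4], |a, b| a == b));
        assert!(vec_equal(&[5, 5], &[5, 5], |a, b| a == b));
    }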


@ -21,11 +21,9 @@ import rustc::syntax::parse::parser;
import rustc::syntax::print::pprust;
fn write_file(filename: &str, content: &str) {
io::file_writer(filename,
~[io::create,
io::truncate]).write_str(content);
io::file_writer(filename, [io::create, io::truncate]).write_str(content);
// Work around https://github.com/graydon/rust/issues/726
std::run::run_program("chmod", ~["644", filename]);
std::run::run_program("chmod", ["644", filename]);
}
fn file_contains(filename: &str, needle: &str) -> bool {
@ -41,9 +39,8 @@ fn find_rust_files(files: &mutable [str], path: str) {
if str::ends_with(path, ".rs") {
if file_contains(path, "xfail-stage1") {
//log_err "Skipping " + path + " because it is marked as xfail-stage1";
} else { files += ~[path]; }
} else if (fs::file_is_dir(path) && str::find(path, "compile-fail") == -1)
{
} else { files += [path]; }
} else if fs::file_is_dir(path) && str::find(path, "compile-fail") == -1 {
for p in fs::list_dir(path) { find_rust_files(files, p); }
}
}
@ -51,6 +48,7 @@ fn find_rust_files(files: &mutable [str], path: str) {
fn safe_to_steal(e: ast::expr_) -> bool {
alt e {
// pretty-printer precedence issues -- https://github.com/graydon/rust/issues/670
ast::expr_unary(_, _) {
false
@ -73,13 +71,23 @@ fn safe_to_steal(e: ast::expr_) -> bool {
ast::expr_binary(_, _, _) { false }
ast::expr_assign(_, _) { false }
ast::expr_assign_op(_, _, _) { false }
ast::expr_fail(option::none.) { false /* https://github.com/graydon/rust/issues/764 */ }
ast::expr_fail(option::none.) {
false
/* https://github.com/graydon/rust/issues/764 */
}
ast::expr_ret(option::none.) { false }
ast::expr_put(option::none.) { false }
ast::expr_ret(_) { false /* lots of code generation issues, such as https://github.com/graydon/rust/issues/770 */ }
ast::expr_ret(_) {
false
/* lots of code generation issues, such as https://github.com/graydon/rust/issues/770 */
}
ast::expr_fail(_) { false }
_ {
true
}
@ -87,17 +95,17 @@ fn safe_to_steal(e: ast::expr_) -> bool {
}
fn steal_exprs(crate: &ast::crate) -> [ast::expr] {
let exprs: @mutable [ast::expr] = @mutable ~[];
let exprs: @mutable [ast::expr] = @mutable [];
// "Stash" is not type-parameterized because of the need for safe_to_steal
fn stash_expr(es: @mutable [ast::expr], e: &@ast::expr) {
if safe_to_steal(e.node) {
*es += ~[*e];
*es += [*e];
} else {/* now my indices are wrong :( */ }
}
let v = visit::mk_simple_visitor
(@{visit_expr: bind stash_expr(exprs, _)
with *visit::default_simple_visitor()});
visit::visit_crate(crate, (), v);
let v =
visit::mk_simple_visitor(@{visit_expr: bind stash_expr(exprs, _)
with *visit::default_simple_visitor()});
visit::visit_crate(crate, (), v);;
*exprs
}
@ -128,6 +136,7 @@ fn replace_expr_in_crate(crate: &ast::crate, i: uint, newexpr: ast::expr_) ->
let af = fold::make_fold(afp);
let crate2: @ast::crate = @af.fold_crate(crate);
fold::dummy_out(af); // work around a leak (https://github.com/graydon/rust/issues/651)
;
*crate2
}
@ -138,26 +147,28 @@ iter under(n: uint) -> uint {
fn devnull() -> io::writer { std::io::string_writer().get_writer() }
fn as_str(f: fn(io::writer) ) -> str {
fn as_str(f: fn(io::writer)) -> str {
let w = std::io::string_writer();
f(w.get_writer());
ret w.get_str();
}
fn check_variants_of_ast(crate: &ast::crate, codemap: &codemap::codemap, filename: &str) {
fn check_variants_of_ast(crate: &ast::crate, codemap: &codemap::codemap,
filename: &str) {
let exprs = steal_exprs(crate);
let exprsL = vec::len(exprs);
if (exprsL < 100u) {
if exprsL < 100u {
for each i: uint in under(uint::min(exprsL, 20u)) {
log_err "Replacing... " + pprust::expr_to_str(@exprs.(i));
log_err "Replacing... " + pprust::expr_to_str(@exprs[i]);
for each j: uint in under(uint::min(exprsL, 5u)) {
log_err "With... " + pprust::expr_to_str(@exprs.(j));
let crate2 = @replace_expr_in_crate(crate, i, exprs.(j).node);
log_err "With... " + pprust::expr_to_str(@exprs[j]);
let crate2 = @replace_expr_in_crate(crate, i, exprs[j].node);
// It would be best to test the *crate* for stability, but testing the
// string for stability is easier and ok for now.
let str3 = as_str(bind pprust::print_crate(codemap, crate2, filename,
io::string_reader(""), _,
pprust::no_ann()));
let str3 =
as_str(bind pprust::print_crate(codemap, crate2, filename,
io::string_reader(""), _,
pprust::no_ann()));
// 1u would be sane here, but the pretty-printer currently has lots of whitespace and paren issues,
// and https://github.com/graydon/rust/issues/766 is hilarious.
check_roundtrip_convergence(str3, 7u);
@ -174,37 +185,52 @@ fn check_variants_of_ast(crate: &ast::crate, codemap: &codemap::codemap, filenam
fn check_whole_compiler(code: &str) {
let filename = "test.rs";
write_file(filename, code);
let p = std::run::program_output("/Users/jruderman/code/rust/build/stage1/rustc", ~["-c", filename]);
let p =
std::run::program_output("/Users/jruderman/code/rust/build/stage1/rustc",
["-c", filename]);
//log_err #fmt("Status: %d", p.status);
//log_err "Output: " + p.out;
if p.err != "" {
if contains(p.err, "argument of incompatible type") {
log_err "https://github.com/graydon/rust/issues/769";
} else if contains(p.err, "Cannot create binary operator with two operands of differing type") {
} else if contains(p.err,
"Cannot create binary operator with two operands of differing type")
{
log_err "https://github.com/graydon/rust/issues/770";
} else if contains(p.err, "May only branch on boolean predicates!") {
log_err "https://github.com/graydon/rust/issues/770 or https://github.com/graydon/rust/issues/776";
} else if contains(p.err, "Invalid constantexpr cast!") && contains(code, "!") {
} else if contains(p.err, "Invalid constantexpr cast!") &&
contains(code, "!") {
log_err "https://github.com/graydon/rust/issues/777";
} else if contains(p.err, "Both operands to ICmp instruction are not of the same type!") && contains(code, "!") {
} else if contains(p.err,
"Both operands to ICmp instruction are not of the same type!")
&& contains(code, "!") {
log_err "https://github.com/graydon/rust/issues/777 #issuecomment-1678487";
} else if contains(p.err, "Ptr must be a pointer to Val type!") && contains(code, "!") {
} else if contains(p.err, "Ptr must be a pointer to Val type!") &&
contains(code, "!") {
log_err "https://github.com/graydon/rust/issues/779";
} else if contains(p.err, "Calling a function with bad signature!") && (contains(code, "iter") || contains(code, "range")) {
} else if contains(p.err, "Calling a function with bad signature!") &&
(contains(code, "iter") || contains(code, "range")) {
log_err "https://github.com/graydon/rust/issues/771 - calling an iter fails";
} else if contains(p.err, "Calling a function with a bad signature!") && contains(code, "empty") {
} else if contains(p.err, "Calling a function with a bad signature!")
&& contains(code, "empty") {
log_err "https://github.com/graydon/rust/issues/775 - possibly a modification of run-pass/import-glob-crate.rs";
} else if contains(p.err, "Invalid type for pointer element!") && contains(code, "put") {
} else if contains(p.err, "Invalid type for pointer element!") &&
contains(code, "put") {
log_err "https://github.com/graydon/rust/issues/773 - put put ()";
} else if contains(p.err, "pointer being freed was not allocated") && contains(p.out, "Out of stack space, sorry") {
} else if contains(p.err, "pointer being freed was not allocated") &&
contains(p.out, "Out of stack space, sorry") {
log_err "https://github.com/graydon/rust/issues/768 + https://github.com/graydon/rust/issues/778"
} else {
log_err "Stderr: " + p.err;
fail "Unfamiliar error message";
}
} else if contains(p.out, "non-exhaustive match failure") && contains(p.out, "alias.rs") {
} else if contains(p.out, "non-exhaustive match failure") &&
contains(p.out, "alias.rs") {
log_err "https://github.com/graydon/rust/issues/772";
} else if contains(p.out, "non-exhaustive match failure") && contains(p.out, "trans.rs") && contains(code, "put") {
} else if contains(p.out, "non-exhaustive match failure") &&
contains(p.out, "trans.rs") && contains(code, "put") {
log_err "https://github.com/graydon/rust/issues/774";
} else if contains(p.out, "Out of stack space, sorry") {
log_err "Possibly a variant of https://github.com/graydon/rust/issues/768";
@ -214,74 +240,64 @@ fn check_whole_compiler(code: &str) {
}
} else if p.status == 11 {
log_err "What is this I don't even";
} else if p.status != 0 {
fail "Unfamiliar status code";
}
} else if p.status != 0 { fail "Unfamiliar status code"; }
}
fn parse_and_print(code: &str) -> str {
let filename = "tmp.rs";
let sess = @{cm: codemap::new_codemap(), mutable next_id: 0};
//write_file(filename, code);
let crate =
parser::parse_crate_from_source_str(filename, code, ~[], sess);
let crate = parser::parse_crate_from_source_str(filename, code, [], sess);
ret as_str(bind pprust::print_crate(sess.cm, crate, filename,
io::string_reader(code), _,
pprust::no_ann()));
}
fn content_is_dangerous_to_modify(code: &str) -> bool {
let dangerous_patterns = [
"obj", // not safe to steal; https://github.com/graydon/rust/issues/761
let dangerous_patterns =
["obj", // not safe to steal; https://github.com/graydon/rust/issues/761
"#macro", // not safe to steal things inside of it, because they have a special syntax
"#", // strange representation of the arguments to #fmt, for example
" be ", // don't want to replace its child with a non-call: "Non-call expression in tail call"
"@" // hangs when compiling: https://github.com/graydon/rust/issues/768
];
"@"]; // hangs when compiling: https://github.com/graydon/rust/issues/768
for p: str in dangerous_patterns { if contains(code, p) { ret true; } }
ret false;
}
fn content_is_confusing(code: &str) -> bool {
let // https://github.com/graydon/rust/issues/671
// https://github.com/graydon/rust/issues/669
// https://github.com/graydon/rust/issues/669
// https://github.com/graydon/rust/issues/669
// crazy rules enforced by parser rather than typechecker?
// more precedence issues
// more precedence issues?
confusing_patterns =
fn content_is_confusing(code: &str) ->
bool { // https://github.com/graydon/rust/issues/671
// https://github.com/graydon/rust/issues/669
// https://github.com/graydon/rust/issues/669
// https://github.com/graydon/rust/issues/669
// crazy rules enforced by parser rather than typechecker?
// more precedence issues
// more precedence issues?
let confusing_patterns =
["#macro", "][]", "][mutable]", "][mutable ]", "self", "spawn",
"bind",
"\n\n\n\n\n", // https://github.com/graydon/rust/issues/759
"bind", "\n\n\n\n\n", // https://github.com/graydon/rust/issues/759
" : ", // https://github.com/graydon/rust/issues/760
"if ret",
"alt ret",
"if fail",
"alt fail"
];
"if ret", "alt ret", "if fail", "alt fail"];
for p: str in confusing_patterns { if contains(code, p) { ret true; } }
ret false;
}
fn file_is_confusing(filename: &str) -> bool {
let
// https://github.com/graydon/rust/issues/674
// https://github.com/graydon/rust/issues/674
// something to do with () as a lone pattern
// something to do with () as a lone pattern
// an issue where -2147483648 gains an
// extra negative sign each time through,
// which i can't reproduce using "rustc
// --pretty normal"???
confusing_files =
// an issue where -2147483648 gains an
// extra negative sign each time through,
// which i can't reproduce using "rustc
// --pretty normal"???
let confusing_files =
["block-expr-precedence.rs", "nil-pattern.rs",
"syntax-extension-fmt.rs",
"newtype.rs" // modifying it hits something like https://github.com/graydon/rust/issues/670
];
"newtype.rs"]; // modifying it hits something like https://github.com/graydon/rust/issues/670
for f in confusing_files { if contains(filename, f) { ret true; } }
@ -303,23 +319,25 @@ fn check_roundtrip_convergence(code: &str, maxIters: uint) {
}
if old == new {
log_err #fmt("Converged after %u iterations", i);
log_err #fmt["Converged after %u iterations", i];
} else {
log_err #fmt("Did not converge after %u iterations!", i);
log_err #fmt["Did not converge after %u iterations!", i];
write_file("round-trip-a.rs", old);
write_file("round-trip-b.rs", new);
std::run::run_program("diff", ~["-w", "-u", "round-trip-a.rs", "round-trip-b.rs"]);
std::run::run_program("diff",
["-w", "-u", "round-trip-a.rs",
"round-trip-b.rs"]);
fail "Mismatch";
}
}
fn check_convergence(files: &[str]) {
log_err #fmt("pp convergence tests: %u files", vec::len(files));
log_err #fmt["pp convergence tests: %u files", vec::len(files)];
for file in files {
if !file_is_confusing(file) {
let s = io::read_whole_file_str(file);
if !content_is_confusing(s) {
log_err #fmt("pp converge: %s", file);
log_err #fmt["pp converge: %s", file];
// Change from 7u to 2u when https://github.com/graydon/rust/issues/759 is fixed
check_roundtrip_convergence(s, 7u);
}
@ -331,13 +349,16 @@ fn check_variants(files: &[str]) {
for file in files {
if !file_is_confusing(file) {
let s = io::read_whole_file_str(file);
if content_is_dangerous_to_modify(s) || content_is_confusing(s) { cont; }
if content_is_dangerous_to_modify(s) || content_is_confusing(s) {
cont;
}
log_err "check_variants: " + file;
let sess = @{cm: codemap::new_codemap(), mutable next_id: 0};
let crate = parser::parse_crate_from_source_str(file, s, ~[], sess);
let crate =
parser::parse_crate_from_source_str(file, s, [], sess);
log_err as_str(bind pprust::print_crate(sess.cm, crate, file,
io::string_reader(s), _,
pprust::no_ann()));
io::string_reader(s), _,
pprust::no_ann()));
check_variants_of_ast(*crate, sess.cm, file);
}
}
@ -345,11 +366,11 @@ fn check_variants(files: &[str]) {
fn main(args: [str]) {
if vec::len(args) != 2u {
log_err #fmt("usage: %s <testdir>", args.(0));
log_err #fmt["usage: %s <testdir>", args[0]];
ret;
}
let files = ~[];
let root = args.(1);
let files = [];
let root = args[1];
find_rust_files(files, root);
check_convergence(files);

View file

@ -32,19 +32,19 @@ fn vec_omit<T>(v: &[T], i: uint) -> [T] {
slice(v, 0u, i) + slice(v, i + 1u, len(v))
}
fn vec_dup<T>(v: &[T], i: uint) -> [T] {
slice(v, 0u, i) + ~[v.(i)] + slice(v, i, len(v))
slice(v, 0u, i) + [v[i]] + slice(v, i, len(v))
}
fn vec_swadj<T>(v: &[T], i: uint) -> [T] {
slice(v, 0u, i) + ~[v.(i + 1u), v.(i)] + slice(v, i + 2u, len(v))
slice(v, 0u, i) + [v[i + 1u], v[i]] + slice(v, i + 2u, len(v))
}
fn vec_prefix<T>(v: &[T], i: uint) -> [T] { slice(v, 0u, i) }
fn vec_suffix<T>(v: &[T], i: uint) -> [T] { slice(v, i, len(v)) }
fn vec_poke<T>(v: &[T], i: uint, x: &T) -> [T] {
slice(v, 0u, i) + ~[x] + slice(v, i + 1u, len(v))
slice(v, 0u, i) + [x] + slice(v, i + 1u, len(v))
}
fn vec_insert<T>(v: &[T], i: uint, x: &T) -> [T] {
slice(v, 0u, i) + ~[x] + slice(v, i, len(v))
slice(v, 0u, i) + [x] + slice(v, i, len(v))
}
// Iterates over 0...length, skipping the specified number on each side.
@ -55,28 +55,28 @@ iter ix(skip_low: uint, skip_high: uint, length: uint) -> uint {
// Returns a bunch of modified versions of v, some of which introduce new elements (borrowed from xs).
fn vec_edits<T>(v: &[T], xs: &[T]) -> [[T]] {
let edits: [[T]] = ~[];
let edits: [[T]] = [];
let Lv: uint = len(v);
if Lv != 1u {
edits +=
~[~[]]; // When Lv == 1u, this is redundant with omit
//if (Lv >= 3u) { edits += ~[vec_reverse(v)]; }
[[]]; // When Lv == 1u, this is redundant with omit
//if (Lv >= 3u) { edits += ~[vec_reverse(v)]; }
}
for each i: uint in ix(0u, 1u, Lv) { edits += ~[vec_omit(v, i)]; }
for each i: uint in ix(0u, 1u, Lv) { edits += ~[vec_dup(v, i)]; }
for each i: uint in ix(0u, 2u, Lv) { edits += ~[vec_swadj(v, i)]; }
for each i: uint in ix(1u, 2u, Lv) { edits += ~[vec_prefix(v, i)]; }
for each i: uint in ix(2u, 1u, Lv) { edits += ~[vec_suffix(v, i)]; }
for each i: uint in ix(0u, 1u, Lv) { edits += [vec_omit(v, i)]; }
for each i: uint in ix(0u, 1u, Lv) { edits += [vec_dup(v, i)]; }
for each i: uint in ix(0u, 2u, Lv) { edits += [vec_swadj(v, i)]; }
for each i: uint in ix(1u, 2u, Lv) { edits += [vec_prefix(v, i)]; }
for each i: uint in ix(2u, 1u, Lv) { edits += [vec_suffix(v, i)]; }
for each j: uint in ix(0u, 1u, len(xs)) {
for each i: uint in ix(0u, 1u, Lv) {
edits += ~[vec_poke(v, i, xs.(j))];
edits += [vec_poke(v, i, xs[j])];
}
for each i: uint in ix(0u, 0u, Lv) {
edits += ~[vec_insert(v, i, xs.(j))];
edits += [vec_insert(v, i, xs[j])];
}
}
@ -89,7 +89,7 @@ fn vec_to_str(v: &[int]) -> str {
let i = 0u;
let s = "[";
while i < len(v) {
s += int::str(v.(i));
s += int::str(v[i]);
if i + 1u < len(v) { s += ", " }
i += 1u;
}
@ -99,16 +99,16 @@ fn vec_to_str(v: &[int]) -> str {
fn show_edits(a: &[int], xs: &[int]) {
log_err "=== Edits of " + vec_to_str(a) + " ===";
let b = vec_edits(a, xs);
for each i: uint in ix(0u, 1u, len(b)) { log_err vec_to_str(b.(i)); }
for each i: uint in ix(0u, 1u, len(b)) { log_err vec_to_str(b[i]); }
}
fn demo_edits() {
let xs = ~[7, 8];
show_edits(~[], xs);
show_edits(~[1], xs);
show_edits(~[1, 2], xs);
show_edits(~[1, 2, 3], xs);
show_edits(~[1, 2, 3, 4], xs);
let xs = [7, 8];
show_edits([], xs);
show_edits([1], xs);
show_edits([1, 2], xs);
show_edits([1, 2, 3], xs);
show_edits([1, 2, 3, 4], xs);
}
fn main() { demo_edits(); }
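A small worked trace of the slice helpers in the hunks above; the input vectors are made up for illustration and are not part of the commit.
// Worked example (arbitrary values):
//   vec_dup([1, 2, 3], 1u)
//     = slice(v, 0u, 1u) + [v[1u]] + slice(v, 1u, 3u)
//     = [1] + [2] + [2, 3]              ->  [1, 2, 2, 3]
//   vec_swadj([1, 2, 3], 0u)
//     = slice(v, 0u, 0u) + [v[1u], v[0u]] + slice(v, 2u, 3u)
//     = [] + [2, 1] + [3]               ->  [2, 1, 3]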

View file

@ -28,26 +28,17 @@ native "rust" mod rustrt {
// currently in the sendable kind, so we'll unsafely cast between ints.
type server = rustrt::server;
type client = rustrt::socket;
tag pending_connection {
remote(net::ip_addr,int);
incoming(server);
}
tag pending_connection { remote(net::ip_addr, int); incoming(server); }
tag socket_event {
connected(client);
closed;
received([u8]);
}
tag socket_event { connected(client); closed; received([u8]); }
tag server_event {
pending(_chan<_chan<socket_event>>);
}
tag server_event { pending(_chan<_chan<socket_event>>); }
tag request {
quit;
connect(pending_connection,_chan<socket_event>);
serve(net::ip_addr,int,_chan<server_event>,_chan<server>);
write(client,[u8],_chan<bool>);
connect(pending_connection, _chan<socket_event>);
serve(net::ip_addr, int, _chan<server_event>, _chan<server>);
write(client, [u8], _chan<bool>);
close_server(server, _chan<bool>);
close_client(client);
}
@ -74,12 +65,12 @@ fn new_client(client: client, evt: _chan<socket_event>) {
send(evt, connected(client));
while (true) {
while true {
log "waiting for bytes";
let data: [u8] = reader.recv();
log "got some bytes";
log vec::len::<u8>(data);
if (vec::len::<u8>(data) == 0u) {
if vec::len::<u8>(data) == 0u {
log "got empty buffer, bailing";
break;
}
@ -104,19 +95,17 @@ fn accept_task(client: client, events: _chan<server_event>) {
fn server_task(ip: net::ip_addr, portnum: int, events: _chan<server_event>,
server: _chan<server>) {
let accepter: _port<client> = mk_port();
send(server, rustrt::aio_serve(ip_to_sbuf(ip), portnum,
accepter.mk_chan()));
send(server,
rustrt::aio_serve(ip_to_sbuf(ip), portnum, accepter.mk_chan()));
let client: client;
while (true) {
while true {
log "preparing to accept a client";
client = accepter.recv();
if (rustrt::aio_is_null_client(client)) {
log "client was actually null, returning";
ret;
} else {
task::_spawn(bind accept_task(client, events));
}
if rustrt::aio_is_null_client(client) {
log "client was actually null, returning";
ret;
} else { task::_spawn(bind accept_task(client, events)); }
}
}
@ -128,7 +117,7 @@ fn request_task(c: _chan<ctx>) {
log "uv run task spawned";
// Spin for requests
let req: request;
while (true) {
while true {
req = p.recv();
alt req {
quit. {
@ -137,20 +126,19 @@ fn request_task(c: _chan<ctx>) {
rustrt::aio_stop();
ret;
}
connect(remote(ip,portnum),client) {
connect(remote(ip, portnum), client) {
task::_spawn(bind connect_task(ip, portnum, client));
}
serve(ip,portnum,events,server) {
serve(ip, portnum, events, server) {
task::_spawn(bind server_task(ip, portnum, events, server));
}
write(socket,v,status) {
rustrt::aio_writedata(socket,
vec::to_ptr::<u8>(v), vec::len::<u8>(v),
status);
write(socket, v, status) {
rustrt::aio_writedata(socket, vec::to_ptr::<u8>(v),
vec::len::<u8>(v), status);
}
close_server(server,status) {
close_server(server, status) {
log "closing server";
rustrt::aio_close_server(server,status);
rustrt::aio_close_server(server, status);
}
close_client(client) {
log "closing client";

View file

@ -35,16 +35,16 @@ fn create(nbits: uint, init: bool) -> t {
ret @{storage: storage, nbits: nbits};
}
fn process(op: &block(uint, uint) -> uint , v0: &t, v1: &t) -> bool {
fn process(op: &block(uint, uint) -> uint, v0: &t, v1: &t) -> bool {
let len = vec::len(v1.storage);
assert (vec::len(v0.storage) == len);
assert (v0.nbits == v1.nbits);
let changed = false;
for each i: uint in uint::range(0u, len) {
let w0 = v0.storage.(i);
let w1 = v1.storage.(i);
let w0 = v0.storage[i];
let w1 = v1.storage[i];
let w = op(w0, w1);
if w0 != w { changed = true; v0.storage.(i) = w; }
if w0 != w { changed = true; v0.storage[i] = w; }
}
ret changed;
}
@ -70,7 +70,7 @@ fn assign(v0: &t, v1: t) -> bool {
fn clone(v: t) -> t {
let storage = vec::init_elt_mut::<uint>(0u, v.nbits / uint_bits() + 1u);
let len = vec::len(v.storage);
for each i: uint in uint::range(0u, len) { storage.(i) = v.storage.(i); }
for each i: uint in uint::range(0u, len) { storage[i] = v.storage[i]; }
ret @{storage: storage, nbits: v.nbits};
}
@ -79,7 +79,7 @@ fn get(v: &t, i: uint) -> bool {
let bits = uint_bits();
let w = i / bits;
let b = i % bits;
let x = 1u & v.storage.(w) >> b;
let x = 1u & v.storage[w] >> b;
ret x == 1u;
}
@ -90,7 +90,7 @@ fn equal(v0: &t, v1: &t) -> bool {
let len = vec::len(v1.storage);
let i = 0u;
while i < len {
if v0.storage.(i) != v1.storage.(i) { ret false; }
if v0.storage[i] != v1.storage[i] { ret false; }
i = i + 1u;
}
ret true;
@ -98,7 +98,7 @@ fn equal(v0: &t, v1: &t) -> bool {
fn clear(v: &t) {
for each i: uint in uint::range(0u, vec::len(v.storage)) {
v.storage.(i) = 0u;
v.storage[i] = 0u;
}
}
@ -108,7 +108,7 @@ fn set_all(v: &t) {
fn invert(v: &t) {
for each i: uint in uint::range(0u, vec::len(v.storage)) {
v.storage.(i) = !v.storage.(i);
v.storage[i] = !v.storage[i];
}
}
@ -127,8 +127,7 @@ fn set(v: &t, i: uint, x: bool) {
let w = i / bits;
let b = i % bits;
let flag = 1u << b;
v.storage.(w) =
if x { v.storage.(w) | flag } else { v.storage.(w) & !flag };
v.storage[w] = if x { v.storage[w] | flag } else { v.storage[w] & !flag };
}
@ -154,9 +153,7 @@ fn to_vec(v: &t) -> [uint] {
fn to_str(v: &t) -> str {
let rs = "";
for i: uint in to_vec(v) {
if i == 1u { rs += "1"; } else { rs += "0"; }
}
for i: uint in to_vec(v) { if i == 1u { rs += "1"; } else { rs += "0"; } }
ret rs;
}
@ -166,7 +163,7 @@ fn eq_vec(v0: &t, v1: &[uint]) -> bool {
let i = 0u;
while i < len {
let w0 = get(v0, i);
let w1 = v1.(i);
let w1 = v1[i];
if !w0 && w1 != 0u || w0 && w1 == 0u { ret false; }
i = i + 1u;
}
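A worked trace of the index arithmetic used by get and set above; the 64-bit word size is an assumption (uint_bits() is target-dependent) and the numbers are illustrative only.
// Assuming uint_bits() == 64u, locating bit i == 70u:
//   w    = 70u / 64u  = 1u        // word index into v.storage
//   b    = 70u % 64u  = 6u        // bit offset inside that word
//   flag = 1u << 6u   = 64u
// get:  1u & v.storage[1u] >> 6u                      // 1u iff the bit is set
// set:  v.storage[1u] = if x { v.storage[1u] | flag }
//                       else { v.storage[1u] & !flag };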

View file

@ -26,31 +26,55 @@ pred is_whitespace(c: char) -> bool {
const ch_next_line: char = '\u0085';
const ch_no_break_space: char = '\u00a0';
if c == ch_space { true }
else if c == ch_ogham_space_mark { true }
else if c == ch_mongolian_vowel_sep { true }
else if c == ch_en_quad { true }
else if c == ch_em_quad { true }
else if c == ch_en_space { true }
else if c == ch_em_space { true }
else if c == ch_three_per_em_space { true }
else if c == ch_four_per_em_space { true }
else if c == ch_six_per_em_space { true }
else if c == ch_figure_space { true }
else if c == ch_punctuation_space { true }
else if c == ch_thin_space { true }
else if c == ch_hair_space { true }
else if c == ch_narrow_no_break_space { true }
else if c == ch_medium_mathematical_space { true }
else if c == ch_ideographic_space { true }
else if c == ch_line_tabulation { true }
else if c == ch_paragraph_separator { true }
else if c == ch_character_tabulation { true }
else if c == ch_line_feed { true }
else if c == ch_line_tabulation { true }
else if c == ch_form_feed { true }
else if c == ch_carriage_return { true }
else if c == ch_next_line { true }
else if c == ch_no_break_space { true }
else { false }
}
if c == ch_space {
true
} else if c == ch_ogham_space_mark {
true
} else if c == ch_mongolian_vowel_sep {
true
} else if c == ch_en_quad {
true
} else if c == ch_em_quad {
true
} else if c == ch_en_space {
true
} else if c == ch_em_space {
true
} else if c == ch_three_per_em_space {
true
} else if c == ch_four_per_em_space {
true
} else if c == ch_six_per_em_space {
true
} else if c == ch_figure_space {
true
} else if c == ch_punctuation_space {
true
} else if c == ch_thin_space {
true
} else if c == ch_hair_space {
true
} else if c == ch_narrow_no_break_space {
true
} else if c == ch_medium_mathematical_space {
true
} else if c == ch_ideographic_space {
true
} else if c == ch_line_tabulation {
true
} else if c == ch_paragraph_separator {
true
} else if c == ch_character_tabulation {
true
} else if c == ch_line_feed {
true
} else if c == ch_line_tabulation {
true
} else if c == ch_form_feed {
true
} else if c == ch_carriage_return {
true
} else if c == ch_next_line {
true
} else if c == ch_no_break_space { true } else { false }
}

View file

@ -17,22 +17,21 @@ native "rust" mod rustrt {
type void;
type rust_port;
fn chan_id_send<~T>(target_task : task_id, target_port : port_id,
data : -T);
fn chan_id_send<~T>(target_task: task_id, target_port: port_id, data: -T);
fn new_port(unit_sz : uint) -> *rust_port;
fn del_port(po : *rust_port);
fn drop_port(po : *rust_port);
fn get_port_id(po : *rust_port) -> port_id;
fn new_port(unit_sz: uint) -> *rust_port;
fn del_port(po: *rust_port);
fn drop_port(po: *rust_port);
fn get_port_id(po: *rust_port) -> port_id;
}
native "rust-intrinsic" mod rusti {
fn recv<~T>(port : *rustrt::rust_port) -> T;
fn recv<~T>(port: *rustrt::rust_port) -> T;
}
type port_id = int;
type chan_handle<~T> = { task : task_id, port : port_id};
type chan_handle<~T> = {task: task_id, port: port_id};
tag chan<~T> { chan_t(chan_handle<T>); }
type _chan<~T> = chan<T>;
@ -44,22 +43,16 @@ resource port_ptr(po: *rustrt::rust_port) {
tag port<~T> { port_t(@port_ptr); }
obj port_obj<~T>(raw_port : port<T>) {
fn mk_chan() -> chan<T> {
chan(raw_port)
}
obj port_obj<~T>(raw_port: port<T>) {
fn mk_chan() -> chan<T> { chan(raw_port) }
fn recv() -> T {
recv(raw_port)
}
fn recv() -> T { recv(raw_port) }
}
type _port<~T> = port_obj<T>;
fn mk_port<~T>() -> _port<T> {
ret port_obj::<T>(port::<T>());
}
fn mk_port<~T>() -> _port<T> { ret port_obj::<T>(port::<T>()); }
fn send<~T>(ch : &chan<T>, data : -T) {
fn send<~T>(ch: &chan<T>, data: -T) {
rustrt::chan_id_send(ch.task, ch.port, data);
}
@ -67,13 +60,8 @@ fn port<~T>() -> port<T> {
port_t(@port_ptr(rustrt::new_port(sys::size_of::<T>())))
}
fn recv<~T>(p : &port<T>) -> T {
ret rusti::recv(***p)
}
fn recv<~T>(p: &port<T>) -> T { ret rusti::recv(***p) }
fn chan<~T>(p : &port<T>) -> chan<T> {
chan_t({
task: task::get_task_id(),
port: rustrt::get_port_id(***p)
})
fn chan<~T>(p: &port<T>) -> chan<T> {
chan_t({task: task::get_task_id(), port: rustrt::get_port_id(***p)})
}
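A hedged usage sketch of the wrappers in the hunks above; the payload value and its type are made up, only mk_port, mk_chan, send, and recv appear in the diff itself.
// Hypothetical usage, not part of the commit:
//   let p = mk_port::<int>();   // port_obj wrapping a fresh rust_port
//   let c = p.mk_chan();        // chan_t({task: ..., port: ...}) handle
//   send(c, 42);                // forwards to rustrt::chan_id_send
//   let x: int = p.recv();      // rusti::recv on the underlying port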

View file

@ -6,14 +6,14 @@
*/
type t<T> =
obj {
fn size() -> uint ;
fn add_front(&T) ;
fn add_back(&T) ;
fn pop_front() -> T ;
fn pop_back() -> T ;
fn peek_front() -> T ;
fn peek_back() -> T ;
fn get(int) -> T ;
fn size() -> uint;
fn add_front(&T);
fn add_back(&T);
fn pop_front() -> T;
fn pop_back() -> T;
fn peek_front() -> T;
fn peek_back() -> T;
fn get(int) -> T;
};
fn create<@T>() -> t<T> {
@ -26,24 +26,25 @@ fn create<@T>() -> t<T> {
*/
fn grow<@T>(nelts: uint, lo: uint, elts: &[mutable cell<T>]) ->
[mutable cell<T>] {
assert (nelts == vec::len(elts));
let rv = ~[mutable];
let rv = [mutable];
let i = 0u;
let nalloc = uint::next_power_of_two(nelts + 1u);
while i < nalloc {
if i < nelts {
rv += ~[mutable elts.((lo + i) % nelts)];
} else { rv += ~[mutable option::none]; }
rv += [mutable elts[(lo + i) % nelts]];
} else { rv += [mutable option::none]; }
i += 1u;
}
ret rv;
}
fn get<@T>(elts: &[mutable cell<T>], i: uint) -> T {
ret alt elts.(i) { option::some(t) { t } _ { fail } };
ret alt elts[i] { option::some(t) { t } _ { fail } };
}
obj deque<@T>(mutable nelts: uint,
mutable lo: uint,
@ -60,7 +61,7 @@ fn create<@T>() -> t<T> {
lo = vec::len::<cell<T>>(elts) - 1u;
hi = nelts;
}
elts.(lo) = option::some::<T>(t);
elts[lo] = option::some::<T>(t);
nelts += 1u;
}
fn add_back(t: &T) {
@ -69,7 +70,7 @@ fn create<@T>() -> t<T> {
lo = 0u;
hi = nelts;
}
elts.(hi) = option::some::<T>(t);
elts[hi] = option::some::<T>(t);
hi = (hi + 1u) % vec::len::<cell<T>>(elts);
nelts += 1u;
}
@ -80,7 +81,7 @@ fn create<@T>() -> t<T> {
*/
fn pop_front() -> T {
let t: T = get::<T>(elts, lo);
elts.(lo) = option::none::<T>;
elts[lo] = option::none::<T>;
lo = (lo + 1u) % vec::len::<cell<T>>(elts);
nelts -= 1u;
ret t;
@ -90,7 +91,7 @@ fn create<@T>() -> t<T> {
hi = vec::len::<cell<T>>(elts) - 1u;
} else { hi -= 1u; }
let t: T = get::<T>(elts, hi);
elts.(hi) = option::none::<T>;
elts[hi] = option::none::<T>;
nelts -= 1u;
ret t;
}
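A worked trace of grow above with a made-up four-element state, showing how the old ring is unrolled starting at lo and padded with none up to the next power of two.
// Illustration only: nelts == 4u, lo == 2u, elts == [e0, e1, e2, e3]
//   nalloc = uint::next_power_of_two(5u) = 8u
//   i < nelts   ->  rv += [mutable elts[(2u + i) % 4u]]   // e2, e3, e0, e1
//   i >= nelts  ->  rv += [mutable option::none]
//   result: [e2, e3, e0, e1, none, none, none, none]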

View file

@ -18,23 +18,23 @@ type ebml_state = {ebml_tag: ebml_tag, tag_pos: uint, data_pos: uint};
type doc = {data: @[u8], start: uint, end: uint};
fn vint_at(data: &[u8], start: uint) -> {val: uint, next: uint} {
let a = data.(start);
let a = data[start];
if a & 0x80u8 != 0u8 { ret {val: a & 0x7fu8 as uint, next: start + 1u}; }
if a & 0x40u8 != 0u8 {
ret {val: (a & 0x3fu8 as uint) << 8u | (data.(start + 1u) as uint),
ret {val: (a & 0x3fu8 as uint) << 8u | (data[start + 1u] as uint),
next: start + 2u};
} else if (a & 0x20u8 != 0u8) {
} else if a & 0x20u8 != 0u8 {
ret {val:
(a & 0x1fu8 as uint) << 16u |
(data.(start + 1u) as uint) << 8u |
(data.(start + 2u) as uint),
(data[start + 1u] as uint) << 8u |
(data[start + 2u] as uint),
next: start + 3u};
} else if (a & 0x10u8 != 0u8) {
} else if a & 0x10u8 != 0u8 {
ret {val:
(a & 0x0fu8 as uint) << 24u |
(data.(start + 1u) as uint) << 16u |
(data.(start + 2u) as uint) << 8u |
(data.(start + 3u) as uint),
(data[start + 1u] as uint) << 16u |
(data[start + 2u] as uint) << 8u |
(data[start + 3u] as uint),
next: start + 4u};
} else { log_err "vint too big"; fail; }
}
@ -105,7 +105,7 @@ fn be_uint_from_bytes(data: &@[u8], start: uint, size: uint) -> uint {
let pos = start;
while sz > 0u {
sz -= 1u;
val += (data.(pos) as uint) << sz * 8u;
val += (data[pos] as uint) << sz * 8u;
pos += 1u;
}
ret val;
@ -122,17 +122,17 @@ type writer = {writer: io::buf_writer, mutable size_positions: [uint]};
fn write_sized_vint(w: &io::buf_writer, n: uint, size: uint) {
let buf: [u8];
alt size {
1u { buf = ~[0x80u8 | (n as u8)]; }
2u { buf = ~[0x40u8 | (n >> 8u as u8), n & 0xffu as u8]; }
1u { buf = [0x80u8 | (n as u8)]; }
2u { buf = [0x40u8 | (n >> 8u as u8), n & 0xffu as u8]; }
3u {
buf =
~[0x20u8 | (n >> 16u as u8), n >> 8u & 0xffu as u8,
n & 0xffu as u8];
[0x20u8 | (n >> 16u as u8), n >> 8u & 0xffu as u8,
n & 0xffu as u8];
}
4u {
buf =
~[0x10u8 | (n >> 24u as u8), n >> 16u & 0xffu as u8,
n >> 8u & 0xffu as u8, n & 0xffu as u8];
[0x10u8 | (n >> 24u as u8), n >> 16u & 0xffu as u8,
n >> 8u & 0xffu as u8, n & 0xffu as u8];
}
_ { log_err "vint to write too big"; fail; }
}
@ -149,7 +149,7 @@ fn write_vint(w: &io::buf_writer, n: uint) {
}
fn create_writer(w: &io::buf_writer) -> writer {
let size_positions: [uint] = ~[];
let size_positions: [uint] = [];
ret {writer: w, mutable size_positions: size_positions};
}
@ -161,8 +161,8 @@ fn start_tag(w: &writer, tag_id: uint) {
write_vint(w.writer, tag_id);
// Write a placeholder four-byte size.
w.size_positions += ~[w.writer.tell()];
let zeroes: [u8] = ~[0u8, 0u8, 0u8, 0u8];
w.size_positions += [w.writer.tell()];
let zeroes: [u8] = [0u8, 0u8, 0u8, 0u8];
w.writer.write(zeroes);
}
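A worked encode/decode round trip for the variable-length integers above, using n = 300 as an arbitrary example.
// Illustration only (n == 300u, i.e. 0x12c):
//   write_sized_vint(w, 300u, 2u)
//     buf = [0x40u8 | (300u >> 8u as u8), 300u & 0xffu as u8]
//         = [0x41u8, 0x2cu8]
//   vint_at([0x41u8, 0x2cu8], 0u)
//     a = 0x41u8;  a & 0x80u8 == 0u8,  a & 0x40u8 != 0u8
//     val  = (0x41u8 & 0x3fu8 as uint) << 8u | (0x2cu8 as uint) = 300u
//     next = 2u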

View file

@ -5,32 +5,33 @@ import option::none;
tag t<T, U> { left(T); right(U); }
fn either<T, U, V>(f_left: &block(&T) -> V, f_right: &block(&U) -> V,
value: &t<T, U>) -> V {
fn either<T, U,
V>(f_left: &block(&T) -> V, f_right: &block(&U) -> V,
value: &t<T, U>) -> V {
alt value { left(l) { f_left(l) } right(r) { f_right(r) } }
}
fn lefts<T, U>(eithers: &[t<T, U>]) -> [T] {
let result: [T] = ~[];
let result: [T] = [];
for elt: t<T, U> in eithers {
alt elt { left(l) { result += ~[l] } _ {/* fallthrough */ } }
alt elt { left(l) { result += [l] } _ {/* fallthrough */ } }
}
ret result;
}
fn rights<T, U>(eithers: &[t<T, U>]) -> [U] {
let result: [U] = ~[];
let result: [U] = [];
for elt: t<T, U> in eithers {
alt elt { right(r) { result += ~[r] } _ {/* fallthrough */ } }
alt elt { right(r) { result += [r] } _ {/* fallthrough */ } }
}
ret result;
}
fn partition<T, U>(eithers: &[t<T, U>]) -> {lefts: [T], rights: [U]} {
let lefts: [T] = ~[];
let rights: [U] = ~[];
let lefts: [T] = [];
let rights: [U] = [];
for elt: t<T, U> in eithers {
alt elt { left(l) { lefts += ~[l] } right(r) { rights += ~[r] } }
alt elt { left(l) { lefts += [l] } right(r) { rights += [r] } }
}
ret {lefts: lefts, rights: rights};
}

View file

@ -69,16 +69,16 @@ mod ct {
// A fragment of the output sequence
tag piece { piece_string(str); piece_conv(conv); }
type error_fn = fn(str) -> ! ;
type error_fn = fn(str) -> ! ;
fn parse_fmt_string(s: str, error: error_fn) -> [piece] {
let pieces: [piece] = ~[];
let pieces: [piece] = [];
let lim = str::byte_len(s);
let buf = "";
fn flush_buf(buf: str, pieces: &mutable [piece]) -> str {
if str::byte_len(buf) > 0u {
let piece = piece_string(buf);
pieces += ~[piece];
pieces += [piece];
}
ret "";
}
@ -96,7 +96,7 @@ mod ct {
} else {
buf = flush_buf(buf, pieces);
let rs = parse_conversion(s, i, lim, error);
pieces += ~[rs.piece];
pieces += [rs.piece];
i = rs.next;
}
} else { buf += curr; i += 1u; }
@ -107,7 +107,7 @@ mod ct {
fn peek_num(s: str, i: uint, lim: uint) ->
option::t<{num: uint, next: uint}> {
if i >= lim { ret none; }
let c = s.(i);
let c = s[i];
if !('0' as u8 <= c && c <= '9' as u8) { ret option::none; }
let n = c - ('0' as u8) as uint;
ret alt peek_num(s, i + 1u, lim) {
@ -143,7 +143,7 @@ mod ct {
some(t) {
let n = t.num;
let j = t.next;
if j < lim && s.(j) == '$' as u8 {
if j < lim && s[j] == '$' as u8 {
{param: some(n as int), next: j + 1u}
} else { {param: none, next: i} }
}
@ -151,7 +151,7 @@ mod ct {
}
fn parse_flags(s: str, i: uint, lim: uint) ->
{flags: [flag], next: uint} {
let noflags: [flag] = ~[];
let noflags: [flag] = [];
if i >= lim { ret {flags: noflags, next: i}; }
// FIXME: This recursion generates illegal instructions if the return
@ -161,27 +161,27 @@ mod ct {
let next = parse_flags(s, i + 1u, lim);
let rest = next.flags;
let j = next.next;
let curr: [flag] = ~[f];
let curr: [flag] = [f];
ret @{flags: curr + rest, next: j};
}
let more = bind more_(_, s, i, lim);
let f = s.(i);
let f = s[i];
ret if f == '-' as u8 {
*more(flag_left_justify)
} else if (f == '0' as u8) {
} else if f == '0' as u8 {
*more(flag_left_zero_pad)
} else if (f == ' ' as u8) {
} else if f == ' ' as u8 {
*more(flag_space_for_sign)
} else if (f == '+' as u8) {
} else if f == '+' as u8 {
*more(flag_sign_always)
} else if (f == '#' as u8) {
} else if f == '#' as u8 {
*more(flag_alternate)
} else { {flags: noflags, next: i} };
}
fn parse_count(s: str, i: uint, lim: uint) -> {count: count, next: uint} {
ret if i >= lim {
{count: count_implied, next: i}
} else if (s.(i) == '*' as u8) {
} else if s[i] == '*' as u8 {
let param = parse_parameter(s, i + 1u, lim);
let j = param.next;
alt param.param {
@ -202,7 +202,7 @@ mod ct {
{count: count, next: uint} {
ret if i >= lim {
{count: count_implied, next: i}
} else if (s.(i) == '.' as u8) {
} else if s[i] == '.' as u8 {
let count = parse_count(s, i + 1u, lim);
@ -223,21 +223,21 @@ mod ct {
let t =
if str::eq(tstr, "b") {
ty_bool
} else if (str::eq(tstr, "s")) {
} else if str::eq(tstr, "s") {
ty_str
} else if (str::eq(tstr, "c")) {
} else if str::eq(tstr, "c") {
ty_char
} else if (str::eq(tstr, "d") || str::eq(tstr, "i")) {
} else if str::eq(tstr, "d") || str::eq(tstr, "i") {
ty_int(signed)
} else if (str::eq(tstr, "u")) {
} else if str::eq(tstr, "u") {
ty_int(unsigned)
} else if (str::eq(tstr, "x")) {
} else if str::eq(tstr, "x") {
ty_hex(case_lower)
} else if (str::eq(tstr, "X")) {
} else if str::eq(tstr, "X") {
ty_hex(case_upper)
} else if (str::eq(tstr, "t")) {
} else if str::eq(tstr, "t") {
ty_bits
} else if (str::eq(tstr, "o")) {
} else if str::eq(tstr, "o") {
ty_octal
} else { error("unknown type in conversion: " + tstr) };
ret {ty: t, next: i + 1u};
@ -277,7 +277,7 @@ mod rt {
if 0 <= i {
if have_flag(cv.flags, flag_sign_always) {
s = "+" + s;
} else if (have_flag(cv.flags, flag_space_for_sign)) {
} else if have_flag(cv.flags, flag_space_for_sign) {
s = " " + s;
}
}
@ -404,9 +404,9 @@ mod rt {
// instead.
if signed && zero_padding && str::byte_len(s) > 0u {
let head = s.(0);
let head = s[0];
if head == '+' as u8 || head == '-' as u8 || head == ' ' as u8 {
let headstr = str::unsafe_from_bytes(~[head]);
let headstr = str::unsafe_from_bytes([head]);
let bytelen = str::byte_len(s);
let numpart = str::substr(s, 1u, bytelen - 1u);
ret headstr + padstr + numpart;
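A short trace of the branches that are visible in the hunks above; the surrounding parse_conversion glue is outside these hunks, so only the flag and type mappings are shown.
// Illustration only, following the visible branches:
//   parse_flags: '-' -> flag_left_justify     '0' -> flag_left_zero_pad
//                ' ' -> flag_space_for_sign   '+' -> flag_sign_always
//                '#' -> flag_alternate
//   parse_type:  "d"/"i" -> ty_int(signed)    "u" -> ty_int(unsigned)
//                "x" -> ty_hex(case_lower)    "X" -> ty_hex(case_upper)
//                "t" -> ty_bits               "o" -> ty_octal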

View file

@ -35,7 +35,7 @@ fn basename(p: path) -> path {
// FIXME: Need some typestate to avoid bounds check when len(pre) == 0
fn connect(pre: path, post: path) -> path {
let len = str::byte_len(pre);
ret if pre.(len - 1u) == os_fs::path_sep as u8 {
ret if pre[len - 1u] == os_fs::path_sep as u8 {
// Trailing '/'?
pre + post
@ -46,11 +46,11 @@ fn file_is_dir(p: path) -> bool { ret rustrt::rust_file_is_dir(p) != 0; }
fn list_dir(p: path) -> [str] {
let pl = str::byte_len(p);
if pl == 0u || p.(pl - 1u) as char != os_fs::path_sep { p += path_sep(); }
let full_paths: [str] = ~[];
if pl == 0u || p[pl - 1u] as char != os_fs::path_sep { p += path_sep(); }
let full_paths: [str] = [];
for filename: str in os_fs::list_dir(p) {
if !str::eq(filename, ".") {
if !str::eq(filename, "..") { full_paths += ~[p + filename]; }
if !str::eq(filename, "..") { full_paths += [p + filename]; }
}
}
ret full_paths;

View file

@ -28,7 +28,7 @@ fn getenv(n: str) -> option::t<str> {
let res = os::kernel32::GetEnvironmentVariableA(nbuf, vbuf, nsize);
if res == 0u {
ret option::none;
} else if (res < nsize) {
} else if res < nsize {
ret option::some(str::str_from_cstr(vbuf));
} else { nsize = res; }
}

View file

@ -68,7 +68,7 @@ tag optval { val(str); given; }
type match = {opts: [opt], vals: [mutable [optval]], free: [str]};
fn is_arg(arg: str) -> bool {
ret str::byte_len(arg) > 1u && arg.(0) == '-' as u8;
ret str::byte_len(arg) > 1u && arg[0] == '-' as u8;
}
fn name_str(nm: name) -> str {
@ -78,7 +78,7 @@ fn name_str(nm: name) -> str {
fn find_opt(opts: &[opt], nm: name) -> option::t<uint> {
let i = 0u;
let l = vec::len::<opt>(opts);
while i < l { if opts.(i).name == nm { ret some::<uint>(i); } i += 1u; }
while i < l { if opts[i].name == nm { ret some::<uint>(i); } i += 1u; }
ret none::<uint>;
}
@ -108,30 +108,30 @@ tag result { success(match); failure(fail_); }
fn getopts(args: &[str], opts: &[opt]) -> result {
let n_opts = vec::len::<opt>(opts);
fn f(_x: uint) -> [optval] { ret ~[]; }
fn f(_x: uint) -> [optval] { ret []; }
let vals = vec::init_fn_mut::<[optval]>(f, n_opts);
let free: [str] = ~[];
let free: [str] = [];
let l = vec::len::<str>(args);
let i = 0u;
while i < l {
let cur = args.(i);
let cur = args[i];
let curlen = str::byte_len(cur);
if !is_arg(cur) {
free += ~[cur];
} else if (str::eq(cur, "--")) {
free += [cur];
} else if str::eq(cur, "--") {
let j = i + 1u;
while j < l { free += ~[args.(j)]; j += 1u; }
while j < l { free += [args[j]]; j += 1u; }
break;
} else {
let names;
let i_arg = option::none::<str>;
if cur.(1) == '-' as u8 {
if cur[1] == '-' as u8 {
let tail = str::slice(cur, 2u, curlen);
let eq = str::index(tail, '=' as u8);
if eq == -1 {
names = ~[long(tail)];
names = [long(tail)];
} else {
names = ~[long(str::slice(tail, 0u, eq as uint))];
names = [long(str::slice(tail, 0u, eq as uint))];
i_arg =
option::some::<str>(str::slice(tail,
(eq as uint) + 1u,
@ -139,10 +139,10 @@ fn getopts(args: &[str], opts: &[opt]) -> result {
}
} else {
let j = 1u;
names = ~[];
names = [];
while j < curlen {
let range = str::char_range_at(cur, j);
names += ~[short(range.ch)];
names += [short(range.ch)];
j = range.next;
}
}
@ -154,27 +154,27 @@ fn getopts(args: &[str], opts: &[opt]) -> result {
some(id) { optid = id; }
none. { ret failure(unrecognized_option(name_str(nm))); }
}
alt opts.(optid).hasarg {
alt opts[optid].hasarg {
no. {
if !option::is_none::<str>(i_arg) {
ret failure(unexpected_argument(name_str(nm)));
}
vals.(optid) += ~[given];
vals[optid] += [given];
}
maybe. {
if !option::is_none::<str>(i_arg) {
vals.(optid) += ~[val(option::get(i_arg))];
} else if (name_pos < vec::len::<name>(names) ||
i + 1u == l || is_arg(args.(i + 1u))) {
vals.(optid) += ~[given];
} else { i += 1u; vals.(optid) += ~[val(args.(i))]; }
vals[optid] += [val(option::get(i_arg))];
} else if name_pos < vec::len::<name>(names) ||
i + 1u == l || is_arg(args[i + 1u]) {
vals[optid] += [given];
} else { i += 1u; vals[optid] += [val(args[i])]; }
}
yes. {
if !option::is_none::<str>(i_arg) {
vals.(optid) += ~[val(option::get::<str>(i_arg))];
} else if (i + 1u == l) {
vals[optid] += [val(option::get::<str>(i_arg))];
} else if i + 1u == l {
ret failure(argument_missing(name_str(nm)));
} else { i += 1u; vals.(optid) += ~[val(args.(i))]; }
} else { i += 1u; vals[optid] += [val(args[i])]; }
}
}
}
@ -183,16 +183,16 @@ fn getopts(args: &[str], opts: &[opt]) -> result {
}
i = 0u;
while i < n_opts {
let n = vec::len::<optval>(vals.(i));
let occ = opts.(i).occur;
let n = vec::len::<optval>(vals[i]);
let occ = opts[i].occur;
if occ == req {
if n == 0u {
ret failure(option_missing(name_str(opts.(i).name)));
ret failure(option_missing(name_str(opts[i].name)));
}
}
if occ != multi {
if n > 1u {
ret failure(option_duplicated(name_str(opts.(i).name)));
ret failure(option_duplicated(name_str(opts[i].name)));
}
}
i += 1u;
@ -202,12 +202,12 @@ fn getopts(args: &[str], opts: &[opt]) -> result {
fn opt_vals(m: &match, nm: str) -> [optval] {
ret alt find_opt(m.opts, mkname(nm)) {
some(id) { m.vals.(id) }
some(id) { m.vals[id] }
none. { log_err "No option '" + nm + "' defined."; fail }
};
}
fn opt_val(m: &match, nm: str) -> optval { ret opt_vals(m, nm).(0); }
fn opt_val(m: &match, nm: str) -> optval { ret opt_vals(m, nm)[0]; }
fn opt_present(m: &match, nm: str) -> bool {
ret vec::len::<optval>(opt_vals(m, nm)) > 0u;
@ -218,9 +218,9 @@ fn opt_str(m: &match, nm: str) -> str {
}
fn opt_strs(m: &match, nm: str) -> [str] {
let acc: [str] = ~[];
let acc: [str] = [];
for v: optval in opt_vals(m, nm) {
alt v { val(s) { acc += ~[s]; } _ { } }
alt v { val(s) { acc += [s]; } _ { } }
}
ret acc;
}
@ -228,7 +228,7 @@ fn opt_strs(m: &match, nm: str) -> [str] {
fn opt_maybe_str(m: &match, nm: str) -> option::t<str> {
let vals = opt_vals(m, nm);
if vec::len::<optval>(vals) == 0u { ret none::<str>; }
ret alt vals.(0) { val(s) { some::<str>(s) } _ { none::<str> } };
ret alt vals[0] { val(s) { some::<str>(s) } _ { none::<str> } };
}
@ -238,7 +238,7 @@ fn opt_maybe_str(m: &match, nm: str) -> option::t<str> {
fn opt_default(m: &match, nm: str, def: str) -> option::t<str> {
let vals = opt_vals(m, nm);
if vec::len::<optval>(vals) == 0u { ret none::<str>; }
ret alt vals.(0) { val(s) { some::<str>(s) } _ { some::<str>(def) } }
ret alt vals[0] { val(s) { some::<str>(s) } _ { some::<str>(def) } }
}
// Local Variables:
// mode: rust;
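A hedged trace of how the argument loop above classifies a few made-up command-line strings; none of these values come from the commit.
// Illustration only:
//   "input.rs"  !is_arg(cur)                  -> free += ["input.rs"]
//   "--"        str::eq(cur, "--")            -> remaining args all go to free
//   "--out=x"   cur[1] == '-' as u8, eq == 3  -> names = [long("out")],
//                                                i_arg = some("x")
//   "-vq"       short cluster                 -> names = [short('v'), short('q')]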

View file

@ -52,7 +52,7 @@ fn str(i: int) -> str { ret to_str(i, 10u); }
fn pow(base: int, exponent: uint) -> int {
ret if exponent == 0u {
1
} else if (base == 0) {
} else if base == 0 {
0
} else {
let accum = base;

View file

@ -15,53 +15,49 @@ tag seek_style { seek_set; seek_end; seek_cur; }
// The raw underlying reader class. All readers must implement this.
type buf_reader =
// FIXME: Seekable really should be orthogonal. We will need
// inheritance.
obj {
fn read(uint) -> [u8] ;
fn read_byte() -> int ;
fn unread_byte(int) ;
fn eof() -> bool ;
fn seek(int, seek_style) ;
fn tell() -> uint ;
fn read(uint) -> [u8];
fn read_byte() -> int;
fn unread_byte(int);
fn eof() -> bool;
fn seek(int, seek_style);
fn tell() -> uint;
};
// Convenience methods for reading.
type reader =
// FIXME: This should inherit from buf_reader.
// FIXME: eventually u64
// FIXME: eventually u64
obj {
fn get_buf_reader() -> buf_reader ;
fn read_byte() -> int ;
fn unread_byte(int) ;
fn read_bytes(uint) -> [u8] ;
fn read_char() -> char ;
fn eof() -> bool ;
fn read_line() -> str ;
fn read_c_str() -> str ;
fn read_le_uint(uint) -> uint ;
fn read_le_int(uint) -> int ;
fn read_be_uint(uint) -> uint ;
fn read_whole_stream() -> [u8] ;
fn seek(int, seek_style) ;
fn tell() -> uint ;
fn get_buf_reader() -> buf_reader;
fn read_byte() -> int;
fn unread_byte(int);
fn read_bytes(uint) -> [u8];
fn read_char() -> char;
fn eof() -> bool;
fn read_line() -> str;
fn read_c_str() -> str;
fn read_le_uint(uint) -> uint;
fn read_le_int(uint) -> int;
fn read_be_uint(uint) -> uint;
fn read_whole_stream() -> [u8];
fn seek(int, seek_style);
fn tell() -> uint;
};
fn convert_whence(whence: seek_style) -> int {
ret alt whence { seek_set. { 0 } seek_cur. { 1 } seek_end. { 2 } };
}
resource FILE_res(f: os::libc::FILE) {
os::libc::fclose(f);
}
resource FILE_res(f: os::libc::FILE) { os::libc::fclose(f); }
obj FILE_buf_reader(f: os::libc::FILE, res: option::t<@FILE_res>) {
fn read(len: uint) -> [u8] {
let buf = ~[];
let buf = [];
vec::reserve::<u8>(buf, len);
let read = os::libc::fread(vec::to_ptr::<u8>(buf), 1u, len, f);
vec::unsafe::set_len::<u8>(buf, read);
@ -73,9 +69,7 @@ obj FILE_buf_reader(f: os::libc::FILE, res: option::t<@FILE_res>) {
fn seek(offset: int, whence: seek_style) {
assert (os::libc::fseek(f, offset, convert_whence(whence)) == 0);
}
fn tell() -> uint {
ret os::libc::ftell(f) as uint;
}
fn tell() -> uint { ret os::libc::ftell(f) as uint; }
}
@ -111,7 +105,7 @@ obj new_reader(rdr: buf_reader) {
}
fn eof() -> bool { ret rdr.eof(); }
fn read_line() -> str {
let buf: [u8] = ~[];
let buf: [u8] = [];
// No break yet in rustc
let go_on = true;
@ -119,16 +113,16 @@ obj new_reader(rdr: buf_reader) {
let ch = rdr.read_byte();
if ch == -1 || ch == 10 {
go_on = false;
} else { buf += ~[ch as u8]; }
} else { buf += [ch as u8]; }
}
ret str::unsafe_from_bytes(buf);
}
fn read_c_str() -> str {
let buf: [u8] = ~[];
let buf: [u8] = [];
let go_on = true;
while go_on {
let ch = rdr.read_byte();
if ch < 1 { go_on = false; } else { buf += ~[ch as u8]; }
if ch < 1 { go_on = false; } else { buf += [ch as u8]; }
}
ret str::unsafe_from_bytes(buf);
}
@ -166,7 +160,7 @@ obj new_reader(rdr: buf_reader) {
ret val;
}
fn read_whole_stream() -> [u8] {
let buf: [u8] = ~[];
let buf: [u8] = [];
while !rdr.eof() { buf += rdr.read(2048u); }
ret buf;
}
@ -205,7 +199,7 @@ obj byte_buf_reader(bbuf: byte_buf) {
}
fn read_byte() -> int {
if bbuf.pos == vec::len::<u8>(bbuf.buf) { ret -1; }
let b = bbuf.buf.(bbuf.pos);
let b = bbuf.buf[bbuf.pos];
bbuf.pos += 1u;
ret b as int;
}
@ -232,15 +226,14 @@ fn string_reader(s: &str) -> reader {
tag fileflag { append; create; truncate; none; }
type buf_writer =
// FIXME: Seekable really should be orthogonal. We will need
// inheritance.
// FIXME: eventually u64
// FIXME: eventually u64
obj {
fn write(&[u8]) ;
fn seek(int, seek_style) ;
fn tell() -> uint ;
fn write(&[u8]);
fn seek(int, seek_style);
fn tell() -> uint;
};
obj FILE_writer(f: os::libc::FILE, res: option::t<@FILE_res>) {
@ -253,14 +246,10 @@ obj FILE_writer(f: os::libc::FILE, res: option::t<@FILE_res>) {
fn seek(offset: int, whence: seek_style) {
assert (os::libc::fseek(f, offset, convert_whence(whence)) == 0);
}
fn tell() -> uint {
ret os::libc::ftell(f) as uint;
}
fn tell() -> uint { ret os::libc::ftell(f) as uint; }
}
resource fd_res(fd: int) {
os::libc::close(fd);
}
resource fd_res(fd: int) { os::libc::close(fd); }
obj fd_buf_writer(fd: int, res: option::t<@fd_res>) {
fn write(v: &[u8]) {
@ -312,32 +301,31 @@ fn file_buf_writer(path: str, flags: &[fileflag]) -> buf_writer {
}
type writer =
// write_str will continue to do utf-8 output only. an alternative
// function will be provided for general encoded string output
obj {
fn get_buf_writer() -> buf_writer ;
fn write_str(str) ;
fn write_line(str) ;
fn write_char(char) ;
fn write_int(int) ;
fn write_uint(uint) ;
fn write_bytes(&[u8]) ;
fn write_le_uint(uint, uint) ;
fn write_le_int(int, uint) ;
fn write_be_uint(uint, uint) ;
fn get_buf_writer() -> buf_writer;
fn write_str(str);
fn write_line(str);
fn write_char(char);
fn write_int(int);
fn write_uint(uint);
fn write_bytes(&[u8]);
fn write_le_uint(uint, uint);
fn write_le_int(int, uint);
fn write_be_uint(uint, uint);
};
fn uint_to_le_bytes(n: uint, size: uint) -> [u8] {
let bytes: [u8] = ~[];
while size > 0u { bytes += ~[n & 255u as u8]; n >>= 8u; size -= 1u; }
let bytes: [u8] = [];
while size > 0u { bytes += [n & 255u as u8]; n >>= 8u; size -= 1u; }
ret bytes;
}
fn uint_to_be_bytes(n: uint, size: uint) -> [u8] {
let bytes: [u8] = ~[];
let bytes: [u8] = [];
let i = size - 1u as int;
while i >= 0 { bytes += ~[n >> (i * 8 as uint) & 255u as u8]; i -= 1; }
while i >= 0 { bytes += [n >> (i * 8 as uint) & 255u as u8]; i -= 1; }
ret bytes;
}
@ -354,9 +342,7 @@ obj new_writer(out: buf_writer) {
out.write(str::bytes(str::from_char(ch)));
}
fn write_int(n: int) { out.write(str::bytes(int::to_str(n, 10u))); }
fn write_uint(n: uint) {
out.write(str::bytes(uint::to_str(n, 10u)));
}
fn write_uint(n: uint) { out.write(str::bytes(uint::to_str(n, 10u))); }
fn write_bytes(bytes: &[u8]) { out.write(bytes); }
fn write_le_uint(n: uint, size: uint) {
out.write(uint_to_le_bytes(n, size));
@ -391,8 +377,8 @@ fn stdout() -> writer { ret new_writer(fd_buf_writer(1, option::none)); }
type str_writer =
obj {
fn get_writer() -> writer ;
fn get_str() -> str ;
fn get_writer() -> writer;
fn get_str() -> str;
};
type mutable_byte_buf = @{mutable buf: [mutable u8], mutable pos: uint};
@ -402,7 +388,7 @@ obj byte_buf_writer(buf: mutable_byte_buf) {
// Fast path.
if buf.pos == vec::len(buf.buf) {
for b: u8 in v { buf.buf += ~[mutable b]; }
for b: u8 in v { buf.buf += [mutable b]; }
buf.pos += vec::len::<u8>(v);
ret;
}
@ -411,10 +397,10 @@ obj byte_buf_writer(buf: mutable_byte_buf) {
let vlen = vec::len::<u8>(v);
let vpos = 0u;
while vpos < vlen {
let b = v.(vpos);
let b = v[vpos];
if buf.pos == vec::len(buf.buf) {
buf.buf += ~[mutable b];
} else { buf.buf.(buf.pos) = b; }
buf.buf += [mutable b];
} else { buf.buf[buf.pos] = b; }
buf.pos += 1u;
vpos += 1u;
}
@ -430,7 +416,7 @@ obj byte_buf_writer(buf: mutable_byte_buf) {
fn string_writer() -> str_writer {
// FIXME: yikes, this is bad. Needs fixing of mutable syntax.
let b: [mutable u8] = ~[mutable 0u8];
let b: [mutable u8] = [mutable 0u8];
vec::pop(b);
let buf: mutable_byte_buf = @{mutable buf: b, mutable pos: 0u};
obj str_writer_wrap(wr: writer, buf: mutable_byte_buf) {
@ -451,7 +437,7 @@ fn seek_in_buf(offset: int, pos: uint, len: uint, whence: seek_style) ->
seek_cur. { bpos += offset; }
seek_end. { bpos = blen + offset; }
}
if bpos < 0 { bpos = 0; } else if (bpos > blen) { bpos = blen; }
if bpos < 0 { bpos = 0; } else if bpos > blen { bpos = blen; }
ret bpos as uint;
}
@ -460,6 +446,7 @@ fn read_whole_file_str(file: &str) -> str {
}
fn read_whole_file(file: &str) -> [u8] {
// FIXME: There's a lot of copying here
file_reader(file).read_whole_stream()
}
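A worked example for the two byte-order helpers in the hunks above, with an arbitrary 16-bit value.
// Illustration only (n == 0x1234u, size == 2u):
//   uint_to_le_bytes(0x1234u, 2u)  ->  [0x34u8, 0x12u8]   // low byte first
//   uint_to_be_bytes(0x1234u, 2u)  ->  [0x12u8, 0x34u8]   // high byte first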

View file

@ -13,7 +13,7 @@ fn from_vec<@T>(v: &[T]) -> list<T> {
ret l;
}
fn foldl<@T, @U>(ls_: &list<T>, u: &U, f: &block(&T, &U) -> U ) -> U {
fn foldl<@T, @U>(ls_: &list<T>, u: &U, f: &block(&T, &U) -> U) -> U {
let accum: U = u;
let ls = ls_;
while true {
@ -25,8 +25,8 @@ fn foldl<@T, @U>(ls_: &list<T>, u: &U, f: &block(&T, &U) -> U ) -> U {
ret accum;
}
fn find<@T, @U>(ls_: &list<T>, f: &block(&T) -> option::t<U>)
-> option::t<U> {
fn find<@T, @U>(ls_: &list<T>, f: &block(&T) -> option::t<U>) ->
option::t<U> {
let ls = ls_;
while true {
alt ls {
@ -56,26 +56,17 @@ fn length<@T>(ls: &list<T>) -> uint {
}
fn cdr<@T>(ls: &list<T>) -> list<T> {
alt ls {
cons(_, tl) { ret *tl; }
nil. { fail "list empty" }
}
alt ls { cons(_, tl) { ret *tl; } nil. { fail "list empty" } }
}
fn car<@T>(ls: &list<T>) -> T {
alt ls {
cons(hd, _) { ret hd; }
nil. { fail "list empty" }
}
alt ls { cons(hd, _) { ret hd; } nil. { fail "list empty" } }
}
fn append<@T>(l: &list<T>, m: &list<T>) -> list<T> {
alt l {
nil. { ret m; }
cons(x, xs) {
let rest = append(*xs, m);
ret cons(x, @rest);
}
cons(x, xs) { let rest = append(*xs, m); ret cons(x, @rest); }
}
}

View file

@ -1,21 +1,21 @@
/**
* Hashmap implementation.
*/
type hashfn<K> = fn(&K) -> uint ;
type hashfn<K> = fn(&K) -> uint;
type eqfn<K> = fn(&K, &K) -> bool ;
type eqfn<K> = fn(&K, &K) -> bool;
type hashmap<K, V> =
obj {
fn size() -> uint ;
fn insert(&K, &V) -> bool ;
fn contains_key(&K) -> bool ;
fn get(&K) -> V ;
fn find(&K) -> option::t<V> ;
fn remove(&K) -> option::t<V> ;
fn rehash() ;
iter items() -> @{key: K, val: V} ;
iter keys() -> K ;
fn size() -> uint;
fn insert(&K, &V) -> bool;
fn contains_key(&K) -> bool;
fn get(&K) -> V;
fn find(&K) -> option::t<V>;
fn remove(&K) -> option::t<V>;
fn rehash();
iter items() -> @{key: K, val: V};
iter keys() -> K;
};
type hashset<K> = hashmap<K, ()>;
@ -26,7 +26,7 @@ fn mk_hashmap<@K, @V>(hasher: &hashfn<K>, eqer: &eqfn<K>) -> hashmap<K, V> {
let load_factor: util::rational = {num: 3, den: 4};
tag bucket<@K, @V> { nil; deleted; some(K, V); }
fn make_buckets<@K, @V>(nbkts: uint) -> [mutable (bucket<K, V>)] {
fn make_buckets<@K, @V>(nbkts: uint) -> [mutable bucket<K, V>] {
ret vec::init_elt_mut::<bucket<K, V>>(nil::<K, V>, nbkts);
}
// Derive two hash functions from the one given by taking the upper
@ -53,37 +53,36 @@ fn mk_hashmap<@K, @V>(hasher: &hashfn<K>, eqer: &eqfn<K>) -> hashmap<K, V> {
* will fail.
*/
fn insert_common<@K, @V>(hasher: &hashfn<K>, eqer: &eqfn<K>,
bkts: &[mutable bucket<K, V>], nbkts: uint,
key: &K, val: &V) -> bool {
fn insert_common<@K,
@V>(hasher: &hashfn<K>, eqer: &eqfn<K>,
bkts: &[mutable bucket<K, V>], nbkts: uint, key: &K,
val: &V) -> bool {
let i: uint = 0u;
let h: uint = hasher(key);
while i < nbkts {
let j: uint = hash(h, nbkts, i);
alt bkts.(j) {
alt bkts[j] {
some(k, _) {
// Copy key to please alias analysis.
let k_ = k;
if eqer(key, k_) {
bkts.(j) = some(k_, val);
ret false;
}
if eqer(key, k_) { bkts[j] = some(k_, val); ret false; }
i += 1u;
}
_ { bkts.(j) = some(key, val); ret true; }
_ { bkts[j] = some(key, val); ret true; }
}
}
fail; // full table
}
fn find_common<@K, @V>(hasher: &hashfn<K>, eqer: &eqfn<K>,
bkts: &[mutable bucket<K, V>], nbkts: uint,
key: &K) -> option::t<V> {
fn find_common<@K,
@V>(hasher: &hashfn<K>, eqer: &eqfn<K>,
bkts: &[mutable bucket<K, V>], nbkts: uint, key: &K) ->
option::t<V> {
let i: uint = 0u;
let h: uint = hasher(key);
while i < nbkts {
let j: uint = hash(h, nbkts, i);
alt bkts.(j) {
alt bkts[j] {
some(k, v) {
// Copy to please alias analysis.
let k_ = k;
@ -97,9 +96,10 @@ fn mk_hashmap<@K, @V>(hasher: &hashfn<K>, eqer: &eqfn<K>) -> hashmap<K, V> {
}
ret option::none;
}
fn rehash<@K, @V>(hasher: &hashfn<K>, eqer: &eqfn<K>,
oldbkts: &[mutable bucket<K, V>], _noldbkts: uint,
newbkts: &[mutable bucket<K, V>], nnewbkts: uint) {
fn rehash<@K,
@V>(hasher: &hashfn<K>, eqer: &eqfn<K>,
oldbkts: &[mutable bucket<K, V>], _noldbkts: uint,
newbkts: &[mutable bucket<K, V>], nnewbkts: uint) {
for b: bucket<K, V> in oldbkts {
alt b {
some(k_, v_) {
@ -111,12 +111,13 @@ fn mk_hashmap<@K, @V>(hasher: &hashfn<K>, eqer: &eqfn<K>) -> hashmap<K, V> {
}
}
}
obj hashmap<@K, @V>(hasher: hashfn<K>,
eqer: eqfn<K>,
mutable bkts: [mutable bucket<K, V>],
mutable nbkts: uint,
mutable nelts: uint,
lf: util::rational) {
obj hashmap<@K,
@V>(hasher: hashfn<K>,
eqer: eqfn<K>,
mutable bkts: [mutable bucket<K, V>],
mutable nbkts: uint,
mutable nelts: uint,
lf: util::rational) {
fn size() -> uint { ret nelts; }
fn insert(key: &K, val: &V) -> bool {
let load: util::rational =
@ -154,12 +155,12 @@ fn mk_hashmap<@K, @V>(hasher: &hashfn<K>, eqer: &eqfn<K>) -> hashmap<K, V> {
let h: uint = hasher(key);
while i < nbkts {
let j: uint = hash(h, nbkts, i);
alt bkts.(j) {
alt bkts[j] {
some(k, v) {
let k_ = k;
let vo = option::some(v);
if eqer(key, k_) {
bkts.(j) = deleted;
bkts[j] = deleted;
nelts -= 1u;
ret vo;
}
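A sketch of the probe loop in insert_common above; hash() itself sits outside these hunks, so only the bucket handling visible here is traced.
// Probing sketch (hash() not shown in these hunks):
//   for i in 0u, 1u, ... while i < nbkts:
//     j = hash(h, nbkts, i)
//     bkts[j] == some(k, _), eqer(key, k)  -> bkts[j] = some(k, val); ret false
//     bkts[j] == some(_, _) otherwise      -> keep probing (i += 1u)
//     bkts[j] == nil or deleted            -> bkts[j] = some(key, val); ret true
//   loop exhausted                         -> fail   // table full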

View file

@ -2,29 +2,20 @@ import str;
import vec;
import uint;
tag ip_addr {
ipv4(u8, u8, u8, u8);
}
tag ip_addr { ipv4(u8, u8, u8, u8); }
fn format_addr(ip : ip_addr) -> str {
alt(ip) {
fn format_addr(ip: ip_addr) -> str {
alt ip {
ipv4(a, b, c, d) {
#fmt("%u.%u.%u.%u",
a as uint,
b as uint,
c as uint,
d as uint)
#fmt["%u.%u.%u.%u", a as uint, b as uint, c as uint, d as uint]
}
_ { fail "Unsupported address type"; }
}
}
fn parse_addr(ip : str) -> ip_addr {
let parts = vec::map(uint::from_str, str::split(ip, ".".(0)));
fn parse_addr(ip: str) -> ip_addr {
let parts = vec::map(uint::from_str, str::split(ip, "."[0]));
if vec::len(parts) != 4u { fail "Too many dots in IP address"; }
for i in parts { if i > 255u { fail "Invalid IP Address part."; } }
ipv4(parts.(0) as u8,
parts.(1) as u8,
parts.(2) as u8,
parts.(3) as u8)
ipv4(parts[0] as u8, parts[1] as u8, parts[2] as u8, parts[3] as u8)
}
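A hedged usage example for the two address helpers above; the addresses are arbitrary.
// Illustration only:
//   format_addr(ipv4(127u8, 0u8, 0u8, 1u8))  ->  "127.0.0.1"
//   parse_addr("10.1.2.3")                   ->  ipv4(10u8, 1u8, 2u8, 3u8)
// parse_addr splits on '.', maps uint::from_str over the pieces, and fails
// unless there are exactly four parts, each at most 255.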

View file

@ -3,10 +3,7 @@
tag t<@T> { none; some(T); }
fn get<@T>(opt: &t<T>) -> T {
alt opt {
some(x) { x }
none. { fail "option none" }
}
alt opt { some(x) { x } none. { fail "option none" } }
}
fn map<@T, @U>(f: &block(&T) -> U, opt: &t<T>) -> t<U> {

View file

@ -11,17 +11,16 @@ native "rust" mod rustrt {
fn rand_free(c: rctx);
}
type rng = obj { fn next() -> u32; };
type rng =
obj {
fn next() -> u32;
};
resource rand_res(c: rustrt::rctx) {
rustrt::rand_free(c);
}
resource rand_res(c: rustrt::rctx) { rustrt::rand_free(c); }
fn mk_rng() -> rng {
obj rt_rng(c: @rand_res) {
fn next() -> u32 {
ret rustrt::rand_next(**c);
}
fn next() -> u32 { ret rustrt::rand_next(**c); }
}
ret rt_rng(@rand_res(rustrt::rand_new()));
}


@ -13,9 +13,9 @@ native "rust" mod rustrt {
}
fn arg_vec(prog: str, args: &[str]) -> [sbuf] {
let argptrs = ~[str::buf(prog)];
for arg: str in args { argptrs += ~[str::buf(arg)]; }
argptrs += ~[0 as sbuf];
let argptrs = [str::buf(prog)];
for arg: str in args { argptrs += [str::buf(arg)]; }
argptrs += [0 as sbuf];
ret argptrs;
}
@ -24,8 +24,8 @@ fn spawn_process(prog: str, args: &[str], in_fd: int, out_fd: int,
// Note: we have to hold on to this vector reference while we hold a
// pointer to its buffer
let argv = arg_vec(prog, args);
let pid = rustrt::rust_run_program(
vec::to_ptr(argv), in_fd, out_fd, err_fd);
let pid =
rustrt::rust_run_program(vec::to_ptr(argv), in_fd, out_fd, err_fd);
ret pid;
}
@ -44,16 +44,15 @@ type program =
fn destroy();
};
resource program_res(p: program) {
p.destroy();
}
resource program_res(p: program) { p.destroy(); }
fn start_program(prog: str, args: &[str]) -> @program_res {
let pipe_input = os::pipe();
let pipe_output = os::pipe();
let pipe_err = os::pipe();
let pid = spawn_process(prog, args, pipe_input.in, pipe_output.out,
pipe_err.out);
let pid =
spawn_process(prog, args, pipe_input.in, pipe_output.out,
pipe_err.out);
if pid == -1 { fail; }
os::libc::close(pipe_input.in);
@ -66,16 +65,13 @@ fn start_program(prog: str, args: &[str]) -> @program_res {
mutable finished: bool) {
fn get_id() -> int { ret pid; }
fn input() -> io::writer {
ret io::new_writer(
io::fd_buf_writer(in_fd, option::none));
ret io::new_writer(io::fd_buf_writer(in_fd, option::none));
}
fn output() -> io::reader {
ret io::new_reader(
io::FILE_buf_reader(out_file, option::none));
ret io::new_reader(io::FILE_buf_reader(out_file, option::none));
}
fn err() -> io::reader {
ret io::new_reader(
io::FILE_buf_reader(err_file, option::none));
ret io::new_reader(io::FILE_buf_reader(err_file, option::none));
}
fn close_input() {
let invalid_fd = -1;
@ -96,11 +92,9 @@ fn start_program(prog: str, args: &[str]) -> @program_res {
os::libc::fclose(err_file);
}
}
ret @program_res(new_program(pid,
pipe_input.out,
ret @program_res(new_program(pid, pipe_input.out,
os::fd_FILE(pipe_output.in),
os::fd_FILE(pipe_err.in),
false));
os::fd_FILE(pipe_err.in), false));
}
fn read_all(rd: &io::reader) -> str {
@ -112,8 +106,8 @@ fn read_all(rd: &io::reader) -> str {
ret buf;
}
fn program_output(prog: str, args: [str])
-> {status: int, out: str, err: str} {
fn program_output(prog: str, args: [str]) ->
{status: int, out: str, err: str} {
let pr = start_program(prog, args);
pr.close_input();
ret {status: pr.finish(),


@ -9,7 +9,6 @@ export sha1;
export mk_sha1;
type sha1 =
// Provide message input as bytes
@ -25,11 +24,11 @@ type sha1 =
// Reset the sha1 state for reuse. This is called
// automatically during construction
obj {
fn input(&[u8]) ;
fn input_str(&str) ;
fn result() -> [u8] ;
fn result_str() -> str ;
fn reset() ;
fn input(&[u8]);
fn input_str(&str);
fn result() -> [u8];
fn result_str() -> str;
fn reset();
};
@ -65,7 +64,7 @@ fn mk_sha1() -> sha1 {
assert (!st.computed);
for element: u8 in msg {
st.msg_block.(st.msg_block_idx) = element;
st.msg_block[st.msg_block_idx] = element;
st.msg_block_idx += 1u;
st.len_low += 8u32;
if st.len_low == 0u32 {
@ -92,30 +91,29 @@ fn mk_sha1() -> sha1 {
t = 0;
while t < 16 {
let tmp;
tmp = (st.msg_block.(t * 4) as u32) << 24u32;
tmp = tmp | (st.msg_block.(t * 4 + 1) as u32) << 16u32;
tmp = tmp | (st.msg_block.(t * 4 + 2) as u32) << 8u32;
tmp = tmp | (st.msg_block.(t * 4 + 3) as u32);
w.(t) = tmp;
tmp = (st.msg_block[t * 4] as u32) << 24u32;
tmp = tmp | (st.msg_block[t * 4 + 1] as u32) << 16u32;
tmp = tmp | (st.msg_block[t * 4 + 2] as u32) << 8u32;
tmp = tmp | (st.msg_block[t * 4 + 3] as u32);
w[t] = tmp;
t += 1;
}
// Initialize the rest of vector w
while t < 80 {
let val = w.(t - 3) ^ w.(t - 8) ^ w.(t - 14) ^ w.(t - 16);
w.(t) = circular_shift(1u32, val);
let val = w[t - 3] ^ w[t - 8] ^ w[t - 14] ^ w[t - 16];
w[t] = circular_shift(1u32, val);
t += 1;
}
let a = st.h.(0);
let b = st.h.(1);
let c = st.h.(2);
let d = st.h.(3);
let e = st.h.(4);
let a = st.h[0];
let b = st.h[1];
let c = st.h[2];
let d = st.h[3];
let e = st.h[4];
let temp: u32;
t = 0;
while t < 20 {
temp =
circular_shift(5u32, a) + (b & c | !b & d) + e + w.(t) + k0;
temp = circular_shift(5u32, a) + (b & c | !b & d) + e + w[t] + k0;
e = d;
d = c;
c = circular_shift(30u32, b);
@ -124,7 +122,7 @@ fn mk_sha1() -> sha1 {
t += 1;
}
while t < 40 {
temp = circular_shift(5u32, a) + (b ^ c ^ d) + e + w.(t) + k1;
temp = circular_shift(5u32, a) + (b ^ c ^ d) + e + w[t] + k1;
e = d;
d = c;
c = circular_shift(30u32, b);
@ -134,8 +132,8 @@ fn mk_sha1() -> sha1 {
}
while t < 60 {
temp =
circular_shift(5u32, a) + (b & c | b & d | c & d) + e + w.(t)
+ k2;
circular_shift(5u32, a) + (b & c | b & d | c & d) + e + w[t] +
k2;
e = d;
d = c;
c = circular_shift(30u32, b);
@ -144,7 +142,7 @@ fn mk_sha1() -> sha1 {
t += 1;
}
while t < 80 {
temp = circular_shift(5u32, a) + (b ^ c ^ d) + e + w.(t) + k3;
temp = circular_shift(5u32, a) + (b ^ c ^ d) + e + w[t] + k3;
e = d;
d = c;
c = circular_shift(30u32, b);
@ -152,11 +150,11 @@ fn mk_sha1() -> sha1 {
a = temp;
t += 1;
}
st.h.(0) = st.h.(0) + a;
st.h.(1) = st.h.(1) + b;
st.h.(2) = st.h.(2) + c;
st.h.(3) = st.h.(3) + d;
st.h.(4) = st.h.(4) + e;
st.h[0] = st.h[0] + a;
st.h[1] = st.h[1] + b;
st.h[2] = st.h[2] + c;
st.h[3] = st.h[3] + d;
st.h[4] = st.h[4] + e;
st.msg_block_idx = 0u;
}
fn circular_shift(bits: u32, word: u32) -> u32 {
@ -164,13 +162,13 @@ fn mk_sha1() -> sha1 {
}
fn mk_result(st: &sha1state) -> [u8] {
if !st.computed { pad_msg(st); st.computed = true; }
let rs: [u8] = ~[];
let rs: [u8] = [];
for hpart: u32 in st.h {
let a = hpart >> 24u32 & 0xFFu32 as u8;
let b = hpart >> 16u32 & 0xFFu32 as u8;
let c = hpart >> 8u32 & 0xFFu32 as u8;
let d = hpart & 0xFFu32 as u8;
rs += ~[a, b, c, d];
rs += [a, b, c, d];
}
ret rs;
}
@ -195,31 +193,31 @@ fn mk_sha1() -> sha1 {
*/
if st.msg_block_idx > 55u {
st.msg_block.(st.msg_block_idx) = 0x80u8;
st.msg_block[st.msg_block_idx] = 0x80u8;
st.msg_block_idx += 1u;
while st.msg_block_idx < msg_block_len {
st.msg_block.(st.msg_block_idx) = 0u8;
st.msg_block[st.msg_block_idx] = 0u8;
st.msg_block_idx += 1u;
}
process_msg_block(st);
} else {
st.msg_block.(st.msg_block_idx) = 0x80u8;
st.msg_block[st.msg_block_idx] = 0x80u8;
st.msg_block_idx += 1u;
}
while st.msg_block_idx < 56u {
st.msg_block.(st.msg_block_idx) = 0u8;
st.msg_block[st.msg_block_idx] = 0u8;
st.msg_block_idx += 1u;
}
// Store the message length as the last 8 octets
st.msg_block.(56) = st.len_high >> 24u32 & 0xFFu32 as u8;
st.msg_block.(57) = st.len_high >> 16u32 & 0xFFu32 as u8;
st.msg_block.(58) = st.len_high >> 8u32 & 0xFFu32 as u8;
st.msg_block.(59) = st.len_high & 0xFFu32 as u8;
st.msg_block.(60) = st.len_low >> 24u32 & 0xFFu32 as u8;
st.msg_block.(61) = st.len_low >> 16u32 & 0xFFu32 as u8;
st.msg_block.(62) = st.len_low >> 8u32 & 0xFFu32 as u8;
st.msg_block.(63) = st.len_low & 0xFFu32 as u8;
st.msg_block[56] = st.len_high >> 24u32 & 0xFFu32 as u8;
st.msg_block[57] = st.len_high >> 16u32 & 0xFFu32 as u8;
st.msg_block[58] = st.len_high >> 8u32 & 0xFFu32 as u8;
st.msg_block[59] = st.len_high & 0xFFu32 as u8;
st.msg_block[60] = st.len_low >> 24u32 & 0xFFu32 as u8;
st.msg_block[61] = st.len_low >> 16u32 & 0xFFu32 as u8;
st.msg_block[62] = st.len_low >> 8u32 & 0xFFu32 as u8;
st.msg_block[63] = st.len_low & 0xFFu32 as u8;
process_msg_block(st);
}
obj sha1(st: sha1state) {
@ -230,11 +228,11 @@ fn mk_sha1() -> sha1 {
st.len_low = 0u32;
st.len_high = 0u32;
st.msg_block_idx = 0u;
st.h.(0) = 0x67452301u32;
st.h.(1) = 0xEFCDAB89u32;
st.h.(2) = 0x98BADCFEu32;
st.h.(3) = 0x10325476u32;
st.h.(4) = 0xC3D2E1F0u32;
st.h[0] = 0x67452301u32;
st.h[1] = 0xEFCDAB89u32;
st.h[2] = 0x98BADCFEu32;
st.h[3] = 0x10325476u32;
st.h[4] = 0xC3D2E1F0u32;
st.computed = false;
}
fn input(msg: &[u8]) { add_input(st, msg); }


@ -7,25 +7,17 @@ import str;
import net;
type ctx = aio::ctx;
type client = { ctx: ctx, client: aio::client,
evt: _port<aio::socket_event> };
type server = { ctx: ctx, server: aio::server,
evt: _port<aio::server_event> };
type client = {ctx: ctx, client: aio::client, evt: _port<aio::socket_event>};
type server = {ctx: ctx, server: aio::server, evt: _port<aio::server_event>};
fn new() -> ctx {
ret aio::new();
}
fn new() -> ctx { ret aio::new(); }
fn destroy(ctx: ctx) {
send(ctx, aio::quit);
}
fn destroy(ctx: ctx) { send(ctx, aio::quit); }
fn make_socket(ctx: ctx, p: _port<aio::socket_event>) -> client {
let evt: aio::socket_event = p.recv();
alt evt {
aio::connected(client) {
ret { ctx: ctx, client: client, evt: p };
}
aio::connected(client) { ret {ctx: ctx, client: client, evt: p}; }
_ { fail "Could not connect to client"; }
}
}
@ -38,22 +30,17 @@ fn connect_to(ctx: ctx, ip: net::ip_addr, portnum: int) -> client {
fn read(c: client) -> [u8] {
alt c.evt.recv() {
aio::closed. {
ret ~[];
}
aio::received(buf) {
ret buf;
}
aio::closed. { ret []; }
aio::received(buf) { ret buf; }
}
}
fn create_server(ctx: ctx, ip: net::ip_addr, portnum: int) -> server {
let evt: _port<aio::server_event> = mk_port();
let p: _port<aio::server> = mk_port();
send(ctx, aio::serve(ip, portnum,
evt.mk_chan(), p.mk_chan()));
send(ctx, aio::serve(ip, portnum, evt.mk_chan(), p.mk_chan()));
let srv: aio::server = p.recv();
ret { ctx: ctx, server: srv, evt: evt };
ret {ctx: ctx, server: srv, evt: evt};
}
fn accept_from(server: server) -> client {
@ -85,15 +72,8 @@ fn close_server(server: server) {
fn close_client(client: client) {
send(client.ctx, aio::close_client(client.client));
let evt: aio::socket_event;
do {
evt = client.evt.recv();
alt evt {
aio::closed. {
ret;
}
_ {}
}
} while (true);
do { evt = client.evt.recv(); alt evt { aio::closed. { ret; } _ { } } }
while true
}
// Local Variables:


@ -10,7 +10,7 @@ import option::some;
type smallintmap<T> = @{mutable v: [mutable option::t<T>]};
fn mk<@T>() -> smallintmap<T> {
let v: [mutable option::t<T>] = ~[mutable];
let v: [mutable option::t<T>] = [mutable];
ret @{mutable v: v};
}
@ -19,7 +19,7 @@ fn insert<@T>(m: &smallintmap<T>, key: uint, val: &T) {
}
fn find<@T>(m: &smallintmap<T>, key: uint) -> option::t<T> {
if key < vec::len::<option::t<T>>(m.v) { ret m.v.(key); }
if key < vec::len::<option::t<T>>(m.v) { ret m.v[key]; }
ret none::<T>;
}


@ -6,20 +6,20 @@ export merge_sort;
export quick_sort;
export quick_sort3;
type lteq<T> = block(&T, &T) -> bool ;
type lteq<T> = block(&T, &T) -> bool;
fn merge_sort<@T>(le: &lteq<T>, v: &[T]) -> [T] {
fn merge<@T>(le: &lteq<T>, a: &[T], b: &[T]) -> [T] {
let rs: [T] = ~[];
let rs: [T] = [];
let a_len: uint = len::<T>(a);
let a_ix: uint = 0u;
let b_len: uint = len::<T>(b);
let b_ix: uint = 0u;
while a_ix < a_len && b_ix < b_len {
if le(a.(a_ix), b.(b_ix)) {
rs += ~[a.(a_ix)];
if le(a[a_ix], b[b_ix]) {
rs += [a[a_ix]];
a_ix += 1u;
} else { rs += ~[b.(b_ix)]; b_ix += 1u; }
} else { rs += [b[b_ix]]; b_ix += 1u; }
}
rs += slice::<T>(a, a_ix, a_len);
rs += slice::<T>(b, b_ix, b_len);
@ -34,19 +34,19 @@ fn merge_sort<@T>(le: &lteq<T>, v: &[T]) -> [T] {
}
fn swap<@T>(arr: &[mutable T], x: uint, y: uint) {
let a = arr.(x);
arr.(x) = arr.(y);
arr.(y) = a;
let a = arr[x];
arr[x] = arr[y];
arr[y] = a;
}
fn part<@T>(compare_func: &lteq<T>, arr: &[mutable T], left: uint,
right: uint, pivot: uint) -> uint {
let pivot_value = arr.(pivot);
let pivot_value = arr[pivot];
swap::<T>(arr, pivot, right);
let storage_index: uint = left;
let i: uint = left;
while i < right {
if compare_func({ arr.(i) }, pivot_value) {
if compare_func({ arr[i] }, pivot_value) {
swap::<T>(arr, i, storage_index);
storage_index += 1u;
}
@ -82,26 +82,26 @@ fn quick_sort<@T>(compare_func: &lteq<T>, arr: &[mutable T]) {
fn qsort3<@T>(compare_func_lt: &lteq<T>, compare_func_eq: &lteq<T>,
arr: &[mutable T], left: int, right: int) {
if right <= left { ret; }
let v: T = arr.(right);
let v: T = arr[right];
let i: int = left - 1;
let j: int = right;
let p: int = i;
let q: int = j;
while true {
i += 1;
while compare_func_lt({ arr.(i) }, v) { i += 1; }
while compare_func_lt({ arr[i] }, v) { i += 1; }
j -= 1;
while compare_func_lt(v, { arr.(j) }) {
while compare_func_lt(v, { arr[j] }) {
if j == left { break; }
j -= 1;
}
if i >= j { break; }
swap::<T>(arr, i as uint, j as uint);
if compare_func_eq({ arr.(i) }, v) {
if compare_func_eq({ arr[i] }, v) {
p += 1;
swap::<T>(arr, p as uint, i as uint);
}
if compare_func_eq(v, { arr.(j) }) {
if compare_func_eq(v, { arr[j] }) {
q -= 1;
swap::<T>(arr, j as uint, q as uint);
}
@ -131,7 +131,7 @@ fn quick_sort3<@T>(compare_func_lt: &lteq<T>, compare_func_eq: &lteq<T>,
arr: &[mutable T]) {
if len::<T>(arr) == 0u { ret; }
qsort3::<T>(compare_func_lt, compare_func_eq, arr, 0,
(len::<T>(arr) as int) - 1);
(len::<T>(arr) as int) - 1);
}
// Local Variables:


@ -73,8 +73,8 @@ fn eq(a: &str, b: &str) -> bool {
if byte_len(b) != i { ret false; }
while i > 0u {
i -= 1u;
let cha = a.(i);
let chb = b.(i);
let cha = a[i];
let chb = b[i];
if cha != chb { ret false; }
}
ret true;
@ -87,9 +87,9 @@ fn lteq(a: &str, b: &str) -> bool {
if j < n { n = j; }
let x: uint = 0u;
while x < n {
let cha = a.(x);
let chb = b.(x);
if cha < chb { ret true; } else if (cha > chb) { ret false; }
let cha = a[x];
let chb = b[x];
if cha < chb { ret true; } else if cha > chb { ret false; }
x += 1u;
}
ret i <= j;
@ -134,12 +134,12 @@ fn is_utf8(v: &[u8]) -> bool {
let i = 0u;
let total = vec::len::<u8>(v);
while i < total {
let chsize = utf8_char_width(v.(i));
let chsize = utf8_char_width(v[i]);
if chsize == 0u { ret false; }
if i + chsize > total { ret false; }
i += 1u;
while chsize > 1u {
if v.(i) & 192u8 != tag_cont_u8 { ret false; }
if v[i] & 192u8 != tag_cont_u8 { ret false; }
i += 1u;
chsize -= 1u;
}
@ -149,7 +149,7 @@ fn is_utf8(v: &[u8]) -> bool {
fn is_ascii(s: str) -> bool {
let i: uint = byte_len(s);
while i > 0u { i -= 1u; if s.(i) & 128u8 != 0u8 { ret false; } }
while i > 0u { i -= 1u; if s[i] & 128u8 != 0u8 { ret false; } }
ret true;
}
@ -165,9 +165,7 @@ fn is_whitespace(s: str) -> bool {
let i = 0u;
let len = char_len(s);
while i < len {
if !char::is_whitespace(char_at(s, i)) {
ret false;
}
if !char::is_whitespace(char_at(s, i)) { ret false; }
i += 1u
}
ret true;
@ -192,7 +190,7 @@ fn unsafe_from_bytes(v: &[mutable? u8]) -> str {
ret rustrt::str_from_ivec(v);
}
fn unsafe_from_byte(u: u8) -> str { ret rustrt::str_from_ivec(~[u]); }
fn unsafe_from_byte(u: u8) -> str { ret rustrt::str_from_ivec([u]); }
fn str_from_cstr(cstr: sbuf) -> str { ret rustrt::str_from_cstr(cstr); }
@ -204,19 +202,19 @@ fn push_utf8_bytes(s: &mutable str, ch: char) {
let code = ch as uint;
if code < max_one_b {
s = rustrt::str_push_byte(s, code);
} else if (code < max_two_b) {
} else if code < max_two_b {
s = rustrt::str_push_byte(s, code >> 6u & 31u | tag_two_b);
s = rustrt::str_push_byte(s, code & 63u | tag_cont);
} else if (code < max_three_b) {
} else if code < max_three_b {
s = rustrt::str_push_byte(s, code >> 12u & 15u | tag_three_b);
s = rustrt::str_push_byte(s, code >> 6u & 63u | tag_cont);
s = rustrt::str_push_byte(s, code & 63u | tag_cont);
} else if (code < max_four_b) {
} else if code < max_four_b {
s = rustrt::str_push_byte(s, code >> 18u & 7u | tag_four_b);
s = rustrt::str_push_byte(s, code >> 12u & 63u | tag_cont);
s = rustrt::str_push_byte(s, code >> 6u & 63u | tag_cont);
s = rustrt::str_push_byte(s, code & 63u | tag_cont);
} else if (code < max_five_b) {
} else if code < max_five_b {
s = rustrt::str_push_byte(s, code >> 24u & 3u | tag_five_b);
s = rustrt::str_push_byte(s, code >> 18u & 63u | tag_cont);
s = rustrt::str_push_byte(s, code >> 12u & 63u | tag_cont);
@ -259,7 +257,7 @@ fn utf8_char_width(b: u8) -> uint {
}
fn char_range_at(s: str, i: uint) -> {ch: char, next: uint} {
let b0 = s.(i);
let b0 = s[i];
let w = utf8_char_width(b0);
assert (w != 0u);
if w == 1u { ret {ch: b0 as char, next: i + 1u}; }
@ -267,7 +265,7 @@ fn char_range_at(s: str, i: uint) -> {ch: char, next: uint} {
let end = i + w;
i += 1u;
while i < end {
let byte = s.(i);
let byte = s[i];
assert (byte & 192u8 == tag_cont_u8);
val <<= 6u;
val += byte & 63u8 as uint;
@ -288,7 +286,7 @@ fn char_len(s: str) -> uint {
let len = 0u;
let total = byte_len(s);
while i < total {
let chsize = utf8_char_width(s.(i));
let chsize = utf8_char_width(s[i]);
assert (chsize > 0u);
len += 1u;
i += chsize;
@ -298,12 +296,12 @@ fn char_len(s: str) -> uint {
}
fn to_chars(s: str) -> [char] {
let buf: [char] = ~[];
let buf: [char] = [];
let i = 0u;
let len = byte_len(s);
while i < len {
let cur = char_range_at(s, i);
buf += ~[cur.ch];
buf += [cur.ch];
i = cur.next;
}
ret buf;
@ -313,7 +311,7 @@ fn push_char(s: &mutable str, ch: char) { s += from_char(ch); }
fn pop_char(s: &mutable str) -> char {
let end = byte_len(s);
while end > 0u && s.(end - 1u) & 192u8 == tag_cont_u8 { end -= 1u; }
while end > 0u && s[end - 1u] & 192u8 == tag_cont_u8 { end -= 1u; }
assert (end > 0u);
let ch = char_at(s, end - 1u);
s = substr(s, 0u, end - 1u);
@ -343,7 +341,7 @@ fn index(s: str, c: u8) -> int {
fn rindex(s: str, c: u8) -> int {
let n: int = str::byte_len(s) as int;
while n >= 0 { if s.(n) == c { ret n; } n -= 1; }
while n >= 0 { if s[n] == c { ret n; } n -= 1; }
ret n;
}
@ -353,7 +351,7 @@ fn find(haystack: str, needle: str) -> int {
if needle_len == 0 { ret 0; }
fn match_at(haystack: &str, needle: &str, i: int) -> bool {
let j: int = i;
for c: u8 in needle { if haystack.(j) != c { ret false; } j += 1; }
for c: u8 in needle { if haystack[j] != c { ret false; } j += 1; }
ret true;
}
let i: int = 0;
@ -377,7 +375,7 @@ fn ends_with(haystack: str, needle: str) -> bool {
let needle_len: uint = byte_len(needle);
ret if needle_len == 0u {
true
} else if (needle_len > haystack_len) {
} else if needle_len > haystack_len {
false
} else {
eq(substr(haystack, haystack_len - needle_len, needle_len),
@ -397,18 +395,19 @@ fn slice(s: str, begin: uint, end: uint) -> str {
ret rustrt::str_slice(s, begin, end);
}
fn safe_slice(s: str, begin: uint, end: uint): le(begin, end) -> str {
fn safe_slice(s: str, begin: uint, end: uint) : le(begin, end) -> str {
assert (end <=
str::byte_len(s)); // would need some magic to
// make this a precondition
ret rustrt::str_slice(s, begin, end);
}
fn shift_byte(s: &mutable str) -> u8 {
let len = byte_len(s);
assert (len > 0u);
let b = s.(0);
let b = s[0];
s = substr(s, 1u, len - 1u);
ret b;
}
@ -416,7 +415,7 @@ fn shift_byte(s: &mutable str) -> u8 {
fn pop_byte(s: &mutable str) -> u8 {
let len = byte_len(s);
assert (len > 0u);
let b = s.(len - 1u);
let b = s[len - 1u];
s = substr(s, 0u, len - 1u);
ret b;
}
@ -433,17 +432,17 @@ fn unshift_byte(s: &mutable str, b: u8) {
}
fn split(s: str, sep: u8) -> [str] {
let v: [str] = ~[];
let v: [str] = [];
let accum: str = "";
let ends_with_sep: bool = false;
for c: u8 in s {
if c == sep {
v += ~[accum];
v += [accum];
accum = "";
ends_with_sep = true;
} else { accum += unsafe_from_byte(c); ends_with_sep = false; }
}
if str::byte_len(accum) != 0u || ends_with_sep { v += ~[accum]; }
if str::byte_len(accum) != 0u || ends_with_sep { v += [accum]; }
ret v;
}
@ -486,10 +485,10 @@ fn replace(s: str, from: str, to: str) : is_not_empty(from) -> str {
check (is_not_empty(from));
if byte_len(s) == 0u {
ret "";
} else if (starts_with(s, from)) {
} else if starts_with(s, from) {
ret to + replace(slice(s, byte_len(from), byte_len(s)), from, to);
} else {
ret unsafe_from_byte(s.(0)) +
ret unsafe_from_byte(s[0]) +
replace(slice(s, 1u, byte_len(s)), from, to);
}
}
@ -503,9 +502,7 @@ fn trim_left(s: &str) -> str {
fn count_whities(s: &[char]) -> uint {
let i = 0u;
while i < vec::len(s) {
if !char::is_whitespace(s.(i)) {
break;
}
if !char::is_whitespace(s[i]) { break; }
i += 1u;
}
ret i;
@ -519,9 +516,7 @@ fn trim_right(s: &str) -> str {
fn count_whities(s: &[char]) -> uint {
let i = vec::len(s);
while 0u < i {
if !char::is_whitespace(s.(i - 1u)) {
break;
}
if !char::is_whitespace(s[i - 1u]) { break; }
i -= 1u;
}
ret i;
@ -531,9 +526,7 @@ fn trim_right(s: &str) -> str {
ret from_chars(vec::slice(chars, 0u, whities));
}
fn trim(s: &str) -> str {
trim_left(trim_right(s))
}
fn trim(s: &str) -> str { trim_left(trim_right(s)) }
// Local Variables:
// mode: rust;


@ -19,43 +19,35 @@ native "rust" mod rustrt {
fn set_min_stack(stack_size: uint);
fn new_task() -> task_id;
fn drop_task(task : *rust_task);
fn get_task_pointer(id : task_id) -> *rust_task;
fn start_task(id : task_id);
fn drop_task(task: *rust_task);
fn get_task_pointer(id: task_id) -> *rust_task;
fn start_task(id: task_id);
fn get_task_trampoline() -> u32;
fn migrate_alloc(alloc : *u8, target : task_id);
fn migrate_alloc(alloc: *u8, target: task_id);
fn leak<@T>(thing : -T);
fn leak<@T>(thing: -T);
}
type rust_task = {
id : task,
mutable notify_enabled : u8,
mutable notify_chan : comm::chan_handle<task_notification>,
ctx : task_context,
stack_ptr : *u8
};
type rust_task =
{id: task,
mutable notify_enabled: u8,
mutable notify_chan: comm::chan_handle<task_notification>,
ctx: task_context,
stack_ptr: *u8};
type task_context = {
regs : x86_registers,
next : *u8
};
type task_context = {regs: x86_registers, next: *u8};
resource rust_task_ptr(task : *rust_task) {
rustrt::drop_task(task);
}
resource rust_task_ptr(task: *rust_task) { rustrt::drop_task(task); }
fn get_task_ptr(id : task) -> rust_task_ptr {
fn get_task_ptr(id: task) -> rust_task_ptr {
ret rust_task_ptr(rustrt::get_task_pointer(id));
}
type task = int;
type task_id = task;
fn get_task_id() -> task_id {
rustrt::get_task_id()
}
fn get_task_id() -> task_id { rustrt::get_task_id() }
/**
* Hints the scheduler to yield this task for a specified amount of time.
@ -68,22 +60,20 @@ fn yield() { ret rustrt::task_yield(); }
tag task_result { tr_success; tr_failure; }
tag task_notification {
exit(task, task_result);
}
tag task_notification { exit(task, task_result); }
fn join(task_port : (task_id, comm::port<task_notification>))
-> task_result {
fn join(task_port: (task_id, comm::port<task_notification>)) -> task_result {
let (id, port) = task_port;
alt comm::recv::<task_notification>(port) {
exit(_id, res) {
if _id == id { ret res }
else { fail #fmt("join received id %d, expected %d", _id, id) }
if _id == id {
ret res
} else { fail #fmt["join received id %d, expected %d", _id, id] }
}
}
}
fn join_id(t : task_id) -> task_result {
fn join_id(t: task_id) -> task_result {
alt rustrt::task_join(t) { 0 { tr_success } _ { tr_failure } }
}
@ -93,33 +83,25 @@ fn pin() { rustrt::pin_task(); }
fn unpin() { rustrt::unpin_task(); }
fn set_min_stack(stack_size : uint) {
rustrt::set_min_stack(stack_size);
}
fn set_min_stack(stack_size: uint) { rustrt::set_min_stack(stack_size); }
fn _spawn(thunk : -fn() -> ()) -> task {
spawn(thunk)
}
fn _spawn(thunk: -fn()) -> task { spawn(thunk) }
fn spawn(thunk : -fn() -> ()) -> task {
spawn_inner(thunk, none)
}
fn spawn(thunk: -fn()) -> task { spawn_inner(thunk, none) }
fn spawn_notify(thunk : -fn() -> (), notify : comm::chan<task_notification>)
-> task {
fn spawn_notify(thunk: -fn(), notify: comm::chan<task_notification>) -> task {
spawn_inner(thunk, some(notify))
}
fn spawn_joinable(thunk : -fn()) -> (task_id, comm::port<task_notification>) {
fn spawn_joinable(thunk: -fn()) -> (task_id, comm::port<task_notification>) {
let p = comm::port::<task_notification>();
let id = spawn_notify(thunk, comm::chan::<task_notification>(p));
ret (id, p);
}
// FIXME: make this a fn~ once those are supported.
fn spawn_inner(thunk : -fn() -> (),
notify : option<comm::chan<task_notification>>)
-> task_id {
fn spawn_inner(thunk: -fn(), notify: option<comm::chan<task_notification>>) ->
task_id {
let id = rustrt::new_task();
// the order of arguments are outptr, taskptr, envptr.
@ -129,21 +111,21 @@ fn spawn_inner(thunk : -fn() -> (),
// set up the task pointer
let task_ptr = get_task_ptr(id);
let regs = ptr::addr_of((**task_ptr).ctx.regs);
(*regs).edx = cast(*task_ptr);
(*regs).edx = cast(*task_ptr);;
(*regs).esp = cast((**task_ptr).stack_ptr);
assert ptr::null() != (**task_ptr).stack_ptr;
assert (ptr::null() != (**task_ptr).stack_ptr);
let raw_thunk : { code: u32, env: u32 } = cast(thunk);
let raw_thunk: {code: u32, env: u32} = cast(thunk);
(*regs).eip = raw_thunk.code;
// set up notifications if they are enabled.
alt notify {
some(c) {
(**task_ptr).notify_enabled = 1u8;
(**task_ptr).notify_enabled = 1u8;;
(**task_ptr).notify_chan = *c;
}
none {}
none { }
};
// okay, now we align the stack and add the environment pointer and a fake
@ -153,15 +135,15 @@ fn spawn_inner(thunk : -fn() -> (),
// -4 for the return address.
(*regs).esp = align_down((*regs).esp - 12u32) - 4u32;
let ra : *mutable u32 = cast((*regs).esp);
let env : *mutable u32 = cast((*regs).esp+4u32);
let tptr : *mutable u32 = cast((*regs).esp+12u32);
let ra: *mutable u32 = cast((*regs).esp);
let env: *mutable u32 = cast((*regs).esp + 4u32);
let tptr: *mutable u32 = cast((*regs).esp + 12u32);
// put the return pointer in ecx.
(*regs).ecx = (*regs).esp + 8u32;
(*regs).ecx = (*regs).esp + 8u32;;
*tptr = cast(*task_ptr);
*env = raw_thunk.env;
*tptr = cast(*task_ptr);;
*env = raw_thunk.env;;
*ra = rustrt::get_task_trampoline();
rustrt::migrate_alloc(cast(raw_thunk.env), id);
@ -173,31 +155,31 @@ fn spawn_inner(thunk : -fn() -> (),
}
// Who says we can't write an operating system in Rust?
type x86_registers = {
type x86_registers =
// This needs to match the structure in context.h
mutable eax : u32,
mutable ebx : u32,
mutable ecx : u32,
mutable edx : u32,
mutable ebp : u32,
mutable esi : u32,
mutable edi : u32,
mutable esp : u32,
mutable cs : u16,
mutable ds : u16,
mutable ss : u16,
mutable es : u16,
mutable fs : u16,
mutable gs : u16,
mutable eflags : u32,
mutable eip : u32
};
{mutable eax: u32,
mutable ebx: u32,
mutable ecx: u32,
mutable edx: u32,
mutable ebp: u32,
mutable esi: u32,
mutable edi: u32,
mutable esp: u32,
mutable cs: u16,
mutable ds: u16,
mutable ss: u16,
mutable es: u16,
mutable fs: u16,
mutable gs: u16,
mutable eflags: u32,
mutable eip: u32};
fn align_down(x: u32) -> u32 {
fn align_down(x : u32) -> u32 {
// Aligns x down to 16 bytes
x & !(15u32)
x & !15u32
}
// Local Variables:


@ -40,15 +40,15 @@ const color_bright_cyan: u8 = 14u8;
const color_bright_white: u8 = 15u8;
fn esc(writer: io::buf_writer) { writer.write(~[0x1bu8, '[' as u8]); }
fn esc(writer: io::buf_writer) { writer.write([0x1bu8, '[' as u8]); }
fn reset(writer: io::buf_writer) {
esc(writer);
writer.write(~['0' as u8, 'm' as u8]);
writer.write(['0' as u8, 'm' as u8]);
}
fn color_supported() -> bool {
let supported_terms = ~["xterm-color", "xterm", "screen-bce"];
let supported_terms = ["xterm-color", "xterm", "screen-bce"];
ret alt generic_os::getenv("TERM") {
option::some(env) {
for term: str in supported_terms {
@ -63,8 +63,8 @@ fn color_supported() -> bool {
fn set_color(writer: io::buf_writer, first_char: u8, color: u8) {
assert (color < 16u8);
esc(writer);
if color >= 8u8 { writer.write(~['1' as u8, ';' as u8]); color -= 8u8; }
writer.write(~[first_char, ('0' as u8) + color, 'm' as u8]);
if color >= 8u8 { writer.write(['1' as u8, ';' as u8]); color -= 8u8; }
writer.write([first_char, ('0' as u8) + color, 'm' as u8]);
}
fn fg(writer: io::buf_writer, color: u8) {


@ -41,7 +41,7 @@ type test_name = str;
// the test succeeds; if the function fails then the test fails. We
// may need to come up with a more clever definition of test in order
// to support isolation of tests into tasks.
type test_fn = fn() ;
type test_fn = fn();
// The definition of a single test. A test runner will run a list of
// these.
@ -69,7 +69,7 @@ fn parse_opts(args: &[str]) : vec::is_not_empty(args) -> opt_res {
// FIXME (#649): Shouldn't have to check here
check (vec::is_not_empty(args));
let args_ = vec::tail(args);
let opts = ~[getopts::optflag("ignored")];
let opts = [getopts::optflag("ignored")];
let match =
alt getopts::getopts(args_, opts) {
getopts::success(m) { m }
@ -78,7 +78,7 @@ fn parse_opts(args: &[str]) : vec::is_not_empty(args) -> opt_res {
let filter =
if vec::len(match.free) > 0u {
option::some(match.free.(0))
option::some(match.free[0])
} else { option::none };
let run_ignored = getopts::opt_present(match, "ignored");
@ -106,25 +106,22 @@ fn run_tests_console(opts: &test_opts, tests: &[test_desc]) -> bool {
fn run_tests_console_(opts: &test_opts, tests: &[test_desc],
to_task: &test_to_task) -> bool {
type test_state = @{
out: io::writer,
use_color: bool,
mutable total: uint,
mutable passed: uint,
mutable failed: uint,
mutable ignored: uint,
mutable failures: [test_desc]
};
type test_state =
@{out: io::writer,
use_color: bool,
mutable total: uint,
mutable passed: uint,
mutable failed: uint,
mutable ignored: uint,
mutable failures: [test_desc]};
fn callback(event: testevent, st: test_state) {
alt event {
te_filtered(filtered_tests) {
st.total = vec::len(filtered_tests);
st.out.write_line(#fmt("\nrunning %u tests", st.total));
}
te_wait(test) {
st.out.write_str(#fmt("test %s ... ", test.name));
st.out.write_line(#fmt["\nrunning %u tests", st.total]);
}
te_wait(test) { st.out.write_str(#fmt["test %s ... ", test.name]); }
te_result(test, result) {
alt result {
tr_ok. {
@ -136,7 +133,7 @@ fn run_tests_console_(opts: &test_opts, tests: &[test_desc],
st.failed += 1u;
write_failed(st.out, st.use_color);
st.out.write_line("");
st.failures += ~[test];
st.failures += [test];
}
tr_ignored. {
st.ignored += 1u;
@ -148,37 +145,35 @@ fn run_tests_console_(opts: &test_opts, tests: &[test_desc],
}
}
let st = @{
out: io::stdout(),
use_color: use_color(),
mutable total: 0u,
mutable passed: 0u,
mutable failed: 0u,
mutable ignored: 0u,
mutable failures: ~[]
};
let st =
@{out: io::stdout(),
use_color: use_color(),
mutable total: 0u,
mutable passed: 0u,
mutable failed: 0u,
mutable ignored: 0u,
mutable failures: []};
run_tests(opts, tests, to_task,
bind callback(_, st));
run_tests(opts, tests, to_task, bind callback(_, st));
assert st.passed + st.failed + st.ignored == st.total;
assert (st.passed + st.failed + st.ignored == st.total);
let success = st.failed == 0u;
if !success {
st.out.write_line("\nfailures:");
for test: test_desc in st.failures {
let testname = test.name; // Satisfy alias analysis
st.out.write_line(#fmt(" %s", testname));
st.out.write_line(#fmt[" %s", testname]);
}
}
st.out.write_str(#fmt("\nresult: "));
st.out.write_str(#fmt["\nresult: "]);
if success {
// There's no parallelism at this point so it's safe to use color
write_ok(st.out, true);
} else { write_failed(st.out, true); }
st.out.write_str(#fmt(". %u passed; %u failed; %u ignored\n\n",
st.passed, st.failed, st.ignored));
st.out.write_str(#fmt[". %u passed; %u failed; %u ignored\n\n", st.passed,
st.failed, st.ignored]);
ret success;
@ -206,9 +201,7 @@ fn run_tests_console_(opts: &test_opts, tests: &[test_desc],
}
}
fn use_color() -> bool {
ret get_concurrency() == 1u;
}
fn use_color() -> bool { ret get_concurrency() == 1u; }
tag testevent {
te_filtered([test_desc]);
@ -216,8 +209,8 @@ tag testevent {
te_result(test_desc, test_result);
}
fn run_tests(opts: &test_opts, tests: &[test_desc],
to_task: &test_to_task, callback: fn(testevent)) {
fn run_tests(opts: &test_opts, tests: &[test_desc], to_task: &test_to_task,
callback: fn(testevent)) {
let filtered_tests = filter_tests(opts, tests);
@ -227,19 +220,19 @@ fn run_tests(opts: &test_opts, tests: &[test_desc],
// provide a great user experience because you might sit waiting for the
// result of a particular test for an unusually long amount of time.
let concurrency = get_concurrency();
log #fmt("using %u test tasks", concurrency);
log #fmt["using %u test tasks", concurrency];
let total = vec::len(filtered_tests);
let run_idx = 0u;
let wait_idx = 0u;
let futures = ~[];
let futures = [];
while wait_idx < total {
while vec::len(futures) < concurrency && run_idx < total {
futures += ~[run_test(filtered_tests.(run_idx), to_task)];
futures += [run_test(filtered_tests[run_idx], to_task)];
run_idx += 1u;
}
let future = futures.(0);
let future = futures[0];
callback(te_wait(future.test));
let result = future.wait();
callback(te_result(future.test, result));
@ -306,33 +299,26 @@ fn filter_tests(opts: &test_opts, tests: &[test_desc]) -> [test_desc] {
ret filtered;
}
type test_future =
{test: test_desc, wait: fn() -> test_result };
type test_future = {test: test_desc, wait: fn() -> test_result};
fn run_test(test: &test_desc, to_task: &test_to_task) -> test_future {
if !test.ignore {
let test_task = to_task(test.fn);
ret {test: test,
wait:
bind fn (test_task: joinable)-> test_result {
alt task::join(test_task) {
task::tr_success. { tr_ok }
task::tr_failure. { tr_failed }
}
}(test_task)};
} else {
ret {test: test,
wait: fn () -> test_result { tr_ignored }};
}
bind fn (test_task: joinable) -> test_result {
alt task::join(test_task) {
task::tr_success. { tr_ok }
task::tr_failure. { tr_failed }
}
}(test_task)};
} else { ret {test: test, wait: fn () -> test_result { tr_ignored }}; }
}
// We need to run our tests in another task in order to trap test failures.
// This function only works with functions that don't contain closures.
fn default_test_to_task(f: &fn()) -> joinable {
fn run_task(f: fn()) {
configure_test_task();
f();
}
fn run_task(f: fn()) { configure_test_task(); f(); }
ret task::spawn_joinable(bind run_task(f));
}


@ -18,4 +18,4 @@ fn precise_time_ns() -> u64 { let ns = 0u64; rustrt::nano_time(ns); ret ns; }
fn precise_time_s() -> float {
ret (precise_time_ns() as float) / 1000000000.;
}
}


@ -33,4 +33,4 @@ fn to_str(n: u64, radix: uint) -> str {
ret s;
}
fn str(n: u64) -> str { ret to_str(n, 10u); }
fn str(n: u64) -> str { ret to_str(n, 10u); }


@ -10,11 +10,11 @@ type node = option::t<uint>;
type ufind = {mutable nodes: [mutable node]};
fn make() -> ufind { ret {mutable nodes: ~[mutable]}; }
fn make() -> ufind { ret {mutable nodes: [mutable]}; }
fn make_set(ufnd: &ufind) -> uint {
let idx = vec::len(ufnd.nodes);
ufnd.nodes += ~[mutable none::<uint>];
ufnd.nodes += [mutable none::<uint>];
ret idx;
}
@ -26,7 +26,7 @@ fn grow(ufnd: &ufind, n: uint) {
}
fn find(ufnd: &ufind, n: uint) -> uint {
alt ufnd.nodes.(n) {
alt ufnd.nodes[n] {
none. { ret n; }
some(m) { let m_ = m; be find(ufnd, m_); }
}
@ -36,10 +36,8 @@ fn union(ufnd: &ufind, m: uint, n: uint) {
let m_root = find(ufnd, m);
let n_root = find(ufnd, n);
if m_root < n_root {
ufnd.nodes.(n_root) = some::<uint>(m_root);
} else if (m_root > n_root) {
ufnd.nodes.(m_root) = some::<uint>(n_root);
}
ufnd.nodes[n_root] = some::<uint>(m_root);
} else if m_root > n_root { ufnd.nodes[m_root] = some::<uint>(n_root); }
}
fn set_count(ufnd: &ufind) -> uint { ret vec::len::<node>(ufnd.nodes); }


@ -51,7 +51,7 @@ fn parse_buf(buf: &[u8], radix: uint) -> uint {
let power = 1u;
let n = 0u;
while true {
n += (buf.(i) - ('0' as u8) as uint) * power;
n += (buf[i] - ('0' as u8) as uint) * power;
power *= radix;
if i == 0u { ret n; }
i -= 1u;
@ -59,9 +59,7 @@ fn parse_buf(buf: &[u8], radix: uint) -> uint {
fail;
}
fn from_str(s : &str) -> uint {
parse_buf(str::bytes(s), 10u)
}
fn from_str(s: &str) -> uint { parse_buf(str::bytes(s), 10u) }
fn to_str(num: uint, radix: uint) -> str {
let n = num;
@ -95,7 +93,7 @@ fn to_str(num: uint, radix: uint) -> str {
}
let s1: str = "";
let len: uint = str::byte_len(s);
while len != 0u { len -= 1u; s1 += str::unsafe_from_byte(s.(len)); }
while len != 0u { len -= 1u; s1 += str::unsafe_from_byte(s[len]); }
ret s1;
}
fn str(i: uint) -> str { ret to_str(i, 10u); }


@ -28,51 +28,51 @@ fn to_ptr<T>(v: &[T]) -> *T { ret rustrt::ivec_to_ptr(v); }
fn len<T>(v: &[mutable? T]) -> uint { ret rusti::ivec_len(v); }
type init_op<T> = fn(uint) -> T ;
type init_op<T> = fn(uint) -> T;
fn init_fn<@T>(op: &init_op<T>, n_elts: uint) -> [T] {
let v = ~[];
let v = [];
reserve(v, n_elts);
let i: uint = 0u;
while i < n_elts { v += ~[op(i)]; i += 1u; }
while i < n_elts { v += [op(i)]; i += 1u; }
ret v;
}
// TODO: Remove me once we have slots.
fn init_fn_mut<@T>(op: &init_op<T>, n_elts: uint) -> [mutable T] {
let v = ~[mutable];
let v = [mutable];
reserve(v, n_elts);
let i: uint = 0u;
while i < n_elts { v += ~[mutable op(i)]; i += 1u; }
while i < n_elts { v += [mutable op(i)]; i += 1u; }
ret v;
}
fn init_elt<@T>(t: &T, n_elts: uint) -> [T] {
let v = ~[];
let v = [];
reserve(v, n_elts);
let i: uint = 0u;
while i < n_elts { v += ~[t]; i += 1u; }
while i < n_elts { v += [t]; i += 1u; }
ret v;
}
// TODO: Remove me once we have slots.
fn init_elt_mut<@T>(t: &T, n_elts: uint) -> [mutable T] {
let v = ~[mutable];
let v = [mutable];
reserve(v, n_elts);
let i: uint = 0u;
while i < n_elts { v += ~[mutable t]; i += 1u; }
while i < n_elts { v += [mutable t]; i += 1u; }
ret v;
}
fn to_mut<@T>(v: &[T]) -> [mutable T] {
let vres = ~[mutable];
for t: T in v { vres += ~[mutable t]; }
let vres = [mutable];
for t: T in v { vres += [mutable t]; }
ret vres;
}
fn from_mut<@T>(v: &[mutable T]) -> [T] {
let vres = ~[];
for t: T in v { vres += ~[t]; }
let vres = [];
for t: T in v { vres += [t]; }
ret vres;
}
@ -88,7 +88,7 @@ pred is_not_empty<T>(v: &[mutable? T]) -> bool { ret !is_empty(v); }
// Accessors
/// Returns the first element of a vector
fn head<@T>(v: &[mutable? T]) : is_not_empty(v) -> T { ret v.(0); }
fn head<@T>(v: &[mutable? T]) : is_not_empty(v) -> T { ret v[0]; }
/// Returns all but the first element of a vector
fn tail<@T>(v: &[mutable? T]) : is_not_empty(v) -> [mutable? T] {
@ -98,17 +98,17 @@ fn tail<@T>(v: &[mutable? T]) : is_not_empty(v) -> [mutable? T] {
/// Returns the last element of `v`.
fn last<@T>(v: &[mutable? T]) -> option::t<T> {
if len(v) == 0u { ret none; }
ret some(v.(len(v) - 1u));
ret some(v[len(v) - 1u]);
}
/// Returns a copy of the elements from [`start`..`end`) from `v`.
fn slice<@T>(v: &[mutable? T], start: uint, end: uint) -> [T] {
assert (start <= end);
assert (end <= len(v));
let result = ~[];
let result = [];
reserve(result, end - start);
let i = start;
while i < end { result += ~[v.(i)]; i += 1u; }
while i < end { result += [v[i]]; i += 1u; }
ret result;
}
@ -116,10 +116,10 @@ fn slice<@T>(v: &[mutable? T], start: uint, end: uint) -> [T] {
fn slice_mut<@T>(v: &[mutable? T], start: uint, end: uint) -> [mutable T] {
assert (start <= end);
assert (end <= len(v));
let result = ~[mutable];
let result = [mutable];
reserve(result, end - start);
let i = start;
while i < end { result += ~[mutable v.(i)]; i += 1u; }
while i < end { result += [mutable v[i]]; i += 1u; }
ret result;
}
@ -129,7 +129,7 @@ fn slice_mut<@T>(v: &[mutable? T], start: uint, end: uint) -> [mutable T] {
fn shift<@T>(v: &mutable [mutable? T]) -> T {
let ln = len::<T>(v);
assert (ln > 0u);
let e = v.(0);
let e = v[0];
v = slice::<T>(v, 1u, ln);
ret e;
}
@ -139,7 +139,7 @@ fn pop<@T>(v: &mutable [mutable? T]) -> T {
let ln = len(v);
assert (ln > 0u);
ln -= 1u;
let e = v.(ln);
let e = v[ln];
v = slice(v, 0u, ln);
ret e;
}
@ -153,22 +153,22 @@ fn pop<@T>(v: &mutable [mutable? T]) -> T {
fn grow<@T>(v: &mutable [T], n: uint, initval: &T) {
reserve(v, next_power_of_two(len(v) + n));
let i: uint = 0u;
while i < n { v += ~[initval]; i += 1u; }
while i < n { v += [initval]; i += 1u; }
}
// TODO: Remove me once we have slots.
fn grow_mut<@T>(v: &mutable [mutable T], n: uint, initval: &T) {
reserve(v, next_power_of_two(len(v) + n));
let i: uint = 0u;
while i < n { v += ~[mutable initval]; i += 1u; }
while i < n { v += [mutable initval]; i += 1u; }
}
/// Calls `f` `n` times and appends the results of these calls to the given
/// vector.
fn grow_fn<@T>(v: &mutable [T], n: uint, init_fn: fn(uint) -> T ) {
fn grow_fn<@T>(v: &mutable [T], n: uint, init_fn: fn(uint) -> T) {
reserve(v, next_power_of_two(len(v) + n));
let i: uint = 0u;
while i < n { v += ~[init_fn(i)]; i += 1u; }
while i < n { v += [init_fn(i)]; i += 1u; }
}
/// Sets the element at position `index` to `val`. If `index` is past the end
@ -176,49 +176,48 @@ fn grow_fn<@T>(v: &mutable [T], n: uint, init_fn: fn(uint) -> T ) {
/// intervening space.
fn grow_set<@T>(v: &mutable [mutable T], index: uint, initval: &T, val: &T) {
if index >= len(v) { grow_mut(v, index - len(v) + 1u, initval); }
v.(index) = val;
v[index] = val;
}
// Functional utilities
fn map<@T, @U>(f: &block(&T) -> U , v: &[mutable? T]) -> [U] {
let result = ~[];
fn map<@T, @U>(f: &block(&T) -> U, v: &[mutable? T]) -> [U] {
let result = [];
reserve(result, len(v));
for elem: T in v {
let elem2 = elem; // satisfies alias checker
result += ~[f(elem2)];
result += [f(elem2)];
}
ret result;
}
fn map2<@T, @U, @V>(f: &block(&T, &U) -> V, v0: &[T], v1: &[U])
-> [V] {
fn map2<@T, @U, @V>(f: &block(&T, &U) -> V, v0: &[T], v1: &[U]) -> [V] {
let v0_len = len::<T>(v0);
if v0_len != len::<U>(v1) { fail; }
let u: [V] = ~[];
let u: [V] = [];
let i = 0u;
while i < v0_len { u += ~[f({ v0.(i) }, { v1.(i) })]; i += 1u; }
while i < v0_len { u += [f({ v0[i] }, { v1[i] })]; i += 1u; }
ret u;
}
fn filter_map<@T, @U>(f: &block(&T) -> option::t<U>,
v: &[mutable? T]) -> [U] {
let result = ~[];
fn filter_map<@T, @U>(f: &block(&T) -> option::t<U>, v: &[mutable? T]) ->
[U] {
let result = [];
for elem: T in v {
let elem2 = elem; // satisfies alias checker
alt f(elem2) {
none. {/* no-op */ }
some(result_elem) { result += ~[result_elem]; }
some(result_elem) { result += [result_elem]; }
}
}
ret result;
}
fn foldl<@T, @U>(p: &block(&U, &T) -> U , z: &U, v: &[mutable? T]) -> U {
fn foldl<@T, @U>(p: &block(&U, &T) -> U, z: &U, v: &[mutable? T]) -> U {
let sz = len(v);
if sz == 0u { ret z; }
let first = v.(0);
let first = v[0];
let rest = slice(v, 1u, sz);
ret p(foldl(p, z, rest), first);
}
@ -251,42 +250,36 @@ fn find<@T>(f: &block(&T) -> bool, v: &[T]) -> option::t<T> {
fn position<@T>(x: &T, v: &[T]) -> option::t<uint> {
let i: uint = 0u;
while i < len(v) { if x == v.(i) { ret some::<uint>(i); } i += 1u; }
while i < len(v) { if x == v[i] { ret some::<uint>(i); } i += 1u; }
ret none;
}
fn position_pred<T>(f: fn(&T) -> bool, v: &[T]) -> option::t<uint> {
let i: uint = 0u;
while i < len(v) { if f(v.(i)) { ret some::<uint>(i); } i += 1u; }
while i < len(v) { if f(v[i]) { ret some::<uint>(i); } i += 1u; }
ret none;
}
fn unzip<@T, @U>(v: &[(T, U)]) -> ([T], [U]) {
let as = ~[], bs = ~[];
for (a, b) in v {
as += ~[a];
bs += ~[b];
}
let as = [], bs = [];
for (a, b) in v { as += [a]; bs += [b]; }
ret (as, bs);
}
// FIXME make the lengths being equal a constraint
fn zip<@T, @U>(v: &[T], u: &[U]) -> [(T, U)] {
let zipped = ~[];
let zipped = [];
let sz = len(v), i = 0u;
assert (sz == len(u));
while i < sz {
zipped += ~[(v.(i), u.(i))];
i += 1u;
}
while i < sz { zipped += [(v[i], u[i])]; i += 1u; }
ret zipped;
}
// Swaps two elements in a vector
fn swap<@T>(v: &[mutable T], a: uint, b: uint) {
let t: T = v.(a);
v.(a) = v.(b);
v.(b) = t;
let t: T = v[a];
v[a] = v[b];
v[b] = t;
}
// In place vector reversal
@ -299,11 +292,11 @@ fn reverse<@T>(v: &[mutable T]) {
// Functional vector reversal. Returns a reversed copy of v.
fn reversed<@T>(v: &[T]) -> [T] {
let rs: [T] = ~[];
let rs: [T] = [];
let i = len::<T>(v);
if i == 0u { ret rs; } else { i -= 1u; }
while i != 0u { rs += ~[v.(i)]; i -= 1u; }
rs += ~[v.(0)];
while i != 0u { rs += [v[i]]; i -= 1u; }
rs += [v[0]];
ret rs;
}
@ -328,7 +321,7 @@ mod unsafe {
}
fn from_buf<T>(ptr: *T, bytes: uint) -> [T] {
let v = ~[];
let v = [];
copy_from_buf(v, ptr, bytes);
ret v;
}


@ -5,9 +5,7 @@ native "rust" mod rustrt {
fn rust_file_is_dir(path: str) -> int;
}
fn list_dir(path: str) -> [str] {
ret *rustrt::rust_list_files(path + "*");
}
fn list_dir(path: str) -> [str] { ret *rustrt::rust_list_files(path + "*"); }
fn path_is_absolute(p: str) -> bool {
ret str::char_at(p, 0u) == '/' ||


@ -30,7 +30,7 @@ mod libc_constants {
fn O_TRUNC() -> int { ret 512; }
fn O_TEXT() -> int { ret 16384; }
fn O_BINARY() -> int { ret 32768; }
fn O_NOINHERIT() -> int { ret 0x0080; }
fn O_NOINHERIT() -> int { ret 128; }
fn S_IRUSR() -> uint {
ret 256u; // really _S_IREAD in win32
@ -59,12 +59,13 @@ fn pipe() -> {in: int, out: int} {
// which means to pass it to a subprocess they need to be duplicated
// first, as in rust_run_program.
let fds = {mutable in: 0, mutable out: 0};
let res = os::libc::_pipe(ptr::addr_of(fds.in), 1024u,
libc_constants::O_BINARY()
| libc_constants::O_NOINHERIT());
assert res == 0;
assert fds.in != -1 && fds.in != 0;
assert fds.out != -1 && fds.in != 0;
let res =
os::libc::_pipe(ptr::addr_of(fds.in), 1024u,
libc_constants::O_BINARY() |
libc_constants::O_NOINHERIT());
assert (res == 0);
assert (fds.in != -1 && fds.in != 0);
assert (fds.out != -1 && fds.in != 0);
ret {in: fds.in, out: fds.out};
}


@ -32,7 +32,7 @@ fn sub(t: str, n: int) -> str {
_ { ns = int::to_str(n, 10u) + " bottles"; }
}
while i < str::byte_len(t) {
if t.(i) == '#' as u8 { b += ns; } else { str::push_byte(b, t.(i)); }
if t[i] == '#' as u8 { b += ns; } else { str::push_byte(b, t[i]); }
i += 1u;
}
ret b;


@ -56,4 +56,4 @@ fn main() {
let b: bottle = multiple(99);
let running: bool = true;
while running { show(b); log ""; running = more(b); b = next(b); }
}
}


@ -32,7 +32,7 @@ fn sub(t: str, n: int) -> str {
_ { ns = int::to_str(n, 10u) + " bottles"; }
}
while i < str::byte_len(t) {
if t.(i) == '#' as u8 { b += ns; } else { str::push_byte(b, t.(i)); }
if t[i] == '#' as u8 { b += ns; } else { str::push_byte(b, t[i]); }
i += 1u;
}
ret b;
@ -45,4 +45,4 @@ fn main() {
while n > 0 { log sub(b1(), n); log sub(b2(), n - 1); log ""; n -= 1; }
log b7();
log sub(b8(), 99);
}
}


@ -36,4 +36,4 @@ fn main() {
log "";
}
multiple(99);
}
}


@ -22,4 +22,4 @@ fn main() {
// assert (ack(4,1) == 65533);
}
}


@ -28,8 +28,8 @@ fn main() {
} else { max_depth = n; }
let stretch_depth = max_depth + 1;
let stretch_tree = bottom_up_tree(0, stretch_depth);
log #fmt("stretch tree of depth %d\t check: %d", stretch_depth,
item_check(stretch_tree));
log #fmt["stretch tree of depth %d\t check: %d", stretch_depth,
item_check(stretch_tree)];
let long_lived_tree = bottom_up_tree(0, max_depth);
let depth = min_depth;
while depth <= max_depth {
@ -43,10 +43,10 @@ fn main() {
chk += item_check(temp_tree);
i += 1;
}
log #fmt("%d\t trees of depth %d\t check: %d", iterations * 2, depth,
chk);
log #fmt["%d\t trees of depth %d\t check: %d", iterations * 2, depth,
chk];
depth += 2;
}
log #fmt("long lived trees of depth %d\t check: %d", max_depth,
item_check(long_lived_tree));
}
log #fmt["long lived trees of depth %d\t check: %d", max_depth,
item_check(long_lived_tree)];
}

Some files were not shown because too many files have changed in this diff.